// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <functional>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "graph/buffer.h"
#include "graph/graph.h"
#include "graph/model.h"
#include "graph/op/all_ops.h"
#include "graph/operator.h"
#include "graph/operator_reg.h"
#include "lite/core/op_lite.h"
#include "lite/utils/macros.h"

// Extended ops based on HIAI DDK
namespace ge {
/*
 * Pads a tensor.
 * <Input>
 *    x : the input tensor
 *    padding : the padding tensor, must be 2-D
 *    constant_values : the constant value used for padding, must be a scalar
 * <Output>
 *    y : the output tensor
 * <Attr>
 *    mode : 0: CONSTANT, 1: REFLECT, 2: SYMMETRIC, 3: EDGE.
 * <Added in HiAI version>
 *    100.320.010.010
 */
REG_OP(Pad)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32}))
    .INPUT(padding, TensorType({DT_INT32}))
    .OPTIONAL_INPUT(constant_values, TensorType({DT_INT32, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32}))
    .ATTR(mode, AttrValue::INT{0})
    .OP_END()
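
    // A minimal usage sketch, assuming REG_OP generates the usual input/attr
    // setters (set_input_x, set_input_padding, set_attr_mode, ...); x_op and
    // padding_op are hypothetical upstream operator nodes:
    //
    //   ge::op::Pad pad_op("pad");             // extended HiAI Pad operator
    //   pad_op.set_input_x(x_op);              // input tensor operator
    //   pad_op.set_input_padding(padding_op);  // 2-D paddings, usually a Const op
    //   pad_op.set_attr_mode(0);               // 0: CONSTANT padding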

    /*
     * The operation pads input according to the paddings and constant_values.
     * <Input>
     *    x : The input tensor.
     *    paddings : The padding sizes to add before and after each dimension
     *               of the input tensor x, must be a Const-OP.
     *    constant_values : A tensor of the same type as x that indicates the
     *                      value to use for padding input, must be a Const-OP.
     * <Output>
     *    y : The output tensor.
     * <Added in HiAI version>
     *    100.320.010.010
     */
    REG_OP(PadV2)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32}))
    .INPUT(paddings, TensorType({DT_INT32}))
    .INPUT(constant_values, TensorType({DT_FLOAT, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32}))
    .OP_END()

    /*
     * Computes instance normalization
     * <Input>
     *    x : Input tensor which supports 4D dimension format.
     *    scale : A tensor, multiplied with the normalized result
     *    bias : A tensor, added to the normalized result
     * <Output>
     *    y : Output tensor
     * <Attr>
     *    reduction_indices : The dimensions to reduce
     *    epsilon : A very small float number used to avoid dividing by zero.
     * <Added in HiAI version>
     *    100.320.010.010
     */
    REG_OP(InstanceNorm)
    .INPUT(x, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(bias, TensorType({DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(reduction_indices, AttrValue::LIST_INT)
    .ATTR(epsilon, AttrValue::FLOAT{1e-7f})
    .OP_END()
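
    // For reference, instance normalization (standard formulation) computes,
    // per sample and per channel over the spatial dimensions:
    //   y = scale * (x - mean(x)) / sqrt(var(x) + epsilon) + bias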

    /*
     * Multiplies slices of two tensors in batches.
     * <Input>
     *      x : The input tensor
     *      y : The input tensor
     * <Output>
     *      z : The output tensor
     * <Attr>
     *      adj_x : If adj_x is true, the input tensor x is transposed;
     *              otherwise it is not. Default is false (the current version
     *              only supports false).
     *      adj_y : If adj_y is true, the input tensor y is transposed;
     *              otherwise it is not. Default is false.
     * <Added in HiAI version>
     *      100.320.010.010
     */
    REG_OP(BatchMatMul)
    .INPUT(x, TensorType({DT_FLOAT}))
    .INPUT(y, TensorType({DT_FLOAT}))
    .OUTPUT(z, TensorType({DT_FLOAT}))
    .ATTR(adj_x, AttrValue::BOOL{false})
    .ATTR(adj_y, AttrValue::BOOL{false})
    .OP_END()
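
    // For reference, with adj_x and adj_y false this follows the standard
    // batched matrix multiplication, slice by slice along the batch dimension:
    //   z[b, :, :] = x[b, :, :] * y[b, :, :]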

}  // namespace ge

namespace paddle {
namespace lite {
namespace subgraph {
namespace npu {

// Type/tensor converters from Paddle type/tensor to HiAI type/tensor
bool HasInputArg(const OpInfo* op_info,
                 const Scope* scope,
                 const std::string& argname);

ge::DataType CvtPrecisionType(PrecisionType itype);

ge::Format CvtDataLayoutType(DataLayoutType itype);

// Pad the shape to 4 dimensions (NCHW) for HiAI
std::vector<int64_t> CvtShape(const std::vector<int64_t>& in_shape);

std::vector<int64_t> CvtShape(const DDim& in_dims);
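// For example, assuming missing leading dimensions are filled with 1:
//   CvtShape({64, 32}) -> {1, 1, 64, 32}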

ge::TensorPtr CvtTensor(const Tensor& in_tensor,
                        std::vector<int64_t> out_shape = {},
                        DataLayoutType in_layout = DATALAYOUT(kNCHW));
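
// A minimal usage sketch (assumes a ge::op::Const node exposing a "value"
// attr setter; weight_tensor is a hypothetical persistable Paddle-Lite
// tensor holding the weights of a Const op):
//   ge::TensorPtr weight = CvtTensor(weight_tensor);
//   const_op.set_attr_value(weight);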

int CvtActMode(std::string act_type);

}  // namespace npu
}  // namespace subgraph
}  // namespace lite
}  // namespace paddle