// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/dialect/pd_dialect.h"

#include <cstring>
#include <memory>

#include "paddle/fluid/dialect/pd_attribute.h"
// NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in
// paddle/fluid/dialect/CMakeLists.txt.
#include "paddle/fluid/dialect/legacy_pd_op.h"
#include "paddle/fluid/dialect/pd_op.h"
#include "paddle/fluid/dialect/pd_type.h"
#include "paddle/fluid/dialect/pd_type_storage.h"
#include "paddle/fluid/dialect/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/ir/dialect_interface.h"
#include "paddle/phi/core/dense_tensor.h"

namespace paddle {
namespace dialect {
std::shared_ptr<paddle::framework::Variable>
Z
zhangbo9674 已提交
32
ParameterConvertInterface::ParameterToVariable(ir::Parameter *parameter) {
33 34 35 36
  if (parameter->type().isa<DenseTensorType>()) {
    VLOG(4) << "Convert a DenseTensor Parameter to a variable.";
    std::shared_ptr<paddle::framework::Variable> var =
        std::make_shared<paddle::framework::Variable>();
Z
zhangbo9674 已提交
37
    phi::DenseTensor *tensor = var->GetMutable<phi::DenseTensor>();
38 39 40 41 42 43 44 45 46 47 48
    // Init DenseTensor
    auto dim = parameter->type().dyn_cast<DenseTensorType>().dim();
    phi::DenseTensorMeta meta(
        TransToPhiDataType(
            parameter->type().dyn_cast<DenseTensorType>().dtype()),
        phi::DDim(dim.data(), dim.size()),
        TransToPhiDataLayout(
            parameter->type().dyn_cast<DenseTensorType>().data_layout()),
        parameter->type().dyn_cast<DenseTensorType>().lod(),
        parameter->type().dyn_cast<DenseTensorType>().offset());
    tensor->set_meta(meta);
Z
zhangbo9674 已提交
49
    paddle::platform::DeviceContext *dev_ctx =
50 51 52 53 54 55 56 57 58 59 60 61 62 63 64
        paddle::platform::DeviceContextPool::Instance().Get(
            paddle::platform::CPUPlace());
    dev_ctx->Alloc(tensor,
                   TransToPhiDataType(
                       parameter->type().dyn_cast<DenseTensorType>().dtype()));
    memcpy(tensor->data(),
           parameter->data(),
           tensor->numel() * phi::SizeOf(tensor->dtype()));
    return var;
  } else {
    return nullptr;
  }
}

std::unique_ptr<ir::Parameter> ParameterConvertInterface::VariableToParameter(
Z
zhangbo9674 已提交
65
    paddle::framework::Variable *var) {
66
  if (var->IsType<phi::DenseTensor>()) {
Z
zhangbo9674 已提交
67
    phi::DenseTensor *tensor = var->GetMutable<phi::DenseTensor>();
68
    // Get Meta
Z
zhangbo9674 已提交
69
    ir::IrContext *ctx = ir::IrContext::Instance();
70 71 72 73 74 75 76 77 78
    ir::Type data_type = TransToIrDataType(tensor->dtype(), ctx);
    DenseTensorTypeStorage::Dim dims(tensor->dims().size());
    std::copy(tensor->dims().Get(),
              tensor->dims().Get() + tensor->dims().size(),
              dims.data());
    DenseTensorTypeStorage::DataLayout data_layout =
        TransToIrDataLayout(tensor->layout());
    DenseTensorTypeStorage::LoD lod = tensor->lod();
    size_t offset = tensor->meta().offset;
Z
zhangbo9674 已提交
79
    void *data = tensor->data();
80 81 82 83 84 85 86 87 88 89 90
    ir::Type dense_tensor_type =
        DenseTensorType::get(ctx, data_type, dims, data_layout, lod, offset);
    return std::make_unique<ir::Parameter>(
        data,
        tensor->numel() * phi::SizeOf(tensor->dtype()),
        dense_tensor_type);
  } else {
    return nullptr;
  }
}

Z
zhangbo9674 已提交
91
PaddleDialect::PaddleDialect(ir::IrContext *context)
92 93 94 95 96
    : ir::Dialect(name(), context, ir::TypeId::get<PaddleDialect>()) {
  initialize();
}

// Registers everything this dialect owns: its types, attributes, the
// generated op list from pd_op.h, the parameter-conversion interface, and
// the hand-written legacy ops declared in legacy_pd_op.h.
// Called once from the constructor.
void PaddleDialect::initialize() {
  RegisterTypes<paddle::dialect::DenseTensorType>();

  RegisterAttributes<paddle::dialect::IntArrayAttribute,
                     paddle::dialect::ScalarAttribute,
                     paddle::dialect::DataTypeAttribute,
                     paddle::dialect::PlaceAttribute,
                     paddle::dialect::DataLayoutAttribute>();

  // NOTE(zhangbo9674): GET_OP_LIST is defined in pd_op.h which is
  // generated by op_gen.py, see details in
  // paddle/fluid/dialect/CMakeLists.txt.
  RegisterOps<
#define GET_OP_LIST
#include "paddle/fluid/dialect/pd_op.h"  // NOLINT
      >();

  RegisterInterfaces<ParameterConvertInterface>();

  // Legacy (hand-written) ops, declared in legacy_pd_op.h.
  RegisterOps<Conv2DOp,
              FeedOp,
              BatchNormOp,
              BatchNormOp_,
              ElementwiseAddOp,
              Pool2DOp,
              FlattenContiguousRangeOp,
              MatmulV2Op,
              Reshape2Op,
              SoftmaxWithCrossEntropyOp,
              ReduceMeanOp,
              TopKV2Op,
              FillConstantOp,
              ReduceMeanGradOp,
              SoftmaxWithCrossEntropyGradOp,
              ElementwiseAddGradOp,
              MatmulV2GradOp,
              FlattenContiguousRangeGradOp,
              Pool2DGradOp,
              BatchNormGradOp,
              Conv2DGradOp,
              SumOp,
              FetchV2Op,
              AddOp,
              MatMulOp,
              ReshapeOp,
              CrossEntropyOp,
              TopKOp,
              FullOp,
              MeanOp,
              AddNOp,
              AddGradOp,
              MatMulGradOp,
              ReshapeGradOp,
              CrossEntropyGradOp,
              TopKGradOp>();
}

Z
zhangbo9674 已提交
152
void PaddleDialect::PrintType(ir::Type type, std::ostream &os) {
153 154 155
  DenseTensorType tensor_type = type.dyn_cast<DenseTensorType>();

  os << "tensor<";
Z
zhangbo9674 已提交
156
  auto &dims = tensor_type.dim();
157 158 159 160 161 162
  for (auto d : dims) {
    os << d;
    os << "x";
  }
  tensor_type.dtype().print(os);
  os << ">";
163 164 165 166
}

}  // namespace dialect
}  // namespace paddle