/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/core/compat/op_utils.h"
namespace phi {

// Maps the legacy fluid `reduce_sum` op onto the phi `sum` / `sum_raw`
// kernel signatures, depending on how the attributes are used.
KernelSignature ReduceSumOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.IsDenseTensorInput("X")) {
    bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
    // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
    // InferShape, so we must return the "sum_raw" KernelSignature.
    // And the InferMeta function(i.e. SumRawInferMeta) is accordance with
    // the "sum_raw" KernelSignature
    if (ctx.IsForInferShape() || reduce_all) {
      return KernelSignature("sum_raw",
                             {"X"},
                             {"dim", "keep_dim", "reduce_all", "out_dtype"},
                             {"Out"});
    }
    return KernelSignature(
        "sum", {"X"}, {"dim", "out_dtype", "keep_dim"}, {"Out"});
  }
  // Non-DenseTensor inputs (e.g. SelectedRows) have no phi mapping here.
  return KernelSignature("unregistered", {}, {}, {});
}

// Maps the legacy fluid `reduce_mean` op onto the phi `mean` / `mean_raw`
// kernel signatures.
KernelSignature ReduceMeanOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.IsDenseTensorInput("X")) {
    bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
    // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
    // InferShape, so we must return the "mean_raw" KernelSignature.
    // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
    // the "mean_raw" KernelSignature
    if (ctx.IsForInferShape() || reduce_all) {
      return KernelSignature(
          "mean_raw", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"});
    }
    return KernelSignature("mean", {"X"}, {"dim", "keep_dim"}, {"Out"});
  }
  // Non-DenseTensor inputs have no phi mapping here.
  return KernelSignature("unregistered", {}, {}, {});
}

54
KernelSignature ReduceProdOpArgumentMapping(const ArgumentMappingContext& ctx) {
55 56 57 58 59 60 61 62 63 64 65 66 67
  if (ctx.IsDenseTensorInput("X")) {
    bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
    // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
    // InferShape, so we must return the "max_raw" KernelSignature.
    // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
    // the "max_raw" KernelSignature
    if (ctx.IsForInferShape() || reduce_all) {
      return KernelSignature(
          "prod_raw", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"});
    }
    return KernelSignature("prod", {"X"}, {"dim", "keep_dim"}, {"Out"});
  }
  return KernelSignature("unregistered", {}, {}, {});
68 69
}

70 71 72 73 74 75
KernelSignature ReduceMaxOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.IsDenseTensorInput("X")) {
    bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
    // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
    // InferShape, so we must return the "max_raw" KernelSignature.
    // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
76
    // the "max_raw" KernelSignature
77 78 79 80 81 82 83 84 85
    if (ctx.IsForInferShape() || reduce_all) {
      return KernelSignature(
          "max_raw", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"});
    }
    return KernelSignature("max", {"X"}, {"dim", "keep_dim"}, {"Out"});
  }
  return KernelSignature("unregistered", {}, {}, {});
}

86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
KernelSignature ReduceAMaxOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.IsDenseTensorInput("X")) {
    bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
    // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
    // InferShape, so we must return the "max_raw" KernelSignature.
    // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
    // the "max_raw" KernelSignature
    if (ctx.IsForInferShape() || reduce_all) {
      return KernelSignature(
          "amax_raw", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"});
    }
    return KernelSignature("amax", {"X"}, {"dim", "keep_dim"}, {"Out"});
  }
  return KernelSignature("unregistered", {}, {}, {});
}

102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117
KernelSignature ReduceMinOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.IsDenseTensorInput("X")) {
    bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
    // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
    // InferShape, so we must return the "min_raw" KernelSignature.
    // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
    // the "min_raw" KernelSignature
    if (ctx.IsForInferShape() || reduce_all) {
      return KernelSignature(
          "min_raw", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"});
    }
    return KernelSignature("min", {"X"}, {"dim", "keep_dim"}, {"Out"});
  }
  return KernelSignature("unregistered", {}, {}, {});
}

118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133
KernelSignature ReduceAMinOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.IsDenseTensorInput("X")) {
    bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
    // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
    // InferShape, so we must return the "min_raw" KernelSignature.
    // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
    // the "min_raw" KernelSignature
    if (ctx.IsForInferShape() || reduce_all) {
      return KernelSignature(
          "amin_raw", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"});
    }
    return KernelSignature("amin", {"X"}, {"dim", "keep_dim"}, {"Out"});
  }
  return KernelSignature("unregistered", {}, {}, {});
}

134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161
KernelSignature ReduceAnyOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.IsDenseTensorInput("X")) {
    bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
    // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
    // InferShape, so we must return the "any_raw" KernelSignature.
    // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
    // the "any_raw" KernelSignature
    if (ctx.IsForInferShape() || reduce_all) {
      return KernelSignature(
          "any_raw", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"});
    }
    return KernelSignature("any", {"X"}, {"dim", "keep_dim"}, {"Out"});
  }
  return KernelSignature("unregistered", {}, {}, {});
}

// Maps the legacy fluid `reduce_all` op onto the phi `all` / `all_raw`
// kernel signatures.
KernelSignature ReduceAllOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (!ctx.IsDenseTensorInput("X")) {
    // Non-DenseTensor inputs have no phi mapping here.
    return KernelSignature("unregistered", {}, {}, {});
  }
  const bool is_reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
  // During InferShape the "reduce_all" attribute must be carried along, so
  // the raw signature is selected (mirrors the other reduce mappings).
  if (ctx.IsForInferShape() || is_reduce_all) {
    return KernelSignature(
        "all_raw", {"X"}, {"dim", "keep_dim", "reduce_all"}, {"Out"});
  }
  return KernelSignature("all", {"X"}, {"dim", "keep_dim"}, {"Out"});
}

C
chentianyu03 已提交
162 163
KernelSignature ReduceSumGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
164
  return KernelSignature("sum_grad",
165
                         {"X", "Out@GRAD"},
166
                         {"dim", "keep_dim", "reduce_all"},
167
                         {"X@GRAD"});
C
chentianyu03 已提交
168 169
}

170 171
KernelSignature ReduceMeanGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
172
  return KernelSignature("mean_grad",
173
                         {"X", "Out@GRAD"},
174
                         {"dim", "keep_dim", "reduce_all"},
175
                         {"X@GRAD"});
176 177 178 179
}

// Maps the legacy fluid `reduce_max_grad` op onto the phi `max_grad` kernel.
// Note: the forward output "Out" is also an input of the backward kernel.
KernelSignature ReduceMaxGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("max_grad",
                         {"X", "Out", "Out@GRAD"},
                         {"dim", "keep_dim", "reduce_all"},
                         {"X@GRAD"});
}

186 187 188 189 190 191 192 193
KernelSignature ReduceAMaxGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("amax_grad",
                         {"X", "Out", "Out@GRAD"},
                         {"dim", "keep_dim", "reduce_all"},
                         {"X@GRAD"});
}

194 195
KernelSignature ReduceMinGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
196
  return KernelSignature("min_grad",
197
                         {"X", "Out", "Out@GRAD"},
198
                         {"dim", "keep_dim", "reduce_all"},
199
                         {"X@GRAD"});
200 201
}

202 203 204 205 206 207 208 209
KernelSignature ReduceAMinGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("amin_grad",
                         {"X", "Out", "Out@GRAD"},
                         {"dim", "keep_dim", "reduce_all"},
                         {"X@GRAD"});
}

210 211
KernelSignature ReduceProdGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
212
  return KernelSignature("prod_grad",
213
                         {"X", "Out", "Out@GRAD"},
214
                         {"dim", "keep_dim", "reduce_all"},
215
                         {"X@GRAD"});
216 217
}

}  // namespace phi

// Register the mapping from the legacy fluid op names to the phi kernel base
// names.
PD_REGISTER_BASE_KERNEL_NAME(reduce_sum, sum);
PD_REGISTER_BASE_KERNEL_NAME(reduce_mean, mean);
PD_REGISTER_BASE_KERNEL_NAME(reduce_max, max);
PD_REGISTER_BASE_KERNEL_NAME(reduce_min, min);
PD_REGISTER_BASE_KERNEL_NAME(reduce_amax, amax);
PD_REGISTER_BASE_KERNEL_NAME(reduce_amin, amin);
PD_REGISTER_BASE_KERNEL_NAME(reduce_prod, prod);
PD_REGISTER_BASE_KERNEL_NAME(reduce_all, all);
PD_REGISTER_BASE_KERNEL_NAME(reduce_any, any);

PD_REGISTER_BASE_KERNEL_NAME(reduce_sum_grad, sum_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_mean_grad, mean_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_prod_grad, prod_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_max_grad, max_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_min_grad, min_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_amax_grad, amax_grad);
PD_REGISTER_BASE_KERNEL_NAME(reduce_amin_grad, amin_grad);

// Register the argument-mapping functions defined above for each op.
PD_REGISTER_ARG_MAPPING_FN(reduce_sum, phi::ReduceSumOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_mean, phi::ReduceMeanOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_prod, phi::ReduceProdOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_max, phi::ReduceMaxOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_amax, phi::ReduceAMaxOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_min, phi::ReduceMinOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_amin, phi::ReduceAMinOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_all, phi::ReduceAllOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_any, phi::ReduceAnyOpArgumentMapping);

PD_REGISTER_ARG_MAPPING_FN(reduce_sum_grad,
                           phi::ReduceSumGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_mean_grad,
                           phi::ReduceMeanGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_prod_grad,
                           phi::ReduceProdGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_max_grad,
                           phi::ReduceMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_amax_grad,
                           phi::ReduceAMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_min_grad,
                           phi::ReduceMinGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reduce_amin_grad,
                           phi::ReduceAMinGradOpArgumentMapping);