未验证 提交 8e9e0659 编写于 作者: H hong 提交者: GitHub

[NewIR]Add feed with place op (#55343)

* add feed with place op

* remove useless unit test

* update mkldnn

* update

* add enable_static

* remove useless test case

* register int and double type

* fix bug
上级 927c0d50
......@@ -183,14 +183,6 @@ execute_process(
--dygraph_api_header_path ${dygraph_api_header_file_tmp}
--dygraph_api_source_path ${dygraph_api_source_file_tmp})
# generate wrapped infermeta
# Run the wrapped-infermeta code generator: reads the (legacy) api yaml
# files and emits the wrapped infermeta header/source pair consumed below.
execute_process(
  COMMAND
    ${PYTHON_EXECUTABLE} ${wrapped_infermeta_gen_file} --api_yaml_path
    ${api_yaml_file} ${legacy_api_yaml_file} --wrapped_infermeta_header_path
    ${wrapped_infermeta_header_file} --wrapped_infermeta_source_path
    ${wrapped_infermeta_source_file})
# generate tensor and tensor operants file
message("create or copy auto-geneated tensor files")
execute_process(
......@@ -264,7 +256,6 @@ collect_srcs(
collect_generated_srcs(
api_srcs
SRCS
${wrapped_infermeta_source_file}
${api_source_file}
${bw_api_source_file}
${fused_api_source_file}
......
......@@ -244,6 +244,18 @@
param : [num_rows, num_columns, dtype]
data_type : dtype
# feed_with_place: NewIR feed op. Produces a single output tensor whose
# dtype comes from the `dtype` attribute and whose kernel backend is
# selected by `place` (see `backend : place` below). `index` identifies
# the feed slot; the infer_meta/kernel only receive [index, dtype].
- op : feed_with_place
  args : (int64_t index, DataType dtype, Place place)
  output : Tensor(out)
  infer_meta :
    func : FeedWithPlaceInferMeta
    param : [index, dtype]
  kernel:
    func : feed_with_place
    param : [index, dtype]
    data_type : dtype
    backend : place
- op : floor_divide
args : (Tensor x, Tensor y, int axis = -1)
output : Tensor(out)
......
......@@ -82,6 +82,10 @@ void EyeInferMeta(const Scalar& num_rows,
out->set_dtype(dtype);
}
// InferMeta stub for the NewIR feed_with_place op: intentionally a no-op.
// NOTE(review): neither dims nor dtype are set on `out`, and `data_type`
// is unused — presumably the executor fills in the output meta at feed
// time; confirm against the NewIR feed lowering before relying on this.
void FeedWithPlaceInferMeta(int64_t index,
                            phi::DataType data_type,
                            MetaTensor* out) {}
void GaussianInferMeta(const IntArray& shape,
float mean,
float std,
......
......@@ -52,6 +52,10 @@ void EyeInferMeta(const Scalar& num_rows,
MetaTensor* out,
MetaConfig config = MetaConfig());
// InferMeta for the NewIR feed_with_place op (currently an empty stub —
// see the definition in unary.cc).
void FeedWithPlaceInferMeta(int64_t index,
                            phi::DataType data_type,
                            MetaTensor* out);
void GaussianInferMeta(const IntArray& shape,
float mean,
float std,
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/feed_with_place_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {

// CPU kernel stub for the NewIR feed_with_place op. The body is
// intentionally empty: `out` is not touched and `index`/`data_type` are
// unused here. NOTE(review): presumably the NewIR executor performs the
// actual feed and this kernel only exists so the op resolves to a
// registered kernel — confirm before adding logic.
template <typename T, typename Context>
void FeedWithPlaceKernel(const Context& ctx,
                         int64_t index,
                         phi::DataType data_type,
                         DenseTensor* out) {}

}  // namespace phi
// Register the (empty) CPU feed_with_place kernel for float, int32_t,
// int64_t and double, all layouts.
PD_REGISTER_KERNEL(feed_with_place,
                   CPU,
                   ALL_LAYOUT,
                   phi::FeedWithPlaceKernel,
                   float,
                   int32_t,
                   int64_t,
                   double) {}
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {

// Kernel declaration for the NewIR feed_with_place op; the CPU
// definition is an intentionally empty stub (see the .cc file).
template <typename T, typename Context>
void FeedWithPlaceKernel(const Context& ctx,
                         int64_t index,
                         phi::DataType data_type,
                         DenseTensor* out);

}  // namespace phi
......@@ -17,26 +17,11 @@ limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/generated.h"
#include "paddle/phi/infermeta/unary.h"
namespace phi {
namespace tests {
// ScaleInferMeta must propagate the input dims to the output unchanged
// (scale is elementwise), for a 3x4 input.
TEST(WrappedInferMeta, Scale) {
  phi::DenseTensor input_tensor;
  input_tensor.Resize(phi::make_ddim({3, 4}));
  phi::MetaTensor input_meta(&input_tensor);

  phi::DenseTensor output_tensor;
  phi::MetaTensor output_meta(&output_tensor);
  phi::ScaleInferMeta(input_meta, 0, 0, false, &output_meta);

  const auto& out_dims = output_tensor.dims();
  const auto& in_dims = input_tensor.dims();
  EXPECT_EQ(out_dims.size(), in_dims.size());
  EXPECT_EQ(out_dims[0], in_dims[0]);
  EXPECT_EQ(out_dims[1], in_dims[1]);
}
TEST(MetaFnFactory, InferMetaFnExists) {
phi::DenseTensor dense_x;
dense_x.Resize(phi::make_ddim({3, 4}));
......@@ -45,50 +30,6 @@ TEST(MetaFnFactory, InferMetaFnExists) {
phi::DenseTensor dense_out1;
phi::MetaTensor meta_out(&dense_out1);
phi::UnchangedInferMeta(meta_x, &meta_out);
auto shared_meat_x = phi::MetaTensor(&dense_x);
phi::DenseTensor dense_out2;
auto shared_meta_out = phi::MetaTensor(&dense_out2);
phi::InferMetaContext ctx;
ctx.EmplaceBackInput(shared_meat_x);
ctx.EmplaceBackOutput(shared_meta_out);
ctx.SetMetaConfig({/*is_runtime =*/true, /*is_run_mkldnn_kernel=*/false});
phi::MetaFnFactory::Instance().Get("sign")(&ctx);
EXPECT_EQ(dense_out1.dims().size(), dense_out2.dims().size());
EXPECT_EQ(dense_out1.dims()[0], dense_out2.dims()[0]);
EXPECT_EQ(dense_out1.dims()[1], dense_out2.dims()[1]);
}
// Exercises the registered "split" infer-meta function through the
// InferMetaContext path: a [4, 10] tensor split into sections {2, 2}
// along axis 0 must yield two [2, 10] outputs.
TEST(MetaFnFactory, SplitInferMetaFn) {
  phi::DenseTensor input;
  input.Resize({4, 10});
  phi::MetaTensor input_meta(&input);
  auto ctx_input = phi::MetaTensor(&input);

  phi::DenseTensor first_out;
  phi::DenseTensor second_out;
  paddle::small_vector<phi::MetaTensor, kOutputSmallVectorSize> outputs;
  outputs.emplace_back(phi::MetaTensor(&first_out));
  outputs.emplace_back(phi::MetaTensor(&second_out));

  phi::InferMetaContext ctx;
  ctx.EmplaceBackInput(ctx_input);
  IntArray sections{2, 2};
  Scalar split_axis{0};
  ctx.EmplaceBackAttr(sections);
  ctx.EmplaceBackAttr(split_axis);
  ctx.EmplaceBackOutputs(outputs);
  ctx.SetMetaConfig({/*is_runtime =*/true, /*is_run_mkldnn_kernel=*/false});
  phi::MetaFnFactory::Instance().Get("split")(&ctx);

  for (phi::DenseTensor* out : {&first_out, &second_out}) {
    ASSERT_EQ(out->dims().size(), 2);
    ASSERT_EQ(out->dims()[0], 2);
    ASSERT_EQ(out->dims()[1], 10);
  }
}
void TestEmptyVectorInputInferMeta(const std::vector<const MetaTensor*>& inputs,
......
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
from paddle.fluid.layer_helper import LayerHelper
def feed_with_place():
    """Append a bare ``feed_with_place`` op to the current static program.

    Creates a float32 output variable, wires it as the op's ``out``
    output, and fixes the ``index``/``dtype``/``place`` attributes to 0.
    The op takes no inputs.

    Returns:
        Variable: the op's output variable.
    """
    helper = LayerHelper('feed_with_place', **locals())
    result = helper.create_variable_for_type_inference('float32')
    op_attrs = {'index': 0, 'dtype': 0, 'place': 0}
    helper.append_op(
        type='feed_with_place',
        inputs={},
        outputs={'out': result},
        attrs=op_attrs,
    )
    return result
class TestNewIr(unittest.TestCase):
    """Smoke test: a feed_with_place op can be added to a static program."""

    def test_with_new_ir(self):
        # Only program construction is exercised here — nothing is
        # fetched or executed.
        paddle.enable_static()
        cpu_place = paddle.CPUPlace()
        executor = paddle.static.Executor(cpu_place)

        program = paddle.static.Program()
        scope = paddle.static.Scope()
        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(program):
                result = feed_with_place()
if __name__ == "__main__":
    # Static-graph mode is required by the program_guard-based test above.
    paddle.enable_static()
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册