Unverified commit 85334b04 authored by Chen Weihang, committed by GitHub

[PTen] Add infermeta utils for registering infermeta functions (#39135)

* add infermeta utils for registering infermeta functions

* polish license format
Parent 36d9a364
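For context, a minimal sketch (not part of this patch) of what "registering an infermeta function" looks like with these utilities. UnchangedInferMeta and its body are illustrative assumptions, using the MetaTensor setters from meta_tensor.h:

// Hypothetical infermeta function: derives out's meta (dims/dtype) from x's,
// which is what "infer meta" means here.
void UnchangedInferMeta(const pten::MetaTensor& x, pten::MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
}

// PT_INFER_META (defined in infermeta_utils.h below) adapts any such typed
// signature to the uniform function pointer a registry can store.
using InferMetaFn = void (*)(pten::InferMetaContext*);
InferMetaFn fn = PT_INFER_META(UnchangedInferMeta);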
paddle/pten/core/CMakeLists.txt
@@ -19,6 +19,7 @@ cc_library(dense_tensor SRCS dense_tensor.cc DEPS convert_utils tensor_meta tens
 cc_library(pten_device_context SRCS device_context.cc DEPS tensor_base )
 cc_library(meta_tensor SRCS meta_tensor.cc DEPS tensor_base tensor_meta dense_tensor)
+cc_library(infermeta_utils SRCS infermeta_utils.cc DEPS meta_tensor)
 cc_test(unroll_array_ops_test SRCS unroll_array_ops_test.cc)
 cc_library(ddim SRCS ddim.cc DEPS eigen3 boost enforce)
...
paddle/pten/core/infermeta_utils.cc (new file)

/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pten/core/infermeta_utils.h"
namespace pten {
void InferMetaContext::SetMetaConfig(MetaConfig config) {
config_ = std::move(config);
}
void InferMetaContext::EmplaceBackInput(
std::shared_ptr<pten::MetaTensor> input) {
int index = inputs_.size();
inputs_.emplace_back(std::move(input));
input_range_.emplace_back(std::pair<int, int>(index, index + 1));
}
void InferMetaContext::EmplaceBackOutput(
std::shared_ptr<pten::MetaTensor> output) {
int index = outputs_.size();
outputs_.emplace_back(std::move(output));
output_range_.emplace_back(std::pair<int, int>(index, index + 1));
}
void InferMetaContext::EmplaceBackAttr(paddle::any attr) {
attrs_.emplace_back(std::move(attr));
}
void InferMetaContext::EmplaceBackInputs(
paddle::SmallVector<std::shared_ptr<pten::MetaTensor>> inputs) {
int index = inputs_.size();
input_range_.emplace_back(std::pair<int, int>(index, index + inputs.size()));
inputs_.insert(inputs_.end(),
std::make_move_iterator(inputs.begin()),
std::make_move_iterator(inputs.end()));
}
void InferMetaContext::EmplaceBackOutputs(
paddle::SmallVector<std::shared_ptr<pten::MetaTensor>> outputs) {
int index = outputs_.size();
output_range_.emplace_back(
std::pair<int, int>(index, index + outputs.size()));
outputs_.insert(outputs_.end(),
std::make_move_iterator(outputs.begin()),
std::make_move_iterator(outputs.end()));
}
const std::pair<int, int>& InferMetaContext::InputRangeAt(size_t idx) const {
return input_range_.at(idx);
}
const std::pair<int, int>& InferMetaContext::OutputRangeAt(size_t idx) const {
return output_range_.at(idx);
}
const MetaConfig& InferMetaContext::GetMetaConfig() const { return config_; }
const MetaTensor& InferMetaContext::InputAt(size_t idx) const {
return *inputs_.at(idx);
}
MetaTensor* InferMetaContext::MutableOutputAt(size_t idx) {
return outputs_.at(idx).get();
}
} // namespace pten
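A hedged usage sketch of the context (again not part of this patch): dense_x and dense_out stand for pre-existing DenseTensors, and MetaTensor is assumed constructible from a tensor pointer as in meta_tensor.h.

pten::InferMetaContext ctx(pten::MetaConfig(/*is_runtime=*/false));

// Arguments must be emplaced in the order the infermeta function declares
// them, since the range bookkeeping above is positional.
ctx.EmplaceBackInput(std::make_shared<pten::MetaTensor>(&dense_x));
ctx.EmplaceBackAttr(2.0f);  // type-erased into a paddle::any
ctx.EmplaceBackOutput(std::make_shared<pten::MetaTensor>(&dense_out));

const auto& range = ctx.InputRangeAt(0);  // first declared input -> {0, 1}
const pten::MetaTensor& x = ctx.InputAt(range.first);
float scale = ctx.AttrAt<float>(0);       // PADDLE_THROW on a bad cast
pten::MetaTensor* out = ctx.MutableOutputAt(0);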
paddle/pten/core/infermeta_utils.h (new file)

/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <memory>
#include <string>
#include <utility>

#include "paddle/pten/core/meta_tensor.h"
#include "paddle/utils/any.h"
#include "paddle/utils/small_vector.h"

// PADDLE_THROW / platform::errors are used in AttrAt below (assumed direct
// include; may also be available transitively).
#include "paddle/fluid/platform/enforce.h"

namespace pten {

// TODO(chenweihang): add other flags if needed
struct MetaConfig {
  bool is_runtime{true};

  MetaConfig() = default;

  // supporting implicit construction is easier to use
  MetaConfig(bool is_runtime) : is_runtime(is_runtime) {}  // NOLINT
};
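// For example (hypothetical call site), the implicit conversion above lets
// callers pass a bare bool wherever a MetaConfig parameter is expected:
//   SomeInferMeta(x, &out, /*is_runtime=*/false);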
class InferMetaContext {
 public:
  InferMetaContext() = default;
  explicit InferMetaContext(MetaConfig config) : config_(config) {}

  void SetMetaConfig(MetaConfig config);

  void EmplaceBackInput(std::shared_ptr<pten::MetaTensor> input);
  void EmplaceBackOutput(std::shared_ptr<pten::MetaTensor> output);
  void EmplaceBackAttr(paddle::any attr);

  void EmplaceBackInputs(
      paddle::SmallVector<std::shared_ptr<pten::MetaTensor>> inputs);
  void EmplaceBackOutputs(
      paddle::SmallVector<std::shared_ptr<pten::MetaTensor>> outputs);

  const std::pair<int, int>& InputRangeAt(size_t idx) const;
  const std::pair<int, int>& OutputRangeAt(size_t idx) const;

  const MetaConfig& GetMetaConfig() const;
  const MetaTensor& InputAt(size_t idx) const;
  MetaTensor* MutableOutputAt(size_t idx);

  template <typename AttrType>
  AttrType AttrAt(size_t idx) {
    try {
      return paddle::any_cast<AttrType>(attrs_.at(idx));
    } catch (paddle::bad_any_cast&) {
      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
          "Attribute cast error in InferMeta Context."));
    }
  }

 private:
  MetaConfig config_;

  // NOTE(chenweihang): Because MetaTensor is a base class and fresh MetaTensor
  // objects are created in each round, we have to use smart pointers here;
  // maybe we can implement a new InferMetaContext and a series of utils
  // specifically for fluid to avoid using shared_ptr.
  paddle::SmallVector<std::shared_ptr<pten::MetaTensor>> inputs_;
  paddle::SmallVector<std::shared_ptr<pten::MetaTensor>> outputs_;
  paddle::SmallVector<paddle::any> attrs_;

  paddle::SmallVector<std::pair<int, int>> input_range_;
  paddle::SmallVector<std::pair<int, int>> output_range_;
};
#define PT_INFER_META(...) \
  ::pten::InferMetaFnImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::Call

#define PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(attr_type)           \
  template <typename... Tail>                                                  \
  struct InferMetaFnCallHelper<attr_type, Tail...> {                           \
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs> \
    static void Call(InferMetaContext* ctx, PreviousArgs&... pargs) {          \
      static_assert(out_idx == 0,                                              \
                    "InferMeta's Attributes should appear before Outputs.");   \
      attr_type arg = ctx->AttrAt<attr_type>(attr_idx);                        \
      /* pass ctx through so the next helper can keep unpacking */             \
      InferMetaFnCallHelper<                                                   \
          Tail...>::template Call<in_idx, attr_idx + 1, out_idx>(ctx,          \
                                                                 pargs...,     \
                                                                 arg);         \
    }                                                                          \
  }
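// Usage note (assumption, not exercised in this patch): the macro above is
// meant to be expanded once per supported attribute type inside
// InferMetaFnImpl below, e.g.
//   PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(bool);
//   PT_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(float);
// so that infermeta functions can declare plain attribute parameters.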
template <typename T>
struct InferMetaTypeTag {};

template <typename Fn, Fn fn>
struct InferMetaFnImpl;

template <typename Return, typename... Args, Return (*infer_meta_fn)(Args...)>
struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
  static void Call(InferMetaContext* ctx) {
    InferMetaFnCallHelper<Args...,
                          InferMetaTypeTag<int>>::template Call<0, 0, 0>(ctx);
  }

 private:
  template <typename... RemainingArgs>
  struct InferMetaFnCallHelper;

  template <typename... Tail>
  struct InferMetaFnCallHelper<const MetaTensor&, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Call(InferMetaContext* ctx, PreviousArgs&... pargs) {
      static_assert(attr_idx == 0,
                    "InferMeta's Input should appear before Attributes.");
      static_assert(out_idx == 0,
                    "InferMeta's Input should appear before Outputs.");
      const std::pair<int, int> range = ctx->InputRangeAt(in_idx);
      const MetaTensor& arg = ctx->InputAt(range.first);
      InferMetaFnCallHelper<
          Tail...>::template Call<in_idx + 1, attr_idx, out_idx>(ctx,
                                                                 pargs...,
                                                                 arg);
    }
  };

  // TODO(chenweihang): support vector<MetaTensor> input later

  template <typename... Tail>
  struct InferMetaFnCallHelper<MetaTensor*, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Call(InferMetaContext* ctx, PreviousArgs&... pargs) {
      const std::pair<int, int> range = ctx->OutputRangeAt(out_idx);
      MetaTensor* arg = ctx->MutableOutputAt(range.first);
      InferMetaFnCallHelper<
          Tail...>::template Call<in_idx, attr_idx, out_idx + 1>(ctx,
                                                                 pargs...,
                                                                 arg);
    }
  };

  // TODO(chenweihang): support vector<MetaTensor> output later

  template <typename... Tail>
  struct InferMetaFnCallHelper<MetaConfig, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Call(InferMetaContext* ctx, PreviousArgs&... pargs) {
      const MetaConfig& arg = ctx->GetMetaConfig();
      InferMetaFnCallHelper<Tail...>::template Call<in_idx, attr_idx, out_idx>(
          ctx, pargs..., arg);
    }
  };

  /* End case */
  template <typename T>
  struct InferMetaFnCallHelper<InferMetaTypeTag<T>> {
    template <int in_idx, int attr_idx, int out_idx>
    static void Call(InferMetaContext* ctx, Args&... args) {
      return infer_meta_fn(args...);
    }
  };
};

}  // namespace pten
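To make the template recursion concrete, here is a hedged trace (paraphrased, not compiler output) of a call through the helpers for the two-parameter UnchangedInferMeta sketched near the top:

// PT_INFER_META(UnchangedInferMeta)(&ctx) resolves to
// InferMetaFnImpl<void (*)(const MetaTensor&, MetaTensor*),
//                 &UnchangedInferMeta>::Call(&ctx), which unrolls as:
//
//   Call<0, 0, 0>(ctx)          // const MetaTensor& helper
//     -> x = ctx->InputAt(ctx->InputRangeAt(0).first)
//   Call<1, 0, 0>(ctx, x)       // MetaTensor* helper
//     -> out = ctx->MutableOutputAt(ctx->OutputRangeAt(0).first)
//   Call<1, 0, 1>(ctx, x, out)  // end case, matched by InferMetaTypeTag<int>
//     -> infer_meta_fn(x, out)  // the actual UnchangedInferMeta runs
//
// The static_asserts enforce the Inputs -> Attributes -> Outputs parameter
// ordering at compile time.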