Unverified commit 2cebcf4a authored by Chen Weihang and committed by GitHub

Unify utils naming style (#42264)

* unify utils naming style

* polish details
Parent 4c80385a
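
Before the diff, a minimal usage sketch of the renamed utilities (the header paths and the snippet itself are illustrative assumptions, not part of this commit):

// Illustrative only: shows the snake_case names introduced by this commit
// (paddle::small_vector, paddle::array_ref, paddle::make_array_ref).
// The include paths are assumptions based on the paddle/utils layout.
#include <cassert>
#include <string>

#include "paddle/utils/array_ref.h"
#include "paddle/utils/small_vector.h"

int main() {
  // small_vector keeps up to N elements inline before falling back to the heap.
  paddle::small_vector<std::string, 4> names;
  names.emplace_back("Param");
  names.emplace_back("Grad");

  // array_ref is a non-owning view over contiguous elements.
  paddle::array_ref<std::string> view = paddle::make_array_ref(names);
  assert(view.size() == names.size());
  assert(view[0] == "Param");
  return 0;
}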
......@@ -323,7 +323,7 @@ void CompatInferMetaContext::EmplaceBackOutput(CompatMetaTensor output) {
}
void CompatInferMetaContext::EmplaceBackInputs(
paddle::SmallVector<CompatMetaTensor, phi::kInputSmallVectorSize> inputs) {
paddle::small_vector<CompatMetaTensor, phi::kInputSmallVectorSize> inputs) {
int index = compat_inputs_.size();
input_range_.emplace_back(std::pair<int, int>(index, index + inputs.size()));
compat_inputs_.insert(compat_inputs_.end(),
......@@ -332,7 +332,7 @@ void CompatInferMetaContext::EmplaceBackInputs(
}
void CompatInferMetaContext::EmplaceBackOutputs(
paddle::SmallVector<CompatMetaTensor, phi::kOutputSmallVectorSize>
paddle::small_vector<CompatMetaTensor, phi::kOutputSmallVectorSize>
outputs) {
int index = compat_outputs_.size();
output_range_.emplace_back(
......@@ -431,7 +431,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
infer_meta_context.EmplaceBackInput(
std::move(CompatMetaTensor(input_var[0], ctx->IsRuntime())));
} else {
paddle::SmallVector<CompatMetaTensor, phi::kInputSmallVectorSize>
paddle::small_vector<CompatMetaTensor, phi::kInputSmallVectorSize>
inputs;
for (const auto& in : input_var) {
inputs.emplace_back(
......@@ -672,7 +672,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
infer_meta_context.EmplaceBackOutput(
std::move(CompatMetaTensor(output_var[0], ctx->IsRuntime())));
} else {
paddle::SmallVector<CompatMetaTensor, phi::kOutputSmallVectorSize>
paddle::small_vector<CompatMetaTensor, phi::kOutputSmallVectorSize>
outputs;
for (const auto& out : output_var) {
if (ctx->IsRuntime()) {
......
......@@ -100,9 +100,10 @@ class CompatInferMetaContext : public phi::InferMetaContext {
void EmplaceBackOutput(CompatMetaTensor output);
void EmplaceBackInputs(
paddle::SmallVector<CompatMetaTensor, phi::kInputSmallVectorSize> inputs);
paddle::small_vector<CompatMetaTensor, phi::kInputSmallVectorSize>
inputs);
void EmplaceBackOutputs(
paddle::SmallVector<CompatMetaTensor, phi::kOutputSmallVectorSize>
paddle::small_vector<CompatMetaTensor, phi::kOutputSmallVectorSize>
outputs);
const phi::MetaTensor& InputAt(size_t idx) const override;
......@@ -121,9 +122,9 @@ class CompatInferMetaContext : public phi::InferMetaContext {
virtual ~CompatInferMetaContext() = default;
private:
paddle::SmallVector<CompatMetaTensor, phi::kInputSmallVectorSize>
paddle::small_vector<CompatMetaTensor, phi::kInputSmallVectorSize>
compat_inputs_;
paddle::SmallVector<CompatMetaTensor, phi::kOutputSmallVectorSize>
paddle::small_vector<CompatMetaTensor, phi::kOutputSmallVectorSize>
compat_outputs_;
};
......
......@@ -328,21 +328,21 @@ bool InterpretercoreInferShapeContext::IsRunMKLDNNKernel() const {
}
// TODO(paddle-dev): Can this be template?
paddle::SmallVector<InferShapeVarPtr, phi::kInputSmallVectorSize>
paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
InterpretercoreInferShapeContext::GetInputVarPtrs(
const std::string& name) const {
const std::vector<Variable*>& vars = InputVars(name);
paddle::SmallVector<InferShapeVarPtr, phi::kInputSmallVectorSize> res;
paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize> res;
res.reserve(vars.size());
res.insert(res.begin(), vars.begin(), vars.end());
return res;
}
paddle::SmallVector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
InterpretercoreInferShapeContext::GetOutputVarPtrs(
const std::string& name) const {
const std::vector<Variable*>& vars = OutputVars(name);
paddle::SmallVector<InferShapeVarPtr, phi::kOutputSmallVectorSize> res;
paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize> res;
res.reserve(vars.size());
res.insert(res.begin(), vars.begin(), vars.end());
return res;
......
......@@ -90,10 +90,10 @@ class InterpretercoreInferShapeContext : public InferShapeContext {
bool IsRunMKLDNNKernel() const override;
// TODO(paddle-dev): Can this be template?
paddle::SmallVector<InferShapeVarPtr, phi::kInputSmallVectorSize>
paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
GetInputVarPtrs(const std::string& name) const override;
paddle::SmallVector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
GetOutputVarPtrs(const std::string& name) const override;
DDim GetInputDim(const std::string& name) const override;
......
......@@ -202,10 +202,10 @@ class CompileTimeInferShapeContext : public InferShapeContext {
}
}
paddle::SmallVector<InferShapeVarPtr, phi::kInputSmallVectorSize>
paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
GetInputVarPtrs(const std::string &name) const override {
const std::vector<std::string> arg_names = Inputs(name);
paddle::SmallVector<InferShapeVarPtr, phi::kInputSmallVectorSize> res;
paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize> res;
res.reserve(arg_names.size());
std::transform(arg_names.begin(), arg_names.end(), std::back_inserter(res),
[this](const std::string &name) {
......@@ -214,10 +214,10 @@ class CompileTimeInferShapeContext : public InferShapeContext {
return res;
}
paddle::SmallVector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
GetOutputVarPtrs(const std::string &name) const override {
const std::vector<std::string> arg_names = Outputs(name);
paddle::SmallVector<InferShapeVarPtr, phi::kOutputSmallVectorSize> res;
paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize> res;
res.reserve(arg_names.size());
std::transform(arg_names.begin(), arg_names.end(), std::back_inserter(res),
[this](const std::string &name) {
......
......@@ -946,19 +946,19 @@ class RuntimeInferShapeContext : public InferShapeContext {
}
// TODO(paddle-dev): Can this be template?
paddle::SmallVector<InferShapeVarPtr, phi::kInputSmallVectorSize>
paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
GetInputVarPtrs(const std::string& name) const override {
const std::vector<Variable*>& vars = InputVars(name);
paddle::SmallVector<InferShapeVarPtr, phi::kInputSmallVectorSize> res;
paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize> res;
res.reserve(vars.size());
res.insert(res.begin(), vars.begin(), vars.end());
return res;
}
paddle::SmallVector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
GetOutputVarPtrs(const std::string& name) const override {
const std::vector<Variable*>& vars = OutputVars(name);
paddle::SmallVector<InferShapeVarPtr, phi::kOutputSmallVectorSize> res;
paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize> res;
res.reserve(vars.size());
res.insert(res.begin(), vars.begin(), vars.end());
return res;
......@@ -2344,7 +2344,7 @@ void OperatorWithKernel::BuildPhiKernelContext(
tensor_in = &(var->Get<phi::SelectedRows>());
pt_kernel_context->EmplaceBackInputWithoutSetRange(tensor_in);
} else if (var->IsType<framework::LoDTensorArray>()) {
paddle::SmallVector<const phi::TensorBase*> tensor_vector;
paddle::small_vector<const phi::TensorBase*> tensor_vector;
auto& tensor_array = var->Get<framework::LoDTensorArray>();
for (auto& t : tensor_array) {
tensor_vector.emplace_back(&t);
......@@ -2393,7 +2393,7 @@ void OperatorWithKernel::BuildPhiKernelContext(
tensor_out = var->template GetMutable<phi::SelectedRows>();
pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out);
} else if (var->template IsType<framework::LoDTensorArray>()) {
paddle::SmallVector<phi::TensorBase*> tensor_vector;
paddle::small_vector<phi::TensorBase*> tensor_vector;
auto* tensor_array =
var->template GetMutable<framework::LoDTensorArray>();
// Note: If the input LoDTensorArray size is 0, the output
......
......@@ -333,8 +333,8 @@ class ExecutionContext {
return it->second;
}
virtual paddle::SmallVector<const std::string*> InNameList() const {
paddle::SmallVector<const std::string*> vec_temp;
virtual paddle::small_vector<const std::string*> InNameList() const {
paddle::small_vector<const std::string*> vec_temp;
vec_temp.reserve(ctx_.inputs.size());
for (auto& input : ctx_.inputs) {
......
......@@ -41,9 +41,9 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker {
~KernelArgsNameMakerByOpProto() {}
const paddle::SmallVector<const char*>& GetInputArgsNames() override;
const paddle::SmallVector<const char*>& GetOutputArgsNames() override;
const paddle::SmallVector<const char*>& GetAttrsArgsNames() override;
const paddle::small_vector<const char*>& GetInputArgsNames() override;
const paddle::small_vector<const char*>& GetOutputArgsNames() override;
const paddle::small_vector<const char*>& GetAttrsArgsNames() override;
phi::KernelSignature GetKernelSignature();
......@@ -53,9 +53,9 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker {
private:
const framework::proto::OpProto* op_proto_;
paddle::SmallVector<const char*> input_names_;
paddle::SmallVector<const char*> output_names_;
paddle::SmallVector<const char*> attr_names_;
paddle::small_vector<const char*> input_names_;
paddle::small_vector<const char*> output_names_;
paddle::small_vector<const char*> attr_names_;
};
OpKernelType TransPhiKernelKeyToOpKernelType(const phi::KernelKey& kernel_key) {
......@@ -149,7 +149,7 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key,
return phi::KernelKey();
}
const paddle::SmallVector<const char*>&
const paddle::small_vector<const char*>&
KernelArgsNameMakerByOpProto::GetInputArgsNames() {
for (int i = 0; i < op_proto_->inputs_size(); ++i) {
auto& in = op_proto_->inputs()[i];
......@@ -174,7 +174,7 @@ KernelArgsNameMakerByOpProto::GetInputArgsNames() {
return input_names_;
}
const paddle::SmallVector<const char*>&
const paddle::small_vector<const char*>&
KernelArgsNameMakerByOpProto::GetOutputArgsNames() {
for (int i = 0; i < op_proto_->outputs_size(); ++i) {
auto& out = op_proto_->outputs()[i];
......@@ -194,7 +194,7 @@ KernelArgsNameMakerByOpProto::GetOutputArgsNames() {
return output_names_;
}
const paddle::SmallVector<const char*>&
const paddle::small_vector<const char*>&
KernelArgsNameMakerByOpProto::GetAttrsArgsNames() {
for (int i = 0; i < op_proto_->attrs_size(); ++i) {
auto& attr = op_proto_->attrs()[i];
......
......@@ -53,9 +53,9 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key,
class KernelArgsNameMaker {
public:
virtual ~KernelArgsNameMaker() {}
virtual const paddle::SmallVector<const char*>& GetInputArgsNames() = 0;
virtual const paddle::SmallVector<const char*>& GetOutputArgsNames() = 0;
virtual const paddle::SmallVector<const char*>& GetAttrsArgsNames() = 0;
virtual const paddle::small_vector<const char*>& GetInputArgsNames() = 0;
virtual const paddle::small_vector<const char*>& GetOutputArgsNames() = 0;
virtual const paddle::small_vector<const char*>& GetAttrsArgsNames() = 0;
};
void InitDefaultKernelSignatureMap();
......
......@@ -110,9 +110,9 @@ class InferShapeContext {
virtual bool IsRunMKLDNNKernel() const = 0;
virtual paddle::SmallVector<InferShapeVarPtr, phi::kInputSmallVectorSize>
virtual paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
GetInputVarPtrs(const std::string &name) const = 0;
virtual paddle::SmallVector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
virtual paddle::small_vector<InferShapeVarPtr, phi::kOutputSmallVectorSize>
GetOutputVarPtrs(const std::string &name) const = 0;
virtual const phi::ArgumentMappingFn *GetPhiArgumentMappingFn() const = 0;
......
......@@ -117,8 +117,8 @@ class DygraphExecutionContext : public framework::ExecutionContext {
return it->second;
}
paddle::SmallVector<const std::string*> InNameList() const override {
paddle::SmallVector<const std::string*> vec_temp;
paddle::small_vector<const std::string*> InNameList() const override {
paddle::small_vector<const std::string*> vec_temp;
vec_temp.reserve(var_map_in_.size());
for (auto& v : var_map_in_) {
......
......@@ -239,9 +239,10 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
(op_kernel_type_->data_layout_ == framework::DataLayout::kMKLDNN));
}
paddle::SmallVector<framework::InferShapeVarPtr, phi::kInputSmallVectorSize>
paddle::small_vector<framework::InferShapeVarPtr, phi::kInputSmallVectorSize>
GetInputVarPtrs(const std::string& name) const override {
paddle::SmallVector<framework::InferShapeVarPtr, phi::kInputSmallVectorSize>
paddle::small_vector<framework::InferShapeVarPtr,
phi::kInputSmallVectorSize>
res;
auto it = var_map_in_->find(name);
PADDLE_ENFORCE_NE(
......@@ -253,9 +254,9 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
return res;
}
paddle::SmallVector<framework::InferShapeVarPtr, phi::kOutputSmallVectorSize>
paddle::small_vector<framework::InferShapeVarPtr, phi::kOutputSmallVectorSize>
GetOutputVarPtrs(const std::string& name) const override {
paddle::SmallVector<framework::InferShapeVarPtr,
paddle::small_vector<framework::InferShapeVarPtr,
phi::kOutputSmallVectorSize>
res;
auto it = var_map_out_->find(name);
......
......@@ -311,7 +311,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature,
tensor_in = &(var.template Get<phi::SelectedRows>());
kernel_ctx->EmplaceBackInputWithoutSetRange(tensor_in);
} else if (var.template IsType<framework::LoDTensorArray>()) {
paddle::SmallVector<const phi::TensorBase*> tensor_vector;
paddle::small_vector<const phi::TensorBase*> tensor_vector;
auto& tensor_array = var.template Get<framework::LoDTensorArray>();
for (auto& t : tensor_array) {
tensor_vector.emplace_back(&t);
......@@ -357,7 +357,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature,
tensor_out = var->template GetMutable<phi::SelectedRows>();
kernel_ctx->EmplaceBackOutputWithoutSetRange(tensor_out);
} else if (var->template IsType<framework::LoDTensorArray>()) {
paddle::SmallVector<phi::TensorBase*> tensor_vector;
paddle::small_vector<phi::TensorBase*> tensor_vector;
auto* tensor_array =
var->template GetMutable<framework::LoDTensorArray>();
for (auto& t : *tensor_array) {
......
......@@ -2028,8 +2028,7 @@ void BindImperative(py::module *m_ptr) {
*(imperative::AmpOperators::Instance().GetMutableAllowOps()),
*(imperative::AmpOperators::Instance().GetMutableBlockOps()));
})
.def(
"_get_kernel_signature",
.def("_get_kernel_signature",
[](imperative::Tracer &self, const std::string &type,
const PyNameVarBaseMap &ins, const PyNameVarBaseMap &outs,
framework::AttributeMap attrs) {
......@@ -2038,14 +2037,15 @@ void BindImperative(py::module *m_ptr) {
auto outs_map = ConvertToNameTensorMap(outs);
{
auto input_to_vector =
[](paddle::SmallVector<const char *> &vec) {
[](paddle::small_vector<const char *> &vec) {
return std::vector<std::string>(vec.begin(), vec.end());
};
auto output_to_vector =
[](paddle::SmallVector<const char *> &vec) {
[](paddle::small_vector<const char *> &vec) {
return std::vector<std::string>(vec.begin(), vec.end());
};
auto attr_to_vector = [](paddle::SmallVector<const char *> &vec) {
auto attr_to_vector =
[](paddle::small_vector<const char *> &vec) {
return std::vector<std::string>(vec.begin(), vec.end());
};
auto ret = self.GetExpectedKernelSignature(type, ins_map,
......
......@@ -93,9 +93,9 @@ std::vector<PhiKernelDesc> GetCandidateKernels(
phi_kernel_desc.input_types.clear();
phi_kernel_desc.output_types.clear();
phi::KernelArgsDef args_def = kernel_key_map.at(kernel_key).args_def();
const paddle::SmallVector<phi::TensorArgDef, phi::kInputSmallVectorSize>&
const paddle::small_vector<phi::TensorArgDef, phi::kInputSmallVectorSize>&
input_arg = args_def.input_defs();
const paddle::SmallVector<phi::TensorArgDef, phi::kOutputSmallVectorSize>&
const paddle::small_vector<phi::TensorArgDef, phi::kOutputSmallVectorSize>&
output_arg = args_def.output_defs();
for (auto tensor_arg : input_arg) {
phi_kernel_desc.input_types.emplace_back(ConvertPlaceFromPhi(tensor_arg));
......
......@@ -27,30 +27,30 @@ limitations under the License. */
namespace phi {
// tuple(input_names, attr_names, output_names)
using KernelArgsTuple = std::tuple<paddle::SmallVector<const char*>,
paddle::SmallVector<const char*>,
paddle::SmallVector<const char*>>;
using KernelArgsTuple = std::tuple<paddle::small_vector<const char*>,
paddle::small_vector<const char*>,
paddle::small_vector<const char*>>;
struct KernelSignature {
const char* name;
paddle::SmallVector<const char*> input_names;
paddle::SmallVector<const char*> attr_names;
paddle::SmallVector<const char*> output_names;
paddle::small_vector<const char*> input_names;
paddle::small_vector<const char*> attr_names;
paddle::small_vector<const char*> output_names;
KernelSignature() = default;
KernelSignature(const char* kernel_name,
paddle::SmallVector<const char*>&& inputs,
paddle::SmallVector<const char*>&& attrs,
paddle::SmallVector<const char*>&& outputs)
paddle::small_vector<const char*>&& inputs,
paddle::small_vector<const char*>&& attrs,
paddle::small_vector<const char*>&& outputs)
: name(kernel_name),
input_names(std::move(inputs)),
attr_names(std::move(attrs)),
output_names(std::move(outputs)) {}
KernelSignature(const char* kernel_name,
const paddle::SmallVector<const char*>& inputs,
const paddle::SmallVector<const char*>& attrs,
const paddle::SmallVector<const char*>& outputs)
const paddle::small_vector<const char*>& inputs,
const paddle::small_vector<const char*>& attrs,
const paddle::small_vector<const char*>& outputs)
: name(kernel_name),
input_names(inputs),
attr_names(attrs),
......
......@@ -35,7 +35,7 @@ void InferMetaContext::EmplaceBackAttr(Attribute attr) {
}
void InferMetaContext::EmplaceBackInputs(
paddle::SmallVector<MetaTensor, phi::kInputSmallVectorSize> inputs) {
paddle::small_vector<MetaTensor, phi::kInputSmallVectorSize> inputs) {
int index = inputs_.size();
input_range_.emplace_back(std::pair<int, int>(index, index + inputs.size()));
inputs_.insert(inputs_.end(),
......@@ -43,7 +43,7 @@ void InferMetaContext::EmplaceBackInputs(
std::make_move_iterator(inputs.end()));
}
void InferMetaContext::EmplaceBackOutputs(
paddle::SmallVector<MetaTensor, phi::kOutputSmallVectorSize> outputs) {
paddle::small_vector<MetaTensor, phi::kOutputSmallVectorSize> outputs) {
int index = outputs_.size();
output_range_.emplace_back(
std::pair<int, int>(index, index + outputs.size()));
......
......@@ -45,9 +45,9 @@ class InferMetaContext {
void EmplaceBackAttr(Attribute attr);
void EmplaceBackInputs(
paddle::SmallVector<MetaTensor, phi::kInputSmallVectorSize> inputs);
paddle::small_vector<MetaTensor, phi::kInputSmallVectorSize> inputs);
void EmplaceBackOutputs(
paddle::SmallVector<MetaTensor, phi::kOutputSmallVectorSize> outputs);
paddle::small_vector<MetaTensor, phi::kOutputSmallVectorSize> outputs);
virtual const MetaTensor& InputAt(size_t idx) const;
virtual paddle::optional<const MetaTensor&> OptionalInputAt(size_t idx) const;
......@@ -72,16 +72,16 @@ class InferMetaContext {
protected:
MetaConfig config_;
paddle::SmallVector<Attribute, kAttrSmallVectorSize> attrs_;
paddle::small_vector<Attribute, kAttrSmallVectorSize> attrs_;
paddle::SmallVector<std::pair<int, int>, phi::kInputSmallVectorSize>
paddle::small_vector<std::pair<int, int>, phi::kInputSmallVectorSize>
input_range_;
paddle::SmallVector<std::pair<int, int>, phi::kOutputSmallVectorSize>
paddle::small_vector<std::pair<int, int>, phi::kOutputSmallVectorSize>
output_range_;
private:
paddle::SmallVector<MetaTensor, phi::kInputSmallVectorSize> inputs_;
paddle::SmallVector<MetaTensor, phi::kOutputSmallVectorSize> outputs_;
paddle::small_vector<MetaTensor, phi::kInputSmallVectorSize> inputs_;
paddle::small_vector<MetaTensor, phi::kOutputSmallVectorSize> outputs_;
};
#define PD_INFER_META(...) \
......
......@@ -28,7 +28,7 @@ void KernelContext::EmplaceBackInputWithoutSetRange(const TensorBase* input) {
}
void KernelContext::EmplaceBackInputs(
paddle::SmallVector<const TensorBase*> inputs) {
paddle::small_vector<const TensorBase*> inputs) {
int index = inputs_.size();
// Record the start and end index of the input
input_range_.emplace_back(std::pair<int, int>(index, index + inputs.size()));
......@@ -38,7 +38,7 @@ void KernelContext::EmplaceBackInputs(
}
void KernelContext::EmplaceBackInputsWithoutSetRange(
paddle::SmallVector<const TensorBase*> inputs) {
paddle::small_vector<const TensorBase*> inputs) {
inputs_.insert(inputs_.end(),
std::make_move_iterator(inputs.begin()),
std::make_move_iterator(inputs.end()));
......@@ -56,7 +56,7 @@ void KernelContext::EmplaceBackOutputWithoutSetRange(TensorBase* output) {
}
void KernelContext::EmplaceBackOutputs(
paddle::SmallVector<TensorBase*> outputs) {
paddle::small_vector<TensorBase*> outputs) {
int index = outputs_.size();
// Record the start and end index of the input
output_range_.emplace_back(
......@@ -67,7 +67,7 @@ void KernelContext::EmplaceBackOutputs(
}
void KernelContext::EmplaceBackOutputsWithoutSetRange(
paddle::SmallVector<TensorBase*> outputs) {
paddle::small_vector<TensorBase*> outputs) {
outputs_.insert(outputs_.end(),
std::make_move_iterator(outputs.begin()),
std::make_move_iterator(outputs.end()));
......
......@@ -51,19 +51,19 @@ class KernelContext {
void EmplaceBackInputWithoutSetRange(const TensorBase* input);
void EmplaceBackInputs(paddle::SmallVector<const TensorBase*> inputs);
void EmplaceBackInputs(paddle::small_vector<const TensorBase*> inputs);
void EmplaceBackInputsWithoutSetRange(
paddle::SmallVector<const TensorBase*> inputs);
paddle::small_vector<const TensorBase*> inputs);
void EmplaceBackOutput(TensorBase* output);
void EmplaceBackOutputWithoutSetRange(TensorBase* output);
void EmplaceBackOutputs(paddle::SmallVector<TensorBase*> outputs);
void EmplaceBackOutputs(paddle::small_vector<TensorBase*> outputs);
void EmplaceBackOutputsWithoutSetRange(
paddle::SmallVector<TensorBase*> outputs);
paddle::small_vector<TensorBase*> outputs);
void EmplaceBackAttr(Attribute attr);
......@@ -138,12 +138,12 @@ class KernelContext {
private:
DeviceContext* dev_ctx_;
paddle::SmallVector<const TensorBase*> inputs_;
paddle::SmallVector<TensorBase*> outputs_;
paddle::SmallVector<Attribute, kAttrSmallVectorSize> attrs_;
paddle::small_vector<const TensorBase*> inputs_;
paddle::small_vector<TensorBase*> outputs_;
paddle::small_vector<Attribute, kAttrSmallVectorSize> attrs_;
paddle::SmallVector<std::pair<int, int>, kInputSmallVectorSize> input_range_;
paddle::SmallVector<std::pair<int, int>, kOutputSmallVectorSize>
paddle::small_vector<std::pair<int, int>, kInputSmallVectorSize> input_range_;
paddle::small_vector<std::pair<int, int>, kOutputSmallVectorSize>
output_range_;
};
......
......@@ -173,37 +173,38 @@ class KernelArgsDef {
attribute_defs_.emplace_back(AttributeArgDef(type_index));
}
const paddle::SmallVector<TensorArgDef, kInputSmallVectorSize>& input_defs()
const paddle::small_vector<TensorArgDef, kInputSmallVectorSize>& input_defs()
const {
return input_defs_;
}
const paddle::SmallVector<TensorArgDef, kOutputSmallVectorSize>& output_defs()
const {
const paddle::small_vector<TensorArgDef, kOutputSmallVectorSize>&
output_defs() const {
return output_defs_;
}
const paddle::SmallVector<AttributeArgDef, kAttrSmallVectorSize>&
const paddle::small_vector<AttributeArgDef, kAttrSmallVectorSize>&
attribute_defs() const {
return attribute_defs_;
}
paddle::SmallVector<TensorArgDef, kInputSmallVectorSize>& input_defs() {
paddle::small_vector<TensorArgDef, kInputSmallVectorSize>& input_defs() {
return input_defs_;
}
paddle::SmallVector<TensorArgDef, kOutputSmallVectorSize>& output_defs() {
paddle::small_vector<TensorArgDef, kOutputSmallVectorSize>& output_defs() {
return output_defs_;
}
paddle::SmallVector<AttributeArgDef, kAttrSmallVectorSize>& attribute_defs() {
paddle::small_vector<AttributeArgDef, kAttrSmallVectorSize>&
attribute_defs() {
return attribute_defs_;
}
private:
paddle::SmallVector<TensorArgDef, kInputSmallVectorSize> input_defs_{{}};
paddle::SmallVector<TensorArgDef, kOutputSmallVectorSize> output_defs_{{}};
paddle::SmallVector<AttributeArgDef, kAttrSmallVectorSize> attribute_defs_{
paddle::small_vector<TensorArgDef, kInputSmallVectorSize> input_defs_{{}};
paddle::small_vector<TensorArgDef, kOutputSmallVectorSize> output_defs_{{}};
paddle::small_vector<AttributeArgDef, kAttrSmallVectorSize> attribute_defs_{
{}};
};
......
......@@ -19,7 +19,7 @@
namespace phi {
KernelSignature AdamOpArgumentMapping(const ArgumentMappingContext& ctx) {
paddle::SmallVector<const char*> in_names = {"Param",
paddle::small_vector<const char*> in_names = {"Param",
"Grad",
"LearningRate",
"Moment1",
......@@ -28,13 +28,13 @@ KernelSignature AdamOpArgumentMapping(const ArgumentMappingContext& ctx) {
"Beta2Pow",
"MasterParam",
"SkipUpdate"};
paddle::SmallVector<const char*> out_names = {"ParamOut",
paddle::small_vector<const char*> out_names = {"ParamOut",
"Moment1Out",
"Moment2Out",
"Beta1PowOut",
"Beta2PowOut",
"MasterParamOut"};
paddle::SmallVector<const char*> attr_names;
paddle::small_vector<const char*> attr_names;
attr_names.emplace_back(ctx.HasInput("Beta1Tensor") ? "Beta1Tensor"
: "beta1");
......
......@@ -19,7 +19,7 @@
namespace phi {
KernelSignature AdamwOpArgumentMapping(const ArgumentMappingContext& ctx) {
paddle::SmallVector<const char*> in_names = {"Param",
paddle::small_vector<const char*> in_names = {"Param",
"Grad",
"LearningRate",
"Moment1",
......@@ -28,13 +28,13 @@ KernelSignature AdamwOpArgumentMapping(const ArgumentMappingContext& ctx) {
"Beta2Pow",
"MasterParam",
"SkipUpdate"};
paddle::SmallVector<const char*> out_names = {"ParamOut",
paddle::small_vector<const char*> out_names = {"ParamOut",
"Moment1Out",
"Moment2Out",
"Beta1PowOut",
"Beta2PowOut",
"MasterParamOut"};
paddle::SmallVector<const char*> attr_names;
paddle::small_vector<const char*> attr_names;
attr_names.emplace_back(ctx.HasInput("Beta1Tensor") ? "Beta1Tensor"
: "beta1");
......
......@@ -18,7 +18,7 @@
namespace phi {
KernelSignature ClipOpArgumentMapping(const ArgumentMappingContext& ctx) {
paddle::SmallVector<std::string, kAttrSmallVectorSize> attr_names;
paddle::small_vector<std::string, kAttrSmallVectorSize> attr_names;
attr_names.emplace_back(ctx.HasInput("Min") ? "Min" : "min");
attr_names.emplace_back(ctx.HasInput("Max") ? "Max" : "max");
if (ctx.IsDenseTensorInput("X")) {
......
......@@ -48,14 +48,14 @@ KernelSignature StridedSliceOpArgumentMapping(
? (use_attr_strides ? "strides" : "StridesTensorList")
: "strides");
paddle::SmallVector<const char*> inputs = {"Input"};
paddle::SmallVector<const char*> attrs = {"axes",
paddle::small_vector<const char*> inputs = {"Input"};
paddle::small_vector<const char*> attrs = {"axes",
starts_key,
ends_key,
strides_key,
"infer_flags",
"decrease_axis"};
paddle::SmallVector<const char*> outputs = {"Out"};
paddle::small_vector<const char*> outputs = {"Out"};
const char* kernel_name;
if (ctx.IsDenseTensorVectorInput("Input")) {
......@@ -97,14 +97,14 @@ KernelSignature StridedSliceGradOpArgumentMapping(
? (use_attr_strides ? "strides" : "StridesTensorList")
: "strides");
paddle::SmallVector<const char*> inputs = {"Input", "Out@GRAD"};
paddle::SmallVector<const char*> attrs = {"axes",
paddle::small_vector<const char*> inputs = {"Input", "Out@GRAD"};
paddle::small_vector<const char*> attrs = {"axes",
starts_key,
ends_key,
strides_key,
"infer_flags",
"decrease_axis"};
paddle::SmallVector<const char*> outputs = {"Input@GRAD"};
paddle::small_vector<const char*> outputs = {"Input@GRAD"};
const char* kernel_name;
if (ctx.IsDenseTensorVectorInput("Input")) {
......
......@@ -68,7 +68,7 @@ TEST(MetaFnFactory, SplitInferMetaFn) {
phi::DenseTensor dense_out1;
phi::DenseTensor dense_out2;
paddle::SmallVector<phi::MetaTensor, kOutputSmallVectorSize> out;
paddle::small_vector<phi::MetaTensor, kOutputSmallVectorSize> out;
out.emplace_back(phi::MetaTensor(&dense_out1));
out.emplace_back(phi::MetaTensor(&dense_out2));
......
......@@ -3,8 +3,10 @@
// 1. remove hash_value functions
// 2. replace with the llvm::NoneType with paddle::none_t
// 3. remove drop_while, drop_until, take_while, take_until methods
// 4. change ArrayRef to array_ref to unify naming style of utils
//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++ -*-===//
//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++
//-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
......@@ -29,19 +31,19 @@
namespace paddle {
/// ArrayRef - Represent a constant reference to an array (0 or more elements
/// array_ref - Represent a constant reference to an array (0 or more elements
/// consecutively in memory), i.e. a start pointer and a length. It allows
/// various APIs to take consecutive elements easily and conveniently.
///
/// This class does not own the underlying data, it is expected to be used in
/// situations where the data resides in some other buffer, whose lifetime
/// extends past that of the ArrayRef. For this reason, it is not in general
/// safe to store an ArrayRef.
/// extends past that of the array_ref. For this reason, it is not in general
/// safe to store an array_ref.
///
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template <typename T>
class ArrayRef {
class array_ref {
public:
using iterator = const T *;
using const_iterator = const T *;
......@@ -59,81 +61,81 @@ class ArrayRef {
/// @name Constructors
/// @{
/// Construct an empty ArrayRef.
/*implicit*/ ArrayRef() = default;
/// Construct an empty array_ref.
/*implicit*/ array_ref() = default;
/// Construct an empty ArrayRef from None.
/*implicit*/ ArrayRef(none_t) {}
/// Construct an empty array_ref from None.
/*implicit*/ array_ref(none_t) {}
/// Construct an ArrayRef from a single element.
/*implicit*/ ArrayRef(const T &OneElt) : Data(&OneElt), Length(1) {}
/// Construct an array_ref from a single element.
/*implicit*/ array_ref(const T &OneElt) : Data(&OneElt), Length(1) {}
/// Construct an ArrayRef from a pointer and length.
/*implicit*/ ArrayRef(const T *data, size_t length)
/// Construct an array_ref from a pointer and length.
/*implicit*/ array_ref(const T *data, size_t length)
: Data(data), Length(length) {}
/// Construct an ArrayRef from a range.
ArrayRef(const T *begin, const T *end) : Data(begin), Length(end - begin) {}
/// Construct an array_ref from a range.
array_ref(const T *begin, const T *end) : Data(begin), Length(end - begin) {}
/// Construct an ArrayRef from a SmallVector. This is templated in order to
/// avoid instantiating SmallVectorTemplateCommon<T> whenever we
/// copy-construct an ArrayRef.
/// Construct an array_ref from a small_vector. This is templated in order to
/// avoid instantiating small_vector_template_common<T> whenever we
/// copy-construct an array_ref.
template <typename U>
/*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T, U> &Vec)
/*implicit*/ array_ref(const small_vector_template_common<T, U> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef from a std::vector.
/// Construct an array_ref from a std::vector.
template <typename A>
/*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
/*implicit*/ array_ref(const std::vector<T, A> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef from a std::array
/// Construct an array_ref from a std::array
template <size_t N>
/*implicit*/ constexpr ArrayRef(const std::array<T, N> &Arr)
/*implicit*/ constexpr array_ref(const std::array<T, N> &Arr)
: Data(Arr.data()), Length(N) {}
/// Construct an ArrayRef from a C array.
/// Construct an array_ref from a C array.
template <size_t N>
/*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
/*implicit*/ constexpr array_ref(const T (&Arr)[N]) : Data(Arr), Length(N) {}
/// Construct an ArrayRef from a std::initializer_list.
/// Construct an array_ref from a std::initializer_list.
#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 9
// Disable gcc's warning in this constructor as it generates an enormous
// amount
// of messages. Anyone using ArrayRef should already be aware of the fact that
// of messages. Anyone using array_ref should already be aware of the fact that
// it does not do lifetime extension.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winit-list-lifetime"
#endif
/*implicit*/ ArrayRef(const std::initializer_list<T> &Vec)
/*implicit*/ array_ref(const std::initializer_list<T> &Vec)
: Data(Vec.begin() == Vec.end() ? (T *)nullptr : Vec.begin()),
Length(Vec.size()) {}
#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 9
#pragma GCC diagnostic pop
#endif
/// Construct an ArrayRef<const T*> from ArrayRef<T*>. This uses SFINAE to
/// Construct an array_ref<const T*> from array_ref<T*>. This uses SFINAE to
/// ensure that only ArrayRefs of pointers can be converted.
template <typename U>
ArrayRef(const ArrayRef<U *> &A,
array_ref(const array_ref<U *> &A,
std::enable_if_t<std::is_convertible<U *const *, T const *>::value>
* = nullptr)
: Data(A.data()), Length(A.size()) {}
/// Construct an ArrayRef<const T*> from a SmallVector<T*>. This is
/// templated in order to avoid instantiating SmallVectorTemplateCommon<T>
/// whenever we copy-construct an ArrayRef.
/// Construct an array_ref<const T*> from a small_vector<T*>. This is
/// templated in order to avoid instantiating small_vector_template_common<T>
/// whenever we copy-construct an array_ref.
template <typename U, typename DummyT>
/*implicit*/ ArrayRef(
const SmallVectorTemplateCommon<U *, DummyT> &Vec,
/*implicit*/ array_ref(
const small_vector_template_common<U *, DummyT> &Vec,
std::enable_if_t<std::is_convertible<U *const *, T const *>::value> * =
nullptr)
: Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef<const T*> from std::vector<T*>. This uses SFINAE
/// Construct an array_ref<const T*> from std::vector<T*>. This uses SFINAE
/// to ensure that only vectors of pointers can be converted.
template <typename U, typename A>
ArrayRef(
array_ref(
const std::vector<U *, A> &Vec,
std::enable_if_t<std::is_convertible<U *const *, T const *>::value> * = 0)
: Data(Vec.data()), Length(Vec.size()) {}
......@@ -168,50 +170,50 @@ class ArrayRef {
return Data[Length - 1];
}
// copy - Allocate copy in Allocator and return ArrayRef<T> to it.
// copy - Allocate copy in Allocator and return array_ref<T> to it.
template <typename Allocator>
ArrayRef<T> copy(Allocator &A) {
array_ref<T> copy(Allocator &A) {
T *Buff = A.template Allocate<T>(Length);
std::uninitialized_copy(begin(), end(), Buff);
return ArrayRef<T>(Buff, Length);
return array_ref<T>(Buff, Length);
}
/// equals - Check for element-wise equality.
bool equals(ArrayRef RHS) const {
bool equals(array_ref RHS) const {
if (Length != RHS.Length) return false;
return std::equal(begin(), end(), RHS.begin());
}
/// slice(n, m) - Chop off the first N elements of the array, and keep M
/// elements in the array.
ArrayRef<T> slice(size_t N, size_t M) const {
array_ref<T> slice(size_t N, size_t M) const {
assert(N + M <= size() && "Invalid specifier");
return ArrayRef<T>(data() + N, M);
return array_ref<T>(data() + N, M);
}
/// slice(n) - Chop off the first N elements of the array.
ArrayRef<T> slice(size_t N) const { return slice(N, size() - N); }
array_ref<T> slice(size_t N) const { return slice(N, size() - N); }
/// Drop the first \p N elements of the array.
ArrayRef<T> drop_front(size_t N = 1) const {
array_ref<T> drop_front(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return slice(N, size() - N);
}
/// Drop the last \p N elements of the array.
ArrayRef<T> drop_back(size_t N = 1) const {
array_ref<T> drop_back(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return slice(0, size() - N);
}
/// Return a copy of *this with only the first \p N elements.
ArrayRef<T> take_front(size_t N = 1) const {
array_ref<T> take_front(size_t N = 1) const {
if (N >= size()) return *this;
return drop_back(size() - N);
}
/// Return a copy of *this with only the last \p N elements.
ArrayRef<T> take_back(size_t N = 1) const {
array_ref<T> take_back(size_t N = 1) const {
if (N >= size()) return *this;
return drop_front(size() - N);
}
......@@ -229,7 +231,7 @@ class ArrayRef {
/// The declaration here is extra complicated so that "arrayRef = {}"
/// continues to select the move assignment operator.
template <typename U>
std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> &operator=(
std::enable_if_t<std::is_same<U, T>::value, array_ref<T>> &operator=(
U &&Temporary) = delete;
/// Disallow accidental assignment from a temporary.
......@@ -237,7 +239,7 @@ class ArrayRef {
/// The declaration here is extra complicated so that "arrayRef = {}"
/// continues to select the move assignment operator.
template <typename U>
std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> &operator=(
std::enable_if_t<std::is_same<U, T>::value, array_ref<T>> &operator=(
std::initializer_list<U>) = delete;
/// @}
......@@ -255,90 +257,90 @@ class ArrayRef {
/// @}
};
/// @name ArrayRef Convenience constructors
/// @name array_ref Convenience constructors
/// @{
/// Construct an ArrayRef from a single element.
/// Construct an array_ref from a single element.
template <typename T>
ArrayRef<T> makeArrayRef(const T &OneElt) {
array_ref<T> make_array_ref(const T &OneElt) {
return OneElt;
}
/// Construct an ArrayRef from a pointer and length.
/// Construct an array_ref from a pointer and length.
template <typename T>
ArrayRef<T> makeArrayRef(const T *data, size_t length) {
return ArrayRef<T>(data, length);
array_ref<T> make_array_ref(const T *data, size_t length) {
return array_ref<T>(data, length);
}
/// Construct an ArrayRef from a range.
/// Construct an array_ref from a range.
template <typename T>
ArrayRef<T> makeArrayRef(const T *begin, const T *end) {
return ArrayRef<T>(begin, end);
array_ref<T> make_array_ref(const T *begin, const T *end) {
return array_ref<T>(begin, end);
}
/// Construct an ArrayRef from a SmallVector.
/// Construct an array_ref from a small_vector.
template <typename T>
ArrayRef<T> makeArrayRef(const SmallVectorImpl<T> &Vec) {
array_ref<T> make_array_ref(const small_vector_impl<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a SmallVector.
/// Construct an array_ref from a small_vector.
template <typename T, unsigned N>
ArrayRef<T> makeArrayRef(const SmallVector<T, N> &Vec) {
array_ref<T> make_array_ref(const small_vector<T, N> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a std::vector.
/// Construct an array_ref from a std::vector.
template <typename T>
ArrayRef<T> makeArrayRef(const std::vector<T> &Vec) {
array_ref<T> make_array_ref(const std::vector<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a std::array.
/// Construct an array_ref from a std::array.
template <typename T, std::size_t N>
ArrayRef<T> makeArrayRef(const std::array<T, N> &Arr) {
array_ref<T> make_array_ref(const std::array<T, N> &Arr) {
return Arr;
}
/// Construct an ArrayRef from an ArrayRef (no-op) (const)
/// Construct an array_ref from an array_ref (no-op) (const)
template <typename T>
ArrayRef<T> makeArrayRef(const ArrayRef<T> &Vec) {
array_ref<T> make_array_ref(const array_ref<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from an ArrayRef (no-op)
/// Construct an array_ref from an array_ref (no-op)
template <typename T>
ArrayRef<T> &makeArrayRef(ArrayRef<T> &Vec) {
array_ref<T> &make_array_ref(array_ref<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a C array.
/// Construct an array_ref from a C array.
template <typename T, size_t N>
ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
return ArrayRef<T>(Arr);
array_ref<T> make_array_ref(const T (&Arr)[N]) {
return array_ref<T>(Arr);
}
/// @}
/// @name ArrayRef Comparison Operators
/// @name array_ref Comparison Operators
/// @{
template <typename T>
inline bool operator==(ArrayRef<T> LHS, ArrayRef<T> RHS) {
inline bool operator==(array_ref<T> LHS, array_ref<T> RHS) {
return LHS.equals(RHS);
}
template <typename T>
inline bool operator==(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) {
return ArrayRef<T>(LHS).equals(RHS);
inline bool operator==(small_vector_impl<T> &LHS, array_ref<T> RHS) {
return array_ref<T>(LHS).equals(RHS);
}
template <typename T>
inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) {
inline bool operator!=(array_ref<T> LHS, array_ref<T> RHS) {
return !(LHS == RHS);
}
template <typename T>
inline bool operator!=(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) {
inline bool operator!=(small_vector_impl<T> &LHS, array_ref<T> RHS) {
return !(LHS == RHS);
}
......
......@@ -21,53 +21,53 @@
#include "gtest/gtest.h"
TEST(array_ref, array_ref) {
paddle::ArrayRef<int> a;
paddle::array_ref<int> a;
CHECK_EQ(a.size(), size_t(0));
CHECK_EQ(a.data(), static_cast<int*>(nullptr));
paddle::ArrayRef<int> b(paddle::none);
paddle::array_ref<int> b(paddle::none);
CHECK_EQ(b.size(), size_t(0));
CHECK_EQ(b.data(), static_cast<int*>(nullptr));
int v = 1;
paddle::ArrayRef<int> c(v);
paddle::array_ref<int> c(v);
CHECK_EQ(c.size(), size_t(1));
CHECK_EQ(c.data(), &v);
CHECK_EQ(c.equals(paddle::makeArrayRef(v)), true);
CHECK_EQ(c.equals(paddle::make_array_ref(v)), true);
int v1[5] = {1, 2, 3, 4, 5};
paddle::ArrayRef<int> d(v1, 5);
paddle::array_ref<int> d(v1, 5);
CHECK_EQ(d.size(), size_t(5));
CHECK_EQ(d.data(), v1);
CHECK_EQ(d.equals(paddle::makeArrayRef(v1, 5)), true);
CHECK_EQ(d.equals(paddle::make_array_ref(v1, 5)), true);
paddle::ArrayRef<int> e(&v1[0], &v1[4]);
paddle::array_ref<int> e(&v1[0], &v1[4]);
CHECK_EQ(e.size(), size_t(4));
CHECK_EQ(e.data(), v1);
CHECK_EQ(e.equals(paddle::makeArrayRef(&v1[0], &v1[4])), true);
CHECK_EQ(e.equals(paddle::make_array_ref(&v1[0], &v1[4])), true);
paddle::SmallVector<int, 3> small_vector{1, 2, 3};
paddle::ArrayRef<int> f(small_vector);
paddle::small_vector<int, 3> small_vector{1, 2, 3};
paddle::array_ref<int> f(small_vector);
CHECK_EQ(f.size(), size_t(3));
CHECK_EQ(f.data(), small_vector.data());
CHECK_EQ(f.equals(paddle::makeArrayRef(small_vector)), true);
CHECK_EQ(f.equals(paddle::make_array_ref(small_vector)), true);
std::vector<int> vector{1, 2, 3};
paddle::ArrayRef<int> g(vector);
paddle::array_ref<int> g(vector);
CHECK_EQ(g.size(), size_t(3));
CHECK_EQ(g.data(), vector.data());
CHECK_EQ(g.equals(paddle::makeArrayRef(vector)), true);
CHECK_EQ(g.equals(paddle::make_array_ref(vector)), true);
std::initializer_list<int> list = {1, 2, 3};
paddle::ArrayRef<int> h(list);
paddle::array_ref<int> h(list);
CHECK_EQ(h.size(), size_t(3));
CHECK_EQ(h.data(), list.begin());
paddle::ArrayRef<int> i(h);
paddle::array_ref<int> i(h);
CHECK_EQ(i.size(), size_t(3));
CHECK_EQ(i.data(), list.begin());
CHECK_EQ(i.equals(h), true);
CHECK_EQ(i.equals(paddle::makeArrayRef(h)), true);
CHECK_EQ(i.equals(paddle::make_array_ref(h)), true);
auto slice = i.slice(1, 2);
CHECK_EQ(slice.size(), size_t(2));
......@@ -78,7 +78,7 @@ TEST(array_ref, array_ref) {
CHECK_EQ(drop.size(), size_t(1));
CHECK_EQ(drop[0], 3);
paddle::ArrayRef<int> nums = {1, 2, 3, 4, 5, 6, 7, 8};
paddle::array_ref<int> nums = {1, 2, 3, 4, 5, 6, 7, 8};
auto front = nums.take_front(3);
CHECK_EQ(front.size(), size_t(3));
for (size_t i = 0; i < 3; ++i) {
......
This diff is collapsed.
......@@ -21,7 +21,7 @@
#include "gtest/gtest.h"
template <typename T, unsigned N>
static std::vector<T> ToStdVector(const paddle::SmallVector<T, N> &vec) {
static std::vector<T> ToStdVector(const paddle::small_vector<T, N> &vec) {
std::vector<T> std_vec;
std_vec.reserve(vec.size());
for (size_t i = 0; i < vec.size(); ++i) {
......@@ -35,7 +35,7 @@ void SmallVectorCheck(size_t n) {
std::srand(std::time(nullptr));
std::vector<int> std_vec;
paddle::SmallVector<int, N> vec;
paddle::small_vector<int, N> vec;
for (size_t i = 0; i < n; ++i) {
int value = rand(); // NOLINT
......
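
For completeness, a hedged sketch of how the renamed container reads at a typical call site, modeled on the argument-mapping functions diffed above (MyOpArgumentMapping, the operator/attribute names, and the include path are hypothetical; only the KernelSignature constructor shape follows the header change in this commit):

// Hypothetical mapping function; the post-rename spelling
// paddle::small_vector<const char*> is the point of this sketch.
#include <utility>

#include "paddle/phi/core/compat/arg_map_context.h"  // assumed location of KernelSignature

namespace phi {

KernelSignature MyOpArgumentMapping(const ArgumentMappingContext& ctx) {
  paddle::small_vector<const char*> inputs = {"X"};
  paddle::small_vector<const char*> attrs;
  // Mirrors the HasInput-or-attribute pattern in the sig files above;
  // "AxisTensor"/"axis" are placeholder names.
  attrs.emplace_back(ctx.HasInput("AxisTensor") ? "AxisTensor" : "axis");
  paddle::small_vector<const char*> outputs = {"Out"};
  return KernelSignature("my_kernel",
                         std::move(inputs),
                         std::move(attrs),
                         std::move(outputs));
}

}  // namespace phi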