Unverified commit 927c0d50 authored by Chen Weihang, committed by GitHub

[AutoParallel] Polish DistTensor details (#55436)

* polish dist_tensor details

* add unittest for coverage

* revert useless change

* skip test without dist
Parent 552ed8d8
@@ -298,13 +298,13 @@ void InitDistTensorWithTensor(
     std::shared_ptr<phi::DenseTensor> tensor =
         std::static_pointer_cast<phi::DenseTensor>(src.impl());
     self->tensor.set_impl(std::make_shared<DistTensor>(tensor, dist_attr));
-    VLOG(4) << "Same place, do ShareDataWith";
+    VLOG(4) << "Same place, do ShareDataWith for DistTensor.";
   } else {
     std::shared_ptr<phi::DenseTensor> tensor =
         std::static_pointer_cast<phi::DenseTensor>(
             src.copy_to(place, true).impl());
     self->tensor.set_impl(std::make_shared<DistTensor>(tensor, dist_attr));
-    VLOG(4) << "Different place, do TensorCopy";
+    VLOG(4) << "Different place, do TensorCopy for DistTensor.";
   }
   if (src.get_autograd_meta()) {
     egr::EagerUtils::autograd_meta(&(self->tensor))
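Note: the branch above decides between sharing and copying based on placement. A minimal standalone sketch of that decision, using hypothetical Storage/ToPlace names rather than the real Paddle types:

#include <memory>
#include <vector>

// Hypothetical stand-ins; not the Paddle types.
struct Storage {
  std::vector<float> data;
  int place = 0;  // pretend device id
};

// Mirrors the branch above: alias the buffer when the target place
// matches (the "ShareDataWith" case), otherwise deep-copy it over
// (the "TensorCopy" case).
std::shared_ptr<Storage> ToPlace(const std::shared_ptr<Storage>& src,
                                 int place) {
  if (src->place == place) {
    return src;  // same place: share the allocation
  }
  auto dst = std::make_shared<Storage>(*src);  // different place: copy
  dst->place = place;
  return dst;
}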
@@ -721,7 +721,7 @@ void AutoInitStringTensorByStringTensor(
  * ** zero_copy: bool,
  * ** name: std::string,
  * ** stop_gradient: bool,
- * ** dist_attr: phi::distributed::TensorDistAttr)
+ * ** dist_attr: phi::distributed::auto_parallel::TensorDistAttr)
  * 4.
  * def __init__ (
  * ** value: ndarray)
@@ -735,7 +735,7 @@ void AutoInitStringTensorByStringTensor(
  * ** tensor: Tensor,
  * ** place: paddle::platform::Place,
  * ** name: std::string,
- * ** dist_attr: phi::distributed::TensorDistAttr)
+ * ** dist_attr: phi::distributed::auto_parallel::TensorDistAttr)
  * 7. (multi-place) (should have at least one parameter, one parameter similar
  *    to case 5, zero parameter equals to case 1.)
  * def __init__ (
@@ -803,7 +803,7 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
 #ifdef PADDLE_WITH_DISTRIBUTE
     auto* tensor = static_cast<phi::distributed::auto_parallel::DistTensor*>(
         self->tensor.impl().get());
-    VLOG(6) << "dist tensor: " << tensor->IsInitialized();
+    VLOG(6) << "dist tensor: " << tensor->defined();
     return ToPyObject(tensor);
 #else
     RETURN_PY_NONE
@@ -26,10 +26,11 @@ void* DistTensor::AllocateFrom(Allocator* allocator,
 }
 
 const Place& DistTensor::place() const {
-  PADDLE_ENFORCE_NOT_NULL(
-      value_->holder_,
+  PADDLE_ENFORCE_EQ(
+      value_ && value_->holder_,
+      true,
       phi::errors::PreconditionNotMet(
-          "Tensor not initialized yet when DenseTensor::place() is called."));
+          "Tensor not initialized yet when DistTensor::place() is called."));
   return value_->holder_->place();
 }
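Note: the old check was not merely renamed in its message. PADDLE_ENFORCE_NOT_NULL(value_->holder_, ...) dereferences value_ while evaluating its argument, so an empty DistTensor would crash before the error fired; the new value_ && value_->holder_ condition short-circuits instead. A minimal sketch of the pattern, with hypothetical Holder/Value/Sketch names rather than the Paddle types:

#include <memory>
#include <stdexcept>

// Hypothetical stand-ins for the holder chain; not the Paddle types.
struct Holder { int place = 0; };
struct Value { std::shared_ptr<Holder> holder_; };

struct Sketch {
  std::shared_ptr<Value> value_;
  int place() const {
    // value_ is tested before value_->holder_ is touched, so a
    // default-constructed Sketch reports an error instead of segfaulting.
    if (!(value_ && value_->holder_)) {
      throw std::runtime_error(
          "Tensor not initialized yet when DistTensor::place() is called.");
    }
    return value_->holder_->place;
  }
};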
@@ -55,13 +56,7 @@ void DistTensor::set_meta(const DenseTensorMeta& meta) {
       true,
       phi::errors::InvalidArgument(
           "Input meta is invalid, please check the meta attribute."));
-  meta_.dims = meta.dims;
-  meta_.dtype = meta.dtype;
-  meta_.is_scalar = meta.is_scalar;
-  meta_.layout = meta.layout;
-  meta_.lod = meta.lod;
-  meta_.offset = meta.offset;
-  meta_.use_gpudnn = meta.use_gpudnn;
+  meta_ = meta;
 }
 
 }  // namespace auto_parallel
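Note: collapsing the field-by-field copy into meta_ = meta; relies on DenseTensorMeta's copy assignment covering every member, and it removes a maintenance trap: a member added to the meta type later can no longer be silently skipped here. A sketch of the difference with a hypothetical Meta type:

// Hypothetical Meta; the real type is phi::DenseTensorMeta.
struct Meta {
  int dims = 0;
  int dtype = 0;
  int added_later = 0;  // a field introduced after set_meta was written
};

struct Sketch {
  Meta meta_;
  // A hand-written field-by-field copy would need updating for
  // added_later; the defaulted operator= copies it automatically.
  void set_meta(const Meta& meta) { meta_ = meta; }
};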
@@ -82,10 +82,10 @@ class DistTensor final
   /// \brief Test whether the storage is allocated.
   /// \return Whether the storage is allocated.
   bool initialized() const override {
-    return value_->holder_ && value_->holder_->ptr();
+    return value_ && value_->holder_ && value_->holder_->ptr();
   }
 
-  bool IsInitialized() const { return value_->holder_ != nullptr; }
+  bool defined() const { return value_ && value_->holder_; }
 
   /// \brief Test whether the metadata is valid.
   /// \return Whether the metadata is valid.
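Note: the header now separates two predicates, and both tolerate a null value_: defined() means a DenseTensor value with a holder exists, while initialized() additionally requires the holder to point at allocated memory. A standalone sketch with hypothetical names, not the Paddle types:

#include <memory>

// Hypothetical stand-ins mirroring the holder chain.
struct Allocation {
  void* ptr_ = nullptr;
  void* ptr() const { return ptr_; }
};
struct Value { std::shared_ptr<Allocation> holder_; };

struct Sketch {
  std::shared_ptr<Value> value_;
  // defined(): the value and its holder exist; storage may be empty.
  bool defined() const { return value_ && value_->holder_; }
  // initialized(): the holder also owns actual memory.
  bool initialized() const {
    return value_ && value_->holder_ && value_->holder_->ptr();
  }
};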
@@ -4,6 +4,13 @@ cc_test(process_mesh_test SRCS process_mesh_test.cc)
 cc_test(dist_attr_test SRCS dist_attr_test.cc)
 
+if(WITH_DISTRIBUTE)
+  cc_test(
+    dist_tensor_test
+    SRCS dist_tensor_test.cc
+    DEPS phi)
+endif()
+
 cc_test_old(dist_mapper_test SRCS dist_mapper_test.cc DEPS phi)
 cc_test_old(spmd_rule_test SRCS spmd_rule_test.cc DEPS spmd_rules)
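Note: registering dist_tensor_test inside if(WITH_DISTRIBUTE) means the target, and its ctest entry, simply does not exist in non-distributed builds; that appears to be what the "skip test without dist" item in the commit message refers to.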
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"

#include "gtest/gtest.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
#include "test/cpp/phi/core/allocator.h"

namespace phi {
namespace distributed {
namespace auto_parallel {
namespace tests {

TEST(dist_tensor, constructor) {
  auto fancy_allocator =
      std::unique_ptr<Allocator>(new phi::tests::FancyAllocator);
  auto* alloc = fancy_allocator.get();

  DataType dtype{DataType::FLOAT16};
  DDim dims({3, 4});
  DenseTensorMeta meta(dtype, dims);

  auto dist_attr = std::make_shared<TensorDistAttr>(phi::vectorize(dims));

  // from allocator and an lvalue meta
  DistTensor x1(alloc, meta, dist_attr);
  EXPECT_TRUE(x1.defined());
  EXPECT_TRUE(x1.initialized());

  // from allocator and an rvalue meta
  DistTensor x2(alloc, DenseTensorMeta(dtype, dims), dist_attr);
  EXPECT_TRUE(x2.defined());
  EXPECT_TRUE(x2.initialized());

  // from an existing storage holder
  DistTensor x3(x2.value().Holder(), meta, dist_attr);
  EXPECT_TRUE(x3.defined());
  EXPECT_TRUE(x3.initialized());

  // from a shared DenseTensor
  auto a = std::make_shared<DenseTensor>(alloc, DenseTensorMeta(dtype, dims));
  DistTensor x4(a, dist_attr);
  EXPECT_TRUE(x4.defined());
  EXPECT_TRUE(x4.initialized());
}

}  // namespace tests
}  // namespace auto_parallel
}  // namespace distributed
}  // namespace phi
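Note: the four cases exercise one DistTensor constructor each: allocator plus lvalue meta (x1), allocator plus rvalue meta (x2), an existing storage holder (x3), and a shared DenseTensor (x4), and each result is expected to be both defined and initialized. Assuming a build configured with -DWITH_DISTRIBUTE=ON, ctest -R dist_tensor_test should pick it up.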