Commit f92d6845 authored by ervinzhang

fixed clang format

Parent 6639149e
@@ -8,169 +8,164 @@
namespace mindspore {
namespace tensor {
dataset::DataType MSTypeToDEType(TypeId data_type) {
-  switch (data_type) {
-    case kNumberTypeBool:
-      return dataset::DataType(dataset::DataType::DE_BOOL);
-    case kNumberTypeInt8:
-      return dataset::DataType(dataset::DataType::DE_INT8);
-    case kNumberTypeUInt8:
-      return dataset::DataType(dataset::DataType::DE_UINT8);
-    case kNumberTypeInt16:
-      return dataset::DataType(dataset::DataType::DE_INT16);
-    case kNumberTypeUInt16:
-      return dataset::DataType(dataset::DataType::DE_UINT16);
-    case kNumberTypeInt32:
-      return dataset::DataType(dataset::DataType::DE_INT32);
-    case kNumberTypeUInt32:
-      return dataset::DataType(dataset::DataType::DE_UINT32);
-    case kNumberTypeInt64:
-      return dataset::DataType(dataset::DataType::DE_INT64);
-    case kNumberTypeUInt64:
-      return dataset::DataType(dataset::DataType::DE_UINT64);
-    case kNumberTypeFloat16:
-      return dataset::DataType(dataset::DataType::DE_FLOAT16);
-    case kNumberTypeFloat32:
-      return dataset::DataType(dataset::DataType::DE_FLOAT32);
-    case kNumberTypeFloat64:
-      return dataset::DataType(dataset::DataType::DE_FLOAT64);
-    default:
-      // maybe throw?
-      return dataset::DataType(dataset::DataType::DE_UNKNOWN);
-  }
+  switch (data_type) {
+    case kNumberTypeBool:
+      return dataset::DataType(dataset::DataType::DE_BOOL);
+    case kNumberTypeInt8:
+      return dataset::DataType(dataset::DataType::DE_INT8);
+    case kNumberTypeUInt8:
+      return dataset::DataType(dataset::DataType::DE_UINT8);
+    case kNumberTypeInt16:
+      return dataset::DataType(dataset::DataType::DE_INT16);
+    case kNumberTypeUInt16:
+      return dataset::DataType(dataset::DataType::DE_UINT16);
+    case kNumberTypeInt32:
+      return dataset::DataType(dataset::DataType::DE_INT32);
+    case kNumberTypeUInt32:
+      return dataset::DataType(dataset::DataType::DE_UINT32);
+    case kNumberTypeInt64:
+      return dataset::DataType(dataset::DataType::DE_INT64);
+    case kNumberTypeUInt64:
+      return dataset::DataType(dataset::DataType::DE_UINT64);
+    case kNumberTypeFloat16:
+      return dataset::DataType(dataset::DataType::DE_FLOAT16);
+    case kNumberTypeFloat32:
+      return dataset::DataType(dataset::DataType::DE_FLOAT32);
+    case kNumberTypeFloat64:
+      return dataset::DataType(dataset::DataType::DE_FLOAT64);
+    default:
+      return dataset::DataType(dataset::DataType::DE_UNKNOWN);
+  }
}
TypeId DETypeToMSType(dataset::DataType data_type) {
-  switch (data_type.value()) {
-    case dataset::DataType::DE_BOOL:
-      return mindspore::TypeId::kNumberTypeBool;
-    case dataset::DataType::DE_INT8:
-      return mindspore::TypeId::kNumberTypeInt8;
-    case dataset::DataType::DE_UINT8:
-      return mindspore::TypeId::kNumberTypeUInt8;
-    case dataset::DataType::DE_INT16:
-      return mindspore::TypeId::kNumberTypeInt16;
-    case dataset::DataType::DE_UINT16:
-      return mindspore::TypeId::kNumberTypeUInt16;
-    case dataset::DataType::DE_INT32:
-      return mindspore::TypeId::kNumberTypeInt32;
-    case dataset::DataType::DE_UINT32:
-      return mindspore::TypeId::kNumberTypeUInt32;
-    case dataset::DataType::DE_INT64:
-      return mindspore::TypeId::kNumberTypeInt64;
-    case dataset::DataType::DE_UINT64:
-      return mindspore::TypeId::kNumberTypeUInt64;
-    case dataset::DataType::DE_FLOAT16:
-      return mindspore::TypeId::kNumberTypeFloat16;
-    case dataset::DataType::DE_FLOAT32:
-      return mindspore::TypeId::kNumberTypeFloat32;
-    case dataset::DataType::DE_FLOAT64:
-      return mindspore::TypeId::kNumberTypeFloat64;
-    default:
-      // maybe throw?
-      return kTypeUnknown;
-  }
+  switch (data_type.value()) {
+    case dataset::DataType::DE_BOOL:
+      return mindspore::TypeId::kNumberTypeBool;
+    case dataset::DataType::DE_INT8:
+      return mindspore::TypeId::kNumberTypeInt8;
+    case dataset::DataType::DE_UINT8:
+      return mindspore::TypeId::kNumberTypeUInt8;
+    case dataset::DataType::DE_INT16:
+      return mindspore::TypeId::kNumberTypeInt16;
+    case dataset::DataType::DE_UINT16:
+      return mindspore::TypeId::kNumberTypeUInt16;
+    case dataset::DataType::DE_INT32:
+      return mindspore::TypeId::kNumberTypeInt32;
+    case dataset::DataType::DE_UINT32:
+      return mindspore::TypeId::kNumberTypeUInt32;
+    case dataset::DataType::DE_INT64:
+      return mindspore::TypeId::kNumberTypeInt64;
+    case dataset::DataType::DE_UINT64:
+      return mindspore::TypeId::kNumberTypeUInt64;
+    case dataset::DataType::DE_FLOAT16:
+      return mindspore::TypeId::kNumberTypeFloat16;
+    case dataset::DataType::DE_FLOAT32:
+      return mindspore::TypeId::kNumberTypeFloat32;
+    case dataset::DataType::DE_FLOAT64:
+      return mindspore::TypeId::kNumberTypeFloat64;
+    default:
+      return kTypeUnknown;
+  }
}
MSTensor *DETensor::CreateTensor(TypeId data_type, const std::vector<int> &shape) {
  return new DETensor(data_type, shape);
}
MSTensor *DETensor::CreateTensor(const std::string &path) {
-  std::shared_ptr<dataset::Tensor> t;
-  (void) dataset::Tensor::CreateFromFile(path, &t);
-  return new DETensor(std::move(t));
+  std::shared_ptr<dataset::Tensor> t;
+  (void)dataset::Tensor::CreateFromFile(path, &t);
+  return new DETensor(std::move(t));
}
DETensor::DETensor(TypeId data_type, const std::vector<int> &shape) {
-  std::vector<dataset::dsize_t> t_shape;
-  t_shape.reserve(shape.size());
-  std::transform(shape.begin(), shape.end(),
-                 std::back_inserter(t_shape),
-                 [](int s) -> dataset::dsize_t {return static_cast<dataset::dsize_t>(s);});
-  dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), MSTypeToDEType(data_type), &this->tensor_impl_);
+  std::vector<dataset::dsize_t> t_shape;
+  t_shape.reserve(shape.size());
+  std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape),
+                 [](int s) -> dataset::dsize_t { return static_cast<dataset::dsize_t>(s); });
+  dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), MSTypeToDEType(data_type), &this->tensor_impl_);
}
DETensor::DETensor(std::shared_ptr<dataset::Tensor> tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); }
MSTensor *DETensor::ConvertToLiteTensor() {
  // static MSTensor::CreateTensor is only for the LiteTensor
  MSTensor *tensor = MSTensor::CreateTensor(this->data_type(), this->shape());
  MS_ASSERT(tensor->Size() == this->Size());
  memcpy_s(tensor->MutableData(), tensor->Size(), this->MutableData(), this->Size());
  return tensor;
}
std::shared_ptr<dataset::Tensor> DETensor::tensor() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return this->tensor_impl_;
}
TypeId DETensor::data_type() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return DETypeToMSType(this->tensor_impl_->type());
}
TypeId DETensor::set_data_type(TypeId data_type) {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  if (data_type != this->data_type()) {
    std::shared_ptr<dataset::Tensor> temp;
    dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type),
                                      this->tensor_impl_->GetBuffer(), &temp);
    this->tensor_impl_ = temp;
  }
  return data_type;
}
std::vector<int> DETensor::shape() const {
-  MS_ASSERT(this->tensor_impl_ != nullptr);
-  std::vector<dataset::dsize_t> t_shape = this->tensor_impl_->shape().AsVector();
-  std::vector<int> shape;
-  shape.reserve(t_shape.size());
-  std::transform(t_shape.begin(), t_shape.end(),
-                 std::back_inserter(shape),
-                 [](dataset::dsize_t s) -> int {return static_cast<int>(s);});
-  return shape;
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  std::vector<dataset::dsize_t> t_shape = this->tensor_impl_->shape().AsVector();
+  std::vector<int> shape;
+  shape.reserve(t_shape.size());
+  std::transform(t_shape.begin(), t_shape.end(), std::back_inserter(shape),
+                 [](dataset::dsize_t s) -> int { return static_cast<int>(s); });
+  return shape;
}
size_t DETensor::set_shape(const std::vector<int> &shape) {
-  MS_ASSERT(this->tensor_impl_ != nullptr);
-  std::vector<dataset::dsize_t> t_shape;
-  t_shape.reserve(shape.size());
-  std::transform(shape.begin(), shape.end(),
-                 std::back_inserter(t_shape),
-                 [](int s) -> dataset::dsize_t {return static_cast<dataset::dsize_t>(s);});
-  dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape));
-  return shape.size();
+  MS_ASSERT(this->tensor_impl_ != nullptr);
+  std::vector<dataset::dsize_t> t_shape;
+  t_shape.reserve(shape.size());
+  std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape),
+                 [](int s) -> dataset::dsize_t { return static_cast<dataset::dsize_t>(s); });
+  dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape));
+  return shape.size();
}
int DETensor::DimensionSize(size_t index) const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  int dim_size = -1;
  auto shape = this->shape();
  if (index < shape.size()) {
    dim_size = shape[index];
  } else {
    MS_LOG(ERROR) << "Dimension index is wrong: " << index;
  }
  return dim_size;
}
int DETensor::ElementsNum() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  return this->tensor_impl_->Size();
}
std::size_t DETensor::hash() const {
  MS_ASSERT(this->tensor_impl_ != nullptr);
  auto shape = this->shape();
  std::size_t hash_value = std::hash<int>{}(SizeToInt(this->data_type()));
  hash_value = hash_combine(hash_value, std::hash<size_t>{}(shape.size()));
  // hash all elements may costly, so only take at most 4 elements into account based on
  // some experiments.
  for (size_t i = 0; (i < shape.size()) && (i < 4); ++i) {
    hash_value = hash_combine(hash_value, (std::hash<int>{}(shape[i])));
  }
  return hash_value;
}
size_t DETensor::Size() const {
......
@@ -49,7 +49,6 @@ std::shared_ptr<tensor::MSTensor> Execute::operator()(std::shared_ptr<tensor::MS
  return std::make_shared<tensor::DETensor>(std::move(de_output));
}
} // namespace api
} // namespace dataset
} // namespace mindspore
@@ -44,7 +44,6 @@ class Execute {
  std::shared_ptr<TensorOperation> op_;
};
} // namespace api
} // namespace dataset
} // namespace mindspore
......
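For orientation, here is a minimal usage sketch of the helpers touched by this commit (MSTypeToDEType, DETypeToMSType and DETensor::CreateTensor). Only the signatures visible in the diff above are relied on; the include paths and the assumption that CreateTensor is a static factory are illustrative guesses, not taken from this commit.

// Illustrative sketch only; the two header paths below are assumptions.
#include <vector>
#include "include/ms_tensor.h"      // assumed to declare mindspore::tensor::MSTensor
#include "src/dataset/de_tensor.h"  // assumed to declare mindspore::tensor::DETensor and the type helpers

int main() {
  // MSTypeToDEType / DETypeToMSType map between MindSpore TypeId and the dataset (DE)
  // type enum; a round trip returns the original id for every type listed in the switches.
  mindspore::dataset::DataType de_type = mindspore::tensor::MSTypeToDEType(mindspore::TypeId::kNumberTypeFloat32);
  mindspore::TypeId ms_type = mindspore::tensor::DETypeToMSType(de_type);  // kNumberTypeFloat32 again

  // DETensor::CreateTensor allocates an empty 2 x 3 float32 tensor and exposes it
  // through the MSTensor interface.
  mindspore::tensor::MSTensor *t = mindspore::tensor::DETensor::CreateTensor(ms_type, {2, 3});
  delete t;  // the factory returns a raw pointer allocated with new
  return 0;
}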