Commit 3c08fa63 authored by mxm

Fixed:

1. Delete useless code.
2. Add const to parameters that are not modified.
3. Check the return code when calling the safe function memcpy_s.
Parent 49da4e79
......@@ -541,6 +541,9 @@ class PConstant : public PBase<PConstant<T> > {
data_out[i] *= data_2[0];
}
} else {
if (in_data_2_size < out_data_size) {
MS_EXCEPTION(ValueError) << "in_data_2_size is smaller than out_data_size.";
}
for (int i = 0; i < out_data_size; i++) {
data_out[i] *= data_2[i];
}
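For reference, the new guard keeps the non-broadcast branch from reading past the end of data_2. A minimal standalone sketch of the same pattern, assuming plain float buffers and a standard exception in place of MS_EXCEPTION (MultiplyInPlace is a hypothetical name):

    #include <cstddef>
    #include <cstdio>
    #include <stdexcept>

    // Elementwise in-place multiply with the same guard as the patch above:
    // a single-element data_2 is broadcast; otherwise it must cover the output.
    void MultiplyInPlace(float *data_out, std::size_t out_size, const float *data_2, std::size_t in_2_size) {
      if (in_2_size == 1) {
        for (std::size_t i = 0; i < out_size; ++i) data_out[i] *= data_2[0];
      } else {
        if (in_2_size < out_size) throw std::out_of_range("in_data_2_size is smaller than out_data_size");
        for (std::size_t i = 0; i < out_size; ++i) data_out[i] *= data_2[i];
      }
    }

    int main() {
      float out[3] = {1.0f, 2.0f, 3.0f};
      float scale[1] = {2.0f};
      MultiplyInPlace(out, 3, scale, 1);                  // broadcast path
      std::printf("%g %g %g\n", out[0], out[1], out[2]);  // prints: 2 4 6
      return 0;
    }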
......@@ -595,33 +598,41 @@ class PConstant : public PBase<PConstant<T> > {
return nullptr;
}
void *data_out;
auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_3_type_ptr->type_id(), tensor_out_shape);
size_t mem_size = GetTypeByte(tensor_3_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum());
char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
int ret = 0;
void *data_out = nullptr;
if ((tensor_3_type_ptr->type_id() == TypeId::kNumberTypeFloat32) ||
(tensor_3_type_ptr->type_id() == TypeId::kNumberTypeFloat)) {
Multiply<float>(tensor_ptr_1->data_c(), tensor_ptr_1->DataSize(), tensor_ptr_2->data_c(),
tensor_ptr_2->DataSize(), &data_out, data_out_size);
ret = memcpy_s(data, mem_size, data_out, mem_size);
delete[] reinterpret_cast<float *>(data_out);
} else {
if (tensor_3_type_ptr->type_id() == TypeId::kNumberTypeFloat64) {
Multiply<double>(tensor_ptr_1->data_c(), tensor_ptr_1->DataSize(), tensor_ptr_2->data_c(),
tensor_ptr_2->DataSize(), &data_out, data_out_size);
ret = memcpy_s(data, mem_size, data_out, mem_size);
delete[] reinterpret_cast<double *>(data_out);
} else {
if ((tensor_3_type_ptr->type_id() == TypeId::kNumberTypeInt32) ||
(tensor_3_type_ptr->type_id() == TypeId::kNumberTypeInt)) {
Multiply<int>(tensor_ptr_1->data_c(), tensor_ptr_1->DataSize(), tensor_ptr_2->data_c(),
tensor_ptr_2->DataSize(), &data_out, data_out_size);
ret = memcpy_s(data, mem_size, data_out, mem_size);
delete[] reinterpret_cast<int *>(data_out);
} else {
// Unsupported data types
return nullptr;
}
}
}
auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_3_type_ptr->type_id(), tensor_out_shape);
size_t mem_size = GetTypeByte(tensor_3_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum());
char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
memcpy(data, data_out, mem_size);
if (ret != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno " << ret << ", source size " << mem_size << "dest size"
<< new_tensor_ptr->DataSize();
}
auto new_vnode = NewValueNode(new_tensor_ptr);
new_vnode->set_abstract(new_tensor_ptr->ToAbstract());
return new_vnode;
......
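The reordered block above allocates the output tensor before the type dispatch so that every branch copies into it with memcpy_s and records the return code, and the final check turns a failed copy into an exception instead of silently continuing. A minimal sketch of that check, assuming the securec library's memcpy_s (errno_t memcpy_s(void *dest, size_t destMax, const void *src, size_t count)); CopyChecked is a hypothetical name:

    #include <cstdio>

    #include "securec.h"  // assumed available; provides memcpy_s

    // Copy src into dest and verify the return code: memcpy_s reports
    // failure (null pointers, destMax too small) through its result.
    bool CopyChecked(void *dest, size_t dest_max, const void *src, size_t count) {
      int ret = memcpy_s(dest, dest_max, src, count);
      if (ret != 0) {
        std::fprintf(stderr, "memcpy_s error, errorno %d, source size %zu, dest size %zu\n",
                     ret, count, dest_max);
        return false;
      }
      return true;
    }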
......@@ -125,6 +125,7 @@ template <typename T>
class TensorDataImpl : public TensorData {
public:
explicit TensorDataImpl(const std::vector<int> &shape) : ndim_(shape.size()), data_size_(SizeOf(shape)) {}
~TensorDataImpl() = default;
TensorDataImpl(const std::vector<int> &shape, void *data, size_t data_len)
: ndim_(shape.size()), data_size_(SizeOf(shape)), data_(CopyData<T>(shape, data, data_len)) {}
......@@ -288,7 +289,7 @@ class TensorDataImpl : public TensorData {
};
template <typename... Args>
TensorDataPtr MakeTensorData(TypeId data_type, const std::vector<int> &shape, Args... args) {
TensorDataPtr MakeTensorData(TypeId data_type, const std::vector<int> &shape, const Args... args) {
switch (data_type) {
case kNumberTypeBool:
case kNumberTypeUInt8:
......
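The MakeTensorData change adds const to by-value parameters. That is invisible to callers (top-level const on a value parameter is not part of the signature) and only keeps the function body from reassigning its arguments. A small illustration with hypothetical names:

    #include <iostream>

    // const on a by-value parameter constrains the body, not the caller.
    int Scale(const int value, const int factor) {
      // value = 0;  // would now fail to compile
      return value * factor;
    }

    int main() {
      std::cout << Scale(3, 4) << std::endl;  // prints 12, same as without const
      return 0;
    }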
......@@ -99,14 +99,14 @@ void DFunctor::BackPropagateFv(const AnfNodePtr &fv, const AnfNodePtr &din) {
fv_adjoint = anfnode_to_adjoin_indirect_fv_.find(fv);
}
}
auto key = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint->second->k()});
fv_adjoint->second->RegisterKUser(key, 1);
auto node = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint->second->k()});
fv_adjoint->second->RegisterKUser(node, 1);
auto default_val = tape_->NewCNode({NewValueNode(prim::GetPythonOps("zeros_like")), fv_adjoint->second->k()});
fv_adjoint->second->RegisterKUser(default_val, 1);
auto dfv = tape_->NewCNode({NewValueNode(prim::kPrimEnvGetItem), din, key, default_val});
auto dfv = tape_->NewCNode({NewValueNode(prim::kPrimEnvGetItem), din, node, default_val});
MS_LOG(DEBUG) << "BackPropagateFv find adjoint in anfnode_to_adjoin_ or anfnode_to_adjoin_indirect_fv_ fv "
<< fv->func_graph()->ToString() << " " << fv->ToString() << ".";
MS_LOG(DEBUG) << "BackPropagateFv get item from " << din->ToString() << " key " << key->ToString() << ".";
MS_LOG(DEBUG) << "BackPropagateFv get item from " << din->ToString() << " key " << node->ToString() << ".";
fv_adjoint->second->AccumulateDout(dfv);
}
......@@ -279,13 +279,13 @@ AnfNodePtr DFunctor::AttachFvDoutToTape(const AnfNodePtr &grad_fv) {
if (fv_adjoint == anfnode_to_adjoin_.end()) {
MS_LOG(EXCEPTION) << "AttachFvDoutToTape fv adjoint does not exist " << fv->ToString() << ".";
}
auto key = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint->second->k()});
fv_adjoint->second->RegisterKUser(key, 1);
auto node = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint->second->k()});
fv_adjoint->second->RegisterKUser(node, 1);
auto sens = fv_adjoint->second->dout();
new_grad_fv = tape_->NewCNode({
NewValueNode(prim::kPrimEnvSetItem),
new_grad_fv,
key,
node,
sens,
});
fv_adjoint->second->RegisterDoutUser(new_grad_fv->cast<CNodePtr>(), 3);
......@@ -301,13 +301,13 @@ AnfNodePtr DFunctor::AttachIndirectFvDoutToTape(const AnfNodePtr &grad_fv) {
for (auto &fv_adjoint : anfnode_to_adjoin_indirect_fv_) {
MS_LOG(DEBUG) << "AttachIndirectFvDoutToTape backprop indirect fv " << fv_adjoint.first->ToString() << " "
<< primal_graph_->ToString() << ".";
auto key = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint.second->k()});
fv_adjoint.second->RegisterKUser(key, 1);
auto node = tape_->NewCNode({NewValueNode(prim::kPrimEmbed), fv_adjoint.second->k()});
fv_adjoint.second->RegisterKUser(node, 1);
auto sens = fv_adjoint.second->dout();
new_grad_fv = tape_->NewCNode({
NewValueNode(prim::kPrimEnvSetItem),
new_grad_fv,
key,
node,
sens,
});
fv_adjoint.second->RegisterDoutUser(new_grad_fv->cast<CNodePtr>(), 3);
......
......@@ -60,6 +60,9 @@ int AnfConverter::ValidateFileStr(const std::string &modelFile, std::string file
bool AnfConverter::ReadOnnxFromBinary(const std::string &modelFile, google::protobuf::Message *onnx_model) {
std::unique_ptr<char> onnx_file(new (std::nothrow) char[PATH_MAX]{0});
int fd = open(onnx_file.get(), O_RDONLY);
if (fd < 0) {
MS_LOG(EXCEPTION) << "failed to open file";
}
google::protobuf::io::FileInputStream input(fd);
google::protobuf::io::CodedInputStream code_input(&input);
code_input.SetTotalBytesLimit(INT_MAX, 536870912);
......@@ -85,7 +88,7 @@ std::shared_ptr<FuncGraph> AnfConverter::RunAnfConverter(const std::string &file
MS_LOG(ERROR) << "Trans data not support input format!";
} else {
modelFile = flagItem.substr(pos + 1);
std::cout << "input protobuf file path is: " << flagItem.substr(pos + 1) << std::endl;
std::cout << "input protobuf file path is: " << modelFile << std::endl;
}
if (ValidateFileStr(modelFile, ".pb") != 0) {
......
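The new fd check matters because google::protobuf::io::FileInputStream takes whatever descriptor it is given; a negative value from a failed open() has to be rejected first. A minimal sketch of the guard, assuming a POSIX environment and a hypothetical model.pb path:

    #include <fcntl.h>
    #include <unistd.h>

    #include <cstdio>

    int main() {
      const char *path = "model.pb";  // hypothetical file name
      int fd = open(path, O_RDONLY);
      if (fd < 0) {  // open() returns -1 on failure
        std::perror("failed to open file");
        return 1;
      }
      // ... hand fd to FileInputStream / CodedInputStream as above ...
      close(fd);
      return 0;
    }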
......@@ -119,7 +119,10 @@ bool MSANFModelParser::BuildParameterForFuncGraph(const ParameterPtr &node, cons
std::string initial_data = initialize_proto.raw_data();
auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c());
MS_EXCEPTION_IF_NULL(tensor_data_buf);
memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), initial_data.data(), initial_data.size());
auto ret = memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), initial_data.data(), initial_data.size());
if (ret != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno" << ret;
}
auto param_value = std::make_shared<ParamValue>();
MS_EXCEPTION_IF_NULL(param_value);
......@@ -249,7 +252,11 @@ bool MSANFModelParser::ObtainValueNodeInTensorForm(const std::string &value_node
tensor::TensorPtr tensor_info = std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[attr_tensor_type], shape);
const std::string &tensor_buf = attr_tensor.raw_data();
auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c());
memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), tensor_buf.data(), tensor_buf.size());
auto ret = memcpy_s(tensor_data_buf, tensor_info->data().nbytes(), tensor_buf.data(), tensor_buf.size());
if (ret != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno" << ret;
}
auto new_value_node = NewValueNode(MakeValue(tensor_info));
MS_EXCEPTION_IF_NULL(new_value_node);
auto tensor_abstract = tensor_info->ToAbstract();
......@@ -336,7 +343,6 @@ bool MSANFModelParser::GetAttrValueForValueNode(const std::string &ref_attr_name
MS_LOG(ERROR) << "parse ValueNode value don't support input of ref_attr_name";
return false;
}
return true;
}
bool MSANFModelParser::BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto) {
......
......@@ -32,7 +32,7 @@ using uint64 = uint64_t;
using float16 = Eigen::half;
class MSANFModelParser {
public:
MSANFModelParser() = default;
MSANFModelParser() : producer_name_(""), model_version_(0), ir_version_(0) {}
~MSANFModelParser() = default;
FuncGraphPtr Parse(const onnx::ModelProto &model_proto);
......
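Replacing the defaulted constructor with an explicit member-initializer list guarantees that producer_name_, model_version_, and ir_version_ start in a defined state; integral members without in-class initializers otherwise hold indeterminate values when the object is default-constructed on the stack. A minimal sketch of the pattern, with member types inferred from the initializers (Parser is a hypothetical stand-in):

    #include <string>

    class Parser {
     public:
      // Initialize every member explicitly; the std::string would default to
      // empty anyway, but the integral members would otherwise be indeterminate.
      Parser() : producer_name_(""), model_version_(0), ir_version_(0) {}

     private:
      std::string producer_name_;
      int model_version_;
      int ir_version_;
    };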