Commit fdb47860 authored by leonwanghui

Fix some typo errors in session and device module

Signed-off-by: leonwanghui <leon.wanghui@huawei.com>
Parent 2aae218f
......@@ -66,7 +66,7 @@ void CPUKernelRuntime::AssignValueNodeAddress(session::KernelGraph *kernel_graph
address->ptr_ = resource_manager_.MemMalloc(tensor_size);
if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(),
tensor->data_c(false))) {
MS_LOG(EXCEPTION) << "value node sync host to device failed!";
MS_LOG(EXCEPTION) << "Value node sync host to device failed!";
}
}
address->ref_count_ = INIT_NODE_REF;
......@@ -141,7 +141,7 @@ BaseRef CPUKernelRuntime::CreatTensorForOutput(const AnfNodePtr &input_node, siz
MS_EXCEPTION_IF_NULL(node);
size_t output_size = AnfAlgo::GetOutputTensorNum(node);
if (index >= output_size) {
MS_LOG(EXCEPTION) << "invalid input index " << index;
MS_LOG(EXCEPTION) << "Invalid input index " << index;
}
auto address = AnfAlgo::GetMutableOutputAddr(node, index);
MS_EXCEPTION_IF_NULL(address);
......@@ -157,7 +157,7 @@ BaseRef CPUKernelRuntime::CreatTensorForOutput(const AnfNodePtr &input_node, siz
type_id = kNumberTypeFloat32;
}
if (type_id != kNumberTypeInt32 && type_id != kNumberTypeFloat32) {
MS_LOG(EXCEPTION) << "check output type failed.";
MS_LOG(EXCEPTION) << "Check output type failed.";
}
tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, temp_shape);
MS_EXCEPTION_IF_NULL(tensor);
......@@ -181,7 +181,7 @@ void CPUKernelRuntime::BindInputOutput(const session::KernelGraph *kernel_graph,
// bind input ptr
auto &input_nodes = kernel_graph->inputs();
if (input_nodes.size() != inputs.size()) {
MS_LOG(EXCEPTION) << "input size not equal to input node size!";
MS_LOG(EXCEPTION) << "Input size not equal to input node size!";
}
std::unordered_map<AnfNode *, tensor::TensorPtr> input_map;
......@@ -203,7 +203,7 @@ void CPUKernelRuntime::BindInputOutput(const session::KernelGraph *kernel_graph,
address->ptr_ = resource_manager_.MemMalloc(tensor_size);
if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(),
tensor->data_c(false))) {
MS_LOG(EXCEPTION) << "parameter node sync host to device failed!";
MS_LOG(EXCEPTION) << "Parameter node sync host to device failed!";
}
tensor->set_dirty(true);
}
......
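The CPU-runtime hunks above all touch the same convention: allocate device memory, sync the host tensor over, and raise with a capitalized message when the copy fails. A minimal self-contained sketch of that pattern follows; the type and function names are hypothetical stand-ins, not the MindSpore API.

```cpp
#include <cstddef>
#include <cstring>
#include <stdexcept>
#include <vector>

// Hypothetical stand-in for a device address; on the CPU backend,
// "device" memory is just host memory owned by a resource manager.
struct DeviceAddressSketch {
  void *ptr_ = nullptr;
  size_t size_ = 0;

  // Copy nbytes from host_data into the device buffer; report failure
  // via a return code so the caller decides how loudly to fail.
  bool SyncHostToDevice(size_t nbytes, const void *host_data) const {
    if (ptr_ == nullptr || host_data == nullptr || nbytes > size_) {
      return false;
    }
    std::memcpy(ptr_, host_data, nbytes);
    return true;
  }
};

void BindTensor(DeviceAddressSketch *address, const std::vector<float> &host_tensor) {
  size_t nbytes = host_tensor.size() * sizeof(float);
  if (!address->SyncHostToDevice(nbytes, host_tensor.data())) {
    // same shape as: MS_LOG(EXCEPTION) << "Value node sync host to device failed!"
    throw std::runtime_error("Value node sync host to device failed!");
  }
}
```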
......@@ -44,7 +44,7 @@ void CPUResourceManager::MemPlan(const session::KernelGraph *graph) {
mem_size_ = graph_mem_size;
dynamic_malloc_ = false;
} else {
MS_LOG(INFO) << "switch to dynamic malloc";
MS_LOG(INFO) << "Switch to dynamic malloc";
dynamic_malloc_ = true;
}
}
......@@ -63,7 +63,7 @@ void *CPUResourceManager::MemMalloc(size_t mem_size) {
dynamic_mem_[ptr] = mem_size;
return ptr;
} else {
MS_LOG(EXCEPTION) << "malloc memory failed: size " << mem_size;
MS_LOG(EXCEPTION) << "Malloc memory failed: size " << mem_size;
}
}
......
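MemPlan/MemMalloc above implement a two-mode allocator: one planned arena when the graph's memory footprint is known up front, otherwise per-request mallocs tracked in dynamic_mem_. A rough sketch of the dynamic path, assuming the map exists only so every block can be released later (class name hypothetical):

```cpp
#include <cstdlib>
#include <stdexcept>
#include <string>
#include <unordered_map>

class ResourceManagerSketch {
 public:
  void *MemMalloc(size_t mem_size) {
    void *ptr = std::malloc(mem_size);
    if (ptr == nullptr) {
      throw std::runtime_error("Malloc memory failed: size " + std::to_string(mem_size));
    }
    dynamic_mem_[ptr] = mem_size;  // remember the block so MemFree can release it
    return ptr;
  }

  void MemFree(void *ptr) {
    auto it = dynamic_mem_.find(ptr);
    if (it != dynamic_mem_.end()) {
      dynamic_mem_.erase(it);
      std::free(ptr);
    }
  }

 private:
  std::unordered_map<void *, size_t> dynamic_mem_;
};
```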
......@@ -31,12 +31,12 @@ GraphId CPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList
auto graph_id = graph_sum_;
auto graph = ConstructKernelGraph(lst, outputs);
MS_EXCEPTION_IF_NULL(graph);
MS_LOG(INFO) << "set kernel info";
MS_LOG(INFO) << "Set kernel info";
SetKernelInfo(graph.get());
predictmodel::StepConvertGraph(graph);
MS_LOG(INFO) << "build kernel";
MS_LOG(INFO) << "Build kernel";
BuildKernel(graph.get());
MS_LOG(INFO) << "assign kernel address";
MS_LOG(INFO) << "Assign kernel address";
runtime_.AssignKernelAddress(graph.get());
return graph_id;
}
......@@ -44,18 +44,18 @@ GraphId CPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList
void CPUSession::RunGraph(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
auto &kernel_graph = graphs_[graph_id];
MS_EXCEPTION_IF_NULL(kernel_graph);
MS_LOG(INFO) << "bind input output address";
MS_LOG(INFO) << "Bind input output address";
runtime_.BindInputOutput(kernel_graph.get(), inputs, outputs);
MS_LOG(INFO) << "run graph start";
MS_LOG(INFO) << "Run graph start";
predictmodel::StepConvertWeight(inputs);
auto execution_order = kernel_graph->execution_order();
Reorder(&execution_order);
kernel_graph->set_execution_order(execution_order);
bool ret = runtime_.Run(kernel_graph.get());
if (!ret) {
MS_LOG(EXCEPTION) << "run graph failed";
MS_LOG(EXCEPTION) << "Run graph failed";
}
MS_LOG(INFO) << "run graph end";
MS_LOG(INFO) << "Run graph end";
}
void CPUSession::SetKernelInfo(const KernelGraph *kernel_graph) {
......
......@@ -49,7 +49,7 @@ void KernelAdjust::Reorder(const std::shared_ptr<session::KernelGraph> &kernel_g
std::vector<CNodePtr> momentum_list;
std::vector<CNodePtr> other_list;
for (const auto &cnode : origin_cnode_list) {
if (kOptOpeatorSet.find(AnfAlgo::GetCNodeName(cnode)) != kOptOpeatorSet.end()) {
if (kOptOperatorSet.find(AnfAlgo::GetCNodeName(cnode)) != kOptOperatorSet.end()) {
momentum_list.emplace_back(cnode);
} else {
other_list.emplace_back(cnode);
......@@ -118,7 +118,7 @@ void KernelAdjust::CreateSwitchOpParameters(const std::shared_ptr<session::Kerne
MS_EXCEPTION_IF_NULL(tensor_ptr);
mindspore::abstract::AbstractBasePtr parameter_abstract_ptr = tensor_ptr->ToAbstract();
if (parameter_abstract_ptr == nullptr) {
MS_LOG(EXCEPTION) << "create abstract brfore insert switch op failed!";
MS_LOG(EXCEPTION) << "create abstract before insert switch op failed!";
}
ParameterPtr loop_count = std::make_shared<Parameter>(kernel_graph_ptr);
......@@ -371,7 +371,7 @@ bool KernelAdjust::StepLoadCtrlInputs(const std::shared_ptr<session::Context> &c
auto tensor = inputs[i];
size_t deal_index = input_nodes.size() - input_ctrl_size + i;
if (deal_index >= input_nodes.size()) {
MS_LOG(EXCEPTION) << "deak_index[" << deal_index << "] outof range";
MS_LOG(EXCEPTION) << "deal_index[" << deal_index << "] out of range";
}
auto input_node = input_nodes[deal_index];
bool need_sync = false;
......@@ -439,7 +439,7 @@ void KernelAdjust::LoadSwitchInputs(std::vector<tensor::TensorPtr> *inputs) {
void KernelAdjust::Profiling(const std::shared_ptr<session::KernelGraph> &kernel_graph_ptr) {
if (!ascend::ProfilingManager::GetInstance().IsProfiling()) {
MS_LOG(INFO) << "no need to profiling";
MS_LOG(INFO) << "No need to profiling";
return;
}
ProfilingTraceInfo profiling_trace_info;
......@@ -452,10 +452,10 @@ void KernelAdjust::Profiling(const std::shared_ptr<session::KernelGraph> &kernel
void KernelAdjust::InsertProfilingKernel(const std::shared_ptr<session::KernelGraph> &kernel_graph_ptr,
const ProfilingTraceInfo &profiling_trace_info) {
MS_LOG(INFO) << "[profiling] insert profiling kernel start";
MS_LOG(INFO) << "[profiling] Insert profiling kernel start";
MS_EXCEPTION_IF_NULL(kernel_graph_ptr);
if (!profiling_trace_info.IsValid()) {
MS_LOG(WARNING) << "profiling trace point not found";
MS_LOG(WARNING) << "Profiling trace point not found";
return;
}
std::vector<CNodePtr> new_cnode_list;
......
......@@ -241,7 +241,7 @@ void KernelRuntime::AssignStaticMemoryInput(const session::KernelGraph *graph) {
auto output_size = AnfAlgo::GetOutputTensorNum(item);
for (size_t index = 0; index < output_size; index++) {
TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index);
// if graph output is a weight and doesn't link to any cnode,it's data type will be unkonwn
// if graph output is a weight and doesn't link to any cnode, it's data type will be unknown
if (output_type_id == kTypeUnknown) {
MS_LOG(WARNING) << "It is not suggested to use a lonely weight parameter as the output of graph";
output_type_id = AnfAlgo::GetOutputInferDataType(item, index);
......@@ -372,7 +372,7 @@ void KernelRuntime::AssignNodeOutputMem(int flag, const AnfNodePtr &node, int in
continue;
}
if (AnfAlgo::OutputAddrExist(node, i)) {
MS_LOG(INFO) << "already malloc index:" << i;
MS_LOG(INFO) << "Already malloc index:" << i;
continue;
}
auto ptr = CalDeviceMem(node, output_sizes[i], flag, i);
......@@ -392,7 +392,7 @@ void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const
MS_EXCEPTION_IF_NULL(node_value);
auto tensor = node_value->cast<TensorPtr>();
if (tensor == nullptr) {
MS_LOG(WARNING) << "tensor is null";
MS_LOG(WARNING) << "Tensor is null";
return;
}
size_t tensor_size = tensor->data().nbytes();
......@@ -595,7 +595,7 @@ void KernelRuntime::GenLaunchArgs(const mindspore::kernel::KernelMod &kernel_mod
void KernelRuntime::GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList *kernel_inputs) {
if (cnode->inputs().size() != 2) {
MS_LOG(EXCEPTION) << "atomic Addr clean Node Input nodes not equal 2.";
MS_LOG(EXCEPTION) << "Atomic Addr clean Node Input nodes not equal 2.";
}
auto pre_node = cnode->inputs()[1];
// set clean output address
......@@ -721,11 +721,11 @@ uint8_t *KernelRuntime::MallocDynamicMem(size_t size, bool communication_mem) {
bool KernelRuntime::LaunchKernel(const session::KernelGraph *graph) {
MS_EXCEPTION_IF_NULL(graph);
if (!LaunchKernelMod(*graph)) {
MS_LOG(ERROR) << "LaunchKernelMod failed.";
MS_LOG(ERROR) << "LaunchKernelMod failed!";
return false;
}
if (!SyncStream()) {
MS_LOG(ERROR) << "SyncStream failed.";
MS_LOG(ERROR) << "SyncStream failed!";
return false;
}
return true;
......
......@@ -67,7 +67,7 @@ KernelRuntime *KernelRuntimeManager::GetKernelRuntime(const std::string &device_
MS_EXCEPTION_IF_NULL(kernel_runtime);
runtime_map_[runtime_key] = kernel_runtime;
} else {
MS_LOG(EXCEPTION) << "no kernel runtime creator for " << device_name << " with device id " << device_id;
MS_LOG(EXCEPTION) << "No kernel runtime creator for " << device_name << " with device id " << device_id;
}
return kernel_runtime.get();
......
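GetKernelRuntime is a keyed factory: the first request for a (device name, device id) pair builds a runtime through a registered creator and caches it under a composite key, and an unregistered device name raises. A self-contained sketch of that registry shape, with simplified types and hypothetical names:

```cpp
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct KernelRuntime {
  virtual ~KernelRuntime() = default;
};

class RuntimeManagerSketch {
 public:
  using Creator = std::function<std::shared_ptr<KernelRuntime>()>;

  void Register(const std::string &device_name, Creator creator) {
    creators_[device_name] = std::move(creator);
  }

  KernelRuntime *GetKernelRuntime(const std::string &device_name, uint32_t device_id) {
    std::string runtime_key = device_name + "_" + std::to_string(device_id);
    auto iter = runtime_map_.find(runtime_key);
    if (iter != runtime_map_.end()) {
      return iter->second.get();  // reuse the cached runtime for this key
    }
    auto creator = creators_.find(device_name);
    if (creator == creators_.end()) {
      throw std::runtime_error("No kernel runtime creator for " + device_name);
    }
    auto runtime = creator->second();
    runtime_map_[runtime_key] = runtime;
    return runtime_map_[runtime_key].get();
  }

 private:
  std::map<std::string, Creator> creators_;
  std::map<std::string, std::shared_ptr<KernelRuntime>> runtime_map_;
};
```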
......@@ -65,7 +65,7 @@ KernelWithIndex AnfRuntimeAlgorithm::VisitKernel(const AnfNodePtr &anf_node, siz
return VisitKernel(node, 0);
} else if (IsPrimitive(input0, prim::kPrimTupleGetItem)) {
if (cnode->inputs().size() != kTupleGetItemInputSize) {
MS_LOG(EXCEPTION) << "the node tuple_get_item must have 2 inputs!";
MS_LOG(EXCEPTION) << "The node tuple_get_item must have 2 inputs!";
}
auto input2 = cnode->input(kInputNodeOutputIndexInTupleGetItem);
MS_EXCEPTION_IF_NULL(input2);
......@@ -102,7 +102,7 @@ KernelWithIndex AnfRuntimeAlgorithm::VisitKernelWithReturnType(const AnfNodePtr
MS_EXCEPTION_IF_NULL(input0);
if (IsPrimitive(input0, prim::kPrimTupleGetItem)) {
if (cnode->inputs().size() != kTupleGetItemInputSize) {
MS_LOG(EXCEPTION) << "the node tuple_get_item must have 2 inputs!";
MS_LOG(EXCEPTION) << "The node tuple_get_item must have 2 inputs!";
}
auto input2 = cnode->input(kInputNodeOutputIndexInTupleGetItem);
MS_EXCEPTION_IF_NULL(input2);
......@@ -188,7 +188,7 @@ std::string AnfRuntimeAlgorithm::GetNodeDebugString(const AnfNodePtr &node) {
void AnfRuntimeAlgorithm::SetNodeAttr(const std::string &key, const ValuePtr &value, const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
MS_LOG(EXCEPTION) << "only cnode has attr,but this anf is " << node->DebugString();
MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node->DebugString();
}
auto primitive = AnfAlgo::GetCNodePrimitive(node);
MS_EXCEPTION_IF_NULL(primitive);
......@@ -204,7 +204,7 @@ void AnfRuntimeAlgorithm::CopyNodeAttr(const std::string &old_key, const std::st
MS_EXCEPTION_IF_NULL(from);
MS_EXCEPTION_IF_NULL(to);
if (!from->isa<CNode>() || !to->isa<CNode>()) {
MS_LOG(EXCEPTION) << "only cnode has attr,but this from_anf is " << from->DebugString() << " ,to_node is "
MS_LOG(EXCEPTION) << "Only cnode has attr, but this from_anf is " << from->DebugString() << " ,to_node is "
<< to->DebugString();
}
auto from_primitive = AnfAlgo::GetCNodePrimitive(from);
......@@ -218,7 +218,7 @@ void AnfRuntimeAlgorithm::CopyNodeAttrs(const AnfNodePtr &from, const AnfNodePtr
MS_EXCEPTION_IF_NULL(from);
MS_EXCEPTION_IF_NULL(to);
if (!from->isa<CNode>() || !to->isa<CNode>()) {
MS_LOG(EXCEPTION) << "only cnode has attr,but this from_anf is " << from->DebugString() << ",to_node is "
MS_LOG(EXCEPTION) << "Only cnode has attr, but this from_anf is " << from->DebugString() << ",to_node is "
<< from->DebugString();
}
auto from_primitive = AnfAlgo::GetCNodePrimitive(from);
......@@ -231,7 +231,7 @@ void AnfRuntimeAlgorithm::CopyNodeAttrs(const AnfNodePtr &from, const AnfNodePtr
void AnfRuntimeAlgorithm::EraseNodeAttr(const std::string &key, const AnfNodePtr node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
MS_LOG(EXCEPTION) << "only cnode has attr,but this anf is " << node->DebugString();
MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node->DebugString();
}
auto primitive = AnfAlgo::GetCNodePrimitive(node);
MS_EXCEPTION_IF_NULL(primitive);
......@@ -241,7 +241,7 @@ void AnfRuntimeAlgorithm::EraseNodeAttr(const std::string &key, const AnfNodePtr
bool AnfRuntimeAlgorithm::HasNodeAttr(const std::string &key, const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
MS_LOG(WARNING) << "only cnode has attr,but this anf is " << node->DebugString();
MS_LOG(WARNING) << "Only cnode has attr, but this anf is " << node->DebugString();
return false;
}
auto primitive = AnfAlgo::GetCNodePrimitive(node);
......@@ -252,7 +252,7 @@ bool AnfRuntimeAlgorithm::HasNodeAttr(const std::string &key, const AnfNodePtr &
size_t AnfRuntimeAlgorithm::GetInputTensorNum(const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
MS_LOG(EXCEPTION) << "only cnode has real input,but this anf is " << node->DebugString();
MS_LOG(EXCEPTION) << "Only cnode has real input, but this anf is " << node->DebugString();
}
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
......@@ -404,7 +404,7 @@ std::vector<kernel::Axis> AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNode
MS_EXCEPTION_IF_NULL(build_info);
std::vector<kernel::Axis> result;
if (!build_info->GetInputReshapeType(input_idx, &result)) {
MS_LOG(EXCEPTION) << "filed to ge the node's[ " << node->DebugString() << "] reshape type !";
MS_LOG(EXCEPTION) << "Failed to get the node's[ " << node->DebugString() << "] reshape type !";
}
return result;
}
......@@ -417,7 +417,7 @@ std::vector<kernel::Axis> AnfRuntimeAlgorithm::GetOutputReshapeType(const AnfNod
MS_EXCEPTION_IF_NULL(build_info);
std::vector<kernel::Axis> result;
if (!build_info->GetOutputReshapeType(output_idx, &result)) {
MS_LOG(EXCEPTION) << "filed to ge the node's[ " << node->DebugString() << "] reshape type !";
MS_LOG(EXCEPTION) << "Failed to get the node's[ " << node->DebugString() << "] reshape type !";
}
return result;
}
......@@ -593,7 +593,7 @@ void AnfRuntimeAlgorithm::SetOutputAddr(const DeviceAddressPtr &addr, size_t out
auto kernel_info = node->kernel_info();
MS_EXCEPTION_IF_NULL(kernel_info);
if (!kernel_info->SetOutputAddr(addr, output_idx)) {
MS_LOG(EXCEPTION) << "node " << node->DebugString() << "set adr" << output_idx << " fail";
MS_LOG(EXCEPTION) << "Node " << node->DebugString() << "set adr" << output_idx << " fail";
}
}
......@@ -603,7 +603,7 @@ void AnfRuntimeAlgorithm::SetWorkspaceAddr(const DeviceAddressPtr &addr, size_t
auto kernel_info = node->kernel_info();
MS_EXCEPTION_IF_NULL(kernel_info);
if (!kernel_info->SetWorkspaceAddr(addr, output_idx)) {
MS_LOG(EXCEPTION) << "node " << node->DebugString() << "set adr" << output_idx << " fail";
MS_LOG(EXCEPTION) << "Node " << node->DebugString() << "set adr" << output_idx << " fail";
}
}
......@@ -614,7 +614,7 @@ DeviceAddress *AnfRuntimeAlgorithm::GetWorkspaceAddr(const AnfNodePtr &node, siz
MS_EXCEPTION_IF_NULL(kernel_info);
auto addr = kernel_info->GetWorkspaceAddr(output_idx);
if (addr == nullptr) {
MS_LOG(EXCEPTION) << "output_idx " << output_idx << " of node " << node->DebugString()
MS_LOG(EXCEPTION) << "Output_idx " << output_idx << " of node " << node->DebugString()
<< "] workspace addr is not exist";
}
return addr;
......@@ -625,7 +625,7 @@ void AnfRuntimeAlgorithm::SetOutputInferTypeAndShape(const std::vector<TypeId> &
const std::vector<std::vector<size_t>> &shapes, AnfNode *node) {
MS_EXCEPTION_IF_NULL(node);
if (types.size() != shapes.size()) {
MS_LOG(EXCEPTION) << "types size " << types.size() << "should be same with shapes size " << shapes.size();
MS_LOG(EXCEPTION) << "Types size " << types.size() << "should be same with shapes size " << shapes.size();
}
if (shapes.empty()) {
MS_LOG(EXCEPTION) << "Illegal empty output_types_shapes";
......@@ -636,7 +636,7 @@ void AnfRuntimeAlgorithm::SetOutputInferTypeAndShape(const std::vector<TypeId> &
auto abstract = std::make_shared<AbstractTensor>(TypeIdToType(types[0]), shape_int);
node->set_abstract(abstract);
} else {
// mutiple output handle
// multiple output handle
std::vector<AbstractBasePtr> abstract_list;
for (size_t i = 0; i < types.size(); ++i) {
std::vector<int> shape_int;
......@@ -647,12 +647,12 @@ void AnfRuntimeAlgorithm::SetOutputInferTypeAndShape(const std::vector<TypeId> &
node->set_abstract(abstract_tuple);
}
}
// copy a abstract of a node to another node
// copy an abstract of a node to another node
void AnfRuntimeAlgorithm::CopyAbstract(const AnfNodePtr &from_node, AnfNode *to_node) {
to_node->set_abstract(from_node->abstract());
}
// get KernelBuildType of node ,such as ATT,RT,FWK and so on
// get KernelBuildType of node, such as ATT,RT,FWK and so on
KernelType AnfRuntimeAlgorithm::GetKernelType(const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
auto kernel_info = node->kernel_info();
......@@ -846,7 +846,7 @@ size_t AnfRuntimeAlgorithm::GetRealInputIndex(const mindspore::AnfNodePtr &anf_n
auto find = spec_node_list.find(node_name);
if (find != spec_node_list.end()) {
ret = find->second[cur_index];
MS_LOG(INFO) << "real input index change to" << ret << ", node name:" << node_name;
MS_LOG(INFO) << "Real input index change to" << ret << ", node name:" << node_name;
}
}
return ret;
......
......@@ -61,7 +61,7 @@ class AnfRuntimeAlgorithm {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
std::string node_debug_log = node->DebugString();
MS_LOG(EXCEPTION) << "only cnode has attr,but this anf is " << node_debug_log.c_str();
MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node_debug_log.c_str();
}
auto primitive = GetCNodePrimitive(node);
MS_EXCEPTION_IF_NULL(primitive);
......@@ -105,7 +105,7 @@ class AnfRuntimeAlgorithm {
static TypeId GetOutputInferDataType(const AnfNodePtr &node, size_t output_idx);
// get output original data type from prev node,input_index is the input index of current node related to prev node
static TypeId GetPrevNodeOutputInferDataType(const AnfNodePtr &node, size_t input_idx);
// get output select data typpe of anf node
// get output select data type of anf node
static TypeId GetOutputDeviceDataType(const AnfNodePtr &node, size_t output_idx);
// get input select data type of anf node
static TypeId GetInputDeviceDataType(const AnfNodePtr &node, size_t input_idx);
......
......@@ -48,13 +48,14 @@ class AscendSession : public SessionBasic {
GraphId SetFinalGraphInput(const std::vector<AnfNodePtr> &args) override;
// set output of final graph
void SetFinalGraphOutput(const BaseRef &output) override;
// insert switch and set the relative acitve ops
// insert switch and set the relative active ops
void SwitchCompile(GraphId cond_g, GraphId true_g, GraphId false_g) override;
// set args of child graph.the arg maybe come from a output of other child graphs,or from final graph's parameter
// set args of child graph. the arg maybe come from a output of other child graphs,
// or from final graph's parameter
void SetChildGraphInput(GraphId g, const VectorRef &args) override;
// get graph id in child graphs by ME front anf node pointer
GraphId GetGraphIdByNode(const AnfNodePtr &front_anf) const override;
// get grpah id of final graph
// get graph id of final graph
GraphId GetFinalRunGraph() const override { return final_graph_id_; }
// insert active to graph
void SetActive(GraphId, GraphId) override;
......@@ -112,9 +113,9 @@ class AscendSession : public SessionBasic {
// key is final_graph_id,value is the graph types of child graphs
std::unordered_map<GraphId, std::vector<GraphType>> graph_order_types_;
// record condition graph of while
std::unordered_map<GraphId, GraphId> while_condtion_graphs_;
// record all conditons
std::unordered_map<GraphId, std::pair<GraphId, GraphId>> switchs_;
std::unordered_map<GraphId, GraphId> while_condition_graphs_;
// record all conditions
std::unordered_map<GraphId, std::pair<GraphId, GraphId>> switches_;
// final_graph_id is used in the situation where every root graph has its own session
GraphId final_graph_id_;
};
......
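The renamed members read as plain lookup tables: SwitchCompile presumably records the (true branch, false branch) targets under the condition graph's id, so a later pass can wire the active ops. A tiny sketch of that bookkeeping under those assumptions (struct and method bodies are illustrative, not the AscendSession implementation):

```cpp
#include <cstdint>
#include <unordered_map>
#include <utility>

using GraphId = uint32_t;

struct SwitchBookkeepingSketch {
  // condition graph -> (true branch, false branch)
  std::unordered_map<GraphId, std::pair<GraphId, GraphId>> switches_;
  // while loop graph -> its condition graph
  std::unordered_map<GraphId, GraphId> while_condition_graphs_;

  void SwitchCompile(GraphId cond_g, GraphId true_g, GraphId false_g) {
    switches_[cond_g] = {true_g, false_g};  // remember targets for a later wiring pass
  }

  bool HasSwitch(GraphId cond_g) const {
    return switches_.find(cond_g) != switches_.end();
  }
};
```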
......@@ -83,7 +83,7 @@ void GPUSession::Execute(const std::shared_ptr<KernelGraph> &kernel_graph) const
}
GraphId GPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) {
// Construct graph, if construct successs, graph_sum_ + 1
// Construct graph, if successfully, graph_sum_ + 1
auto graph_id = graph_sum_;
auto graph = ConstructKernelGraph(lst, outputs);
// Select kernel build info
......@@ -100,7 +100,7 @@ GraphId GPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList
auto execution_order = graph->execution_order();
Reorder(&execution_order);
graph->set_execution_order(execution_order);
// Alloc memeory, include static memory and dynamic memory
// Alloc memory, including static memory and dynamic memory
AllocateMemory(graph.get());
// Reset memory resource
auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
......
......@@ -34,7 +34,7 @@ void PushNoVisitedNode(const AnfNodePtr &node, std::queue<AnfNodePtr> *que,
if (visited_nodes->find(node) == visited_nodes->end()) {
que->push(node);
(void)visited_nodes->insert(node);
MS_LOG(DEBUG) << "push que:" << node->DebugString();
MS_LOG(DEBUG) << "Push que:" << node->DebugString();
}
}
} // namespace
......@@ -58,7 +58,7 @@ void KernelGraph::SetExecOrderByDefault() {
auto clear_output = [&zero_output_nodes, &allreduce_nodes, &visited_nodes, this](const AnfNodePtr &input) -> void {
if (node_output_num_[input] == 0 && visited_nodes.find(input) == visited_nodes.end()) {
MS_EXCEPTION_IF_NULL(input);
MS_LOG(DEBUG) << "clear output num:" << input->DebugString();
MS_LOG(DEBUG) << "Clear output num:" << input->DebugString();
(void)visited_nodes.insert(input);
if (input->isa<CNode>() && AnfAlgo::GetCNodeName(input) == kAllReduceOpName) {
allreduce_nodes.push(input);
......@@ -85,21 +85,21 @@ void KernelGraph::SetExecOrderByDefault() {
if (it == node_input_edges_.end()) {
// value nodes and parameters have no inputs, no need to print log
if (node->isa<CNode>()) {
MS_LOG(DEBUG) << "can not find node [" << node->DebugString() << "]";
MS_LOG(DEBUG) << "Can not find node [" << node->DebugString() << "]";
}
continue;
}
for (const auto &input_edge : it->second) {
if (node_output_num_.find(input_edge.first) == node_output_num_.end()) {
MS_EXCEPTION_IF_NULL(input_edge.first);
MS_LOG(EXCEPTION) << "can't find node[" << input_edge.first->DebugString() << "]";
MS_LOG(EXCEPTION) << "Can't find node[" << input_edge.first->DebugString() << "]";
}
MS_EXCEPTION_IF_NULL(input_edge.first);
MS_LOG(DEBUG) << "decrese input:" << input_edge.first->DebugString() << ",node:" << node->DebugString()
MS_LOG(DEBUG) << "Decrease input:" << input_edge.first->DebugString() << ",node:" << node->DebugString()
<< ",num: " << node_output_num_[input_edge.first] << ",decrease num:" << input_edge.second;
if (node_output_num_[input_edge.first] < input_edge.second) {
MS_LOG(EXCEPTION) << "input node:" << input_edge.first->DebugString() << ",node_output_num"
<< node_output_num_[input_edge.first] << "depend edege:" << input_edge.second;
MS_LOG(EXCEPTION) << "Input node:" << input_edge.first->DebugString() << ",node_output_num"
<< node_output_num_[input_edge.first] << "depend edge:" << input_edge.second;
}
node_output_num_[input_edge.first] = node_output_num_[input_edge.first] - input_edge.second;
clear_output(input_edge.first);
......@@ -120,20 +120,20 @@ void KernelGraph::CheckLoop() {
string str;
auto node_output_it = node_output_edges_.find(it.first);
if (node_output_it == node_output_edges_.end()) {
MS_LOG(EXCEPTION) << "can't find node [" << it.first->DebugString() << "]";
MS_LOG(EXCEPTION) << "Can't find node [" << it.first->DebugString() << "]";
}
for (const auto &output_edge : node_output_edges_[it.first]) {
MS_EXCEPTION_IF_NULL(output_edge.first);
str = str.append(output_edge.first->DebugString()).append("|");
}
if (it.second != 0) {
MS_LOG(WARNING) << "node:" << it.first->DebugString() << ",outputs:" << str << ",output num:" << it.second;
MS_LOG(WARNING) << "Node:" << it.first->DebugString() << ",outputs:" << str << ",output num:" << it.second;
none_zero_output[it.first] = it.second;
}
}
// if control depend and loop exit are not considered, an exception will be thrown
if (!none_zero_output.empty()) {
MS_LOG(EXCEPTION) << "nodes have loop,left node num:" << none_zero_output.size();
MS_LOG(EXCEPTION) << "Nodes have loop, left node num:" << none_zero_output.size();
}
}
......@@ -152,7 +152,7 @@ CNodePtr KernelGraph::NewCNode(const CNodePtr &cnode) {
MS_EXCEPTION_IF_NULL(cnode);
auto new_cnode = std::make_shared<CNode>(*cnode);
// if a cnode is not created from the front end, it won't be in the map, so we shouldn't update the map when replacing it
if (BakcendNodeExistInFrontBackendMap(cnode)) {
if (BackendNodeExistInFrontBackendMap(cnode)) {
FrontBackendMapUpdate(cnode, new_cnode);
}
AnfAlgo::SetGraphId(graph_id_, cnode.get());
......@@ -299,7 +299,7 @@ AnfNodePtr KernelGraph::GetBackendAnfByFrontAnf(const AnfNodePtr &front_anf) {
return front_backend_anf_map_[front_anf];
}
bool KernelGraph::BakcendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf) {
bool KernelGraph::BackendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf) {
return backend_front_anf_map_.find(backend_anf) != backend_front_anf_map_.end();
}
......@@ -317,9 +317,9 @@ void KernelGraph::TensorValueNodeMapAdd(const tensor::TensorPtr &tensor, const V
}
void KernelGraph::AddDependEdge(const AnfNodePtr &node, const AnfNodePtr &input, size_t depend_edge_num) {
MS_LOG(DEBUG) << "input:" << input->DebugString() << ", node:" << node->DebugString() << ",num:" << depend_edge_num;
MS_LOG(DEBUG) << "Input:" << input->DebugString() << ", node:" << node->DebugString() << ",num:" << depend_edge_num;
auto output_depend_edge = std::pair<AnfNodePtr, size_t>(node, depend_edge_num);
// add output depend eddge of input
// add output depend edge of input
auto output_it = node_output_edges_.find(input);
if (output_it == node_output_edges_.end()) {
node_output_edges_[input] = std::vector<std::pair<AnfNodePtr, size_t>>{output_depend_edge};
......@@ -346,7 +346,7 @@ std::vector<AnfNodePtr> KernelGraph::GetOutputNodes(const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
auto it = node_output_edges_.find(node);
if (it == node_output_edges_.end()) {
MS_LOG(EXCEPTION) << "can'f find node[" << node->DebugString() << "]";
MS_LOG(EXCEPTION) << "Can't find node[" << node->DebugString() << "]";
}
std::vector<AnfNodePtr> output_nodes;
auto trans = [](const std::pair<AnfNodePtr, size_t> &pair) -> AnfNodePtr { return pair.first; };
......@@ -372,7 +372,7 @@ void KernelGraph::UpdateControlDependRelations(const std::vector<AnfNodePtr> &de
MS_EXCEPTION_IF_NULL(depend_node);
std::vector<AnfNodePtr> prior_nodes = {prior_node};
std::vector<AnfNodePtr> depend_nodes = {depend_node};
MS_LOG(INFO) << "prior node[" << prior_node->DebugString() << "],depend node[" << depend_node->DebugString()
MS_LOG(INFO) << "Prior node[" << prior_node->DebugString() << "],depend node[" << depend_node->DebugString()
<< "],depend_mode=[" << AnfAlgo::GetNodeAttr<int>(cnode, "depend_mode") << "]";
if (prior_node->isa<Parameter>()) {
prior_nodes = GetOutputNodes(prior_node);
......@@ -384,7 +384,7 @@ void KernelGraph::UpdateControlDependRelations(const std::vector<AnfNodePtr> &de
for (auto &second_node : depend_nodes) {
MS_EXCEPTION_IF_NULL(first_node);
MS_EXCEPTION_IF_NULL(second_node);
MS_LOG(INFO) << "add first node:" << first_node->DebugString() << ",second node:" << second_node->DebugString();
MS_LOG(INFO) << "Add first node:" << first_node->DebugString() << ",second node:" << second_node->DebugString();
AddDependEdge(second_node, first_node, 1);
}
}
......@@ -437,18 +437,18 @@ void KernelGraph::BfsToUpdateNodeOutput() {
MS_EXCEPTION_IF_NULL(cnode);
// handle data links
for (const auto &input : cnode->inputs()) {
size_t dpend_edge_num = 1;
size_t depend_edge_num = 1;
// handle control depend, all inputs of control depend have no depend edge
if (HandleControlDependNode(input, &que, &visited_nodes)) {
control_depends.push_back(input);
dpend_edge_num = 0;
depend_edge_num = 0;
}
// the 2nd input of depend has no depend edge
if (AnfAlgo::CheckPrimitiveType(node, prim::kPrimDepend) && input == cnode->input(kDependAttachNodeIndex)) {
dpend_edge_num = 0;
depend_edge_num = 0;
}
PushNoVisitedNode(input, &que, &visited_nodes);
AddDependEdge(node, input, dpend_edge_num);
AddDependEdge(node, input, depend_edge_num);
}
}
UpdateControlDependRelations(control_depends);
......
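SetExecOrderByDefault, which these hunks touch, is a reference-counting topological sort: every node starts with its output-edge count, zero-output sinks seed the queue, and each visit decrements the counts of its inputs (the loop check then flags any count that never reaches zero). A minimal sketch of the same scheme over a generic DAG with integer node ids, not the KernelGraph code itself:

```cpp
#include <algorithm>
#include <queue>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

// inputs_of[n] lists the inputs of node n. Walk backwards from the
// zero-output sinks, then reverse so producers precede consumers.
std::vector<int> ExecOrderSketch(int num_nodes,
                                 const std::unordered_map<int, std::vector<int>> &inputs_of) {
  std::unordered_map<int, int> output_num;
  for (int n = 0; n < num_nodes; ++n) output_num[n] = 0;
  for (const auto &kv : inputs_of) {
    for (int input : kv.second) ++output_num[input];  // count depend edges per producer
  }
  std::queue<int> que;
  for (int n = 0; n < num_nodes; ++n) {
    if (output_num[n] == 0) que.push(n);  // graph outputs / sinks
  }
  std::vector<int> order;
  while (!que.empty()) {
    int node = que.front();
    que.pop();
    order.push_back(node);
    auto it = inputs_of.find(node);
    if (it == inputs_of.end()) continue;  // parameters and value nodes have no inputs
    for (int input : it->second) {
      if (--output_num[input] == 0) que.push(input);  // all consumers visited
    }
  }
  if (order.size() != static_cast<size_t>(num_nodes)) {
    // mirrors the CheckLoop failure: leftover nonzero counts mean a cycle
    throw std::runtime_error("Nodes have loop, left node num: " +
                             std::to_string(num_nodes - static_cast<int>(order.size())));
  }
  std::reverse(order.begin(), order.end());  // inputs first, outputs last
  return order;
}
```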
......@@ -62,8 +62,8 @@ class KernelGraph : public FuncGraph {
void FrontBackendMapUpdate(const AnfNodePtr &old_backend_anf, const AnfNodePtr &new_backend_anf);
// get backend anf by front anf
AnfNodePtr GetBackendAnfByFrontAnf(const AnfNodePtr &front_anf);
// check backend node wheteher exist in map
bool BakcendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf);
// check backend node whether exist in map
bool BackendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf);
// get value node by tensor
ValueNodePtr GetValueNodeByTensor(const tensor::TensorPtr &tensor);
// add value node tensor relation map
......
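The renamed BackendNodeExistInFrontBackendMap queries one side of what is effectively a bidirectional map kept as two hash maps, which the update path must keep in sync. A sketch of that shape, assuming only that Node is hashable (the class and method names here are illustrative):

```cpp
#include <unordered_map>

template <typename Node>
class FrontBackendMapSketch {
 public:
  void Add(Node front, Node backend) {
    front_backend_anf_map_[front] = backend;
    backend_front_anf_map_[backend] = front;
  }

  // replace an old backend node with a new one, keeping both maps consistent
  void Update(Node old_backend, Node new_backend) {
    auto it = backend_front_anf_map_.find(old_backend);
    if (it == backend_front_anf_map_.end()) return;  // nothing to rewire
    Node front = it->second;
    backend_front_anf_map_.erase(it);
    backend_front_anf_map_[new_backend] = front;
    front_backend_anf_map_[front] = new_backend;
  }

  bool BackendNodeExists(Node backend) const {
    return backend_front_anf_map_.find(backend) != backend_front_anf_map_.end();
  }

 private:
  std::unordered_map<Node, Node> front_backend_anf_map_;
  std::unordered_map<Node, Node> backend_front_anf_map_;
};
```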
......@@ -281,7 +281,7 @@ std::vector<AnfNodePtr> CreateParameterFromTuple(const AnfNodePtr &node, KernelG
}
continue;
}
// creata single parameter if is a abstract real kernel
// create single parameter if is a abstract real kernel
create_parameter(out_node->abstract());
}
return parameters;
......@@ -413,7 +413,7 @@ CNodePtr SessionBasic::CreateNewCNode(const CNodePtr &cnode, KernelGraph *graph)
cnode_inputs.emplace_back(graph->GetBackendAnfByFrontAnf(anf));
continue;
} else if (anf->isa<ValueNode>() && !IsValueNode<FuncGraph>(anf)) {
// if input is a value ndoe,
// if input is a value node,
auto new_value_node = CreateNewValueNode(anf, graph);
if (new_value_node != nullptr) {
cnode_inputs.emplace_back(new_value_node);
......@@ -549,7 +549,7 @@ void SessionBasic::Reorder(std::vector<CNodePtr> *node_list) {
for (const auto &node : *node_list) {
MS_EXCEPTION_IF_NULL(node);
if (kOptOpeatorSet.find(AnfAlgo::GetCNodeName(node)) != kOptOpeatorSet.end()) {
if (kOptOperatorSet.find(AnfAlgo::GetCNodeName(node)) != kOptOperatorSet.end()) {
all_opt_list.emplace_back(node);
} else {
non_opt_list.emplace_back(node);
......@@ -599,7 +599,7 @@ void SessionBasic::ToTensorPtr(const OpRunInfo &op_run_info, std::vector<tensor:
MS_EXCEPTION_IF_NULL(inputs);
MS_EXCEPTION_IF_NULL(tensor_mask);
if (op_run_info.op_inputs.size() != op_run_info.inputs_mask.size()) {
MS_LOG(EXCEPTION) << "op input size " << op_run_info.op_inputs.size() << " should be equal to op input mask size "
MS_LOG(EXCEPTION) << "Op input size " << op_run_info.op_inputs.size() << " should be equal to op input mask size "
<< op_run_info.inputs_mask.size();
}
size_t input_num = op_run_info.op_inputs.size();
......@@ -636,7 +636,7 @@ CNodePtr SessionBasic::ConstructOutput(const AnfNodePtrList &outputs, const std:
if (backend_anf != nullptr) {
return backend_anf;
}
MS_LOG(EXCEPTION) << "did not find the node in the equiv map!";
MS_LOG(EXCEPTION) << "Can not find the node in the equiv map!";
};
output_args.push_back(NewValueNode(prim::kPrimMakeTuple));
(void)std::transform(outputs.begin(), outputs.end(), std::back_inserter(output_args),
......@@ -645,7 +645,7 @@ CNodePtr SessionBasic::ConstructOutput(const AnfNodePtrList &outputs, const std:
}
void SessionBasic::CreateOutputNode(const CNodePtr &cnode, const std::shared_ptr<KernelGraph> &graph) {
MS_LOG(INFO) << "start";
MS_LOG(INFO) << "Start!";
std::vector<AnfNodePtr> make_tuple_inputs;
make_tuple_inputs.push_back(NewValueNode(prim::kPrimMakeTuple));
if (AnfRuntimeAlgorithm::GetOutputTensorNum(cnode) > 1) {
......@@ -667,14 +667,14 @@ void SessionBasic::CreateOutputNode(const CNodePtr &cnode, const std::shared_ptr
// create output
auto g_output = graph->NewCNode(make_tuple_inputs);
graph->set_output(g_output);
// set graph manager,which now is only used to get valuendoes and hardware optimizing
// set graph manager,which now is only used to get valuenodes and hardware optimizing
MS_EXCEPTION_IF_NULL(context_);
FuncGraphManagerPtr manager = context_->manager();
if (manager != nullptr) {
manager->AddFuncGraph(graph);
graph->set_manager(manager);
}
MS_LOG(INFO) << "end";
MS_LOG(INFO) << "Finish!";
}
std::shared_ptr<KernelGraph> SessionBasic::ConstructSingleOpGraph(const OpRunInfo &op_run_info) {
......@@ -694,9 +694,9 @@ std::shared_ptr<KernelGraph> SessionBasic::ConstructSingleOpGraph(const OpRunInf
std::vector<tensor::TensorPtr> input_tensors;
std::vector<bool> tensors_mask;
ToTensorPtr(op_run_info, &input_tensors, &tensors_mask);
MS_LOG(INFO) << "input tensor size" << input_tensors.size();
MS_LOG(INFO) << "Input tensor size" << input_tensors.size();
if (input_tensors.size() != tensors_mask.size()) {
MS_LOG(EXCEPTION) << "input tensors size " << input_tensors.size() << " should be equal to tensors mask size "
MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size() << " should be equal to tensors mask size "
<< tensors_mask.size();
}
for (size_t i = 0; i < input_tensors.size(); ++i) {
......@@ -711,7 +711,7 @@ std::shared_ptr<KernelGraph> SessionBasic::ConstructSingleOpGraph(const OpRunInf
cnode->set_abstract(op_run_info.abstract);
// set const input to attr if value is not a tensor,such as scalar or tuple
RunOpConvertConstInputToAttr(op_run_info, cnode);
// set exectuion order
// set execution order
std::vector<CNodePtr> exe_order = {cnode};
graph->set_execution_order(exe_order);
// set output
......@@ -734,14 +734,14 @@ BaseRef SessionBasic::TransformBaseRefListToTuple(const BaseRef &base_ref) {
py::tuple tensor_tuple = py::cast<py::tuple>(obj);
output_tensors[i] = tensor_tuple;
} else {
MS_LOG(EXCEPTION) << "The output is not a base ref list or a tensor !";
MS_LOG(EXCEPTION) << "The output is not a base ref list or a tensor!";
}
}
return output_tensors; // turn tuple to py::object and store in PyObjectRef
} else if (utils::isa<tensor::TensorPtr>(base_ref)) {
return base_ref;
} else {
MS_LOG(EXCEPTION) << "The output is not a base ref list or a tensor !";
MS_LOG(EXCEPTION) << "The output is not a base ref list or a tensor!";
}
}
} // namespace session
......
......@@ -56,7 +56,7 @@ class SessionBasic {
virtual ~SessionBasic() { summary_callback_ = nullptr; }
virtual GraphId CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) = 0;
// build graph ,used to handle mupltiple child graphs
// build graph, used to handle multiple child graphs
virtual void BuildGraph(GraphId) {}
virtual void RunGraph(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) = 0;
......@@ -75,7 +75,7 @@ class SessionBasic {
virtual GraphId SetFinalGraphInput(const std::vector<AnfNodePtr> &) { return kInvalidGraphId; }
// set output of final graph
virtual void SetFinalGraphOutput(const BaseRef &) {}
// insert switch and set the relative acitve ops
// insert switch and set the relative active ops
virtual void SwitchCompile(GraphId, GraphId, GraphId) {}
// set args of child graph. the arg may come from an output of other child graphs, or from final graph's parameter
virtual void SetChildGraphInput(GraphId, const VectorRef &) {}
......
......@@ -186,7 +186,7 @@ const std::vector<std::set<std::string>> kShapeSupportFormatMap = {k1DSupportFor
k4DSupportFormat};
const std::set<std::string> kDefaultCompatibleFormat = {kOpFormat_ND, kOpFormat_NCHW, kOpFormat_NHWC, kOpFormat_HWCN};
const std::set<std::string> kOptOpeatorSet = {
const std::set<std::string> kOptOperatorSet = {
kMomentumOpName, kApplyMomentumOpName, kApplyAdadeltaOpName,
kApplyAdagradOpName, kApplyAdagradDAName, kApplyAdamOpName,
kApplyAdaMaxOpName, kApplyAddSignOpName, kApplyCenteredRMSPOpName,
......
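Both Reorder call sites above use kOptOperatorSet the same way: walk the execution order once and stably split optimizer kernels from the rest, presumably so weight-update ops run after everything else. A sketch of that pass with the node type reduced to a name string (not the SessionBasic code itself):

```cpp
#include <set>
#include <string>
#include <vector>

// Stable partition: optimizer kernels (momentum, adam, ...) are moved
// behind all other kernels without disturbing the relative order
// inside either group.
std::vector<std::string> ReorderSketch(const std::vector<std::string> &node_list,
                                       const std::set<std::string> &opt_operator_set) {
  std::vector<std::string> all_opt_list;
  std::vector<std::string> non_opt_list;
  for (const auto &name : node_list) {
    if (opt_operator_set.find(name) != opt_operator_set.end()) {
      all_opt_list.emplace_back(name);
    } else {
      non_opt_list.emplace_back(name);
    }
  }
  non_opt_list.insert(non_opt_list.end(), all_opt_list.begin(), all_opt_list.end());
  return non_opt_list;
}
```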