Commit 6f8863b6 authored by mindspore-ci-bot, committed by Gitee

!3198 Synchronize with the latest Ascend software suite (18 Jul 2020) and merge branches

Merge pull request !3198 from yanghaoran/code_sync_0718
......@@ -10,9 +10,9 @@
[submodule "third_party/protobuf"]
path = third_party/protobuf
url = https://github.com/protocolbuffers/protobuf.git
[submodule "graphengine"]
path = graphengine
url = https://gitee.com/mindspore/graphengine.git
[submodule "akg"]
path = akg
url = https://gitee.com/mindspore/akg.git
[submodule "graphengine"]
path = graphengine
url = https://gitee.com/mindspore/graphengine.git
......@@ -15,6 +15,7 @@ include(${GE_SOURCE_DIR}/cmake/external_libs/securec.cmake)
if (NOT ENABLE_D)
set(GE_PREBUILD_PATH ${GE_SOURCE_DIR}/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR})
find_library(slog libslog.so ${GE_PREBUILD_PATH})
find_library(error_manager liberror_manager.so ${GE_PREBUILD_PATH})
elseif (DEFINED ENV{D_LINK_PATH})
set(GE_LIB_PATH $ENV{D_LINK_PATH})
set(GE_SYS_ARCH "")
......
......@@ -156,6 +156,7 @@ if (NOT ENABLE_GE)
set(ASCEND_PATH /usr/local/Ascend)
endif ()
set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common)
set(ASCEND_FWK_PATH ${ASCEND_PATH}/fwkacllib/lib64)
install(
FILES
......@@ -164,6 +165,7 @@ if (NOT ENABLE_GE)
${CMAKE_BINARY_DIR}/graphengine/src/ge/ge_runtime/libge_runtime.so
${ASCEND_DRIVER_PATH}/libslog.so
${ASCEND_DRIVER_PATH}/libc_sec.so
${ASCEND_FWK_PATH}/liberror_manager.so
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore
)
......@@ -172,6 +174,7 @@ if (NOT ENABLE_GE)
FILES
${CMAKE_BINARY_DIR}/graphengine/src/common/graph/libgraph.so
${CMAKE_SOURCE_DIR}/graphengine/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR}/libslog.so
${CMAKE_SOURCE_DIR}/graphengine/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR}/liberror_manager.so
${CMAKE_SOURCE_DIR}/build/graphengine/libc_sec.so
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore
......
graphengine @ 103f2d10
Subproject commit 18cf690152add623ffbddfbbb4674d1b34484ca7 (previous)
Subproject commit 103f2d1019dc50d781d7a964551d9f1f50b3b009 (updated)
......@@ -40,7 +40,7 @@ def get_ddk_version():
with open(backup_ddk_info_file, "r") as fp:
ddk_version = json.load(fp)["VERSION"]
else:
ddk_version = "1.60.T17.B830"
ddk_version = "Ascend910"
return ddk_version
......
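For context, a hedged reconstruction of the full get_ddk_version around this hunk. Only the json.load branch and the new "Ascend910" fallback come from the diff; the existence check and the file name are assumptions for illustration.

import json
import os

def get_ddk_version(backup_ddk_info_file="ddk_info.json"):
    if os.path.exists(backup_ddk_info_file):      # assumed guard around the with-block
        with open(backup_ddk_info_file, "r") as fp:
            ddk_version = json.load(fp)["VERSION"]
    else:
        ddk_version = "Ascend910"                 # new fallback after this sync
    return ddk_version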
......@@ -185,7 +185,7 @@ if (ENABLE_GE)
else ()
target_link_libraries(mindspore ge_client)
endif ()
target_link_libraries(mindspore graph tsdclient)
target_link_libraries(mindspore graph tsdclient datatransfer)
endif()
if (ENABLE_D)
......@@ -216,8 +216,9 @@ if (ENABLE_D)
find_library(CCE_LIB cce ${ASCEND_RUNTIME_PATH})
find_library(RUNTIME_LIB runtime ${ASCEND_RUNTIME_PATH})
find_library(TSDCLIENT tsdclient HINTS ${ASCEND_RUNTIME_PATH} ${ASCEND_DRIVER_BACK_PATH})
find_library(DATATRANSFER datatransfer HINTS ${ASCEND_RUNTIME_PATH} ${ASCEND_DRIVER_BACK_PATH})
find_library(PROFILING msprof ${ASCEND_DRIVER_PATH})
target_link_libraries(mindspore ge_runtime ${CCE_LIB} ${RUNTIME_LIB} ${TSDCLIENT} ${PROFILING} ${HCCL} ${TSDCLIENT})
target_link_libraries(mindspore ge_runtime ${CCE_LIB} ${RUNTIME_LIB} ${TSDCLIENT} ${PROFILING} ${HCCL} ${DATATRANSFER})
endif()
# link protobuf
......
......@@ -292,7 +292,6 @@ bool TbeKernelSelect::TbeCheckSupported(
parallel::TOPK,
parallel::IN_TOPK,
parallel::PACK,
parallel::GATHER_ND,
parallel::UNSORTEF_SEGMENT_MIND,
parallel::UNSORTEF_SEGMENT_PRODD,
parallel::CAST};
......
......@@ -23,6 +23,7 @@
#include "backend/optimizer/ascend/ir_fission/batch_norm_grad_split.h"
#include "backend/optimizer/ascend/ir_fission/batch_norm_bert_fission.h"
#include "backend/optimizer/ascend/ir_fission/single_batch_norm_fission.h"
#include "backend/optimizer/ascend/ir_fission/tensor_scatter_update_fission.h"
#include "backend/optimizer/ascend/ir_fusion/fused_batch_norm_fusion.h"
#include "backend/optimizer/ascend/ir_fission/layer_norm_grad_split.h"
#include "backend/optimizer/pass/communication_op_fusion.h"
......@@ -154,6 +155,7 @@ void AddAscendBackendOptionalIRFusion(PassManager *ir_fusion_pm) {
ir_fusion_pm->AddPass(std::make_shared<BatchNormGrad2BNInferGrad>());
ir_fusion_pm->AddPass(std::make_shared<BatchNormGradInferFission>());
ir_fusion_pm->AddPass(std::make_shared<SplitFission>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterUpdateFission>());
ir_fusion_pm->AddPass(std::make_shared<GetitemTuple>());
ir_fusion_pm->AddPass(std::make_shared<PackFission>());
ir_fusion_pm->AddPass(std::make_shared<ConcatFission>());
......@@ -303,6 +305,7 @@ void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr<session::Kerne
ir_fusion_pm->AddPass(std::make_shared<TopKSplit>());
ir_fusion_pm->AddPass(std::make_shared<AddnFission>());
ir_fusion_pm->AddPass(std::make_shared<InsertPadForNMSWithMask>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterUpdateFission>());
optimizer->AddPassManager(ir_fusion_pm);
(void)optimizer->Optimize(kernel_graph);
......
......@@ -94,7 +94,7 @@ AnfNodePtr AddAdditionalToRefOutput(const FuncGraphPtr &func_graph, const CNodeP
origin_pair = FindRefOriginNode(input_node);
MS_EXCEPTION_IF_NULL(origin_pair.first);
if (!origin_pair.first->isa<Parameter>()) {
MS_LOG(EXCEPTION) << "ref op origin node is not parameter";
MS_LOG(WARNING) << "ref op origin node is not parameter";
}
MS_LOG(DEBUG) << "DealRefTransAndCast the node input index " << input_index << ", find origin op is "
<< origin_pair.first->DebugString() << ", index is " << origin_pair.second;
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/optimizer/ascend/ir_fission/tensor_scatter_update_fission.h"
#include <vector>
#include <memory>
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/optimizer/common/helper.h"
namespace mindspore {
namespace opt {
namespace {
CNodePtr CreateTensorMove(const FuncGraphPtr &graph, const CNodePtr &tensor_scatter_update) {
MS_EXCEPTION_IF_NULL(graph);
MS_EXCEPTION_IF_NULL(tensor_scatter_update);
std::vector<AnfNodePtr> inputs = {NewValueNode(std::make_shared<Primitive>(kTensorMoveOpName)),
tensor_scatter_update->input(1)};
auto tensor_move = graph->NewCNode(inputs);
MS_EXCEPTION_IF_NULL(tensor_move);
tensor_move->set_scope(tensor_scatter_update->scope());
tensor_move->set_abstract(tensor_scatter_update->abstract());
AnfAlgo::SetNodeAttr(kAttrUseLocking, MakeValue(false), tensor_move);
return tensor_move;
}
CNodePtr CreateScatterNdUpdate(const FuncGraphPtr &graph, const CNodePtr &tensor_scatter_update,
const CNodePtr &tensor_move) {
MS_EXCEPTION_IF_NULL(graph);
MS_EXCEPTION_IF_NULL(tensor_scatter_update);
MS_EXCEPTION_IF_NULL(tensor_move);
std::vector<AnfNodePtr> inputs = {NewValueNode(std::make_shared<Primitive>(kScatterNdUpdateOpName)), tensor_move,
tensor_scatter_update->input(2), tensor_scatter_update->input(3)};
auto scatter_nd_update = graph->NewCNode(inputs);
MS_EXCEPTION_IF_NULL(scatter_nd_update);
scatter_nd_update->set_scope(tensor_scatter_update->scope());
scatter_nd_update->set_abstract(tensor_scatter_update->abstract());
return scatter_nd_update;
}
} // namespace
const BaseRef TensorScatterUpdateFission::DefinePattern() const {
VarPtr Xs = std::make_shared<SeqVar>();
auto prim = std::make_shared<Primitive>(kTensorScatterUpdateOpName);
return VectorRef({prim, Xs});
}
const AnfNodePtr TensorScatterUpdateFission::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
const EquivPtr &) const {
MS_EXCEPTION_IF_NULL(func_graph);
MS_EXCEPTION_IF_NULL(node);
auto tensor_scatter_update = node->cast<CNodePtr>();
if (tensor_scatter_update == nullptr || tensor_scatter_update->size() != 4) {
return nullptr;
}
auto tensor_move = CreateTensorMove(func_graph, tensor_scatter_update);
return CreateScatterNdUpdate(func_graph, tensor_scatter_update, tensor_move);
}
} // namespace opt
} // namespace mindspore
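The pass splits the non-mutating TensorScatterUpdate into a TensorMove (copy) followed by an in-place ScatterNdUpdate, so the original input tensor is never written. A minimal NumPy sketch of the contract being preserved (reference semantics only; the function name is illustrative):

import numpy as np

def tensor_scatter_update_reference(x, indices, updates):
    # TensorMove: work on a copy so the original input stays untouched
    out = x.copy()
    # ScatterNdUpdate: write each update at its index, in place on the copy
    for idx, upd in zip(indices, updates):
        out[tuple(idx)] = upd
    return out

x = np.zeros((3, 3), dtype=np.float32)
print(tensor_scatter_update_reference(x, np.array([[0, 0], [2, 1]]),
                                      np.array([5.0, 7.0], dtype=np.float32)))
print(x)  # still all zeros: the copy-then-scatter split keeps x intact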
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_TENSOR_SCATTER_UPDATE_FISSION_H_
#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_TENSOR_SCATTER_UPDATE_FISSION_H_
#include "backend/optimizer/common/optimizer.h"
namespace mindspore {
namespace opt {
class TensorScatterUpdateFission : public PatternProcessPass {
public:
explicit TensorScatterUpdateFission(bool multigraph = true)
: PatternProcessPass("tensor_scatter_update_fission", multigraph) {}
~TensorScatterUpdateFission() override = default;
const BaseRef DefinePattern() const override;
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_TENSOR_SCATTER_UPDATE_FISSION_H_
This diff has been collapsed.
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_OPTIMIZER_IRPASS_REF_ELIMINATE_H_
#define MINDSPORE_CCSRC_OPTIMIZER_IRPASS_REF_ELIMINATE_H_
#include <memory>
#include "ir/pattern_matcher.h"
#include "optimizer/irpass.h"
#include "optimizer/optimizer.h"
namespace mindspore {
namespace opt {
namespace irpass {
// {prim::kPrimMakeRef, X, Y, Z} -> Y
class MakeRefEliminater : public OptimizerCaller {
public:
AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
PatternNode<AnfNodePtr> x, y, z;
MATCH_REPLACE(node, PPrimitive(prim::kPrimMakeRef, x, y, z), y);
return nullptr;
}
};
// {prim::kPrimGetRefValue, Parameter} -> Parameter
// {prim::kPrimGetRefOrigin, Parameter} -> Parameter
class GetRefParamEliminater : public OptimizerCaller {
public:
AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
PatternNode<AnfNodePtr> x;
MATCH_REPLACE_IF(node, PPrimitive(prim::kPrimGetRefValue, x), x, x.CheckFunc(IsParam, node));
MATCH_REPLACE_IF(node, PPrimitive(prim::kPrimGetRefOrigin, x), x, x.CheckFunc(IsParam, node));
return nullptr;
}
};
// {prim::kPrimGetRefKey, {prim::kPrimMakeRef, X, Y, Z}} -> X
// {prim::kPrimGetRefValue, {prim::kPrimMakeRef, X, Y, Z}} -> Y
// {prim::kPrimGetRefOrigin, {prim::kPrimMakeRef, X, Y, Z}} -> Z
class GetMakeRefEliminater : public OptimizerCaller {
public:
AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
PatternNode<AnfNodePtr> x, y, z;
MATCH_REPLACE(node, PPrimitive(prim::kPrimGetRefKey, PPrimitive(prim::kPrimMakeRef, x, y, z)), x);
MATCH_REPLACE(node, PPrimitive(prim::kPrimGetRefValue, PPrimitive(prim::kPrimMakeRef, x, y, z)), y);
MATCH_REPLACE(node, PPrimitive(prim::kPrimGetRefOrigin, PPrimitive(prim::kPrimMakeRef, x, y, z)), z);
return nullptr;
}
};
// IsValueNode<RefKey>
class ReplaceRefkeyByParam : public OptimizerCaller {
public:
AnfNodePtr operator()(const OptimizerPtr &optimizer, const AnfNodePtr &node) override {
auto RefKeyLambda = [&node, &optimizer]() -> AnfNodePtr {
auto refkey = GetValueNode<RefKeyPtr>(node);
auto resource = std::dynamic_pointer_cast<pipeline::Resource>(optimizer->resource());
MS_EXCEPTION_IF_NULL(resource);
auto top_graph = resource->func_graph();
MS_EXCEPTION_IF_NULL(top_graph);
for (const auto &tnode : top_graph->parameters()) {
auto para = tnode->cast<ParameterPtr>();
if (para != nullptr && para->name() == refkey->tag()) {
return para;
}
}
return nullptr;
};
PatternNode<AnfNodePtr> x;
MATCH_REPLACE_LAMBDA_IF(node, x, RefKeyLambda, x.CheckFunc(IsValueNode<RefKey>, node));
return nullptr;
}
};
} // namespace irpass
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_OPTIMIZER_IRPASS_REF_ELIMINATE_H_
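For readers unfamiliar with the pattern matcher, a minimal Python sketch of the MakeRef-based rewrites listed above, modeling nodes as nested ("op", *args) tuples. This models only those rules, not the Parameter/RefKey cases and not MindSpore's PatternNode API:

def eliminate_ref(node):
    if not isinstance(node, tuple):
        return node
    op, *args = node
    if op == "MakeRef":  # {MakeRef, X, Y, Z} -> Y
        return eliminate_ref(args[1])
    if args and isinstance(args[0], tuple) and args[0][0] == "MakeRef":
        _, x, y, z = args[0]
        if op == "GetRefKey":     # -> X
            return eliminate_ref(x)
        if op == "GetRefValue":   # -> Y
            return eliminate_ref(y)
        if op == "GetRefOrigin":  # -> Z
            return eliminate_ref(z)
    return (op,) + tuple(eliminate_ref(a) for a in args)

print(eliminate_ref(("GetRefValue", ("MakeRef", "key", "value", "origin"))))  # value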
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parallel/graph_util/generate_graph.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
using mindspore::tensor::Tensor;
namespace mindspore {
namespace parallel {
std::string GetOpPythonPath(const OperatorName &op_name) {
// almost all ops are defined in two main paths
const std::string ops_module = OP_PATH;
const std::string inner_ops_module = INNER_OP_PATH;
py::module mod = py::module::import(common::SafeCStr(ops_module));
py::module inner_mod = py::module::import(common::SafeCStr(inner_ops_module));
if (!py::hasattr(inner_mod, common::SafeCStr(op_name))) {
if (!py::hasattr(mod, common::SafeCStr(op_name))) {
MS_LOG(EXCEPTION) << ops_module << " or " << inner_ops_module << " does not have op: " << op_name;
}
return ops_module;
}
return inner_ops_module;
}
ValuePtr CreatOpInstance(const OperatorAttrs &attrs, const OperatorName &op_name, const std::string &instance_name) {
std::string op_path = GetOpPythonPath(op_name);
py::module mod = py::module::import(common::SafeCStr(op_path));
if (!py::hasattr(mod, common::SafeCStr(op_name))) {
MS_LOG(ERROR) << "Failure: op_path:" << op_path << " don't have attr " << op_name;
return nullptr;
}
std::vector<py::object> arg_list;
(void)std::transform(attrs.begin(), attrs.end(), std::back_inserter(arg_list),
[](const Attr &attr) { return ValuePtrToPyData(attr.second); });
py::object obj =
parse::python_adapter::CallPyFn(GET_OP_FUNCTION_PATH, GET_OP_FUNCTION, op_name, op_path, instance_name, arg_list);
ValuePtr op_instance = nullptr;
bool succ = parse::ConvertData(obj, &op_instance);
if (!succ) {
MS_LOG(ERROR) << "Failure:get Python op " << op_path << " from " << op_name << " fail";
return nullptr;
}
return op_instance;
}
AnfNodePtr ValuePtrToAnfNodePtr(const ValuePtr &value_ptr) {
auto value_node = NewValueNode(value_ptr);
MS_EXCEPTION_IF_NULL(value_node);
return value_node->cast<AnfNodePtr>();
}
static std::unordered_map<int32_t, AnfNodePtr> int_tensor_map = {};
AnfNodePtr CreateInt32Tensor(int32_t value) {
auto it = int_tensor_map.find(value);
if (it != int_tensor_map.end()) {
return it->second;
}
mindspore::tensor::TensorPtr tensor_ptr = std::make_shared<tensor::Tensor>(py::int_(value), kInt32);
ValuePtr value_ptr = MakeValue(tensor_ptr);
auto anf_node_ptr = ValuePtrToAnfNodePtr(value_ptr);
int_tensor_map[value] = anf_node_ptr;
return anf_node_ptr;
}
AnfNodePtr CreatTypeInt(int32_t value) {
ValuePtr value_ptr = MakeValue(std::make_shared<Int>(value));
return ValuePtrToAnfNodePtr(value_ptr);
}
AnfNodePtr CreatInt32Imm(int32_t value) {
ValuePtr value_ptr = MakeValue(std::make_shared<Int32Imm>(value));
return ValuePtrToAnfNodePtr(value_ptr);
}
std::string GetInstanceNameByCNode(const CNodePtr &cnode) {
PrimitivePtr prim = GetValueNode<PrimitivePtr>(cnode->input(0));
if (!prim) {
MS_LOG(EXCEPTION) << "The first input of the cnode is not a PrimitivePtr.";
}
std::string instance_name = prim->instance_name();
return HashInstanceName(instance_name);
}
std::string HashInstanceName(const std::string &name) {
auto using_hash_name = common::GetEnv(USING_HASH_NAME);
std::string instance_name;
if ((using_hash_name.empty()) || (using_hash_name == "on")) {
instance_name = HashName(name);
} else {
instance_name = name;
}
return instance_name;
}
Status GenerateGraph::Init(const CNodePtr &cnode) {
if (!cnode) {
MS_LOG(ERROR) << "Init:cnode is nullptr";
return FAILED;
}
cnode_ = cnode;
func_graph_ = cnode->func_graph();
if (!func_graph_) {
MS_LOG(ERROR) << "Init:func_graph_ is nullptr";
return FAILED;
}
manager_ = func_graph_->manager();
if (!manager_) {
MS_LOG(ERROR) << "Init:manager_ is nullptr";
return FAILED;
}
scope_ = cnode_->scope();
if (!scope_) {
MS_LOG(ERROR) << "Init:scope_ is nullptr";
return FAILED;
}
virtual_input_node_ = std::make_shared<AnfNode>(nullptr);
virtual_input_node_->set_scope(scope_);
instance_name_base_ = GetInstanceNameByCNode(cnode_);
name_idx_ = 0;
return SUCCESS;
}
AnfNodePtr GenerateGraph::PushBack(const std::vector<AnfNodePtr> &inputs) {
CNodePtr cnode = func_graph_->NewCNode(inputs);  // use NewCNode to create the AnfNode
MS_EXCEPTION_IF_NULL(cnode);
cnode->set_scope(scope_);
if (inputs.size() < 2) {
MS_LOG(EXCEPTION) << "inputs.size() must be more than 1";
}
(void)manager_->Replace(inputs.at(1), cnode);  // use Replace to substitute the new cnode for inputs[1]
auto new_anf_node_ptr = cnode->cast<AnfNodePtr>();
MS_EXCEPTION_IF_NULL(new_anf_node_ptr);
return new_anf_node_ptr;
}
AnfNodePtr GenerateGraph::NewOpInst(const OperatorName &op_name, const OperatorAttrs &attrs) {
name_idx_++;
ValuePtr pyop_instance = CreatOpInstance(attrs, op_name, instance_name_base_ + op_name + std::to_string(name_idx_));
if (pyop_instance == nullptr) {
MS_LOG(EXCEPTION) << "Failure:" << op_name << " CreatOpInstance failed";
}
auto value_node = NewValueNode(pyop_instance);
return value_node->cast<AnfNodePtr>();
}
AnfNodePtr GenerateGraph::NewOpInst(const OperatorName &op_name) {
name_idx_++;
OperatorAttrs attrs;
ValuePtr pyop_instance = CreatOpInstance(attrs, op_name, instance_name_base_ + std::to_string(name_idx_));
if (pyop_instance == nullptr) {
MS_LOG(EXCEPTION) << "Failure:" << op_name << " CreatOpInstance failed";
}
auto value_node = NewValueNode(pyop_instance);
return value_node->cast<AnfNodePtr>();
}
} // namespace parallel
} // namespace mindspore
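GetOpPythonPath prefers the inner ops module and falls back to the public one before raising; a hedged Python equivalent using stand-in modules (operator/math replace OP_PATH/INNER_OP_PATH purely to make the illustration runnable):

import importlib

def get_op_python_path(op_name, preferred="operator", fallback="math"):
    for module_name in (preferred, fallback):
        if hasattr(importlib.import_module(module_name), op_name):
            return module_name
    raise AttributeError(f"{preferred} or {fallback} does not have op: {op_name}")

print(get_op_python_path("add"))   # found in the preferred module
print(get_op_python_path("sqrt"))  # falls back to the second module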
This diff has been collapsed.
......@@ -192,21 +192,18 @@ bool MsContext::OpenTsd() {
}
MS_LOG(INFO) << "Device id = " << device_id << ", rank size = " << rank_size << ".";
TDT_StatusT status = tdt::TsdClient::GetInstance()->Open(device_id, rank_size);
if (status != TDT_OK) {
MS_LOG(EXCEPTION) << "Device " << device_id << " is occupied, open tsd failed, status = " << status << ".";
return false;
}
tsd_ref_++;
#ifdef ENABLE_TDTQUE
int32_t initStatus = tdt::TdtHostInit(device_id);
if (initStatus != TDT_OK_CODE) {
MS_LOG(EXCEPTION) << "Init tsd failed, status = " << initStatus << ".";
return false;
}
tdt_print_ = std::thread(TensorPrint());
#endif
TDT_StatusT status = tdt::TsdClient::GetInstance()->Open(device_id, rank_size);
if (status != TDT_OK) {
MS_LOG(EXCEPTION) << "Device " << device_id << " is occupied, open tsd failed, status = " << status << ".";
return false;
}
tsd_ref_++;
MS_LOG(INFO) << "Open and init tsd successful, tsd reference = " << tsd_ref_ << ".";
return true;
}
......
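This hunk appears to move the Open call and the tsd_ref_ increment ahead of the optional TDT queue init; a hedged Python sketch of the reference-counting contract, with the driver call stubbed out:

class TsdContext:
    def __init__(self):
        self.tsd_ref = 0  # mirrors tsd_ref_
    def open_tsd(self, device_id, rank_size):
        # stand-in for tdt::TsdClient::GetInstance()->Open(device_id, rank_size)
        if not self._driver_open(device_id, rank_size):
            raise RuntimeError(f"Device {device_id} is occupied, open tsd failed")
        self.tsd_ref += 1  # each successful open adds one reference
        return True
    def _driver_open(self, device_id, rank_size):
        return True  # stubbed: the real call talks to the Ascend driver

ctx = TsdContext()
ctx.open_tsd(0, 1); ctx.open_tsd(0, 1)
print(ctx.tsd_ref)  # 2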
......@@ -173,6 +173,9 @@ constexpr auto kSparseApplyProximalAdagradOpName = "SparseApplyProximalAdagrad";
constexpr auto kSparseApplyRMSPropOpName = "SparseApplyRMSProp";
constexpr auto kSparseApplyAdadeltaOpName = "SparseApplyAdadelta";
constexpr auto kApplyAdamWithAmsgradOpName = "ApplyAdamWithAmsgrad";
constexpr auto kTensorMoveOpName = "TensorMove";
constexpr auto kTensorScatterUpdateOpName = "TensorScatterUpdate";
constexpr auto kScatterNdUpdateOpName = "ScatterNdUpdate";
constexpr auto kPushOpName = "Push";
constexpr auto kPullOpName = "Pull";
constexpr auto kEmbeddingLookupOpName = "EmbeddingLookup";
......@@ -236,6 +239,8 @@ constexpr auto kAttrNumSplit = "num_split";
constexpr auto kAttrOutputNum = "output_num";
constexpr auto kAttrSizeSplits = "size_splits";
constexpr auto kAttrOutputDefault = "output_default";
constexpr auto kAttrPrimitiveTarget = "primitive_target";
constexpr auto kAttrUseLocking = "use_locking";
constexpr auto kAttrReduceScatterFlag = "reduce_scatter_flag";
constexpr auto kAttrOffset = "offset";
constexpr auto kAttrPsKey = "ps_key";
......
This diff has been collapsed.
......@@ -283,6 +283,7 @@ class AvgPool1d(_PoolNd):
self.reduce_mean = P.ReduceMean(keep_dims=True)
self.slice = P.Slice()
self.expand = P.ExpandDims()
self.squeeze = P.Squeeze(2)
def construct(self, x):
_shape_check(self.shape(x))
......@@ -295,4 +296,5 @@ class AvgPool1d(_PoolNd):
else:
x = self.expand(x, 2)
x = self.avg_pool(x)
x = self.squeeze(x)
return x
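AvgPool1d lifts its 3-D (N, C, L) input to 4-D with ExpandDims(x, 2) so the 2-D pooling kernel can run, and the newly added Squeeze(2) drops that dummy axis again so callers get a 3-D result back. A NumPy shape walkthrough of that round trip, with the pooling itself stubbed by a stride-2 slice:

import numpy as np

x = np.ones((8, 16, 30), dtype=np.float32)  # (N, C, L) input
x4 = np.expand_dims(x, 2)                   # (8, 16, 1, 30): lift to NCHW
pooled = x4[..., ::2]                       # stand-in for avg_pool: (8, 16, 1, 15)
out = np.squeeze(pooled, 2)                 # (8, 16, 15): Squeeze(2) restores 3-D
print(x4.shape, pooled.shape, out.shape)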
......@@ -393,7 +393,6 @@ class Optimizer(Cell):
current_dynamic_lr = self.gather(self.learning_rate[i], self.global_step, 0)
lr += (current_dynamic_lr,)
F.control_depend(lr, self.assignadd(self.global_step, 1))
else:
lr = self.learning_rate
if self.dynamic_lr:
......
......@@ -15,7 +15,7 @@
"""grad impl."""
from . import grad_array_ops, grad_comm_ops, grad_debug_ops, grad_implementations, \
grad_math_ops, grad_nn_ops, grad_other_ops, grad_quant_ops
grad_inner_ops, grad_math_ops, grad_nn_ops, grad_other_ops, grad_quant_ops
from .grad_base import get_bprop_fn
__all__ = ['get_bprop_fn']
......@@ -211,6 +211,25 @@ def get_bprop_embedding_lookup(self):
return bprop_sparse
@bprop_getters.register(P.EmbeddingLookup)
def get_bprop_embedding_look_up(self):
"""Generate bprop for EmbeddingLookup"""
sub_op = P.Sub()
reshape_op = P.Reshape()
def bprop(x, indices, offset, out, dout):
x_shp = shape_op(x)
new_indices = sub_op(indices, offset)
# Reshape the 'new_indices'
new_indices_shape_changed = (size_op(new_indices),)
new_indices = reshape_op(new_indices, new_indices_shape_changed)
actual_dout_shape_changed = new_indices_shape_changed
if len(x_shp) > 1:
actual_dout_shape_changed += x_shp[1:]
actual_dout = reshape_op(dout, actual_dout_shape_changed)
return (new_indices, actual_dout, x_shp), zeros_like(indices), zeros_like(offset)
return bprop
@bprop_getters.register(P.Transpose)
def get_bprop_transpose(self):
"""Generate bprop for Transpose"""
......
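The new EmbeddingLookup bprop shifts indices by offset, flattens them, and reshapes dout so each gradient row pairs with one looked-up index; the triple (new_indices, actual_dout, x_shp) is the sparse gradient for the params input. A hedged NumPy rendering of just that reshaping (the zeros_like grads for indices and offset are omitted):

import numpy as np

def embedding_lookup_x_grad(x_shape, indices, offset, dout):
    new_indices = (indices - offset).reshape(-1)     # sub_op, then reshape_op
    rows = (new_indices.size,) + tuple(x_shape[1:])  # one grad row per index
    actual_dout = dout.reshape(rows)
    return new_indices, actual_dout, x_shape

g = embedding_lookup_x_grad((10, 4), np.array([[3, 5]]), 2,
                            np.ones((1, 2, 4), dtype=np.float32))
print(g[0], g[1].shape)  # [1 3] (2, 4)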
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""array_ops"""
from .. import operations as P
from ..operations import _grad_ops as G
from ..operations import _inner_ops as inner
from ..composite.multitype_ops.zeros_like_impl import zeros_like
from .grad_base import bprop_getters
@bprop_getters.register(inner.StridedSliceAICPU)
def get_bprop_strided_slice_aicpu(self):
"""Generate bprop for StridedSlice"""
shape_op = P.Shape()
input_grad = G.StridedSliceGradAICPU(self.begin_mask,
self.end_mask,
self.ellipsis_mask,
self.new_axis_mask,
self.shrink_axis_mask)
def bprop(x, begin, end, strides, out, dout):
dx = input_grad(dout, shape_op(x), begin, end, strides)
return dx, zeros_like(begin), zeros_like(end), zeros_like(strides)
return bprop
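The backward pass scatters dout into a zero tensor shaped like the forward input, touching only the sliced positions. A mask-free NumPy reference of that contract (StridedSliceGradAICPU additionally honors the five mask attributes):

import numpy as np

def strided_slice_grad_reference(dout, x_shape, begin, end, strides):
    dx = np.zeros(x_shape, dtype=dout.dtype)
    region = tuple(slice(b, e, s) for b, e, s in zip(begin, end, strides))
    dx[region] = dout  # gradient flows only through the sliced elements
    return dx

print(strided_slice_grad_reference(np.ones((2, 2)), (4, 5),
                                   (0, 1), (4, 5), (2, 2)))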
......@@ -673,7 +673,7 @@ def get_bprop_mirror_pad(self):
mirror_pad_grad = G.MirrorPadGrad(self.mode)
def bprop(x, paddings, out, dout):
dx = mirror_pad_grad(dout, paddings, x)
dx = mirror_pad_grad(dout, paddings)
return (dx, zeros_like(paddings))
return bprop
......
......@@ -14,6 +14,7 @@
"""aicpu ops"""
from .init_data_set_queue import _init_data_set_queue_aicpu
from .embedding_lookup import _embedding_lookup_aicpu
from .dropout_genmask import _dropout_genmask_aicpu
from .get_next import _get_next_aicpu
from .print_tensor import _print_aicpu
......@@ -25,10 +26,20 @@ from .squeeze import _squeeze_aicpu
from .expand_dims import _expand_dims_aicpu
from .random_choice_with_mask import _random_choice_with_mask_aicpu
from .pack import _pack_aicpu
from .normal import _normal_aicpu
from .ctcloss import _ctcloss_aicpu
from .reverse_sequence import _reverse_sequence_aicpu
from .crop_and_resize import _crop_and_resize_aicpu
from .end_of_sequence import _end_of_sequence_aicpu
from .rnnt_loss import _rnnt_loss_aicpu
from .random_categorical import _random_categorical_aicpu
from .cast import _cast_aicpu
from .mirror_pad import _mirror_pad_aicpu
from .mirror_pad_grad import _mirror_pad_grad_aicpu
from .standard_normal import _standard_normal_aicpu
from .gamma import _gamma_aicpu
from .poisson import _poisson_aicpu
from .uniform_int import _uniform_int_aicpu
from .uniform_real import _uniform_real_aicpu
from .laplace import _laplace_aicpu
from .strided_slice import _strided_slice_aicpu
from .strided_slice_grad import _strided_slice_grad_aicpu
from .end_of_sequence import _end_of_sequence_aicpu
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cast op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
cast_op_info = AiCPURegOp("Cast") \
.fusion_type("OPAQUE") \
.input(0, "x", "required") \
.output(0, "y", "required") \
.dtype_format(DataType.U8_Default, DataType.U8_Default) \
.dtype_format(DataType.U8_Default, DataType.U16_Default) \
.dtype_format(DataType.U8_Default, DataType.U32_Default) \
.dtype_format(DataType.U8_Default, DataType.U64_Default) \
.dtype_format(DataType.U8_Default, DataType.I8_Default) \
.dtype_format(DataType.U8_Default, DataType.I16_Default) \
.dtype_format(DataType.U8_Default, DataType.I32_Default) \
.dtype_format(DataType.U8_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.F16_Default) \
.dtype_format(DataType.U8_Default, DataType.F32_Default) \
.dtype_format(DataType.U8_Default, DataType.F64_Default) \
.dtype_format(DataType.U8_Default, DataType.BOOL_Default) \
.dtype_format(DataType.U16_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.U16_Default) \
.dtype_format(DataType.U16_Default, DataType.U32_Default) \
.dtype_format(DataType.U16_Default, DataType.U64_Default) \
.dtype_format(DataType.U16_Default, DataType.I8_Default) \
.dtype_format(DataType.U16_Default, DataType.I16_Default) \
.dtype_format(DataType.U16_Default, DataType.I32_Default) \
.dtype_format(DataType.U16_Default, DataType.I64_Default) \
.dtype_format(DataType.U16_Default, DataType.F16_Default) \
.dtype_format(DataType.U16_Default, DataType.F32_Default) \
.dtype_format(DataType.U16_Default, DataType.F64_Default) \
.dtype_format(DataType.U16_Default, DataType.BOOL_Default) \
.dtype_format(DataType.U32_Default, DataType.U8_Default) \
.dtype_format(DataType.U32_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.U32_Default) \
.dtype_format(DataType.U32_Default, DataType.U64_Default) \
.dtype_format(DataType.U32_Default, DataType.I8_Default) \
.dtype_format(DataType.U32_Default, DataType.I16_Default) \
.dtype_format(DataType.U32_Default, DataType.I32_Default) \
.dtype_format(DataType.U32_Default, DataType.I64_Default) \
.dtype_format(DataType.U32_Default, DataType.F16_Default) \
.dtype_format(DataType.U32_Default, DataType.F32_Default) \
.dtype_format(DataType.U32_Default, DataType.F64_Default) \
.dtype_format(DataType.U32_Default, DataType.BOOL_Default) \
.dtype_format(DataType.U64_Default, DataType.U8_Default) \
.dtype_format(DataType.U64_Default, DataType.U16_Default) \
.dtype_format(DataType.U64_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.U64_Default) \
.dtype_format(DataType.U64_Default, DataType.I8_Default) \
.dtype_format(DataType.U64_Default, DataType.I16_Default) \
.dtype_format(DataType.U64_Default, DataType.I32_Default) \
.dtype_format(DataType.U64_Default, DataType.I64_Default) \
.dtype_format(DataType.U64_Default, DataType.F16_Default) \
.dtype_format(DataType.U64_Default, DataType.F32_Default) \
.dtype_format(DataType.U64_Default, DataType.F64_Default) \
.dtype_format(DataType.U64_Default, DataType.BOOL_Default) \
.dtype_format(DataType.I8_Default, DataType.U8_Default) \
.dtype_format(DataType.I8_Default, DataType.U16_Default) \
.dtype_format(DataType.I8_Default, DataType.U32_Default) \
.dtype_format(DataType.I8_Default, DataType.U64_Default) \
.dtype_format(DataType.I8_Default, DataType.I8_Default) \
.dtype_format(DataType.I8_Default, DataType.I16_Default) \
.dtype_format(DataType.I8_Default, DataType.I32_Default) \
.dtype_format(DataType.I8_Default, DataType.I64_Default) \
.dtype_format(DataType.I8_Default, DataType.F16_Default) \
.dtype_format(DataType.I8_Default, DataType.F32_Default) \
.dtype_format(DataType.I8_Default, DataType.F64_Default) \
.dtype_format(DataType.I8_Default, DataType.BOOL_Default) \
.dtype_format(DataType.I16_Default, DataType.U8_Default) \
.dtype_format(DataType.I16_Default, DataType.U16_Default) \
.dtype_format(DataType.I16_Default, DataType.U32_Default) \
.dtype_format(DataType.I16_Default, DataType.U64_Default) \
.dtype_format(DataType.I16_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I16_Default) \
.dtype_format(DataType.I16_Default, DataType.I32_Default) \
.dtype_format(DataType.I16_Default, DataType.I64_Default) \
.dtype_format(DataType.I16_Default, DataType.F16_Default) \
.dtype_format(DataType.I16_Default, DataType.F32_Default) \
.dtype_format(DataType.I16_Default, DataType.F64_Default) \
.dtype_format(DataType.I16_Default, DataType.BOOL_Default) \
.dtype_format(DataType.I32_Default, DataType.U8_Default) \
.dtype_format(DataType.I32_Default, DataType.U16_Default) \
.dtype_format(DataType.I32_Default, DataType.U32_Default) \
.dtype_format(DataType.I32_Default, DataType.U64_Default) \
.dtype_format(DataType.I32_Default, DataType.I8_Default) \
.dtype_format(DataType.I32_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default) \
.dtype_format(DataType.I32_Default, DataType.F16_Default) \
.dtype_format(DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_Default, DataType.F64_Default) \
.dtype_format(DataType.I32_Default, DataType.BOOL_Default) \
.dtype_format(DataType.I64_Default, DataType.U8_Default) \
.dtype_format(DataType.I64_Default, DataType.U16_Default) \
.dtype_format(DataType.I64_Default, DataType.U32_Default) \
.dtype_format(DataType.I64_Default, DataType.U64_Default) \
.dtype_format(DataType.I64_Default, DataType.I8_Default) \
.dtype_format(DataType.I64_Default, DataType.I16_Default) \
.dtype_format(DataType.I64_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default) \
.dtype_format(DataType.I64_Default, DataType.F16_Default) \
.dtype_format(DataType.I64_Default, DataType.F32_Default) \
.dtype_format(DataType.I64_Default, DataType.F64_Default) \
.dtype_format(DataType.I64_Default, DataType.BOOL_Default) \
.dtype_format(DataType.F16_Default, DataType.U8_Default) \
.dtype_format(DataType.F16_Default, DataType.U16_Default) \
.dtype_format(DataType.F16_Default, DataType.U32_Default) \
.dtype_format(DataType.F16_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.I8_Default) \
.dtype_format(DataType.F16_Default, DataType.I16_Default) \
.dtype_format(DataType.F16_Default, DataType.I32_Default) \
.dtype_format(DataType.F16_Default, DataType.I64_Default) \
.dtype_format(DataType.F16_Default, DataType.F16_Default) \
.dtype_format(DataType.F16_Default, DataType.F32_Default) \
.dtype_format(DataType.F16_Default, DataType.F64_Default) \
.dtype_format(DataType.F16_Default, DataType.BOOL_Default) \
.dtype_format(DataType.F32_Default, DataType.U8_Default) \
.dtype_format(DataType.F32_Default, DataType.U16_Default) \
.dtype_format(DataType.F32_Default, DataType.U32_Default) \
.dtype_format(DataType.F32_Default, DataType.U64_Default) \
.dtype_format(DataType.F32_Default, DataType.I8_Default) \
.dtype_format(DataType.F32_Default, DataType.I16_Default) \
.dtype_format(DataType.F32_Default, DataType.I32_Default) \
.dtype_format(DataType.F32_Default, DataType.I64_Default) \
.dtype_format(DataType.F32_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.F32_Default, DataType.F64_Default) \
.dtype_format(DataType.F32_Default, DataType.BOOL_Default) \
.dtype_format(DataType.F64_Default, DataType.U8_Default) \
.dtype_format(DataType.F64_Default, DataType.U16_Default) \
.dtype_format(DataType.F64_Default, DataType.U32_Default) \
.dtype_format(DataType.F64_Default, DataType.U64_Default) \
.dtype_format(DataType.F64_Default, DataType.I8_Default) \
.dtype_format(DataType.F64_Default, DataType.I16_Default) \
.dtype_format(DataType.F64_Default, DataType.I32_Default) \
.dtype_format(DataType.F64_Default, DataType.I64_Default) \
.dtype_format(DataType.F64_Default, DataType.F16_Default) \
.dtype_format(DataType.F64_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.F64_Default) \
.dtype_format(DataType.F64_Default, DataType.BOOL_Default) \
.dtype_format(DataType.BOOL_Default, DataType.U8_Default) \
.dtype_format(DataType.BOOL_Default, DataType.U16_Default) \
.dtype_format(DataType.BOOL_Default, DataType.U32_Default) \
.dtype_format(DataType.BOOL_Default, DataType.U64_Default) \
.dtype_format(DataType.BOOL_Default, DataType.I8_Default) \
.dtype_format(DataType.BOOL_Default, DataType.I16_Default) \
.dtype_format(DataType.BOOL_Default, DataType.I32_Default) \
.dtype_format(DataType.BOOL_Default, DataType.I64_Default) \
.dtype_format(DataType.BOOL_Default, DataType.F16_Default) \
.dtype_format(DataType.BOOL_Default, DataType.F32_Default) \
.dtype_format(DataType.BOOL_Default, DataType.F64_Default) \
.dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \
.get_op_info()
@op_info_register(cast_op_info)
def _cast_aicpu():
"""Cast AiCPU register"""
return
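The Cast table above enumerates every pairing of the twelve supported dtypes; a hedged sketch that builds the same 12 x 12 registration programmatically, assuming the RegOp builder methods return self as the chained style above implies:

from mindspore.ops.op_info_register import AiCPURegOp, DataType

_DTYPES = ("U8", "U16", "U32", "U64", "I8", "I16", "I32", "I64",
           "F16", "F32", "F64", "BOOL")

_builder = AiCPURegOp("Cast") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .output(0, "y", "required")
for _src in _DTYPES:
    for _dst in _DTYPES:  # full cross product, matching the table above
        _builder = _builder.dtype_format(getattr(DataType, _src + "_Default"),
                                         getattr(DataType, _dst + "_Default"))
cast_op_info_generated = _builder.get_op_info()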
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""EmbeddingLookup op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
embeddingLookup_op_info = AiCPURegOp("EmbeddingLookup") \
.fusion_type("OPAQUE") \
.input(0, "params", "required") \
.input(1, "indices", "required") \
.input(2, "offset", "required") \
.output(0, "output", "required") \
.dtype_format(DataType.I8_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.F64_Default) \
.dtype_format(DataType.BOOL_Default, DataType.I32_Default, \
DataType.I32_Default, DataType.BOOL_Default) \
.dtype_format(DataType.I8_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.F64_Default) \
.dtype_format(DataType.BOOL_Default, DataType.I64_Default, \
DataType.I64_Default, DataType.BOOL_Default) \
.dtype_format(DataType.I8_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.F64_Default) \
.dtype_format(DataType.BOOL_Default, DataType.I64_Default, \
DataType.I32_Default, DataType.BOOL_Default) \
.get_op_info()
@op_info_register(embeddingLookup_op_info)
def _embedding_lookup_aicpu():
"""EmbeddingLookup AiCPU register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""RandomGamma op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
gamma_op_info = AiCPURegOp("Gamma") \
.fusion_type("OPAQUE") \
.input(0, "shape", "required") \
.input(1, "alpha", "required") \
.input(2, "beta", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW) \
.get_op_info()
@op_info_register(gamma_op_info)
def _gamma_aicpu():
"""RandomGamma AiCPU register"""
return
......@@ -13,21 +13,21 @@
# limitations under the License.
# ============================================================================
"""Normal op"""
"""RandomLaplace op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
normal_op_info = AiCPURegOp("Normal") \
laplace_op_info = AiCPURegOp("Laplace") \
.fusion_type("OPAQUE") \
.input(0, "shape", "required") \
.input(1, "mean", "required") \
.input(2, "stddev", "required") \
.output(0, "y", "required") \
.input(2, "lambda_param", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW) \
.get_op_info()
@op_info_register(normal_op_info)
def _normal_aicpu():
"""Normal AiCPU register"""
@op_info_register(laplace_op_info)
def _laplace_aicpu():
"""RandomLaplace AiCPU register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MirrorPad op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
mirror_pad_op_info = AiCPURegOp("MirrorPad") \
.fusion_type("OPAQUE") \
.input(0, "x", "required") \
.input(1, "paddings", "required") \
.output(0, "y", "required") \
.attr("mode", "str") \
.dtype_format(DataType.I8_Default, DataType.I32_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I32_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.I32_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.I32_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.I32_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.I32_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I32_Default, DataType.F64_Default) \
.dtype_format(DataType.I8_Default, DataType.I64_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I64_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.I64_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.I64_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.I64_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.I64_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.I64_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I64_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I64_Default, DataType.F64_Default) \
.get_op_info()
@op_info_register(mirror_pad_op_info)
def _mirror_pad_aicpu():
"""MirrorPad AiCPU register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MirrorPadGrad op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
mirror_pad_grad_op_info = AiCPURegOp("MirrorPadGrad") \
.fusion_type("OPAQUE") \
.input(0, "x", "required") \
.input(1, "paddings", "required") \
.output(0, "y", "required") \
.attr("mode", "str") \
.dtype_format(DataType.I8_Default, DataType.I32_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I32_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.I32_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.I32_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.I32_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.I32_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I32_Default, DataType.F64_Default) \
.dtype_format(DataType.I8_Default, DataType.I64_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I64_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.I64_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.I64_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.I64_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.I64_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.I64_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.I64_Default, DataType.F32_Default) \
.dtype_format(DataType.F64_Default, DataType.I64_Default, DataType.F64_Default) \
.get_op_info()
@op_info_register(mirror_pad_grad_op_info)
def _mirror_pad_grad_aicpu():
"""MirrorPadGrad AiCPU register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""RandomPoisson op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
poisson_op_info = AiCPURegOp("Poisson") \
.fusion_type("OPAQUE") \
.input(0, "shape", "required") \
.input(1, "mean", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.I32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW, DataType.I32_NCHW) \
.get_op_info()
@op_info_register(poisson_op_info)
def _poisson_aicpu():
"""RandomPoisson AiCPU register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""RandomNormal op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
normal_op_info = AiCPURegOp("StandardNormal") \
.fusion_type("OPAQUE") \
.input(0, "shape", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.attr("seed2", "int") \
.dtype_format(DataType.I32_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW) \
.get_op_info()
@op_info_register(normal_op_info)
def _standard_normal_aicpu():
"""RandomNormal AiCPU register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""StridedSlice op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
strided_slice_op_info = AiCPURegOp("StridedSliceAICPU") \
.fusion_type("OPAQUE") \
.input(0, "input", "required") \
.input(1, "begin", "required") \
.input(2, "end", "required") \
.input(3, "stride", "required") \
.output(0, "output", "required") \
.attr("begin_mask", "int") \
.attr("end_mask", "int") \
.attr("ellipsis_mask", "int") \
.attr("new_axis_mask", "int") \
.attr("shrink_axis_mask", "int") \
.dtype_format(DataType.F32_Default,
DataType.I32_Default,
DataType.I32_Default,
DataType.I32_Default,
DataType.F32_Default) \
.get_op_info()
@op_info_register(strided_slice_op_info)
def _strided_slice_aicpu():
"""StridedSlice AiCPU register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""StridedSliceGrad op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
strided_slice_grad_op_info = AiCPURegOp("StridedSliceGradAICPU") \
.fusion_type("OPAQUE") \
.input(0, "dy", "required") \
.input(1, "shape", "required") \
.input(2, "begin", "required") \
.input(3, "end", "required") \
.input(4, "stride", "required") \
.output(0, "output", "required") \
.attr("begin_mask", "int") \
.attr("end_mask", "int") \
.attr("ellipsis_mask", "int") \
.attr("new_axis_mask", "int") \
.attr("shrink_axis_mask", "int") \
.dtype_format(DataType.F32_Default,
DataType.I32_Default,
DataType.I32_Default,
DataType.I32_Default,
DataType.I32_Default,
DataType.F32_Default) \
.get_op_info()
@op_info_register(strided_slice_grad_op_info)
def _strided_slice_grad_aicpu():
"""StridedSliceGrad AiCPU register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""RandomUniformInt op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
uniform_int_op_info = AiCPURegOp("UniformInt") \
.fusion_type("OPAQUE") \
.input(0, "shape", "required") \
.input(1, "a", "required") \
.input(2, "b", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.I32_NCHW, DataType.I32_NCHW, DataType.I32_NCHW) \
.get_op_info()
@op_info_register(uniform_int_op_info)
def _uniform_int_aicpu():
"""RandomUniformInt AiCPU register"""
return
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""RandomUniformReal op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
uniform_real_op_info = AiCPURegOp("UniformReal") \
.fusion_type("OPAQUE") \
.input(0, "shape", "required") \
.input(1, "a", "required") \
.input(2, "b", "required") \
.output(0, "output", "required") \
.attr("seed", "int") \
.dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW) \
.get_op_info()
@op_info_register(uniform_real_op_info)
def _uniform_real_aicpu():
"""RandomUniformReal AiCPU register"""
return
......@@ -288,5 +288,6 @@ from .scatter_div import _scatter_div_tbe
from .mod import _mod_tbe
from .max_pool_grad_grad import _max_pool_grad_grad_tbe
from .max_pool_grad_grad_with_argmax import _max_pool_grad_grad_with_argmax_tbe
from .tensor_move import _tensor_move_tbe
from .population_count import _population_count_tbe
from .parallel_concat import _parallel_concat_tbe
......@@ -28,8 +28,11 @@ avg_pool_op_info = TBERegOp("AvgPool") \
.attr("padding", "required", "str", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "filter", False, "optional", "all") \
.input(2, "bias", False, "optional", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
.dtype_format(DataType.F16_5HD, DataType.F16_FracZ, DataType.F16_Default, DataType.F16_5HD) \
.dtype_format(DataType.I8_5HD, DataType.I8_C1HWNCoC0, DataType.I32_Default, DataType.I32_5HD) \
.get_op_info()
......
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorMove op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
tensor_move_op_info = TBERegOp("TensorMove") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("tensor_move.so") \
.compute_cost(10) \
.kernel_name("tensor_move") \
.partial_flag(True) \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.op_pattern("formatAgnostic") \
.dtype_format(DataType.I32_None, DataType.I32_None) \
.dtype_format(DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None) \
.dtype_format(DataType.I8_None, DataType.I8_None) \
.dtype_format(DataType.U8_None, DataType.U8_None) \
.dtype_format(DataType.BOOL_None, DataType.BOOL_None) \
.get_op_info()
@op_info_register(tensor_move_op_info)
def _tensor_move_tbe():
"""TensorMove TBE register"""
return
......@@ -27,8 +27,7 @@ from .array_ops import (Argmax, Argmin, Cast, Concat, Pack, Unpack,
Rank, Reshape, ResizeNearestNeighbor, ArgMinWithValue,
SameTypeShape, ScatterAdd, ScatterSub, ScatterMul, ScatterDiv, ScatterMax, ScatterMin,
ScatterUpdate, ScalarToArray, ScalarToTensor, ScatterNd, ScatterNdUpdate, Select,
Shape, Size, Slice, Split, TransShape,
ParallelConcat,
Shape, Size, Slice, Split, TransShape, ParallelConcat,
Squeeze, StridedSlice, Tile, TensorScatterUpdate,
Transpose, TruncatedNormal, TupleToArray, UnsortedSegmentMin, UnsortedSegmentProd,
UnsortedSegmentSum, SpaceToDepth, DepthToSpace, SpaceToBatch, BatchToSpace,
......@@ -55,7 +54,8 @@ from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, A
Sin, Sqrt, Rsqrt, BesselI0e, BesselI1e, TruncateDiv, TruncateMod,
Square, Sub, TensorAdd, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps, Tan)
from .random_ops import (RandomChoiceWithMask, Normal, RandomCategorical)
from .random_ops import (RandomChoiceWithMask, Normal, Gamma, Poisson, UniformInt, UniformReal,
RandomCategorical, Laplace)
from .nn_ops import (LSTM, SGD, Adam, SparseApplyAdam, SparseApplyLazyAdam, ApplyMomentum, BatchNorm,
BiasAdd, Conv2D,
DepthwiseConv2dNative,
......@@ -69,8 +69,7 @@ from .nn_ops import (LSTM, SGD, Adam, SparseApplyAdam, SparseApplyLazyAdam, Appl
MaxPoolWithArgmax, OneHot, Pad, MirrorPad, PReLU, ReLU, ReLU6, ReLUV2, HSwish, HSigmoid,
ResizeBilinear, Sigmoid,
SigmoidCrossEntropyWithLogits,
SmoothL1Loss, Softmax, Softsign, Softplus, LRN,
RNNTLoss,
SmoothL1Loss, Softmax, Softsign, Softplus, LRN, RNNTLoss,
SoftmaxCrossEntropyWithLogits, ROIAlign,
SparseSoftmaxCrossEntropyWithLogits, Tanh,
TopK, BinaryCrossEntropy, SparseApplyAdagrad, LARSUpdate, ApplyFtrl, SparseApplyFtrl,
......@@ -78,6 +77,8 @@ from .nn_ops import (LSTM, SGD, Adam, SparseApplyAdam, SparseApplyLazyAdam, Appl
ApplyAdaMax, ApplyAdadelta, ApplyAdagrad, ApplyAdagradV2,
ApplyAddSign, ApplyPowerSign, ApplyGradientDescent, ApplyProximalGradientDescent,
ApplyRMSProp, ApplyCenteredRMSProp, BasicLSTMCell, InTopK)
from . import _quant_ops
from ._quant_ops import *
from .other_ops import (Assign, IOU, BoundingBoxDecode, BoundingBoxEncode, PopulationCount,
CheckValid, MakeRefKey, Partial, Depend, CheckBprop, Push, Pull)
from .thor_ops import *
......@@ -135,6 +136,7 @@ __all__ = [
'OneHot',
'GatherV2',
'SparseGatherV2',
'EmbeddingLookup',
'Concat',
'Pack',
'Unpack',
......@@ -172,6 +174,11 @@ __all__ = [
'Tanh',
'RandomChoiceWithMask',
'Normal',
'Gamma',
'Poisson',
'UniformInt',
'UniformReal',
'Laplace',
'RandomCategorical',
'ResizeBilinear',
'ScalarSummary',
......@@ -320,6 +327,7 @@ __all__ = [
"ApplyCenteredRMSProp",
"SpaceToBatchND",
"BatchToSpaceND",
"ReverseSequence",
"SquareSumAll",
"BitwiseAnd",
"BitwiseOr",
......@@ -335,6 +343,7 @@ __all__ = [
"ApproximateEqual",
"InplaceUpdate",
"InTopK",
"CropAndResize",
"LRN",
"Mod",
"PopulationCount",
......
......@@ -1204,6 +1204,54 @@ class StridedSliceGrad(PrimitiveWithInfer):
'value': None}
class StridedSliceGradAICPU(PrimitiveWithInfer):
"""
Computes the gradient of the StridedSlice operation.
Args:
begin_mask (int): Start indexing the slice. Default: 0.
end_mask (int): End indexing the slice. Default: 0.
ellipsis_mask (int): An int32 mask. Default: 0.
new_axis_mask (int): An int32 mask. Default: 0.
shrink_axis_mask (int): An int32 mask. Default: 0.
Returns:
Tensor, has the same shape as the input.
"""
@prim_attr_register
def __init__(self,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0):
"""init StrideSliceGrad"""
validator.check_value_type('begin_mask', begin_mask, [int], self.name)
validator.check_value_type('end_mask', end_mask, [int], self.name)
validator.check_value_type('ellipsis_mask', ellipsis_mask, [int], self.name)
validator.check_value_type('new_axis_mask', new_axis_mask, [int], self.name)
validator.check_value_type('shrink_axis_mask', shrink_axis_mask, [int], self.name)
self.init_prim_io_names(inputs=['dy', 'shapex', 'begin', 'end', 'strides'], outputs=['output'])
def __infer__(self, dy, shapex, begin, end, strides):
args = {"dy": dy['dtype']}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
for idx, item in enumerate(shapex['value']):
validator.check_value_type("shapex[%d]" % idx, item, [int], self.name)
for idx, item in enumerate(begin['value']):
validator.check_value_type("begin[%d]" % idx, item, [int], self.name)
for idx, item in enumerate(end['value']):
validator.check_value_type("end[%d]" % idx, item, [int], self.name)
for idx, item in enumerate(strides['value']):
validator.check_value_type("strides[%d]" % idx, item, [int], self.name)
return {'shape': shapex['value'],
'dtype': dy['dtype'],
'value': None}
class SoftplusGrad(PrimitiveWithInfer):
"""Computes gradient for the Log Softmax activation."""
......@@ -1246,11 +1294,20 @@ class MirrorPadGrad(PrimitiveWithInfer):
validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC'], self.name)
self.mode = mode
def __infer__(self, dout, paddings, x):
def __infer__(self, dout, paddings):
validator.check_subclass("dout", dout['dtype'], mstype.tensor, self.name)
validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name)
validator.check_subclass("input_x", x['dtype'], mstype.tensor, self.name)
return {'shape': x['shape'],
validator.check("paddings rank", len(paddings['shape']), "expected", 2, Rel.EQ, self.name)
validator.check("paddings dim_1", paddings['shape'][1], "expected", 2, Rel.EQ, self.name)
if paddings['value'] is None:
raise ValueError(f"For {self.name}, paddings must be const.")
paddings_value = paddings['value'].asnumpy()
y_shape = ()
dout_shape = dout['shape']
for i, val in enumerate(dout_shape):
y_shape += (val - paddings_value[i][0] - paddings_value[i][1],)
return {'shape': y_shape,
'dtype': dout['dtype'],
'value': None}
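The rewritten __infer__ above derives the output shape purely from dout and paddings: each output dimension is the corresponding dout dimension minus the leading and trailing pad amounts. A minimal sketch of that arithmetic, with hypothetical shapes:
import numpy as np

# Shape arithmetic used by the new MirrorPadGrad.__infer__: each output dim
# is the corresponding dout dim minus the two pad amounts for that dim.
dout_shape = (5, 7)
paddings = np.array([[1, 1], [2, 2]])  # rank 2, second dimension of size 2
y_shape = tuple(d - p0 - p1 for d, (p0, p1) in zip(dout_shape, paddings))
assert y_shape == (3, 3)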
......
......@@ -24,6 +24,137 @@ from ..._c_expression import signature_dtype as sig_dtype
from ..primitive import PrimitiveWithInfer, prim_attr_register
class StridedSliceAICPU(PrimitiveWithInfer):
r"""
Extracts a strided slice of a tensor.
This operation extracts a fragment of size (end-begin)/stride from the given
'input_tensor'. Starting from the position specified by `begin`, the stride is
repeatedly added to the index until the index is not less than `end` in every dimension.
Note:
The stride may be negative value, which causes reverse slicing.
The shape of `begin`, `end` and `strides` should be the same.
Args:
begin_mask (int): Starting index of the slice. Default: 0.
end_mask (int): Ending index of the slice. Default: 0.
ellipsis_mask (int): An int mask. Default: 0.
new_axis_mask (int): An int mask. Default: 0.
shrink_axis_mask (int): An int mask. Default: 0.
Currently none of the masks are in use; keep them at the default value 0.
Inputs:
- **input_x** (Tensor) - The input Tensor.
- **begin** (tuple[int]) - A tuple which represents the location where to start. Only
constant value is allowed.
- **end** (tuple[int]) - A tuple which represents the maximum location where to stop.
Only constant value is allowed.
- **strides** (tuple[int]) - A tuple which represents the stride continuously added
before reach the maximum location. Only constant value is allowed.
Outputs:
Tensor.
The following example explains the behavior.
- In the 0th dim, begin is 1, end is 2, and strides is 1,
because :math:`1+1=2\geq2`, the interval is :math:`[1,2)`.
Thus, return the element with :math:`index = 1` in 0th dim, i.e., [[3, 3, 3], [4, 4, 4]].
- In the 1st dim, similarly, the interval is :math:`[0,1)`.
Based on the return value of the 0th dim, return the element with :math:`index = 0`,
i.e., [3, 3, 3].
- In the 2nd dim, similarly, the interval is :math:`[0,3)`.
Based on the return value of the 1st dim, return the element with :math:`index = 0,1,2`,
i.e., [3, 3, 3].
- Finally, the output is [3, 3, 3].
Examples:
>>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
>>> [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
>>> slice = P.StridedSliceAICPU()
>>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 2))
>>> output.shape
(1, 1, 2)
>>> output
[[[3, 3]]]
"""
@prim_attr_register
def __init__(self,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0):
"""init StrideSlice"""
self.init_prim_io_names(inputs=['x', 'begin', 'end', 'strides'], outputs=['output'])
validator.check_value_type('begin_mask', begin_mask, [int], self.name)
validator.check_value_type('end_mask', end_mask, [int], self.name)
validator.check_value_type('ellipsis_mask', ellipsis_mask, [int], self.name)
validator.check_value_type('new_axis_mask', new_axis_mask, [int], self.name)
validator.check_value_type('shrink_axis_mask', shrink_axis_mask, [int], self.name)
def __infer__(self, x, begin, end, strides):
begin_v, end_v, strides_v = begin['value'], end['value'], strides['value']
validator.check_value_type("begin", begin_v, [tuple], self.name)
validator.check_value_type("end", end_v, [tuple], self.name)
validator.check_value_type("strides", strides_v, [tuple], self.name)
x_shape = x['shape']
x_shp_len = len(x_shape)
if len(begin_v) != x_shp_len or len(end_v) != x_shp_len or len(strides_v) != x_shp_len:
raise ValueError(f"For \'{self.name}\' the length of begin index{begin_v}, end index{end_v} and "
f"strides{strides_v} must be equal to the dims({x_shp_len}) of input.")
ret_shape = []
append_dimensions = []
shrink_pos = bin(self.shrink_axis_mask)[::-1]
new_pos = bin(self.new_axis_mask)[::-1]
for i in range(x_shp_len):
# After the integer is converted to binary, it is a str and the first two chars are the flag char '0b'
if i < (len(new_pos) - 2) and new_pos[i] == '1':
ret_shape.append(1)
append_dimensions.append(x_shape[x_shp_len - 1 - len(append_dimensions)])
continue
if i < (len(shrink_pos) - 2) and shrink_pos[i] == '1':
validator.check_integer(f'begin[{i}]', begin_v[i], -x_shape[i], Rel.GE, self.name)
validator.check_integer(f'begin[{i}]', begin_v[i], x_shape[i], Rel.LT, self.name)
continue
begin_idx = begin_v[i]
end_idx = end_v[i]
strides_idx = strides_v[i]
if self.begin_mask:
begin_idx = 0
if self.end_mask:
end_idx = x_shape[i]
validator.check_integer(f'begin[{i}]', begin_idx, x_shape[i], Rel.LE, self.name)
validator.check_integer(f'end[{i}]', end_idx, x_shape[i], Rel.LE, self.name)
validator.check_integer(f'strides[{i}]', strides_idx, 0, Rel.NE, self.name)
if strides_idx > 0:
# If sliced forward, end_idx >= begin_idx
validator.check(f'begin[{i}]', begin_idx, f'end[{i}]', end_idx, Rel.LE)
if begin_idx < 0 < end_idx:
# Turn negative begin_idx into positive values
begin_idx = x_shape[i] + begin_idx
num_elems = (end_idx - begin_idx + strides_idx - 1) // strides_idx
else:
# If sliced backwards, end_idx <= begin_idx
validator.check(f'begin[{i}]', begin_idx, f'end[{i}]', end_idx, Rel.GE)
if end_idx < 0 < begin_idx:
# Turn negative end_idx into positive values
end_idx = x_shape[i] + end_idx
num_elems = (end_idx - begin_idx + strides_idx + 1) // strides_idx
ret_shape.append(num_elems)
if append_dimensions:
ret_shape += append_dimensions[::-1]
return {'shape': ret_shape,
'dtype': x['dtype'],
'value': None}
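The per-dimension element counts computed above match the length of a Python slice for both stride signs. A small NumPy check of the two formulas, mirroring the __infer__ arithmetic with hypothetical values:
import numpy as np

def num_elems(begin, end, stride, dim):
    """Element count per dimension, mirroring the __infer__ arithmetic above."""
    if stride > 0:
        if begin < 0 < end:
            begin += dim  # turn a negative begin index into a positive one
        return (end - begin + stride - 1) // stride
    if end < 0 < begin:
        end += dim  # turn a negative end index into a positive one
    return (end - begin + stride + 1) // stride

x = np.arange(10)
assert num_elems(1, 8, 3, 10) == len(x[1:8:3])    # forward slice: 3 elements
assert num_elems(8, 1, -3, 10) == len(x[8:1:-3])  # backward slice: 3 elements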
class ExtractImagePatches(PrimitiveWithInfer):
"""
Extract patches from images.
......
......@@ -780,7 +780,9 @@ class Conv2D(PrimitiveWithInfer):
mode (int): 0 math convolution, 1 cross-correlation convolution,
2 deconvolution, 3 depthwise convolution. Default: 1.
pad_mode (str): "valid", "same", "pad" the mode to fill padding. Default: "valid".
pad (int): The pad value to fill. Default: 0.
pad (Union(int, tuple[int])): The pad value to fill. Default: 0. If `pad` is an integer, the paddings of
top, bottom, left and right are the same, all equal to `pad`. If `pad` is a tuple of four integers,
the paddings of top, bottom, left and right are pad[0], pad[1], pad[2] and pad[3] respectively.
stride (Union(int, tuple[int])): The stride to apply conv filter. Default: 1.
dilation (Union(int, tuple[int])): Specify the space to use between kernel elements. Default: 1.
group (int): Split input into groups. Default: 1.
......@@ -820,11 +822,19 @@ class Conv2D(PrimitiveWithInfer):
self.add_prim_attr('stride', self.stride)
self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
self.add_prim_attr('dilation', self.dilation)
validator.check_value_type('pad', pad, (int,), self.name)
validator.check_value_type('pad', pad, (int, tuple), self.name)
if isinstance(pad, int):
pad = (pad,) * 4
else:
validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
self.padding = pad
self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
self.pad = validator.check_pad_value_by_mode(pad_mode, pad, self.name)
if pad_mode != 'pad' and pad != (0, 0, 0, 0):
raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
if self.pad_mode == 'pad':
validator.check_integer('pad', self.pad, 0, Rel.GE, self.name)
for item in pad:
validator.check_integer('pad item', item, 0, Rel.GE, self.name)
self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
self.add_prim_attr('data_format', "NCHW")
......@@ -862,11 +872,11 @@ class Conv2D(PrimitiveWithInfer):
pad_left = math.floor(pad_needed_w / 2)
pad_right = pad_needed_w - pad_left
elif self.pad_mode == 'pad':
pad_top, pad_bottom, pad_left, pad_right = self.pad, self.pad, self.pad, self.pad
pad_top, pad_bottom, pad_left, pad_right = self.padding
h_out = 1 + (x_shape[2] + 2 * self.pad - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
/ stride_h
w_out = 1 + (x_shape[3] + 2 * self.pad - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
/ stride_w
h_out = math.floor(h_out)
w_out = math.floor(w_out)
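With the change above, the output size uses the four per-side paddings instead of 2 * pad. A worked example of the same formulas with hypothetical, asymmetric padding:
import math

# Conv2D output size under pad_mode='pad' with asymmetric padding
# (hypothetical values, same formulas as the infer code above).
h, w = 28, 28
k_h = k_w = 3
stride_h = stride_w = 2
dilation_h = dilation_w = 1
pad_top, pad_bottom, pad_left, pad_right = 1, 0, 1, 0

h_out = math.floor(1 + (h + pad_top + pad_bottom - k_h - (k_h - 1) * (dilation_h - 1)) / stride_h)
w_out = math.floor(1 + (w + pad_left + pad_right - k_w - (k_w - 1) * (dilation_w - 1)) / stride_w)
assert (h_out, w_out) == (14, 14)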
......@@ -1279,7 +1289,9 @@ class Conv2DBackpropInput(PrimitiveWithInfer):
out_channel (int): The dimensionality of the output space.
kernel_size (Union[int, tuple[int]]): The size of the convolution window.
pad_mode (str): "valid", "same", "pad" the mode to fill padding. Default: "valid".
pad (int): The pad value to fill. Default: 0.
pad (Union[int, tuple[int]]): The pad value to fill. Default: 0. If `pad` is an integer, the paddings of
top, bottom, left and right are the same, all equal to `pad`. If `pad` is a tuple of four integers,
the paddings of top, bottom, left and right are pad[0], pad[1], pad[2] and pad[3] respectively.
mode (int): 0 math convolution, 1 cross-correlation convolution,
2 deconvolution, 3 depthwise convolution. Default: 1.
stride (Union[int. tuple[int]]): The stride to apply conv filter. Default: 1.
......@@ -1316,9 +1328,21 @@ class Conv2DBackpropInput(PrimitiveWithInfer):
self.add_prim_attr('stride', self.stride)
self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
self.add_prim_attr('dilation', self.dilation)
validator.check_value_type('pad', pad, (int,), self.name)
validator.check_value_type('pad', pad, (int, tuple), self.name)
if isinstance(pad, int):
pad = (pad,) * 4
self.padding = pad
else:
validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
self.pad = validator.check_pad_value_by_mode(pad_mode, pad, self.name)
if pad_mode != 'pad' and pad != (0, 0, 0, 0):
raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
if self.pad_mode == 'pad':
for item in pad:
validator.check_integer('pad item', item, 0, Rel.GE, self.name)
pad_mode = pad_mode.upper()
self.add_prim_attr('pad_mode', pad_mode)
self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
......@@ -1360,7 +1384,7 @@ class Conv2DBackpropInput(PrimitiveWithInfer):
pad_right = pad_needed_w - pad_left
pad_list = (pad_top, pad_bottom, pad_left, pad_right)
elif self.pad_mode == 'PAD':
pad_list = (self.pad,) * 4
pad_list = self.padding
self.add_prim_attr('pad_list', pad_list)
out = {
'value': None,
......@@ -1735,7 +1759,6 @@ class DataFormatDimMap(PrimitiveWithInfer):
validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
return x_type
class RNNTLoss(PrimitiveWithInfer):
"""
Computes the RNNTLoss and its gradient with respect to the softmax outputs.
......
......@@ -19,6 +19,277 @@ from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common import dtype as mstype
from ..primitive import PrimitiveWithInfer, prim_attr_register
from .._utils import get_broadcast_shape
class Laplace(PrimitiveWithInfer):
r"""
Generates random numbers according to the Laplace random number distribution.
It is defined as:
.. math::
\text{f}(x;μ,λ) = \frac{1}{2λ}\exp(-\frac{|x-μ|}{λ}),
Args:
seed (int): Seed data is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
Inputs:
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
- **mean** (Tensor) - The mean μ distribution parameter, which specifies the location of the peak.
With float32 data type.
- **lambda_param** (Tensor) - The parameter used for controlling the variance of this random distribution. The
variance of the Laplace distribution is equal to twice the square of lambda_param. With float32 data type.
Outputs:
Tensor, with the shape given by the 'shape' input and float32 dtype.
Examples:
>>> shape = (4, 16)
>>> mean = Tensor(1.0, mstype.float32)
>>> lambda_param = Tensor(1.0, mstype.float32)
>>> laplace = P.Laplace(seed=2)
>>> output = laplace(shape, mean, lambda_param)
"""
@prim_attr_register
def __init__(self, seed=0):
"""Init Laplace"""
self.init_prim_io_names(inputs=['shape', 'mean', 'lambda_param'], outputs=['output'])
validator.check_value_type('seed', seed, [int], self.name)
def __infer__(self, shape, mean, lambda_param):
shape_v = shape["value"]
if shape_v is None:
raise ValueError(f"For {self.name}, shape must be const.")
validator.check_value_type("shape", shape_v, [tuple], self.name)
for i, shape_i in enumerate(shape_v):
validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GT, self.name)
validator.check_tensor_type_same({"mean": mean["dtype"]}, [mstype.float32], self.name)
validator.check_tensor_type_same({"lambda_param": lambda_param["dtype"]}, [mstype.float32], self.name)
broadcast_shape = get_broadcast_shape(mean['shape'], lambda_param['shape'], self.name)
broadcast_shape = get_broadcast_shape(broadcast_shape, shape_v, self.name)
out = {
'shape': broadcast_shape,
'dtype': mstype.float32,
'value': None}
return out
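The two chained get_broadcast_shape calls above first broadcast the parameter shapes against each other, then against the requested shape. Assuming get_broadcast_shape follows standard NumPy broadcasting rules, the equivalent check looks like this (np.broadcast_shapes requires NumPy >= 1.20):
import numpy as np

# Two-step broadcast used in __infer__, sketched with NumPy's rule
# (assumed to match get_broadcast_shape).
mean_shape = (3, 1, 2)
lambda_shape = (1,)
requested = (3, 2, 2)

param_shape = np.broadcast_shapes(mean_shape, lambda_shape)  # -> (3, 1, 2)
out_shape = np.broadcast_shapes(param_shape, requested)      # -> (3, 2, 2)
assert out_shape == (3, 2, 2)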
class Gamma(PrimitiveWithInfer):
r"""
Produces random positive floating-point values x, distributed according to the probability density function:
.. math::
\text{P}(x|α,β) = \frac{\exp(-x/β)}{{β^α}\cdot{\Gamma(α)}}\cdot{x^{α-1}},
Args:
seed (int): Seed data is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
Inputs:
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
- **alpha** (Tensor) - The α distribution parameter.
It is also known as the shape parameter. With float32 data type.
- **beta** (Tensor) - The β distribution parameter.
It is also known as the scale parameter. With float32 data type.
Outputs:
Tensor. The shape is the broadcasted shape of the input "shape" and the shapes of alpha and beta.
The dtype is float32.
Examples:
>>> shape = (4, 16)
>>> alpha = Tensor(1.0, mstype.float32)
>>> beta = Tensor(1.0, mstype.float32)
>>> gamma = P.Gamma(seed=3)
>>> output = gamma(shape, alpha, beta)
"""
@prim_attr_register
def __init__(self, seed=0):
"""Init Gamma"""
self.init_prim_io_names(inputs=['shape', 'alpha', 'beta'], outputs=['output'])
validator.check_value_type('seed', seed, [int], self.name)
def __infer__(self, shape, alpha, beta):
shape_v = shape["value"]
if shape_v is None:
raise ValueError(f"For {self.name}, shape must be const.")
validator.check_value_type("shape", shape_v, [tuple], self.name)
for i, shape_i in enumerate(shape_v):
validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GT, self.name)
validator.check_tensor_type_same({"alpha": alpha["dtype"]}, [mstype.float32], self.name)
validator.check_tensor_type_same({"beta": beta["dtype"]}, [mstype.float32], self.name)
broadcast_shape = get_broadcast_shape(alpha['shape'], beta['shape'], self.name)
broadcast_shape = get_broadcast_shape(broadcast_shape, shape_v, self.name)
out = {
'shape': broadcast_shape,
'dtype': mstype.float32,
'value': None}
return out
class Poisson(PrimitiveWithInfer):
r"""
Produces random non-negative integer values i, distributed according to the discrete probability function:
.. math::
\text{P}(i|μ) = \frac{\exp(-μ)μ^{i}}{i!},
Args:
seed (int): Seed data is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
Inputs:
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
- **mean** (Tensor) - The μ distribution parameter. It defines the mean number
of occurrences of the event. With float32 data type.
Outputs:
Tensor. The shape is the broadcasted shape of the input "shape" and the shape of mean.
The dtype is int32.
Examples:
>>> shape = (4, 16)
>>> mean = Tensor(5.0, mstype.float32)
>>> poisson = P.Poisson(seed=5)
>>> output = poisson(shape, mean)
"""
@prim_attr_register
def __init__(self, seed=0):
"""Init Poisson"""
self.init_prim_io_names(inputs=['shape', 'mean'], outputs=['output'])
validator.check_value_type('seed', seed, [int], self.name)
def __infer__(self, shape, mean):
shape_v = shape["value"]
if shape_v is None:
raise ValueError(f"For {self.name}, shape must be const.")
validator.check_value_type("shape", shape_v, [tuple], self.name)
for i, shape_i in enumerate(shape_v):
validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GT, self.name)
validator.check_tensor_type_same({"mean": mean["dtype"]}, [mstype.float32], self.name)
broadcast_shape = get_broadcast_shape(mean['shape'], shape_v, self.name)
out = {
'shape': broadcast_shape,
'dtype': mstype.int32,
'value': None}
return out
class UniformInt(PrimitiveWithInfer):
r"""
Produces random integer values i, uniformly distributed on the closed interval [a, b], that is,
distributed according to the discrete probability function:
.. math::
\text{P}(i|a,b) = \frac{1}{b-a+1},
Note:
Each value in tensor a must be strictly less than the corresponding value in tensor b after broadcasting.
Args:
seed (int): Seed data is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
Inputs:
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
- **a** (Tensor) - The a distribution parameter.
It defines the minimum possibly generated value. With int32 data type.
- **b** (Tensor) - The b distribution parameter.
It defines the maximum possibly generated value. With int32 data type.
Outputs:
Tensor. The shape is the broadcasted shape of the input "shape" and the shapes of a and b.
The dtype is int32.
Examples:
>>> shape = (4, 16)
>>> a = Tensor(1, mstype.int32)
>>> b = Tensor(5, mstype.int32)
>>> uniform_int = P.UniformInt(seed=10)
>>> output = uniform_int(shape, a, b)
"""
@prim_attr_register
def __init__(self, seed=0):
"""Init UniformInt"""
self.init_prim_io_names(inputs=['shape', 'a', 'b'], outputs=['output'])
validator.check_value_type('seed', seed, [int], self.name)
def __infer__(self, shape, a, b):
shape_v = shape["value"]
if shape_v is None:
raise ValueError(f"For {self.name}, shape must be const.")
validator.check_value_type("shape", shape_v, [tuple], self.name)
for i, shape_i in enumerate(shape_v):
validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GT, self.name)
validator.check_tensor_type_same({"a": a["dtype"]}, [mstype.int32], self.name)
validator.check_tensor_type_same({"b": b["dtype"]}, [mstype.int32], self.name)
broadcast_shape = get_broadcast_shape(a['shape'], b['shape'], self.name)
broadcast_shape = get_broadcast_shape(broadcast_shape, shape_v, self.name)
out = {
'shape': broadcast_shape,
'dtype': mstype.int32,
'value': None}
return out
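Note that the closed interval [a, b] documented above differs from NumPy's half-open convention. A NumPy analogue of the sampling semantics (an assumption drawn from the docstring, not from the kernel) therefore samples from [a, b + 1):
import numpy as np

# NumPy analogue of UniformInt's closed interval [a, b]: Generator.integers
# uses an exclusive upper bound by default, so shift it by one.
rng = np.random.default_rng(seed=10)
a, b = 1, 5
sample = rng.integers(a, b + 1, size=(4, 16))
assert sample.min() >= a and sample.max() <= b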
class UniformReal(PrimitiveWithInfer):
r"""
Produces random floating-point values i, uniformly distributed on the interval [min(a, b), max(a, b)), that is,
distributed according to the probability density function:
.. math::
\text{P}(i|a,b) = \frac{1}{b-a},
Args:
seed (int): Seed data is used as entropy source for Random number engines generating pseudo-random numbers.
Default: 0.
Inputs:
- **shape** (tuple) - The shape of random tensor to be generated. Only constant value is allowed.
- **a** (Tensor) - The a distribution parameter.
It defines the minimum possibly generated value. With float32 data type.
- **b** (Tensor) - The b distribution parameter.
It defines the maximum possibly generated value. With float32 data type.
Outputs:
Tensor. The shape is the broadcasted shape of the input "shape" and the shapes of a and b.
The dtype is float32.
Examples:
>>> shape = (4, 16)
>>> a = Tensor(1.0, mstype.float32)
>>> b = Tensor(5.0, mstype.float32)
>>> uniform_real = P.UniformReal(seed=10)
>>> output = uniform_real(shape, a, b)
"""
@prim_attr_register
def __init__(self, seed=0):
"""Init UniformReal"""
self.init_prim_io_names(inputs=['shape', 'a', 'b'], outputs=['output'])
validator.check_value_type('seed', seed, [int], self.name)
def __infer__(self, shape, a, b):
shape_v = shape["value"]
if shape_v is None:
raise ValueError(f"For {self.name}, shape must be const.")
validator.check_value_type("shape", shape_v, [tuple], self.name)
for i, shape_i in enumerate(shape_v):
validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GT, self.name)
validator.check_tensor_type_same({"a": a["dtype"]}, [mstype.float32], self.name)
validator.check_tensor_type_same({"b": b["dtype"]}, [mstype.float32], self.name)
broadcast_shape = get_broadcast_shape(a['shape'], b['shape'], self.name)
broadcast_shape = get_broadcast_shape(broadcast_shape, shape_v, self.name)
out = {
'shape': broadcast_shape,
'dtype': mstype.float32,
'value': None}
return out
class RandomChoiceWithMask(PrimitiveWithInfer):
......@@ -66,50 +337,6 @@ class RandomChoiceWithMask(PrimitiveWithInfer):
return (mstype.int32, mstype.bool_)
class Normal(PrimitiveWithInfer):
"""
Generates random samples from a normal(Gaussian) distribution.
Args:
seed (int): Random seed. Default: 0.
Inputs:
- **shape** (tuple[int]) - The shape of output tensor. Only constant value is allowed.
- **mean** (Tensor) - The mean of the distribution, with float32 data type.
- **stddev** (Tensor) - The standard deviation of the distribution, with float32 data type.
Outputs:
Tensor, with the given shape from the specific distribution and float32 data type.
Examples:
>>> normal = P.Normal()
>>> mean = Tensor(0., mstype.float32)
>>> stddev = Tensor(1., mstype.float32)
>>> out = normal((32, 3, 3), mean, stddev)
"""
@prim_attr_register
def __init__(self, seed=0):
"""Init Normal"""
validator.check_value_type("seed", seed, [int], self.name)
def __infer__(self, shape, mean, stddev):
shape_value = shape["value"]
if shape_value is None:
raise ValueError(f"For {self.name}, shape must be const.")
validator.check_value_type("shape", shape_value, [tuple], self.name)
for i, shape_i in enumerate(shape_value):
validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GE, self.name)
validator.check_tensor_type_same({"mean": mean["dtype"]}, [mstype.float32], self.name)
validator.check_tensor_type_same({"stddev": stddev["dtype"]}, [mstype.float32], self.name)
out = {"shape": shape_value,
"dtype": mstype.float32,
"value": None}
return out
class RandomCategorical(PrimitiveWithInfer):
"""
Generates random samples from a given categorical distribution tensor.
......@@ -166,3 +393,46 @@ class RandomCategorical(PrimitiveWithInfer):
return {'shape': (x_shape),
'dtype': (self.dtype),
'value': None}
class Normal(PrimitiveWithInfer):
"""
Generates random samples from a normal(Gaussian) distribution.
Args:
seed (int): Random seed. Default: 0.
Inputs:
- **shape** (tuple[int]) - The shape of output tensor. Only constant value is allowed.
- **mean** (Tensor) - The mean of the distribution, with float32 data type.
- **stddev** (Tensor) - The standard deviation of the distribution, with float32 data type.
Outputs:
Tensor, with the given shape from the specific distribution and float32 data type.
Examples:
>>> normal = P.Normal()
>>> mean = Tensor(0., mstype.float32)
>>> stddev = Tensor(1., mstype.float32)
>>> out = normal((32, 3, 3), mean, stddev)
"""
@prim_attr_register
def __init__(self, seed=0):
"""Init Normal"""
validator.check_value_type("seed", seed, [int], self.name)
def __infer__(self, shape, mean, stddev):
shape_value = shape["value"]
if shape_value is None:
raise ValueError(f"For {self.name}, shape must be const.")
validator.check_value_type("shape", shape_value, [tuple], self.name)
for i, shape_i in enumerate(shape_value):
validator.check_integer("shape[%d]" % i, shape_i, 0, Rel.GE, self.name)
validator.check_tensor_type_same({"mean": mean["dtype"]}, [mstype.float32], self.name)
validator.check_tensor_type_same({"stddev": stddev["dtype"]}, [mstype.float32], self.name)
out = {"shape": shape_value,
"dtype": mstype.float32,
"value": None}
return out
......@@ -126,10 +126,6 @@ class ModelCallback(Callback):
print("epoch: {}, outputs are: {}".format(cb_params.cur_epoch_num, str(cb_params.net_outputs)))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_bert_tdt():
"""test bert tdt"""
np.random.seed(0)
......
......@@ -154,10 +154,6 @@ class TimeMonitor(Callback):
self.epoch_mseconds_list.append(epoch_mseconds)
self.per_step_mseconds_list.append(epoch_mseconds / self.data_size)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_bert_precision():
"""test bert precision"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", reserve_class_name_in_scope=False)
......
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, x, dtype):
super(Net, self).__init__()
self.cast = P.Cast()
self.x = x
self.dtype = dtype
def construct(self):
return self.cast(self.x, self.dtype)
def test_net_f32_bool():
x = np.random.randn(3, 4).astype(np.float32)
x[:, 1] = 0
net = Net(Tensor(x), mstype.bool_)
output = net()
print(output.asnumpy())
print(Tensor(x).dtype)
print(output.dtype)
def test_net_f16_bool():
x = np.random.randn(3, 4).astype(np.float16)
x[:, 1] = 0
net = Net(Tensor(x), mstype.bool_)
output = net()
print(output.asnumpy())
print(Tensor(x).dtype)
print(output.dtype)
def test_net_f64_bool():
x = np.random.randn(3, 4).astype(np.float64)
x[:, 1] = 0
net = Net(Tensor(x), mstype.bool_)
output = net()
print(output.asnumpy())
print(Tensor(x).dtype)
print(output.dtype)
def test_net_int16_float16():
x = np.random.randint(-512, 512, size=(3, 4)).astype(np.int16)
net = Net(Tensor(x), mstype.float16)
output = net()
print(output.asnumpy())
print(Tensor(x).dtype)
print(output.dtype)
def test_net_int64_float16():
x = np.random.randint(-512, 512, size=(3, 4)).astype(np.int64)
net = Net(Tensor(x), mstype.float16)
output = net()
print(output.asnumpy())
print(Tensor(x).dtype)
print(output.dtype)
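The float-to-bool tests above rely on the usual casting convention: zero maps to False and any nonzero value to True. Assuming Cast follows the same rule as NumPy's astype, the expected behavior can be sketched as:
import numpy as np

# Expected float -> bool semantics the tests above exercise (assuming Cast
# follows the zero -> False, nonzero -> True convention, like astype).
x = np.random.randn(3, 4).astype(np.float32)
x[:, 1] = 0
expected = x.astype(np.bool_)
assert not expected[:, 1].any() and expected[x != 0].all()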
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape, seed=0):
super(Net, self).__init__()
self.gamma = P.Gamma(seed=seed)
self.shape = shape
def construct(self, alpha, beta):
return self.gamma(self.shape, alpha, beta)
def test_net_1D():
seed = 10
shape = (3, 2, 4)
alpha = 1.0
beta = 1.0
net = Net(shape, seed)
talpha, tbeta = Tensor(alpha, mstype.float32), Tensor(beta, mstype.float32)
output = net(talpha, tbeta)
print(output.asnumpy())
assert output.shape == (3, 2, 4)
def test_net_ND():
seed = 10
shape = (3, 1, 2)
alpha = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]).astype(np.float32)
beta = np.array([1.0]).astype(np.float32)
net = Net(shape, seed)
talpha, tbeta = Tensor(alpha), Tensor(beta)
output = net(talpha, tbeta)
print(output.asnumpy())
assert output.shape == (3, 2, 2)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape, seed=0):
super(Net, self).__init__()
self.laplace = P.Laplace(seed=seed)
self.shape = shape
def construct(self, mean, lambda_param):
return self.laplace(self.shape, mean, lambda_param)
def test_net_1D():
seed = 10
shape = (3, 2, 4)
mean = 1.0
lambda_param = 1.0
net = Net(shape, seed)
tmean, tlambda_param = Tensor(mean, mstype.float32), Tensor(lambda_param, mstype.float32)
output = net(tmean, tlambda_param)
print(output.asnumpy())
assert output.shape == (3, 2, 4)
def test_net_ND():
seed = 10
shape = (3, 1, 2)
mean = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]).astype(np.float32)
lambda_param = np.array([1.0]).astype(np.float32)
net = Net(shape, seed)
tmean, tlambda_param = Tensor(mean), Tensor(lambda_param)
output = net(tmean, tlambda_param)
print(output.asnumpy())
assert output.shape == (3, 2, 2)
......@@ -12,32 +12,48 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.common import Tensor
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import composite as C
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape=None, mean=0.0, stddev=1.0, seed=0):
def __init__(self, shape, seed=0):
super(Net, self).__init__()
self._mean = Tensor(mean, mstype.float32)
self._stddev = Tensor(stddev, mstype.float32)
self._normal = P.Normal(seed=seed)
self._shape = shape
self.shape = shape
self.seed = seed
def construct(self):
return self._normal(self._shape, self._mean, self._stddev)
def construct(self, mean, stddev):
return C.normal(self.shape, mean, stddev, self.seed)
def test_net_3x2x4():
mean = 0.0
def test_net_1D():
seed = 10
shape = (3, 2, 4)
mean = 1.0
stddev = 1.0
seed = 0
net = Net((3, 2, 4), mean, stddev, seed)
out = net()
assert out.shape == (3, 2, 4)
net = Net(shape, seed)
tmean, tstddev = Tensor(mean, mstype.float32), Tensor(stddev, mstype.float32)
output = net(tmean, tstddev)
print(output.asnumpy())
assert output.shape == (3, 2, 4)
def test_net_ND():
seed = 10
shape = (3, 1, 2)
mean = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]).astype(np.float32)
stddev = np.array([1.0]).astype(np.float32)
net = Net(shape, seed)
tmean, tstddev = Tensor(mean, mstype.float32), Tensor(stddev, mstype.float32)
output = net(tmean, tstddev)
print(output.asnumpy())
assert output.shape == (3, 2, 2)
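The updated tests call the C.normal composite instead of the P.Normal primitive. One common way such a composite can be decomposed, shown purely as an assumption for illustration and not as MindSpore's actual implementation, is mean + stddev * standard_normal:
import numpy as np

# Hypothetical decomposition of a `normal` composite on top of a
# standard-normal kernel (illustration only, not MindSpore's C.normal).
def normal_sketch(shape, mean, stddev, seed=0):
    rng = np.random.default_rng(seed)
    return mean + stddev * rng.standard_normal(shape).astype(np.float32)

out = normal_sketch((3, 1, 2), np.float32(1.0), np.float32(1.0), seed=10)
assert out.shape == (3, 1, 2)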
......@@ -127,7 +127,6 @@ def test_net_int64():
print(output.asnumpy())
assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))
def test_net_uint64():
x = np.random.randn(3, 5, 4).astype(np.uint64)
y = np.random.randn(3, 5, 4).astype(np.uint64)
......
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape):
super(Net, self).__init__()
self.poisson = P.Poisson()
self.shape = shape
def construct(self, mean):
return self.poisson(self.shape, mean)
def test_net_1():
shape = (2, 16)
mean = np.array([5.0]).astype(np.float32)
net = Net(shape)
tmean = Tensor(mean)
output = net(tmean)
print(output.asnumpy())
assert output.shape == (2, 16)
def test_net_2():
shape = (4, 1)
mean = np.array([5.0, 10.0]).astype(np.float32)
net = Net(shape)
tmean = Tensor(mean)
output = net(tmean)
print(output.asnumpy())
assert output.shape == (4, 2)
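test_net_2 expects the output shape (4, 2) because the requested shape (4, 1) broadcasts against the mean's shape (2,). A one-line check of that reasoning (np.broadcast_shapes requires NumPy >= 1.20):
import numpy as np

# Why test_net_2 asserts (4, 2): the requested shape broadcasts with mean's.
assert np.broadcast_shapes((4, 1), (2,)) == (4, 2)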
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, shape, seed=0, seed2=0):
super(Net, self).__init__()
self.shape = shape
self.seed = seed
self.seed2 = seed2
self.stdnormal = P.StandardNormal(seed, seed2)
def construct(self):
return self.stdnormal(self.shape, self.seed, self.seed2)
def test_net():
seed = 10
seed2 = 10
shape = (3, 2, 4)
net = Net(shape, seed, seed2)
output = net()
print(output.asnumpy())
assert output.shape == (3, 2, 4)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE,
device_target="Ascend")
class Net(nn.Cell):
def __init__(self, offset):
super(Net, self).__init__()
self.embedding = P.EmbeddingLookup()
self.offset = offset
def construct(self, param, index):
return self.embedding(param, index, self.offset)
def test_embedding_lookup_sparse():
params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mstype.int32)
indices = Tensor(np.array([[5, 2], [8, 5]]), mstype.int32)
offset = 4
embedding = Net(offset)
out = embedding(params, indices)
assert(out.asnumpy() == [[[10, 11], [0, 0]], [[0, 0], [10, 11]]]).all()
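The assertion encodes EmbeddingLookup's offset semantics: an index i maps to params[i - offset] when the shifted index is in range, and to a zero row otherwise. A NumPy sketch of that behavior, inferred from the test's expected output:
import numpy as np

def embedding_lookup_sketch(params, indices, offset):
    """Gather params[i - offset]; out-of-range indices produce zero rows."""
    shifted = indices - offset
    valid = (shifted >= 0) & (shifted < params.shape[0])
    out = np.zeros(indices.shape + params.shape[1:], dtype=params.dtype)
    out[valid] = params[shifted[valid]]
    return out

params = np.array([[8, 9], [10, 11], [12, 13], [14, 15]], dtype=np.int32)
indices = np.array([[5, 2], [8, 5]], dtype=np.int32)
out = embedding_lookup_sketch(params, indices, offset=4)
assert (out == [[[10, 11], [0, 0]], [[0, 0], [10, 11]]]).all()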