diff --git a/docs/source_en/operator_list.md b/docs/source_en/operator_list.md
index 3a79b0a2b9e79d7375608e4ed9a421cf1360a1f4..8ef84d3f5a59077265397daa37779b14c5ae1c97 100644
--- a/docs/source_en/operator_list.md
+++ b/docs/source_en/operator_list.md
@@ -67,7 +67,7 @@
 | [mindspore.nn.L1Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.L1Loss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.MSELoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.MSELoss) | Supported |Doing | Doing |loss/loss
 | [mindspore.nn.SmoothL1Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SmoothL1Loss) |Supported |Doing | Doing |loss/loss
-| [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Doing |loss/loss
+| [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Supported |loss/loss
 | [mindspore.nn.SoftmaxCrossEntropyExpand](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyExpand) | Supported |Supported | Doing |loss/loss
 | [mindspore.nn.CosineEmbeddingLoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.CosineEmbeddingLoss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.ProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ProximalAdagrad) | Supported | Doing | Doing |optim/ProximalAdagrad
@@ -128,7 +128,7 @@
 | [mindspore.ops.operations.Conv2DBackpropInput](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Conv2DBackpropInput) | Supported | Supported |Doing | nn_ops
 | [mindspore.ops.operations.BiasAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BiasAdd) | Supported | Supported | Supported | nn_ops
 | [mindspore.ops.operations.TopK](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TopK) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Supported | nn_ops
 | [mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits) | Doing | Supported | Supported | nn_ops
 | [mindspore.ops.operations.ApplyMomentum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyMomentum) | Supported | Supported | Supported | nn_ops
 | [mindspore.ops.operations.ApplyAddSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAddSign) | Supported | Doing | Doing | nn_ops
diff --git a/docs/source_zh_cn/operator_list.md b/docs/source_zh_cn/operator_list.md
index db8e29e3935556c95e4aabc477cc297d2561c8d0..e5b75be66457a90389f4dce79ee707500bdc1203 100644
--- a/docs/source_zh_cn/operator_list.md
+++ b/docs/source_zh_cn/operator_list.md
@@ -67,7 +67,7 @@
 | [mindspore.nn.L1Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.L1Loss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.MSELoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.MSELoss) | Supported |Doing | Doing |loss/loss
 | [mindspore.nn.SmoothL1Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SmoothL1Loss) | Supported |Doing | Doing |loss/loss
-| [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Doing |loss/loss
+| [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Supported |loss/loss
 | [mindspore.nn.SoftmaxCrossEntropyExpand](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyExpand) | Supported |Supported | Doing |loss/loss
 | [mindspore.nn.CosineEmbeddingLoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.CosineEmbeddingLoss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.ProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ProximalAdagrad) | Supported |Doing | Doing |optim/ProximalAdagrad
@@ -128,7 +128,7 @@
 | [mindspore.ops.operations.Conv2DBackpropInput](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Conv2DBackpropInput) | Supported | Supported |Doing | nn_ops
 | [mindspore.ops.operations.BiasAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BiasAdd) | Supported | Supported | Supported | nn_ops
 | [mindspore.ops.operations.TopK](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TopK) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Supported | nn_ops
 | [mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits) | Doing | Supported | Supported | nn_ops
 | [mindspore.ops.operations.ApplyMomentum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyMomentum) | Supported | Supported | Supported | nn_ops
 | [mindspore.ops.operations.ApplyAddSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAddSign) | Supported | Doing | Doing | nn_ops
diff --git a/tutorials/notebook/computer_vision_application.ipynb b/tutorials/notebook/computer_vision_application.ipynb
index f6a65a867c7f1be61b2a417ff1f4273718940790..b9a4efc032fa7ae24d8b7681d62cf5aab8cf3167 100644
--- a/tutorials/notebook/computer_vision_application.ipynb
+++ b/tutorials/notebook/computer_vision_application.ipynb
@@ -387,7 +387,7 @@
     "from mindspore.nn.optim.momentum import Momentum\n",
     "from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits\n",
     "\n",
-    "ls = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction=\"mean\")\n",
+    "ls = SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)"
    ]
   },
diff --git a/tutorials/notebook/customized_debugging_information.ipynb b/tutorials/notebook/customized_debugging_information.ipynb
index 7ef6762a17ec7e4a8d8ce8389d6bed61fea52422..44be7bd3a753b3c00b1851729badec85be8b4584 100644
--- a/tutorials/notebook/customized_debugging_information.ipynb
+++ b/tutorials/notebook/customized_debugging_information.ipynb
@@ -386,7 +386,7 @@
     "train_data_path = \"./MNIST_Data/train\"\n",
     "eval_data_path = \"./MNIST_Data/train\"\n",
     "\n",
-    "net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')\n",
+    "net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "repeat_size = epoch_size\n",
     "network = LeNet5()\n",
     "\n",
diff --git a/tutorials/notebook/debugging_in_pynative_mode.ipynb b/tutorials/notebook/debugging_in_pynative_mode.ipynb
index b068dddd05fc8bde544cf34f234b405b02db40dd..ce3d50557b55592afefaca452b1ecbd56d45521a 100644
--- a/tutorials/notebook/debugging_in_pynative_mode.ipynb
+++ b/tutorials/notebook/debugging_in_pynative_mode.ipynb
@@ -488,7 +488,7 @@
     "\n",
     "net = LeNet5()\n",
     "optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)\n",
-    "criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)\n",
+    "criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     "net_with_criterion = WithLossCell(net, criterion)\n",
     "train_network = GradWrap(net_with_criterion)\n",
     "train_network.set_train()\n",
diff --git a/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb b/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb
index 2eb475d31541d52e7af5edc5c2b76a55cc3374b2..39bffb88e5b016f15bd05e5057c4450eaf9d103f 100644
--- a/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb
+++ b/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb
@@ -311,7 +311,7 @@
     "    ds_train = create_dataset(data_path=\"./MNIST_Data/train/\")\n",
     "\n",
     "    network = LeNet5()\n",
-    "    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "    net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.01, momentum=0.9)\n",
     "    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "    model = Model(network, net_loss, net_opt, metrics={\"Accuracy\": Accuracy()})\n",
diff --git a/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb b/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb
index 08a68bdb080a475a114bfbbc57f463113e64f289..082a64b00e3f4f48d27703742e137aa6f11e6d64 100644
--- a/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb
+++ b/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb
@@ -544,7 +544,7 @@
    "source": [
     "\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "lr = Tensor(get_lr(0, 0.002, 10, ds_train.get_dataset_size()))\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
@@ -777,7 +777,7 @@
     "\n",
     "lr = Tensor(get_lr(0, 0.002, 10, ds_train.get_dataset_size()))\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1562, keep_checkpoint_max=10)\n",
@@ -873,7 +873,7 @@
    "source": [
     "lr = Tensor(get_lr(0, 0.002, 1, ds_train.get_dataset_size()))\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1562, keep_checkpoint_max=10)\n",
@@ -1017,7 +1017,7 @@
     "\n",
     "lr = Tensor(get_lr(0, 0.002, 1, ds_train.get_dataset_size()))\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1562, keep_checkpoint_max=10)\n",
@@ -1153,7 +1153,7 @@
     "\n",
     "lr = Tensor(get_lr(0, 0.002, 1, ds_train.get_dataset_size()))\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1562, keep_checkpoint_max=10)\n",
diff --git a/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb b/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb
index 3a09e78dc423d7634c1f32c9ba7ff91109afb600..c55cbd60f470f84b998ca828d6ed39d2d87257cc 100644
--- a/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb
+++ b/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb
@@ -313,7 +313,7 @@
     "    epoch_size = 10\n",
     "    mnist_path = \"./MNIST_Data\"\n",
     "    \n",
-    "    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')\n",
+    "    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     "    repeat_size = 1\n",
     "    # create the network\n",
     "    network = LeNet5()\n",
diff --git a/tutorials/notebook/mixed_precision.ipynb b/tutorials/notebook/mixed_precision.ipynb
index b57154f1e1a6bfb4cde295dbbe8a36c48cd1c006..53a7b4b7f3732c0e4a917a5eff55d8c7ead0d13e 100644
--- a/tutorials/notebook/mixed_precision.ipynb
+++ b/tutorials/notebook/mixed_precision.ipynb
@@ -859,7 +859,7 @@
     "    weight_decay = 1e-4\n",
     "    \n",
     "    # define loss, model\n",
-    "    loss = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction='mean')\n",
+    "    loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     "    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum)\n",
     "    model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},amp_level=\"O2\")\n",
     "    \n",
diff --git a/tutorials/notebook/model_security.ipynb b/tutorials/notebook/model_security.ipynb
index f1c00155f64a3c3bde554b7628e17e1fd94d0bc3..d958155df781b04021d7fa920f1c03b65b69f6a9 100644
--- a/tutorials/notebook/model_security.ipynb
+++ b/tutorials/notebook/model_security.ipynb
@@ -422,7 +422,7 @@
     "lr = 0.01\n",
     "momentum = 0.9\n",
     "network = LeNet5()\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), lr, momentum)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1875,\n",
@@ -752,7 +752,7 @@
     "from mindarmour.defenses import NaturalAdversarialDefense\n",
     "\n",
     "\n",
-    "loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)\n",
+    "loss = SoftmaxCrossEntropyWithLogits(sparse=False, reduction='mean')\n",
     "opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)\n",
     "\n",
     "nad = NaturalAdversarialDefense(net, loss_fn=loss, optimizer=opt,\n",
diff --git a/tutorials/notebook/nlp_application.ipynb b/tutorials/notebook/nlp_application.ipynb
index 8920dfef4cd45971f0711cba8d898f1065df77ec..02cf130217ea5634fb6d87cc6059dc4c71809427 100644
--- a/tutorials/notebook/nlp_application.ipynb
+++ b/tutorials/notebook/nlp_application.ipynb
@@ -821,7 +821,7 @@
     "from mindspore import nn\n",
     "\n",
     "\n",
-    "loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)\n",
+    "loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     "opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)"
    ]
   },
diff --git a/tutorials/notebook/quick_start.ipynb b/tutorials/notebook/quick_start.ipynb
index c1b390cedfb72b09cc83cc6c5099035fcd070e28..50146fffc1248b49747b3f021f1a712c9bcca48b 100644
--- a/tutorials/notebook/quick_start.ipynb
+++ b/tutorials/notebook/quick_start.ipynb
@@ -858,7 +858,7 @@
     "net_opt = nn.Momentum(network.trainable_params(), lr, momentum)\n",
     "\n",
     "# define the loss function\n",
-    "net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')\n",
+    "net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     "\n",
     "# define the model\n",
     "model = Model(network, net_loss, net_opt, metrics={\"Accuracy\": Accuracy()} )\n",
diff --git a/tutorials/notebook/synchronization_training_and_evaluation.ipynb b/tutorials/notebook/synchronization_training_and_evaluation.ipynb
index 236ae433c882d620ead10a0247dc321bab8122d3..80f857391986c557ac75db948419f81a400a3473 100644
--- a/tutorials/notebook/synchronization_training_and_evaluation.ipynb
+++ b/tutorials/notebook/synchronization_training_and_evaluation.ipynb
@@ -371,7 +371,7 @@
     "    eval_data = create_dataset(eval_data_path, repeat_size=repeat_size)\n",
     "    \n",
     "    # define the loss function\n",
-    "    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')\n",
+    "    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     "    # define the optimizer\n",
     "    net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.01, momentum=0.9)\n",
     "    config_ck = CheckpointConfig(save_checkpoint_steps=eval_per_epoch*1875, keep_checkpoint_max=15)\n",
diff --git a/tutorials/source_en/advanced_use/computer_vision_application.md b/tutorials/source_en/advanced_use/computer_vision_application.md
index 13fa54ac4c57c24d0cf5c8becf909ba0bc369355..f340d987782d99ef426d62714fd9b23a8a166887 100644
--- a/tutorials/source_en/advanced_use/computer_vision_application.md
+++ b/tutorials/source_en/advanced_use/computer_vision_application.md
@@ -167,7 +167,7 @@ An example of the code for defining the loss function and optimizer in MindSpore
 
 ```python
 # loss function definition
-ls = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction="mean")
+ls = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 
 # optimization definition
 opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
diff --git a/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md b/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
index 877b07e0a6dcc8e7980a883927f5d1f235bcd213..42c3fbe92887490eb7116526de3f1d3a91a6bd47 100644
--- a/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
+++ b/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
@@ -361,7 +361,7 @@ class GradWrap(nn.Cell):
 
 net = LeNet5()
 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
-criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
 net_with_criterion = WithLossCell(net, criterion)
 train_network = GradWrap(net_with_criterion)
 train_network.set_train()
diff --git a/tutorials/source_en/advanced_use/differential_privacy.md b/tutorials/source_en/advanced_use/differential_privacy.md
index 33635e67bdc654970158e0bafa8fe4184b0ee77d..746f969dbbcd4a8bb517f393f07af02b97734595 100644
--- a/tutorials/source_en/advanced_use/differential_privacy.md
+++ b/tutorials/source_en/advanced_use/differential_privacy.md
@@ -233,7 +233,7 @@ Load the LeNet network, define the loss function, configure the checkpoint param
 
 ```python
 network = LeNet5()
-net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                              keep_checkpoint_max=cfg.keep_checkpoint_max)
 ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
diff --git a/tutorials/source_en/advanced_use/model_security.md b/tutorials/source_en/advanced_use/model_security.md
index 3075c95afc17ce627519c7fc729e97e0ff48376d..1af2ab041609546249ad740879d8d9ac3e2ab636 100644
--- a/tutorials/source_en/advanced_use/model_security.md
+++ b/tutorials/source_en/advanced_use/model_security.md
@@ -185,7 +185,7 @@ The LeNet model is used as an example. You can also create and train your own mo
                                      batch_size=batch_size, repeat_size=1,
                                      sparse=False)
     net = LeNet5()
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=False)
     opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
     model = Model(net, loss, opt, metrics=None)
     model.train(10, ds_train, callbacks=[LossMonitor()],
diff --git a/tutorials/source_en/advanced_use/network_migration.md b/tutorials/source_en/advanced_use/network_migration.md
index 0e5e4fd8845dbd76f498d6963f1ef13718b063e3..71511c4356005ffbe0d23aa0bff5b82cf747b451 100644
--- a/tutorials/source_en/advanced_use/network_migration.md
+++ b/tutorials/source_en/advanced_use/network_migration.md
@@ -223,7 +223,7 @@ The ResNet-50 network migration and training on the Ascend 910 is used as an exa
 After the network is defined, the loss function and optimizer need to be defined accordingly.
 
 ```python
-    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config.momentum, config.weight_decay, config.loss_scale)
 ```
diff --git a/tutorials/source_en/advanced_use/nlp_application.md b/tutorials/source_en/advanced_use/nlp_application.md
index e42856478bac7fa3fe53405003e1e517bab00fca..f33da2ffbbb114b44873cf6da67f7ee4cdc0832d 100644
--- a/tutorials/source_en/advanced_use/nlp_application.md
+++ b/tutorials/source_en/advanced_use/nlp_application.md
@@ -193,7 +193,7 @@ if args.pre_trained:
 The sample code for defining the optimizer and loss function is as follows:
 
 ```python
-loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
 opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)
 loss_cb = LossMonitor()
 ```
diff --git a/tutorials/source_en/advanced_use/summary_record.md b/tutorials/source_en/advanced_use/summary_record.md
index c8e52b400e6defdbb4dab5428947cc0c8dea22a9..eb80a56643dbd343b8e64c4b30cda2ffc8887975 100644
--- a/tutorials/source_en/advanced_use/summary_record.md
+++ b/tutorials/source_en/advanced_use/summary_record.md
@@ -106,7 +106,7 @@ class AlexNet(nn.Cell):
 context.set_context(mode=context.GRAPH_MODE)
 
 network = AlexNet(num_classes=10)
-loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 lr = Tensor(0.1)
 opt = nn.Momentum(network.trainable_params(), lr, momentum=0.9)
 model = Model(network, loss, opt)
diff --git a/tutorials/source_en/quick_start/quick_start.md b/tutorials/source_en/quick_start/quick_start.md
index 4e37ef22432b2afc587d9ea73ff92c9b41d08e34..4acb74f0774bb0be97a06847191eaf00d7c7d00c 100644
--- a/tutorials/source_en/quick_start/quick_start.md
+++ b/tutorials/source_en/quick_start/quick_start.md
@@ -291,7 +291,7 @@ Call the defined loss function in the `__main__` function.
 if __name__ == "__main__":
     ...
     #define the loss function
-    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
+    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     ...
 ```
diff --git a/tutorials/source_en/use/multi_platform_inference.md b/tutorials/source_en/use/multi_platform_inference.md
index 704b96d460f744f09f9ff57645f44865ecc4eb12..f18a61689547df6ab7f969bd64f83a73ab427022 100644
--- a/tutorials/source_en/use/multi_platform_inference.md
+++ b/tutorials/source_en/use/multi_platform_inference.md
@@ -63,7 +63,7 @@ MindSpore supports the following inference scenarios based on the hardware platf
 
    ```python
    network = LeNet5(cfg.num_classes)
-   net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+   net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
   net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
 
@@ -86,7 +86,7 @@ MindSpore supports the following inference scenarios based on the hardware platf
 
   ```python
   network = LeNet5(cfg.num_classes)
-   net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+   net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
   net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
   model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
 
diff --git a/tutorials/source_zh_cn/advanced_use/computer_vision_application.md b/tutorials/source_zh_cn/advanced_use/computer_vision_application.md
index b40d13a7fa7902d980e836074462d76ae3732081..9d3f271063bac46d059708c22a9a7c0ca062e9d7 100644
--- a/tutorials/source_zh_cn/advanced_use/computer_vision_application.md
+++ b/tutorials/source_zh_cn/advanced_use/computer_vision_application.md
@@ -170,7 +170,7 @@ MindSpore中定义损失函数和优化器的代码样例如下:
 
 ```python
 # loss function definition
-ls = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction="mean")
+ls = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 
 # optimization definition
 opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
diff --git a/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md b/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md
index a8c87f9ba8f6df44d9f5c4193b4d2b14ba1db147..fd4a8fed7eb57116f43100741ea9018d69b202b8 100644
--- a/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md
+++ b/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md
@@ -363,7 +363,7 @@ class GradWrap(nn.Cell):
 
 net = LeNet5()
 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
-criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
 net_with_criterion = WithLossCell(net, criterion)
 train_network = GradWrap(net_with_criterion)
 train_network.set_train()
diff --git a/tutorials/source_zh_cn/advanced_use/differential_privacy.md b/tutorials/source_zh_cn/advanced_use/differential_privacy.md
index 0f09b27154658ff26c1f03bb73613eb28f4f7ca3..7e9dac091d64dedfc3c694701871173bd858fc3e 100644
--- a/tutorials/source_zh_cn/advanced_use/differential_privacy.md
+++ b/tutorials/source_zh_cn/advanced_use/differential_privacy.md
@@ -233,7 +233,7 @@ class LeNet5(nn.Cell):
 
 ```python
 network = LeNet5()
-net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                              keep_checkpoint_max=cfg.keep_checkpoint_max)
 ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
diff --git a/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md b/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md
index 574ed6e6c80be76174a9773e89034993aad1f645..f982312405044dbad3b7ba9457a80a629029dee4 100644
--- a/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md
+++ b/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md
@@ -218,7 +218,7 @@ if __name__ == "__main__":
     ds_train = create_dataset(os.path.join(args.data_path, "train"), 32)
 
     network = LeNet5(10)
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = GradientAccumulation(network, net_loss, net_opt)
 
diff --git a/tutorials/source_zh_cn/advanced_use/model_security.md b/tutorials/source_zh_cn/advanced_use/model_security.md
index 13850029cdc90c80586c0e6cd8c58438068c55a2..1d445b489b6f98f7dc6c2440b768c982f68084a5 100644
--- a/tutorials/source_zh_cn/advanced_use/model_security.md
+++ b/tutorials/source_zh_cn/advanced_use/model_security.md
@@ -185,7 +185,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1,
                                      batch_size=batch_size, repeat_size=1,
                                      sparse=False)
     net = LeNet5()
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=False)
     opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
     model = Model(net, loss, opt, metrics=None)
     model.train(10, ds_train, callbacks=[LossMonitor()],
diff --git a/tutorials/source_zh_cn/advanced_use/nlp_application.md b/tutorials/source_zh_cn/advanced_use/nlp_application.md
index 3b8d9e3c48329f9eaa6832ad6fd0aee2c54400f3..dd6df6a34bf637c5a757dd9de7b75a09e5bc1f86 100644
--- a/tutorials/source_zh_cn/advanced_use/nlp_application.md
+++ b/tutorials/source_zh_cn/advanced_use/nlp_application.md
@@ -193,7 +193,7 @@ if args.pre_trained:
 定义优化器及损失函数的示例代码如下:
 
 ```python
-loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
 opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)
 loss_cb = LossMonitor()
 ```
diff --git a/tutorials/source_zh_cn/advanced_use/summary_record.md b/tutorials/source_zh_cn/advanced_use/summary_record.md
index 16c459c1800d0b9126fe7941dab15e7823bdd83f..8854964cdb985b00cb8dc104b3b37171ae3ac636 100644
--- a/tutorials/source_zh_cn/advanced_use/summary_record.md
+++ b/tutorials/source_zh_cn/advanced_use/summary_record.md
@@ -108,7 +108,7 @@ class AlexNet(nn.Cell):
 context.set_context(mode=context.GRAPH_MODE)
 
 network = AlexNet(num_classes=10)
-loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 lr = Tensor(0.1)
 opt = nn.Momentum(network.trainable_params(), lr, momentum=0.9)
 model = Model(network, loss, opt)
diff --git a/tutorials/source_zh_cn/quick_start/quick_start.md b/tutorials/source_zh_cn/quick_start/quick_start.md
index 2cc7e51cff73c29199ed76c82c50bc28257f15f4..85a0b2da6edf1ce033f2da751b53f10a5b9c7f17 100644
--- a/tutorials/source_zh_cn/quick_start/quick_start.md
+++ b/tutorials/source_zh_cn/quick_start/quick_start.md
@@ -291,7 +291,7 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 if __name__ == "__main__":
     ...
     #define the loss function
-    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
+    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     ...
 ```
diff --git a/tutorials/source_zh_cn/use/multi_platform_inference.md b/tutorials/source_zh_cn/use/multi_platform_inference.md
index 77698182e542f89bc8270c7b3d5ba298c31453a1..83b588e41ac2d16d8816fbe1294079b5451d903f 100644
--- a/tutorials/source_zh_cn/use/multi_platform_inference.md
+++ b/tutorials/source_zh_cn/use/multi_platform_inference.md
@@ -62,7 +62,7 @@ CPU | ONNX格式 | 支持ONNX推理的runtime/SDK,如TensorRT。
    首先构建模型,然后使用`mindspore.train.serialization`模块的`load_checkpoint`和`load_param_into_net`从本地加载模型与参数,传入验证数据集后即可进行模型推理,验证数据集的处理方式与训练数据集相同。
   ```python
   network = LeNet5(cfg.num_classes)
-   net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+   net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
   net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
   model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
 
@@ -84,7 +84,7 @@ CPU | ONNX格式 | 支持ONNX推理的runtime/SDK,如TensorRT。
   首先构建模型,然后使用`hub.load_weights`从云端加载模型参数,传入验证数据集后即可进行推理,验证数据集的处理方式与训练数据集相同。
   ```python
   network = LeNet5(cfg.num_classes)
-   net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+   net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
   net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
   model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
 
diff --git a/tutorials/tutorial_code/gradient_accumulation/train.py b/tutorials/tutorial_code/gradient_accumulation/train.py
index e9ff0f6c1f280a0338bb97937a1aa60e9ea126e5..c52fd0d63fbb62bde920a77413e9c73198a8464e 100644
--- a/tutorials/tutorial_code/gradient_accumulation/train.py
+++ b/tutorials/tutorial_code/gradient_accumulation/train.py
@@ -139,7 +139,7 @@ if __name__ == "__main__":
     ds_train = create_dataset(os.path.join(args.data_path, "train"), 32)
 
     network = LeNet5(10)
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = GradientAccumulation(network, net_loss, net_opt)
 
diff --git a/tutorials/tutorial_code/lenet.py b/tutorials/tutorial_code/lenet.py
index 5f5dfffb22d5e8f6f6a77250b73af033c8e32955..dc9348c5d45d578971e9a5ec17ceaa9d093c2c4b 100644
--- a/tutorials/tutorial_code/lenet.py
+++ b/tutorials/tutorial_code/lenet.py
@@ -205,7 +205,7 @@ if __name__ == "__main__":
    epoch_size = 1
    mnist_path = "./MNIST_Data"
    # define the loss function
-    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
+    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    repeat_size = 1
    # create the network
    network = LeNet5()
diff --git a/tutorials/tutorial_code/model_safety/mnist_defense_nad.py b/tutorials/tutorial_code/model_safety/mnist_defense_nad.py
index a76c2a6016a34d6cdde18b897295021aef384935..d587f960acefeaf494c48964fe344d39b209fa15 100644
--- a/tutorials/tutorial_code/model_safety/mnist_defense_nad.py
+++ b/tutorials/tutorial_code/model_safety/mnist_defense_nad.py
@@ -57,7 +57,7 @@ def test_nad_method():
     load_dict = load_checkpoint(ckpt_name)
     load_param_into_net(net, load_dict)
 
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=False)
     opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
 
     nad = NaturalAdversarialDefense(net, loss_fn=loss, optimizer=opt,
diff --git a/tutorials/tutorial_code/resnet/cifar_resnet50.py b/tutorials/tutorial_code/resnet/cifar_resnet50.py
index 94cca8b461eb6d9336c4fdabb70bf19fdd8fbc9d..cf6740e2cf10d53c9186f2f5ea466f6cd88be21f 100644
--- a/tutorials/tutorial_code/resnet/cifar_resnet50.py
+++ b/tutorials/tutorial_code/resnet/cifar_resnet50.py
@@ -111,7 +111,7 @@ if __name__ == '__main__':
     epoch_size = args_opt.epoch_size
     net = resnet50(args_opt.batch_size, args_opt.num_classes)
 
-    ls = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction="mean")
+    ls = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=ls, optimizer=opt, metrics={'acc'})
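
Every hunk above converges on the same call shape once the removed `is_grad` flag is gone. Below is a minimal sketch of the migrated usage; the dummy logits, labels, and shapes are illustrative assumptions, not taken from the tutorials:

```python
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor

# Migrated construction: only `sparse` and `reduction` are passed; the
# deprecated `is_grad` argument is removed throughout this patch.
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")

# Illustrative call on dummy data (assumed shapes): logits for a batch of
# two samples over ten classes, with integer class labels because sparse=True.
logits = Tensor(np.random.randn(2, 10).astype(np.float32))
labels = Tensor(np.array([3, 7], dtype=np.int32))
loss_value = net_loss(logits, labels)  # scalar, since reduction="mean"
```

When `sparse=False` (as in the model-security examples), the loss instead expects labels as one-hot float vectors with the same shape as the logits rather than integer indices.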