From 0d776004c2ddcbc959eded8b640359da25672837 Mon Sep 17 00:00:00 2001
From: wanyiming
Date: Fri, 4 Sep 2020 11:07:01 +0800
Subject: [PATCH] Mod_SoftmaxCrossEntropyWithLogits

---
 docs/source_en/operator_list.md | 4 ++--
 docs/source_zh_cn/operator_list.md | 4 ++--
 tutorials/notebook/computer_vision_application.ipynb | 2 +-
 .../notebook/customized_debugging_information.ipynb | 2 +-
 tutorials/notebook/debugging_in_pynative_mode.ipynb | 2 +-
 .../mindinsight/calculate_and_datagraphic.ipynb | 2 +-
 .../mindinsight_image_histogram_scalar_tensor.ipynb | 10 +++++-----
 .../mindinsight_model_lineage_and_data_lineage.ipynb | 2 +-
 tutorials/notebook/mixed_precision.ipynb | 2 +-
 tutorials/notebook/model_security.ipynb | 4 ++--
 tutorials/notebook/nlp_application.ipynb | 2 +-
 tutorials/notebook/quick_start.ipynb | 2 +-
 .../synchronization_training_and_evaluation.ipynb | 2 +-
 .../advanced_use/computer_vision_application.md | 2 +-
 .../advanced_use/debugging_in_pynative_mode.md | 2 +-
 .../source_en/advanced_use/differential_privacy.md | 2 +-
 tutorials/source_en/advanced_use/model_security.md | 2 +-
 tutorials/source_en/advanced_use/network_migration.md | 2 +-
 tutorials/source_en/advanced_use/nlp_application.md | 2 +-
 tutorials/source_en/advanced_use/summary_record.md | 2 +-
 tutorials/source_en/quick_start/quick_start.md | 2 +-
 tutorials/source_en/use/multi_platform_inference.md | 4 ++--
 .../advanced_use/computer_vision_application.md | 2 +-
 .../advanced_use/debugging_in_pynative_mode.md | 2 +-
 .../source_zh_cn/advanced_use/differential_privacy.md | 2 +-
 .../source_zh_cn/advanced_use/gradient_accumulation.md | 2 +-
 tutorials/source_zh_cn/advanced_use/model_security.md | 2 +-
 tutorials/source_zh_cn/advanced_use/nlp_application.md | 2 +-
 tutorials/source_zh_cn/advanced_use/summary_record.md | 2 +-
 tutorials/source_zh_cn/quick_start/quick_start.md | 2 +-
 tutorials/source_zh_cn/use/multi_platform_inference.md | 4 ++--
 tutorials/tutorial_code/gradient_accumulation/train.py | 2 +-
 tutorials/tutorial_code/lenet.py | 2 +-
 .../tutorial_code/model_safety/mnist_defense_nad.py | 2 +-
 tutorials/tutorial_code/resnet/cifar_resnet50.py | 2 +-
 35 files changed, 44 insertions(+), 44 deletions(-)
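Note: every hunk below applies the same mechanical change — the deprecated `is_grad` argument is dropped from `SoftmaxCrossEntropyWithLogits`, whose behavior is now configured only by `sparse` (integer class indices vs. one-hot labels) and `reduction`; the two operator-list tables additionally mark the `nn` and `ops` variants of the operator as Supported in all three backend columns. A minimal before/after sketch of the call-site change (the variable names are the ones the tutorials already use; nothing else is part of this patch):

```python
import mindspore.nn as nn

# Before (deprecated): the loss variant was selected with is_grad.
#   net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")

# After: is_grad is gone; sparse=True means labels are class indices,
# and reduction="mean" averages the per-sample losses into a scalar.
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
```

Call sites that previously omitted `reduction` (e.g. `SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)`) either keep the default or pass `reduction='mean'` explicitly, as each hunk shows.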
diff --git a/docs/source_en/operator_list.md b/docs/source_en/operator_list.md
index 3a79b0a2..8ef84d3f 100644
--- a/docs/source_en/operator_list.md
+++ b/docs/source_en/operator_list.md
@@ -67,7 +67,7 @@
 | [mindspore.nn.L1Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.L1Loss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.MSELoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.MSELoss) | Supported |Doing | Doing |loss/loss
 | [mindspore.nn.SmoothL1Loss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SmoothL1Loss) |Supported |Doing | Doing |loss/loss
-| [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Doing |loss/loss
+| [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Supported |loss/loss
 | [mindspore.nn.SoftmaxCrossEntropyExpand](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyExpand) | Supported |Supported | Doing |loss/loss
 | [mindspore.nn.CosineEmbeddingLoss](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.CosineEmbeddingLoss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.ProximalAdagrad](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ProximalAdagrad) | Supported | Doing | Doing |optim/ProximalAdagrad
@@ -128,7 +128,7 @@
 | [mindspore.ops.operations.Conv2DBackpropInput](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Conv2DBackpropInput) | Supported | Supported |Doing | nn_ops
 | [mindspore.ops.operations.BiasAdd](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BiasAdd) | Supported | Supported | Supported | nn_ops
 | [mindspore.ops.operations.TopK](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TopK) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Supported | nn_ops
 | [mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits) | Doing | Supported | Supported | nn_ops
 | [mindspore.ops.operations.ApplyMomentum](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyMomentum) | Supported | Supported | Supported | nn_ops
 | [mindspore.ops.operations.ApplyAddSign](https://www.mindspore.cn/api/en/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAddSign) | Supported | Doing | Doing | nn_ops
diff --git a/docs/source_zh_cn/operator_list.md b/docs/source_zh_cn/operator_list.md
index db8e29e3..e5b75be6 100644
--- a/docs/source_zh_cn/operator_list.md
+++ b/docs/source_zh_cn/operator_list.md
@@ -67,7 +67,7 @@
 | [mindspore.nn.L1Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.L1Loss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.MSELoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.MSELoss) | Supported |Doing | Doing |loss/loss
 | [mindspore.nn.SmoothL1Loss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SmoothL1Loss) | Supported |Doing | Doing |loss/loss
-| [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Doing |loss/loss
+| [mindspore.nn.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyWithLogits) | Supported | Supported | Supported |loss/loss
 | [mindspore.nn.SoftmaxCrossEntropyExpand](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.SoftmaxCrossEntropyExpand) | Supported |Supported | Doing |loss/loss
 | [mindspore.nn.CosineEmbeddingLoss](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.CosineEmbeddingLoss) |Supported |Supported | Doing |loss/loss
 | [mindspore.nn.ProximalAdagrad](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.nn.html#mindspore.nn.ProximalAdagrad) | Supported |Doing | Doing |optim/ProximalAdagrad
@@ -128,7 +128,7 @@
 | [mindspore.ops.operations.Conv2DBackpropInput](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.Conv2DBackpropInput) | Supported | Supported |Doing | nn_ops
 | [mindspore.ops.operations.BiasAdd](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.BiasAdd) | Supported | Supported | Supported | nn_ops
 | [mindspore.ops.operations.TopK](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.TopK) | Supported | Supported |Doing | nn_ops
-| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Doing | nn_ops
+| [mindspore.ops.operations.SoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SoftmaxCrossEntropyWithLogits) | Supported | Supported |Supported | nn_ops
 | [mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.SparseSoftmaxCrossEntropyWithLogits) | Doing | Supported | Supported | nn_ops
 | [mindspore.ops.operations.ApplyMomentum](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyMomentum) | Supported | Supported | Supported | nn_ops
 | [mindspore.ops.operations.ApplyAddSign](https://www.mindspore.cn/api/zh-CN/master/api/python/mindspore/mindspore.ops.operations.html#mindspore.ops.operations.ApplyAddSign) | Supported | Doing | Doing | nn_ops
diff --git a/tutorials/notebook/computer_vision_application.ipynb b/tutorials/notebook/computer_vision_application.ipynb
index f6a65a86..b9a4efc0 100644
--- a/tutorials/notebook/computer_vision_application.ipynb
+++ b/tutorials/notebook/computer_vision_application.ipynb
@@ -387,7 +387,7 @@
     "from mindspore.nn.optim.momentum import Momentum\n",
     "from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits\n",
     "\n",
-    "ls = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction=\"mean\")\n",
+    "ls = SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)"
    ]
   },
diff --git a/tutorials/notebook/customized_debugging_information.ipynb b/tutorials/notebook/customized_debugging_information.ipynb
index 7ef6762a..44be7bd3 100644
--- a/tutorials/notebook/customized_debugging_information.ipynb
+++ b/tutorials/notebook/customized_debugging_information.ipynb
@@ -386,7 +386,7 @@
     "train_data_path = \"./MNIST_Data/train\"\n",
     "eval_data_path = \"./MNIST_Data/train\"\n",
     "\n",
-    "net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')\n",
+    "net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "repeat_size = epoch_size\n",
     "network = LeNet5()\n",
     "\n",
diff --git a/tutorials/notebook/debugging_in_pynative_mode.ipynb b/tutorials/notebook/debugging_in_pynative_mode.ipynb
index b068dddd..ce3d5055 100644
--- a/tutorials/notebook/debugging_in_pynative_mode.ipynb
+++ b/tutorials/notebook/debugging_in_pynative_mode.ipynb
@@ -488,7 +488,7 @@
     "\n",
     "net = LeNet5()\n",
     "optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)\n",
-    "criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)\n",
+    "criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
    "net_with_criterion = WithLossCell(net, criterion)\n",
     "train_network = GradWrap(net_with_criterion)\n",
     "train_network.set_train()\n",
diff --git a/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb b/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb
index 2eb475d3..39bffb88 100644
--- a/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb
+++ b/tutorials/notebook/mindinsight/calculate_and_datagraphic.ipynb
@@ -311,7 +311,7 @@
     " ds_train = create_dataset(data_path=\"./MNIST_Data/train/\")\n",
     "\n",
     " network = LeNet5()\n",
-    " net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    " net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     " net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.01, momentum=0.9)\n",
     " time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     " model = Model(network, net_loss, net_opt, metrics={\"Accuracy\": Accuracy()})\n",
diff --git a/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb b/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb
index 08a68bdb..082a64b0 100644
--- a/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb
+++ b/tutorials/notebook/mindinsight/mindinsight_image_histogram_scalar_tensor.ipynb
@@ -544,7 +544,7 @@
    "source": [
     "\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "lr = Tensor(get_lr(0, 0.002, 10, ds_train.get_dataset_size()))\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
@@ -777,7 +777,7 @@
     "\n",
     "lr = Tensor(get_lr(0, 0.002, 10, ds_train.get_dataset_size()))\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1562, keep_checkpoint_max=10)\n",
@@ -873,7 +873,7 @@
    "source": [
     "lr = Tensor(get_lr(0, 0.002, 1, ds_train.get_dataset_size()))\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1562, keep_checkpoint_max=10)\n",
@@ -1017,7 +1017,7 @@
     "\n",
     "lr = Tensor(get_lr(0, 0.002, 1, ds_train.get_dataset_size()))\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1562, keep_checkpoint_max=10)\n",
@@ -1153,7 +1153,7 @@
     "\n",
     "lr = Tensor(get_lr(0, 0.002, 1, ds_train.get_dataset_size()))\n",
     "network = AlexNet(num_classes=10)\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), learning_rate=lr, momentum=0.9)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1562, keep_checkpoint_max=10)\n",
diff --git a/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb b/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb
index 3a09e78d..c55cbd60 100644
--- a/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb
+++ b/tutorials/notebook/mindinsight/mindinsight_model_lineage_and_data_lineage.ipynb
@@ -313,7 +313,7 @@
     " epoch_size = 10\n",
     " mnist_path = \"./MNIST_Data\"\n",
     " \n",
-    " net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')\n",
+    " net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     " repeat_size = 1\n",
     " # create the network\n",
     " network = LeNet5()\n",
diff --git a/tutorials/notebook/mixed_precision.ipynb b/tutorials/notebook/mixed_precision.ipynb
index b57154f1..53a7b4b7 100644
--- a/tutorials/notebook/mixed_precision.ipynb
+++ b/tutorials/notebook/mixed_precision.ipynb
@@ -859,7 +859,7 @@
     " weight_decay = 1e-4\n",
     " \n",
     " # define loss, model\n",
-    " loss = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction='mean')\n",
+    " loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     " opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum)\n",
     " model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},amp_level=\"O2\")\n",
     " \n",
diff --git a/tutorials/notebook/model_security.ipynb b/tutorials/notebook/model_security.ipynb
index f1c00155..d958155d 100644
--- a/tutorials/notebook/model_security.ipynb
+++ b/tutorials/notebook/model_security.ipynb
@@ -422,7 +422,7 @@
     "lr = 0.01\n",
     "momentum = 0.9\n",
     "network = LeNet5()\n",
-    "net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction=\"mean\")\n",
+    "net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n",
     "net_opt = nn.Momentum(network.trainable_params(), lr, momentum)\n",
     "time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())\n",
     "config_ck = CheckpointConfig(save_checkpoint_steps=1875,\n",
@@ -752,7 +752,7 @@
     "from mindarmour.defenses import NaturalAdversarialDefense\n",
     "\n",
     "\n",
-    "loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)\n",
+    "loss = SoftmaxCrossEntropyWithLogits(sparse=False, reduction='mean')\n",
     "opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)\n",
     "\n",
     "nad = NaturalAdversarialDefense(net, loss_fn=loss, optimizer=opt,\n",
diff --git a/tutorials/notebook/nlp_application.ipynb b/tutorials/notebook/nlp_application.ipynb
index 8920dfef..02cf1302 100644
--- a/tutorials/notebook/nlp_application.ipynb
+++ b/tutorials/notebook/nlp_application.ipynb
@@ -821,7 +821,7 @@
     "from mindspore import nn\n",
     "\n",
     "\n",
-    "loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)\n",
+    "loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     "opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)"
    ]
   },
diff --git a/tutorials/notebook/quick_start.ipynb b/tutorials/notebook/quick_start.ipynb
index c1b390ce..50146fff 100644
--- a/tutorials/notebook/quick_start.ipynb
+++ b/tutorials/notebook/quick_start.ipynb
@@ -858,7 +858,7 @@
     "net_opt = nn.Momentum(network.trainable_params(), lr, momentum)\n",
     "\n",
     "# define the loss function\n",
-    "net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')\n",
+    "net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     "\n",
     "# define the model\n",
     "model = Model(network, net_loss, net_opt, metrics={\"Accuracy\": Accuracy()} )\n",
diff --git a/tutorials/notebook/synchronization_training_and_evaluation.ipynb b/tutorials/notebook/synchronization_training_and_evaluation.ipynb
index 236ae433..80f85739 100644
--- a/tutorials/notebook/synchronization_training_and_evaluation.ipynb
+++ b/tutorials/notebook/synchronization_training_and_evaluation.ipynb
@@ -371,7 +371,7 @@
     " eval_data = create_dataset(eval_data_path, repeat_size=repeat_size)\n",
     " \n",
     " # define the loss function\n",
-    " net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')\n",
+    " net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     " # define the optimizer\n",
     " net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.01, momentum=0.9)\n",
     " config_ck = CheckpointConfig(save_checkpoint_steps=eval_per_epoch*1875, keep_checkpoint_max=15)\n",
diff --git a/tutorials/source_en/advanced_use/computer_vision_application.md b/tutorials/source_en/advanced_use/computer_vision_application.md
index 13fa54ac..f340d987 100644
--- a/tutorials/source_en/advanced_use/computer_vision_application.md
+++ b/tutorials/source_en/advanced_use/computer_vision_application.md
@@ -167,7 +167,7 @@ An example of the code for defining the loss function and optimizer in MindSpore
 
 ```python
 # loss function definition
-ls = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction="mean")
+ls = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 
 # optimization definition
 opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
diff --git a/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md b/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
index 877b07e0..42c3fbe9 100644
--- a/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
+++ b/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
@@ -361,7 +361,7 @@ class GradWrap(nn.Cell):
 
 net = LeNet5()
 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
-criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
 net_with_criterion = WithLossCell(net, criterion)
 train_network = GradWrap(net_with_criterion)
 train_network.set_train()
diff --git a/tutorials/source_en/advanced_use/differential_privacy.md b/tutorials/source_en/advanced_use/differential_privacy.md
index 33635e67..746f969d 100644
--- a/tutorials/source_en/advanced_use/differential_privacy.md
+++ b/tutorials/source_en/advanced_use/differential_privacy.md
@@ -233,7 +233,7 @@ Load the LeNet network, define the loss function, configure the checkpoint param
 
 ```python
 network = LeNet5()
-net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                              keep_checkpoint_max=cfg.keep_checkpoint_max)
 ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
diff --git a/tutorials/source_en/advanced_use/model_security.md b/tutorials/source_en/advanced_use/model_security.md
index 3075c95a..1af2ab04 100644
--- a/tutorials/source_en/advanced_use/model_security.md
+++ b/tutorials/source_en/advanced_use/model_security.md
@@ -185,7 +185,7 @@ The LeNet model is used as an example. You can also create and train your own mo
                                batch_size=batch_size, repeat_size=1,
                                sparse=False)
     net = LeNet5()
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=False)
     opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
     model = Model(net, loss, opt, metrics=None)
    model.train(10, ds_train, callbacks=[LossMonitor()],
diff --git a/tutorials/source_en/advanced_use/network_migration.md b/tutorials/source_en/advanced_use/network_migration.md
index 0e5e4fd8..71511c43 100644
--- a/tutorials/source_en/advanced_use/network_migration.md
+++ b/tutorials/source_en/advanced_use/network_migration.md
@@ -223,7 +223,7 @@ The ResNet-50 network migration and training on the Ascend 910 is used as an exa
     After the network is defined, the loss function and optimizer need to be defined accordingly.
 
     ```python
-    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config.momentum,
                    config.weight_decay, config.loss_scale)
     ```
diff --git a/tutorials/source_en/advanced_use/nlp_application.md b/tutorials/source_en/advanced_use/nlp_application.md
index e4285647..f33da2ff 100644
--- a/tutorials/source_en/advanced_use/nlp_application.md
+++ b/tutorials/source_en/advanced_use/nlp_application.md
@@ -193,7 +193,7 @@ if args.pre_trained:
 The sample code for defining the optimizer and loss function is as follows:
 
 ```python
-loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
 opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)
 loss_cb = LossMonitor()
 ```
diff --git a/tutorials/source_en/advanced_use/summary_record.md b/tutorials/source_en/advanced_use/summary_record.md
index c8e52b40..eb80a566 100644
--- a/tutorials/source_en/advanced_use/summary_record.md
+++ b/tutorials/source_en/advanced_use/summary_record.md
@@ -106,7 +106,7 @@ class AlexNet(nn.Cell):
 
 context.set_context(mode=context.GRAPH_MODE)
 network = AlexNet(num_classes=10)
-loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 lr = Tensor(0.1)
 opt = nn.Momentum(network.trainable_params(), lr, momentum=0.9)
 model = Model(network, loss, opt)
diff --git a/tutorials/source_en/quick_start/quick_start.md b/tutorials/source_en/quick_start/quick_start.md
index 4e37ef22..4acb74f0 100644
--- a/tutorials/source_en/quick_start/quick_start.md
+++ b/tutorials/source_en/quick_start/quick_start.md
@@ -291,7 +291,7 @@ Call the defined loss function in the `__main__` function.
 if __name__ == "__main__":
     ...
     #define the loss function
-    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
+    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     ...
 ```
diff --git a/tutorials/source_en/use/multi_platform_inference.md b/tutorials/source_en/use/multi_platform_inference.md
index 704b96d4..f18a6168 100644
--- a/tutorials/source_en/use/multi_platform_inference.md
+++ b/tutorials/source_en/use/multi_platform_inference.md
@@ -63,7 +63,7 @@ MindSpore supports the following inference scenarios based on the hardware platf
 
     ```python
     network = LeNet5(cfg.num_classes)
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
     model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
@@ -86,7 +86,7 @@ MindSpore supports the following inference scenarios based on the hardware platf
 
     ```python
     network = LeNet5(cfg.num_classes)
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
     model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
diff --git a/tutorials/source_zh_cn/advanced_use/computer_vision_application.md b/tutorials/source_zh_cn/advanced_use/computer_vision_application.md
index b40d13a7..9d3f2710 100644
--- a/tutorials/source_zh_cn/advanced_use/computer_vision_application.md
+++ b/tutorials/source_zh_cn/advanced_use/computer_vision_application.md
@@ -170,7 +170,7 @@ MindSpore中定义损失函数和优化器的代码样例如下:
 
 ```python
 # loss function definition
-ls = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction="mean")
+ls = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 
 # optimization definition
 opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
diff --git a/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md b/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md
index a8c87f9b..fd4a8fed 100644
--- a/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md
+++ b/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md
@@ -363,7 +363,7 @@ class GradWrap(nn.Cell):
 
 net = LeNet5()
 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
-criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
 net_with_criterion = WithLossCell(net, criterion)
 train_network = GradWrap(net_with_criterion)
 train_network.set_train()
diff --git a/tutorials/source_zh_cn/advanced_use/differential_privacy.md b/tutorials/source_zh_cn/advanced_use/differential_privacy.md
index 0f09b271..7e9dac09 100644
--- a/tutorials/source_zh_cn/advanced_use/differential_privacy.md
+++ b/tutorials/source_zh_cn/advanced_use/differential_privacy.md
@@ -233,7 +233,7 @@ class LeNet5(nn.Cell):
 
 ```python
 network = LeNet5()
-net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                              keep_checkpoint_max=cfg.keep_checkpoint_max)
 ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
diff --git a/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md b/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md
index 574ed6e6..f9823124 100644
--- a/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md
+++ b/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md
@@ -218,7 +218,7 @@ if __name__ == "__main__":
     ds_train = create_dataset(os.path.join(args.data_path, "train"), 32)
 
     network = LeNet5(10)
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = GradientAccumulation(network, net_loss, net_opt)
diff --git a/tutorials/source_zh_cn/advanced_use/model_security.md b/tutorials/source_zh_cn/advanced_use/model_security.md
index 13850029..1d445b48 100644
--- a/tutorials/source_zh_cn/advanced_use/model_security.md
+++ b/tutorials/source_zh_cn/advanced_use/model_security.md
@@ -185,7 +185,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1,
                                batch_size=batch_size, repeat_size=1,
                                sparse=False)
     net = LeNet5()
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=False)
     opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
     model = Model(net, loss, opt, metrics=None)
     model.train(10, ds_train, callbacks=[LossMonitor()],
diff --git a/tutorials/source_zh_cn/advanced_use/nlp_application.md b/tutorials/source_zh_cn/advanced_use/nlp_application.md
index 3b8d9e3c..dd6df6a3 100644
--- a/tutorials/source_zh_cn/advanced_use/nlp_application.md
+++ b/tutorials/source_zh_cn/advanced_use/nlp_application.md
@@ -193,7 +193,7 @@ if args.pre_trained:
 定义优化器及损失函数的示例代码如下:
 
 ```python
-loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
 opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)
 loss_cb = LossMonitor()
 ```
diff --git a/tutorials/source_zh_cn/advanced_use/summary_record.md b/tutorials/source_zh_cn/advanced_use/summary_record.md
index 16c459c1..8854964c 100644
--- a/tutorials/source_zh_cn/advanced_use/summary_record.md
+++ b/tutorials/source_zh_cn/advanced_use/summary_record.md
@@ -108,7 +108,7 @@ class AlexNet(nn.Cell):
 
 context.set_context(mode=context.GRAPH_MODE)
 network = AlexNet(num_classes=10)
-loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
 lr = Tensor(0.1)
 opt = nn.Momentum(network.trainable_params(), lr, momentum=0.9)
 model = Model(network, loss, opt)
diff --git a/tutorials/source_zh_cn/quick_start/quick_start.md b/tutorials/source_zh_cn/quick_start/quick_start.md
index 2cc7e51c..85a0b2da 100644
--- a/tutorials/source_zh_cn/quick_start/quick_start.md
+++ b/tutorials/source_zh_cn/quick_start/quick_start.md
@@ -291,7 +291,7 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 if __name__ == "__main__":
     ...
     #define the loss function
-    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
+    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     ...
 ```
diff --git a/tutorials/source_zh_cn/use/multi_platform_inference.md b/tutorials/source_zh_cn/use/multi_platform_inference.md
index 77698182..83b588e4 100644
--- a/tutorials/source_zh_cn/use/multi_platform_inference.md
+++ b/tutorials/source_zh_cn/use/multi_platform_inference.md
@@ -62,7 +62,7 @@ CPU | ONNX格式 | 支持ONNX推理的runtime/SDK,如TensorRT。
 首先构建模型,然后使用`mindspore.train.serialization`模块的`load_checkpoint`和`load_param_into_net`从本地加载模型与参数,传入验证数据集后即可进行模型推理,验证数据集的处理方式与训练数据集相同。
 
     ```python
     network = LeNet5(cfg.num_classes)
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
     model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
@@ -84,7 +84,7 @@ CPU | ONNX格式 | 支持ONNX推理的runtime/SDK,如TensorRT。
 首先构建模型,然后使用`hub.load_weights`从云端加载模型参数,传入验证数据集后即可进行推理,验证数据集的处理方式与训练数据集相同。
 
     ```python
     network = LeNet5(cfg.num_classes)
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
     model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
diff --git a/tutorials/tutorial_code/gradient_accumulation/train.py b/tutorials/tutorial_code/gradient_accumulation/train.py
index e9ff0f6c..c52fd0d6 100644
--- a/tutorials/tutorial_code/gradient_accumulation/train.py
+++ b/tutorials/tutorial_code/gradient_accumulation/train.py
@@ -139,7 +139,7 @@ if __name__ == "__main__":
     ds_train = create_dataset(os.path.join(args.data_path, "train"), 32)
 
     network = LeNet5(10)
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = GradientAccumulation(network, net_loss, net_opt)
diff --git a/tutorials/tutorial_code/lenet.py b/tutorials/tutorial_code/lenet.py
index 5f5dfffb..dc9348c5 100644
--- a/tutorials/tutorial_code/lenet.py
+++ b/tutorials/tutorial_code/lenet.py
@@ -205,7 +205,7 @@ if __name__ == "__main__":
     epoch_size = 1
     mnist_path = "./MNIST_Data"
     # define the loss function
-    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
+    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     repeat_size = 1
     # create the network
     network = LeNet5()
diff --git a/tutorials/tutorial_code/model_safety/mnist_defense_nad.py b/tutorials/tutorial_code/model_safety/mnist_defense_nad.py
index a76c2a60..d587f960 100644
--- a/tutorials/tutorial_code/model_safety/mnist_defense_nad.py
+++ b/tutorials/tutorial_code/model_safety/mnist_defense_nad.py
@@ -57,7 +57,7 @@ def test_nad_method():
     load_dict = load_checkpoint(ckpt_name)
     load_param_into_net(net, load_dict)
 
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=False)
     opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
 
     nad = NaturalAdversarialDefense(net, loss_fn=loss, optimizer=opt,
diff --git a/tutorials/tutorial_code/resnet/cifar_resnet50.py b/tutorials/tutorial_code/resnet/cifar_resnet50.py
index 94cca8b4..cf6740e2 100644
--- a/tutorials/tutorial_code/resnet/cifar_resnet50.py
+++ b/tutorials/tutorial_code/resnet/cifar_resnet50.py
@@ -111,7 +111,7 @@ if __name__ == '__main__':
     epoch_size = args_opt.epoch_size
     net = resnet50(args_opt.batch_size, args_opt.num_classes)
-    ls = SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction="mean")
+    ls = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=ls, optimizer=opt, metrics={'acc'})
--
GitLab
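A minimal smoke test for the new call form, assuming a MindSpore build in which `is_grad` has already been removed; the tensor values below are illustrative and not taken from the patch:

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

# sparse=True: labels are integer class indices; reduction="mean" yields a scalar.
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")

logits = Tensor(np.random.randn(2, 10).astype(np.float32))  # batch of 2, 10 classes
labels = Tensor(np.array([1, 7], dtype=np.int32))           # class indices
print(loss_fn(logits, labels))                              # scalar mean cross-entropy
```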