diff --git a/tutorials/notebook/debugging_in_pynative_mode.ipynb b/tutorials/notebook/debugging_in_pynative_mode.ipynb
index d6c6f81bf49ab31481c9ee4f64375f31ff49f9f8..b068dddd05fc8bde544cf34f234b405b02db40dd 100644
--- a/tutorials/notebook/debugging_in_pynative_mode.ipynb
+++ b/tutorials/notebook/debugging_in_pynative_mode.ipynb
@@ -373,7 +373,7 @@
     "\n",
     "    def construct(self, x, label):\n",
     "        weights = self.weights\n",
-    "        return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)"
+    "        return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)"
    ]
   },
   {
diff --git a/tutorials/notebook/linear_regression.ipynb b/tutorials/notebook/linear_regression.ipynb
index 1d301aa1d00b589dcf06d3664424524e848f6339..b67175937c7ec66376c1431e764e81fbfb150e7e 100644
--- a/tutorials/notebook/linear_regression.ipynb
+++ b/tutorials/notebook/linear_regression.ipynb
@@ -524,7 +524,7 @@
     "\n",
     "    def construct(self, data, label):\n",
     "        weights = self.weights\n",
-    "        return C.GradOperation('get_by_list', get_by_list=True) \\\n",
+    "        return C.GradOperation(get_by_list=True) \\\n",
     "            (self.network, weights)(data, label)\n"
    ]
   },
diff --git a/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md b/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
index 5769bbfb6333a74959eaafeedaef67ddf855be26..877b07e0a6dcc8e7980a883927f5d1f235bcd213 100644
--- a/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
+++ b/tutorials/source_en/advanced_use/debugging_in_pynative_mode.md
@@ -262,7 +262,7 @@ def mul(x, y):
     return x * y
 
 def mainf(x, y):
-    return C.GradOperation('get_all', get_all=True)(mul)(x, y)
+    return C.GradOperation(get_all=True)(mul)(x, y)
 
 print(mainf(Tensor(1, mstype.int32), Tensor(2, mstype.int32)))
 ```
@@ -357,7 +357,7 @@ class GradWrap(nn.Cell):
 
     def construct(self, x, label):
         weights = self.weights
-        return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
+        return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)
 
 net = LeNet5()
 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
diff --git a/tutorials/source_en/use/custom_operator.md b/tutorials/source_en/use/custom_operator.md
index bbb0ac9b352cf5f88885dbf77f98b9d2b8b9f499..cff9317dd13be9efd5d52200234cc4256eecf862 100644
--- a/tutorials/source_en/use/custom_operator.md
+++ b/tutorials/source_en/use/custom_operator.md
@@ -232,7 +232,7 @@ def test_grad_net():
     x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
     sens = np.array([1.0, 1.0, 1.0]).astype(np.float32)
     square = Net()
-    grad = C.GradOperation('grad_with_sens', sens_param=True)
+    grad = C.GradOperation(sens_param=True)
     dx = grad(square)(Tensor(x), Tensor(sens))
     print("x: ", x)
     print("dx: ", dx)
diff --git a/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md b/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md
index 4289a3fd501f8247312f7c62f2d5fd4b34e3d7bb..a8c87f9ba8f6df44d9f5c4193b4d2b14ba1db147 100644
--- a/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md
+++ b/tutorials/source_zh_cn/advanced_use/debugging_in_pynative_mode.md
@@ -264,7 +264,7 @@ def mul(x, y):
     return x * y
 
 def mainf(x, y):
-    return C.GradOperation('get_all', get_all=True)(mul)(x, y)
+    return C.GradOperation(get_all=True)(mul)(x, y)
 
 print(mainf(Tensor(1, mstype.int32), Tensor(2, mstype.int32)))
 ```
@@ -359,7 +359,7 @@ class GradWrap(nn.Cell):
 
     def construct(self, x, label):
         weights = self.weights
-        return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
+        return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)
 
 net = LeNet5()
 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
diff --git a/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md b/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md
index 2526411468dd2fee7a376f703caec139734642cc..5ea2290e4647d865fdef6ddaf50032efb22a5fe8 100644
--- a/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md
+++ b/tutorials/source_zh_cn/advanced_use/gradient_accumulation.md
@@ -100,7 +100,7 @@ class TrainForwardBackward(Cell):
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = optimizer
         self.grad_sum = grad_sum
-        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
+        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
         self.sens = sens
         self.hyper_map = C.HyperMap()
 
diff --git a/tutorials/source_zh_cn/quick_start/linear_regression.md b/tutorials/source_zh_cn/quick_start/linear_regression.md
index 18ea005e760af3f6c6cebb7d218811c6ddb65d71..9f7ab617c692f9e04a200e87f2a5203a9d1bbae8 100644
--- a/tutorials/source_zh_cn/quick_start/linear_regression.md
+++ b/tutorials/source_zh_cn/quick_start/linear_regression.md
@@ -297,7 +297,7 @@ class GradWrap(nn.Cell):
 
     def construct(self, data, label):
         weights = self.weights
-        return C.GradOperation('get_by_list', get_by_list=True) \
+        return C.GradOperation(get_by_list=True) \
             (self.network, weights)(data, label)
 ```
 
diff --git a/tutorials/source_zh_cn/use/custom_operator.md b/tutorials/source_zh_cn/use/custom_operator.md
index 9eb6dec635b3bbe89d9a723570fc7e6601bc450c..5ca1b6d2103d261a5c0c5fd643583d12c13925ce 100644
--- a/tutorials/source_zh_cn/use/custom_operator.md
+++ b/tutorials/source_zh_cn/use/custom_operator.md
@@ -232,7 +232,7 @@ def test_grad_net():
     x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
     sens = np.array([1.0, 1.0, 1.0]).astype(np.float32)
     square = Net()
-    grad = C.GradOperation('grad_with_sens', sens_param=True)
+    grad = C.GradOperation(sens_param=True)
     dx = grad(square)(Tensor(x), Tensor(sens))
     print("x: ", x)
     print("dx: ", dx)
diff --git a/tutorials/tutorial_code/gradient_accumulation/train.py b/tutorials/tutorial_code/gradient_accumulation/train.py
index a26e79f3102e3d141f65318800b59fae6584d29e..123bf80a6c88bf324e8b1465e1029b8af68452d8 100644
--- a/tutorials/tutorial_code/gradient_accumulation/train.py
+++ b/tutorials/tutorial_code/gradient_accumulation/train.py
@@ -41,7 +41,7 @@ class TrainForwardBackward(Cell):
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = optimizer
         self.grad_sum = grad_sum
-        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
+        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
         self.sens = sens
         self.hyper_map = C.HyperMap()
 
diff --git a/tutorials/tutorial_code/linear_regression.py b/tutorials/tutorial_code/linear_regression.py
index 952607a19d43e1ed0e739d8f606681ab65881d60..bfed5b389e5d1de61e14336c14b5ab1663437784 100644
--- a/tutorials/tutorial_code/linear_regression.py
+++ b/tutorials/tutorial_code/linear_regression.py
@@ -41,7 +41,7 @@ class GradWrap(nn.Cell):
 
     def construct(self, data, label):
         weights = self.weights
-        return C.GradOperation('get_by_list', get_by_list=True) \
+        return C.GradOperation(get_by_list=True) \
             (self.network, weights)(data, label)
 
 # Initializing model functions
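Every hunk in this patch makes the same migration: `C.GradOperation` is now configured purely through its keyword flags (`get_all`, `get_by_list`, `sens_param`) and no longer accepts a leading name string such as `'get_by_list'` or `'grad_with_sens'`. A minimal sketch of the new call pattern, assuming a MindSpore build with the keyword-only signature:

```python
import mindspore.ops.composite as C
from mindspore import Tensor
from mindspore.common import dtype as mstype

def mul(x, y):
    return x * y

# Old style: C.GradOperation('get_all', get_all=True)
# New style: behavior flags are passed as keyword arguments only.
grad_all = C.GradOperation(get_all=True)  # gradients w.r.t. every input

x, y = Tensor(1, mstype.int32), Tensor(2, mstype.int32)
# d(x*y)/dx = y = 2, d(x*y)/dy = x = 1
print(grad_all(mul)(x, y))
```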