提交 af1ad154 编写于 作者: M mindspore-ci-bot 提交者: Gitee

!758 remove name arg from gradoperation

Merge pull request !758 from riemann_penn/remove_name_arg_from_gradoperation
......@@ -373,7 +373,7 @@
"\n",
" def construct(self, x, label):\n",
" weights = self.weights\n",
-" return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)"
+" return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)"
]
},
{
......
......@@ -524,7 +524,7 @@
"\n",
" def construct(self, data, label):\n",
" weights = self.weights\n",
-" return C.GradOperation('get_by_list', get_by_list=True) \\\n",
+" return C.GradOperation(get_by_list=True) \\\n",
" (self.network, weights)(data, label)\n"
]
},
......
......@@ -262,7 +262,7 @@ def mul(x, y):
return x * y
def mainf(x, y):
-return C.GradOperation('get_all', get_all=True)(mul)(x, y)
+return C.GradOperation(get_all=True)(mul)(x, y)
print(mainf(Tensor(1, mstype.int32), Tensor(2, mstype.int32)))
```
......@@ -357,7 +357,7 @@ class GradWrap(nn.Cell):
def construct(self, x, label):
weights = self.weights
-return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
+return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)
net = LeNet5()
optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
......
......@@ -232,7 +232,7 @@ def test_grad_net():
x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
sens = np.array([1.0, 1.0, 1.0]).astype(np.float32)
square = Net()
-grad = C.GradOperation('grad_with_sens', sens_param=True)
+grad = C.GradOperation(sens_param=True)
dx = grad(square)(Tensor(x), Tensor(sens))
print("x: ", x)
print("dx: ", dx)
......
......@@ -264,7 +264,7 @@ def mul(x, y):
return x * y
def mainf(x, y):
-return C.GradOperation('get_all', get_all=True)(mul)(x, y)
+return C.GradOperation(get_all=True)(mul)(x, y)
print(mainf(Tensor(1, mstype.int32), Tensor(2, mstype.int32)))
```
......@@ -359,7 +359,7 @@ class GradWrap(nn.Cell):
def construct(self, x, label):
weights = self.weights
-return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
+return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)
net = LeNet5()
optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
......
......@@ -100,7 +100,7 @@ class TrainForwardBackward(Cell):
self.weights = ParameterTuple(network.trainable_params())
self.optimizer = optimizer
self.grad_sum = grad_sum
-self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
+self.grad = C.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
self.hyper_map = C.HyperMap()
......
......@@ -297,7 +297,7 @@ class GradWrap(nn.Cell):
def construct(self, data, label):
weights = self.weights
-return C.GradOperation('get_by_list', get_by_list=True) \
+return C.GradOperation(get_by_list=True) \
(self.network, weights)(data, label)
```
......
......@@ -232,7 +232,7 @@ def test_grad_net():
x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
sens = np.array([1.0, 1.0, 1.0]).astype(np.float32)
square = Net()
-grad = C.GradOperation('grad_with_sens', sens_param=True)
+grad = C.GradOperation(sens_param=True)
dx = grad(square)(Tensor(x), Tensor(sens))
print("x: ", x)
print("dx: ", dx)
......
......@@ -41,7 +41,7 @@ class TrainForwardBackward(Cell):
self.weights = ParameterTuple(network.trainable_params())
self.optimizer = optimizer
self.grad_sum = grad_sum
-self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
+self.grad = C.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
self.hyper_map = C.HyperMap()
......
......@@ -41,7 +41,7 @@ class GradWrap(nn.Cell):
def construct(self, data, label):
weights = self.weights
-return C.GradOperation('get_by_list', get_by_list=True) \
+return C.GradOperation(get_by_list=True) \
(self.network, weights)(data, label)
# Initializing model functions
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册