Unverified · Commit 86d4af39 authored by zyfncg, committed by GitHub

Change the invoking method of setitem from numpy to the set_value op when the value isn't a Tensor (#35701)

* Change the invoking method of setitem from numpy to the set_value op when the value is not a Tensor

* Fix the inplace check logic in setitem

* Fix the unittest failures caused by setitem not supporting fp16

* Clean up some code formatting in setitem
Parent fc5fb2a1
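For context, the user-visible entry point is ordinary slice assignment; what changes is the dispatch underneath when the assigned value is a Python scalar, list, or numpy array rather than a Tensor. A minimal sketch of the affected path (illustrative only, mirroring the test case further down):

    import paddle

    x = paddle.zeros([2, 6], dtype='float32')
    # 42.0 is not a Tensor, so __setitem__ now lowers this assignment to
    # the set_value op instead of round-tripping through numpy.
    x[:, 2:5] = 42.0
    print(x.numpy())  # each row: [0., 0., 42., 42., 42., 0.]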
@@ -36,17 +36,18 @@ inline void CheckAndUpdateSliceAttrs(const framework::DDim in_dims,
     if (infer_flags != nullptr && (*infer_flags)[i] == -1) {
       continue;
    }
-    T start = (*starts)[i] < 0 ? ((*starts)[i] + dim_value) : (*starts)[i];
-    start = std::max(start, static_cast<T>(0));
-    T end = (*ends)[i] < 0 ? ((*ends)[i] + dim_value) : (*ends)[i];
-    end = std::min(end, dim_value);
     T step = steps == nullptr ? 1 : (*steps)[i];
     PADDLE_ENFORCE_NE(
         step, 0, platform::errors::InvalidArgument(
                      "Step should not be 0, but received step = %d.", step));
+    T start = (*starts)[i] < 0 ? ((*starts)[i] + dim_value) : (*starts)[i];
+    start = std::max(start, static_cast<T>(0));
+    T end =
+        0 < step && (*ends)[i] < 0 ? ((*ends)[i] + dim_value) : (*ends)[i];
+    end = std::min(end, dim_value);
     if (step > 0) {
       start = std::min(start, dim_value);
       end = std::max(end, static_cast<T>(0));
......
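The reordering is not cosmetic: step is now validated before it is consulted, and the negative-`end` wraparound applies only for a positive step. A rough Python mirror of just the normalization shown above (the real code is the C++ template; this function name is made up):

    def normalize_slice(start, end, step, dim_value):
        # Validate step before using it below.
        assert step != 0, "Step should not be 0"
        start = start + dim_value if start < 0 else start
        start = max(start, 0)
        # Wrap a negative end only when stepping forward; with a negative
        # step, end = -1 keeps its "walk down past the front" meaning.
        end = end + dim_value if step > 0 and end < 0 else end
        end = min(end, dim_value)
        if step > 0:
            start = min(start, dim_value)
            end = max(end, 0)
        return start, end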
(This diff is collapsed.)
@@ -40,12 +40,11 @@ class TensorFill_Test(unittest.TestCase):
             for dtype in typelist:
                 var = 1.
                 tensor = paddle.to_tensor(np_arr, place=p, dtype=dtype)
-                newtensor = tensor.clone()
-                newtensor[...] = var
+                target = tensor.numpy()
+                target[...] = var
                 tensor.fill_(var)  #var type is basic type in typelist
-                self.assertEqual((tensor.numpy() == newtensor.numpy()).all(),
-                                 True)
+                self.assertEqual((tensor.numpy() == target).all(), True)

     def test_tensor_fill_backward(self):
         typelist = ['float32']
......
@@ -35,12 +35,11 @@ class TensorFill_Test(unittest.TestCase):
                 np.array(six.moves.range(np.prod(self.shape))), self.shape)
             for dtype in typelist:
                 tensor = paddle.to_tensor(np_arr, place=p, dtype=dtype)
-                newtensor = tensor.clone()
-                newtensor[...] = 0
+                target = tensor.numpy()
+                target[...] = 0
                 tensor.zero_()
-                self.assertEqual(
-                    (tensor.numpy() == newtensor.numpy()).all().item(), True)
+                self.assertEqual((tensor.numpy() == target).all().item(), True)

 if __name__ == '__main__':
......
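Both test rewrites above (fill_ and zero_) exist for the same reason: per the commit message, setitem now runs through set_value, which does not support fp16, so the expected result is built in numpy rather than via `newtensor[...] = var` on a cloned Tensor. The pattern, roughly:

    import numpy as np
    import paddle

    tensor = paddle.to_tensor(np.arange(6).reshape(2, 3), dtype='float16')
    # Compute the expectation in numpy; tensor[...] = var would hit the
    # fp16 limitation of the new setitem path.
    target = tensor.numpy()
    target[...] = 1.
    tensor.fill_(1.)
    assert (tensor.numpy() == target).all()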
@@ -391,6 +391,11 @@ class TestVarBase(unittest.TestCase):
             self.assertTrue(cmp_float(x.grad.numpy(), [20.0]))
             self.assertTrue(cmp_float(detach_x.grad.numpy(), [60.0]))

+            with self.assertRaises(ValueError):
+                detach_x[:] = 5.0
+
+            detach_x.stop_gradient = True
+
             # Due to sharing of data with origin Tensor, There are some unsafe operations:
             with self.assertRaises(RuntimeError):
                 y = 2**x
......
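The new assertion reflects the tightened inplace check: slice assignment now executes the in-place set_value op, and (as I read the check) writing to a leaf Tensor whose stop_gradient is False is rejected with a ValueError; flipping stop_gradient to True first makes the write legal again. A sketch:

    import paddle

    x = paddle.to_tensor([2.0], stop_gradient=False)
    detach_x = x.detach()
    detach_x.stop_gradient = False   # shares storage with x, grads tracked
    try:
        detach_x[:] = 5.0            # in-place set_value on a grad-tracked leaf
    except ValueError:
        pass
    detach_x.stop_gradient = True
    detach_x[:] = 5.0                # fine once gradients are no longer tracked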
@@ -438,10 +443,11 @@ class TestVarBase(unittest.TestCase):
             self.assertTrue(np.array_equal(y.numpy(), y_copy.numpy()))
             self.assertNotEqual(id(x), id(x_copy))
-            x_copy[:] = 5.
-            self.assertTrue(np.array_equal(x_copy.numpy(), [5.]))
             self.assertTrue(np.array_equal(x.numpy(), [2.]))

+            with self.assertRaises(ValueError):
+                x_copy[:] = 5.
+
             with self.assertRaises(RuntimeError):
                 copy.deepcopy(z)
......
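The deepcopy test moves the `x_copy[:] = 5.` write into the same assertRaises(ValueError) pattern for what appears to be the same reason: `x_copy` is a leaf Tensor with stop_gradient set to False, so the in-place set_value write is now disallowed rather than silently mutating the copy.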
@@ -805,8 +811,8 @@ class TestVarBase(unittest.TestCase):
             # case2:
             tensor_x = paddle.to_tensor(
                 np.zeros(12).reshape(2, 6).astype(np.float32))
-            tensor_y1 = paddle.zeros([1]) + 2
-            tensor_y2 = paddle.zeros([1]) + 5
+            tensor_y1 = paddle.zeros([1], dtype='int32') + 2
+            tensor_y2 = paddle.zeros([1], dtype='int32') + 5
             tensor_x[:, tensor_y1:tensor_y2] = 42
             res = tensor_x.numpy()
             exp = np.array([[0., 0., 42., 42., 42., 0.],
......
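The dtype change in this hunk suggests that Tensor-valued slice bounds must now be integer typed, since setitem feeds them to set_value as indices instead of casting them through numpy. For example:

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.zeros((2, 6), dtype=np.float32))
    y1 = paddle.zeros([1], dtype='int32') + 2   # bounds as int32 Tensors
    y2 = paddle.zeros([1], dtype='int32') + 5
    x[:, y1:y2] = 42
    # each row: [0., 0., 42., 42., 42., 0.]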
@@ -1390,7 +1390,8 @@ class Embedding(Layer):
             is_bias=False)

         if in_dygraph_mode() and padding_idx != -1:
-            self.weight[padding_idx] = 0.0
+            with paddle.no_grad():
+                self.weight[padding_idx] = 0.0

     def forward(self, x):
         return F.embedding(
......
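Since setitem is now a real in-place op that autograd can record, zeroing the padding row of the freshly created weight has to happen under paddle.no_grad() so the initialization write is excluded from gradient tracking. Usage is unchanged; assuming the standard paddle.nn.Embedding signature:

    import paddle

    emb = paddle.nn.Embedding(10, 4, padding_idx=0)
    # In dygraph mode the row at padding_idx is zeroed at construction,
    # with the write wrapped in no_grad as shown in the hunk above.
    print(emb.weight.numpy()[0])  # all zeros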