Commit fd24ab47 authored by Xin Pan

polish

test=develop
Parent 1f89249a
@@ -235,6 +235,7 @@ PYBIND11_MODULE(core, m) {
             self.forward_id_ = forward_id;
           },
           py::return_value_policy::reference)
+      .def_property_readonly("type", &imperative::OpBase::Type)
       .def_property(
           "backward_id",
           [](const imperative::OpBase &self) { return self.backward_id_; },
......
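The hunk above adds a read-only `type` property to the Python binding of `imperative::OpBase`, next to the existing read/write `backward_id` property. A minimal pure-Python sketch of the distinction the binding makes (stand-in class, not the real pybind11 wrapper):

    class OpHandle(object):              # stand-in for the bound OpBase
        def __init__(self, op_type):
            self._type = op_type
            self.backward_id = 0         # read/write, like .def_property

        @property                        # read-only, like
        def type(self):                  # .def_property_readonly
            return self._type

    op = OpHandle('mul')
    print(op.type)       # 'mul'
    op.backward_id = 7   # allowed
    # op.type = 'add'    # would raise AttributeError: can't set attribute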
@@ -906,7 +906,10 @@ class Operator(object):
    @property
    def type(self):
-        return self.desc.type()
+        if _in_imperative_mode():
+            return self.iop.type
+        else:
+            return self.desc.type()

    def input(self, name):
        """
......
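With that binding in place, `Operator.type` dispatches on execution mode: in imperative (eager) mode the type comes from the traced C++ op, otherwise from the program desc. A self-contained sketch of the same dispatch pattern, with illustrative names rather than Paddle's real internals:

    class FakeOp(object):                # illustrative, not Paddle's Operator
        def __init__(self, desc_type, iop_type, imperative):
            self._desc_type = desc_type  # static-graph OpDesc type
            self._iop_type = iop_type    # type recorded by the tracer
            self._imperative = imperative

        @property
        def type(self):
            # Prefer the traced op when running eagerly; the desc copy
            # may be absent or stale there.
            if self._imperative:
                return self._iop_type
            return self._desc_type

    print(FakeOp('mul', 'mul', imperative=True).type)   # 'mul'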
@@ -14,9 +14,7 @@
from __future__ import print_function

-import sys
import six
-from six.moves import reduce
from collections import defaultdict

from paddle.fluid import core
@@ -51,16 +49,7 @@ class Tracer(core.Tracer):
    def trace_op(self, op, stop_gradient=False):
        # record op's trace id
        op.iop._trace_id = self._trace_id

-        """
-        all_input_stop_grads = True
-        for vars in op.inputs.values():
-            for v in vars:
-                sys.stderr.write('%s %s\n' % (v.name, v.stop_gradient))
-                all_input_stop_grads &= v.stop_gradient
-        stop_gradient = False if not stop_gradient else True
-        stop_gradient = all_input_stop_grads | stop_gradient
-        """
        backward_refs = self.trace(op.iop, op.inputs, op.outputs, op.attrs,
                                   framework._current_expected_place(),
                                   stop_gradient)
@@ -73,7 +62,7 @@ class Tracer(core.Tracer):
        if len(backward_refs) > 0:
            op.iop.register_backward_hooks(release_op)

-            # TODO(minqiyang): remove all inputs and outputs after seperate
+            # TODO(minqiyang): remove all inputs and outputs after separate
            # var and grad
            op.backward_refs = defaultdict(list)
            for k, v in six.iteritems(op.inputs):
......
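In `trace_op`, `trace` returns the set of input/output slots that the backward pass still needs (`backward_refs`); the wrapper registers a release hook and keeps Python references only for those slots, so everything else can be freed early. A self-contained sketch of that filtering step, with assumed shapes for the arguments:

    from collections import defaultdict

    def keep_backward_refs(inputs, outputs, backward_refs):
        # inputs/outputs: dict of slot name -> list of variables;
        # backward_refs: slot names the gradient computation will read.
        refs = defaultdict(list)
        for mapping in (inputs, outputs):
            for k, vs in mapping.items():
                if k in backward_refs:
                    refs[k].extend(vs)
        return refs   # variables outside these slots can be released

    print(keep_backward_refs({'X': ['x0']}, {'Out': ['y0']}, {'X'}))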
@@ -212,7 +212,7 @@ class UniformInitializer(Initializer):
        if self._seed == 0:
            self._seed = block.program.random_seed

-        # to be compatible of fp16 initalizers
+        # to be compatible of fp16 initializers
        if var.dtype == VarDesc.VarType.FP16:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(
......
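The surrounding initializer code works around missing FP16 kernels: when the target variable is FP16, it fills an FP32 temporary and then appends a cast. A NumPy analogue of the same two-step pattern (illustrative only, not Paddle's kernel):

    import numpy as np

    def uniform_init_fp16(shape, low=-1.0, high=1.0, seed=1):
        rng = np.random.RandomState(seed)
        out_fp32 = rng.uniform(low, high, size=shape).astype(np.float32)
        return out_fp32.astype(np.float16)   # the cast appended after init

    print(uniform_init_fp16((2, 2)).dtype)   # float16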
@@ -165,6 +165,8 @@ class Optimizer(object):
            name = self._name + "_" + name
        if (name in self._accumulators and
                param.name in self._accumulators[name]):
+            if framework._in_imperative_mode():
+                return self._accumulators[name][param.name]
            raise Exception("Accumulator {} already exists for parameter {}".
                            format(name, param.name))
        if shape == None:
......
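In imperative mode `minimize()` runs once per training step, so the optimizer legitimately asks for an accumulator that already exists; the hunk returns the cached slot instead of raising. A condensed sketch of that rule (stand-in code, not Paddle's `_add_accumulator`):

    _accumulators = {}

    def add_accumulator(name, param_name, imperative, factory=dict):
        key = (name, param_name)
        if key in _accumulators:
            if imperative:
                return _accumulators[key]   # reuse across eager steps
            raise Exception("Accumulator {} already exists for parameter {}".
                            format(name, param_name))
        _accumulators[key] = factory()
        return _accumulators[key]

    a = add_accumulator('moment1', 'fc.w', imperative=True)
    assert add_accumulator('moment1', 'fc.w', imperative=True) is a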
@@ -24,9 +24,11 @@ import paddle.fluid.core as core
from test_imperative_base import new_program_scope
from paddle.fluid.imperative.base import to_variable

+# Can use Amusic dataset as the DeepCF describes.
DATA_PATH = os.environ.get('DATA_PATH', '')
-BATCH_SIZE = int(os.environ.get('BATCH_SIZE', 256))
-NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 2))
+BATCH_SIZE = int(os.environ.get('BATCH_SIZE', 128))
+NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 5))
+NUM_EPOCHES = int(os.environ.get('NUM_EPOCHES', 1))
@@ -92,18 +94,13 @@ class DeepCF(fluid.imperative.Layer):
        self._num_users = num_users
        self._num_items = num_items
        self._rating_matrix = self.create_parameter(
-            None,
+            fluid.ParamAttr(trainable=False),
            matrix.shape,
            matrix.dtype,
            is_bias=False,
            default_initializer=fluid.initializer.NumpyArrayInitializer(matrix))
+        self._rating_matrix._stop_gradient = True
-        # self._user_emb = fluid.imperative.Embedding(self.full_name(),
-        #                                             [self._num_users, 256])
-        # self._item_emb = fluid.imperative.Embedding(self.full_name(),
-        #                                             [self._num_items, 256])
        self._mlp = MLP(self.full_name())
        self._dmf = DMF(self.full_name())
        self._match_fc = fluid.imperative.FC(self.full_name(), 1, act='sigmoid')
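Two independent knobs freeze the rating matrix above: `ParamAttr(trainable=False)` keeps the optimizer away from it, and `_stop_gradient = True` keeps autograd from tracing into it. A toy illustration of the two flags (stand-in class, not Paddle's parameter object):

    class Param(object):                     # stand-in parameter object
        def __init__(self, data, trainable=True):
            self.data = data
            self.trainable = trainable       # optimizer-side switch
            self._stop_gradient = not trainable  # autograd-side switch

    p = Param([[0.0]], trainable=False)
    assert p._stop_gradient                  # no gradient flows into it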
@@ -111,7 +108,6 @@ class DeepCF(fluid.imperative.Layer):
    def forward(self, users, items):
        # users_emb = self._user_emb(users)
        # items_emb = self._item_emb(items)
-        sys.stderr.write('forward: %s\n' % users._stop_gradient)
        users_emb = fluid.layers.gather(self._rating_matrix, users)
        items_emb = fluid.layers.gather(
            fluid.layers.transpose(self._rating_matrix, [1, 0]), items)
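`forward` uses the frozen rating matrix itself as the embedding table: user vectors are its rows, item vectors are rows of its transpose (the commented-out `Embedding` layers were the learned alternative). A NumPy illustration of the two gathers:

    import numpy as np

    matrix = np.arange(12, dtype=np.float32).reshape(3, 4)  # users x items
    users = np.array([0, 2])
    items = np.array([1])
    users_emb = matrix[users]      # fluid.layers.gather(rating_matrix, users)
    items_emb = matrix.T[items]    # gather on transpose(rating_matrix, [1, 0])
    print(users_emb.shape, items_emb.shape)   # (2, 4) (1, 3)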
@@ -131,10 +127,10 @@ def get_data():
    user_ids = []
    item_ids = []
    labels = []
-    matrix = np.zeros([100, 1000], dtype=np.float32)
+    NUM_USERS = 100
+    NUM_ITEMS = 1000
+    matrix = np.zeros([NUM_USERS, NUM_ITEMS], dtype=np.float32)
    for uid in range(NUM_USERS):
        for iid in range(NUM_ITEMS):
            label = float(random.randint(1, 6) == 1)
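`get_data` now names the matrix dimensions before using them; each (user, item) pair is labeled positive with probability 1/6 via `random.randint(1, 6) == 1`. A plausible completion of the loop body, hedged since the diff cuts off after the `label = ...` line:

    import random
    import numpy as np

    NUM_USERS, NUM_ITEMS = 100, 1000
    matrix = np.zeros([NUM_USERS, NUM_ITEMS], dtype=np.float32)
    user_ids, item_ids, labels = [], [], []
    for uid in range(NUM_USERS):
        for iid in range(NUM_ITEMS):
            label = float(random.randint(1, 6) == 1)  # positive ~1/6 of pairs
            user_ids.append(uid)          # assumed from surrounding context
            item_ids.append(iid)
            labels.append(label)
            matrix[uid, iid] = label      # interactions mirrored in the matrix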
@@ -209,7 +205,7 @@ class TestImperativeDeepCF(unittest.TestCase):
        startup.random_seed = seed
        main = fluid.Program()
        main.random_seed = seed

-        """
        scope = fluid.core.Scope()
        with new_program_scope(main=main, startup=startup, scope=scope):
            users = fluid.layers.data('users', [1], dtype='int32')
@@ -240,17 +236,18 @@ class TestImperativeDeepCF(unittest.TestCase):
                          },
                          fetch_list=[loss])[0]
            sys.stderr.write('static loss %s\n' % static_loss)
-        """

        with fluid.imperative.guard():
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed

            deepcf = DeepCF('deepcf', num_users, num_items, matrix)
-            sys.stderr.write('matrix: %s\n' % deepcf._rating_matrix._numpy())
+            adam = fluid.optimizer.AdamOptimizer(0.01)
            for e in range(NUM_EPOCHES):
                sys.stderr.write('epoch %d\n' % e)
                for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):
+                    if slice + BATCH_SIZE >= users_np.shape[0]:
+                        break
                    prediction = deepcf(
                        to_variable(users_np[slice:slice + BATCH_SIZE]),
                        to_variable(items_np[slice:slice + BATCH_SIZE]))
@@ -259,12 +256,10 @@ class TestImperativeDeepCF(unittest.TestCase):
                        to_variable(labels_np[
                            slice:slice + BATCH_SIZE])))
                    loss._backward()
-                    adam = fluid.optimizer.AdamOptimizer(0.01)
                    adam.minimize(loss)
                    deepcf.clear_gradients()
                    dy_loss = loss._numpy()
-                    sys.stderr.write('dynamic loss: %s\n' % dy_loss)
-            sys.stderr.write('matrix: %s\n' % deepcf._rating_matrix._numpy())
+                    sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss))
            self.assertEqual(static_loss, dy_loss)
......
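The last two hunks hoist the `AdamOptimizer` construction out of the batch loop, so a single optimizer instance, and hence a single set of accumulators, persists across steps; this pairs with the `_add_accumulator` change above, which lets repeated `minimize()` calls reuse those slots. A toy analogue of the corrected loop structure:

    class ToyOptimizer(object):          # stand-in for AdamOptimizer
        def __init__(self):
            self.accumulators = {}       # persists across minimize() calls
            self.steps = 0

        def minimize(self, loss):
            self.accumulators.setdefault('moment1', 0.0)
            self.steps += 1

    BATCH_SIZE, NUM_BATCHES = 128, 5
    opt = ToyOptimizer()                 # built once, before the loop
    for start in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):
        loss = 0.0                       # stand-in for forward + log_loss
        opt.minimize(loss)
    assert opt.steps == NUM_BATCHES      # one optimizer state, five updates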