未验证提交 b74a5918,编写于 作者:Yibing Liu,提交者:GitHub

Fix core.xxx usage in rec (#2180)

上级 e64bd9c4
......@@ -11,9 +11,7 @@ import paddle.fluid as fluid
import reader
from network_conf import ctr_dnn_model
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
......@@ -53,21 +51,25 @@ def infer():
args = parse_args()
place = fluid.CPUPlace()
inference_scope = fluid.core.Scope()
inference_scope = fluid.Scope()
dataset = reader.CriteoDataset(args.sparse_feature_dim)
test_reader = paddle.batch(dataset.test([args.data_path]), batch_size=args.batch_size)
test_reader = paddle.batch(
dataset.test([args.data_path]), batch_size=args.batch_size)
startup_program = fluid.framework.Program()
test_program = fluid.framework.Program()
with fluid.framework.program_guard(test_program, startup_program):
loss, auc_var, batch_auc_var, _, data_list = ctr_dnn_model(args.embedding_size, args.sparse_feature_dim, False)
loss, auc_var, batch_auc_var, _, data_list = ctr_dnn_model(
args.embedding_size, args.sparse_feature_dim, False)
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=data_list, place=place)
fluid.io.load_persistables(executor=exe, dirname=args.model_path,
fluid.io.load_persistables(
executor=exe,
dirname=args.model_path,
main_program=fluid.default_main_program())
def set_zero(var_name):
......@@ -84,7 +86,8 @@ def infer():
feed=feeder.feed(data),
fetch_list=[loss, auc_var])
if batch_id % 100 == 0:
logger.info("TEST --> batch: {} loss: {} auc: {}".format(batch_id, loss_val/args.batch_size, auc_val))
logger.info("TEST --> batch: {} loss: {} auc: {}".format(
batch_id, loss_val / args.batch_size, auc_val))
if __name__ == '__main__':
......
......@@ -28,9 +28,15 @@ logger.setLevel(logging.INFO)
def parse_args():
parser = argparse.ArgumentParser(description="PaddlePaddle DIN example")
parser.add_argument(
'--model_path', type=str, required=True, help="path of model parameters")
'--model_path',
type=str,
required=True,
help="path of model parameters")
parser.add_argument(
'--test_path', type=str, default='data/paddle_test.txt.bak', help='dir of test file')
'--test_path',
type=str,
default='data/paddle_test.txt.bak',
help='dir of test file')
parser.add_argument(
'--use_cuda', type=int, default=0, help='whether to use gpu')
......@@ -64,7 +70,7 @@ def infer():
data_reader, _ = reader.prepare_reader(args.test_path, 32 * 16)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inference_scope = fluid.core.Scope()
inference_scope = fluid.Scope()
exe = fluid.Executor(place)
......
......@@ -37,7 +37,7 @@ def infer(test_reader, use_cuda, model_path):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(fluid.core.Scope()):
with fluid.scope_guard(fluid.Scope()):
infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
model_path, exe)
accum_num_recall = 0.0
......
......@@ -40,7 +40,7 @@ def infer(args, vocab_size, test_reader, use_cuda):
exe = fluid.Executor(place)
hid_size = args.hid_size
batch_size = args.batch_size
with fluid.scope_guard(fluid.core.Scope()):
with fluid.scope_guard(fluid.Scope()):
main_program = fluid.Program()
with fluid.program_guard(main_program):
acc = net.infer_network(vocab_size, batch_size, hid_size)
......
......@@ -33,32 +33,51 @@ logger.setLevel(logging.INFO)
def parse_args():
    """Build and parse the command-line arguments for the multi-view simnet model.

    Returns:
        argparse.Namespace: parsed arguments with training/inference settings
        (file paths, slot counts, encoder types, dimensions, batch size).
    """
    # NOTE(review): the diff rendering duplicated each reformatted
    # add_argument call (old one-line form followed by the new multi-line
    # form); this reconstruction keeps only the post-change version.
    parser = argparse.ArgumentParser("multi-view simnet")
    # Data files: no defaults — callers must supply paths (None if omitted).
    parser.add_argument("--train_file", type=str, help="Training file")
    parser.add_argument("--valid_file", type=str, help="Validation file")
    parser.add_argument(
        "--epochs", type=int, default=10, help="Number of epochs for training")
    parser.add_argument(
        "--model_dir",
        type=str,
        default='model_output',
        help="Model output folder")
    parser.add_argument(
        "--query_slots", type=int, default=1, help="Number of query slots")
    parser.add_argument(
        "--title_slots", type=int, default=1, help="Number of title slots")
    parser.add_argument(
        "--query_encoder",
        type=str,
        default="bow",
        help="Encoder module for slot encoding")
    parser.add_argument(
        "--title_encoder",
        type=str,
        default="bow",
        help="Encoder module for slot encoding")
    parser.add_argument(
        "--query_encode_dim",
        type=int,
        default=128,
        help="Dimension of query encoder output")
    parser.add_argument(
        "--title_encode_dim",
        type=int,
        default=128,
        help="Dimension of title encoder output")
    parser.add_argument(
        "--batch_size", type=int, default=128, help="Batch size for training")
    parser.add_argument(
        "--embedding_dim",
        type=int,
        default=128,
        help="Default Dimension of Embedding")
    parser.add_argument(
        "--sparse_feature_dim",
        type=int,
        default=1000001,
        help="Sparse feature hashing space for index processing")
    parser.add_argument(
        "--hidden_size", type=int, default=128, help="Hidden dim")
    return parser.parse_args()
......@@ -74,12 +93,13 @@ def start_infer(args, model_path):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(fluid.core.Scope()):
with fluid.scope_guard(fluid.Scope()):
infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
args.model_dir, exe)
t0 = time.time()
step_id = 0
feeder = fluid.DataFeeder(program=infer_program, feed_list=feed_target_names, place=place)
feeder = fluid.DataFeeder(
program=infer_program, feed_list=feed_target_names, place=place)
for batch_id, data in enumerate(test_reader()):
step_id += 1
loss_val, correct_val = exe.run(infer_program,
......@@ -89,9 +109,11 @@ def start_infer(args, model_path):
.format(step_id, batch_id, loss_val,
float(correct_val) / args.batch_size))
def main():
    """Entry point: parse CLI options, then run inference from the model dir."""
    cli_args = parse_args()
    start_infer(cli_args, cli_args.model_dir)


if __name__ == "__main__":
    main()
......@@ -76,7 +76,7 @@ def infer(args, vocab_size, test_reader):
hid_size = args.hid_size
batch_size = args.batch_size
model_path = args.model_dir
with fluid.scope_guard(fluid.core.Scope()):
with fluid.scope_guard(fluid.Scope()):
main_program = fluid.Program()
start_up_program = fluid.Program()
with fluid.program_guard(main_program, start_up_program):
......
......@@ -39,7 +39,7 @@ def infer(test_reader, vocab_tag, use_cuda, model_path, epoch):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(fluid.core.Scope()):
with fluid.scope_guard(fluid.Scope()):
infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
model_path, exe)
t0 = time.time()
......
......@@ -60,7 +60,7 @@ def infer_epoch(args, vocab_size, test_reader, use_cuda, i2w):
exe = fluid.Executor(place)
emb_size = args.emb_size
batch_size = args.batch_size
with fluid.scope_guard(fluid.core.Scope()):
with fluid.scope_guard(fluid.Scope()):
main_program = fluid.Program()
with fluid.program_guard(main_program):
values, pred = net.infer_network(vocab_size, emb_size)
......@@ -123,7 +123,7 @@ def infer_step(args, vocab_size, test_reader, use_cuda, i2w):
exe = fluid.Executor(place)
emb_size = args.emb_size
batch_size = args.batch_size
with fluid.scope_guard(fluid.core.Scope()):
with fluid.scope_guard(fluid.Scope()):
main_program = fluid.Program()
with fluid.program_guard(main_program):
values, pred = net.infer_network(vocab_size, emb_size)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册