未验证 提交 627e60c1 编写于 作者: H hutuxian 提交者: GitHub

Revert upgrade 1.6API (#3569)

* Revert "add check version (#3530)"

This reverts commit 2a167ec9.

* Revert "Upgrade API for gnn (#3503)"

This reverts commit 80b01801.

* Revert "upgrade API for DIN (#3499)"

This reverts commit 734812c3.
上级 7de29491
...@@ -29,7 +29,6 @@ DIN通过一个兴趣激活模块(Activation Unit),用预估目标Candidate AD ...@@ -29,7 +29,6 @@ DIN通过一个兴趣激活模块(Activation Unit),用预估目标Candidate AD
最后我们将这相关的用户兴趣表达、用户静态特征和上下文相关特征,以及ad相关的特征拼接起来,输入到后续的多层DNN网络,最后预测得到用户对当前目标ADs的点击概率。 最后我们将这相关的用户兴趣表达、用户静态特征和上下文相关特征,以及ad相关的特征拼接起来,输入到后续的多层DNN网络,最后预测得到用户对当前目标ADs的点击概率。
**目前模型库下模型均要求使用PaddlePaddle 1.6及以上版本或适当的develop版本。**
## 数据下载及预处理 ## 数据下载及预处理
......
...@@ -9,7 +9,6 @@ import time ...@@ -9,7 +9,6 @@ import time
import network import network
import reader import reader
import random import random
import sys
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid") logger = logging.getLogger("fluid")
...@@ -86,7 +85,7 @@ def train(): ...@@ -86,7 +85,7 @@ def train():
#data_reader, max_len = reader.prepare_reader(train_path, args.batch_size) #data_reader, max_len = reader.prepare_reader(train_path, args.batch_size)
logger.info("reading data completes") logger.info("reading data completes")
avg_cost, pred = network.network(item_count, cat_count) avg_cost, pred = network.network(item_count, cat_count, 433)
#fluid.clip.set_gradient_clip(clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0)) #fluid.clip.set_gradient_clip(clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0))
base_lr = args.base_lr base_lr = args.base_lr
boundaries = [410000] boundaries = [410000]
...@@ -168,21 +167,6 @@ def train(): ...@@ -168,21 +167,6 @@ def train():
logger.info("run trainer") logger.info("run trainer")
train_loop(t.get_trainer_program()) train_loop(t.get_trainer_program())
def check_version():
    """
    Log an error and exit when the installed version of PaddlePaddle
    does not satisfy the 1.6 requirement.
    """
    # BUGFIX: the original had a trailing line-continuation backslash after
    # the last string fragment, which glued the assignment onto the `try:`
    # line below and made the whole module a SyntaxError.
    err = "PaddlePaddle version 1.6 or higher is required, " \
          "or a suitable develop version is satisfied as well. \n" \
          "Please make sure the version is good with your code."
    try:
        # Raises when the installed paddle is older than 1.6.0.
        fluid.require_version('1.6.0')
    except Exception:
        logger.error(err)
        sys.exit(1)
if __name__ == "__main__": if __name__ == "__main__":
check_version()
train() train()
...@@ -19,7 +19,6 @@ import os ...@@ -19,7 +19,6 @@ import os
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import reader import reader
import sys
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid") logger = logging.getLogger("fluid")
...@@ -100,21 +99,6 @@ def infer(): ...@@ -100,21 +99,6 @@ def infer():
auc = calc_auc(score) auc = calc_auc(score)
logger.info("TEST --> loss: {}, auc: {}".format(loss_sum / count, auc)) logger.info("TEST --> loss: {}, auc: {}".format(loss_sum / count, auc))
def check_version():
    """
    Log an error and exit when the installed version of PaddlePaddle
    does not satisfy the 1.6 requirement.
    """
    # BUGFIX: removed the stray line-continuation backslash that followed
    # the final string fragment; it merged this assignment with the `try:`
    # line and produced a SyntaxError at import time.
    err = "PaddlePaddle version 1.6 or higher is required, " \
          "or a suitable develop version is satisfied as well. \n" \
          "Please make sure the version is good with your code."
    try:
        # Raises when the installed paddle is older than 1.6.0.
        fluid.require_version('1.6.0')
    except Exception:
        logger.error(err)
        sys.exit(1)
if __name__ == '__main__': if __name__ == '__main__':
check_version()
infer() infer()
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
import paddle.fluid as fluid import paddle.fluid as fluid
def din_attention(hist, target_expand, mask): def din_attention(hist, target_expand, max_len, mask):
"""activation weight""" """activation weight"""
hidden_size = hist.shape[-1] hidden_size = hist.shape[-1]
...@@ -45,10 +45,9 @@ def din_attention(hist, target_expand, mask): ...@@ -45,10 +45,9 @@ def din_attention(hist, target_expand, mask):
return out return out
def network(item_count, cat_count): def network(item_count, cat_count, max_len):
"""network definition""" """network definition"""
seq_len = -1
item_emb_size = 64 item_emb_size = 64
cat_emb_size = 64 cat_emb_size = 64
is_sparse = False is_sparse = False
...@@ -57,60 +56,60 @@ def network(item_count, cat_count): ...@@ -57,60 +56,60 @@ def network(item_count, cat_count):
item_emb_attr = fluid.ParamAttr(name="item_emb") item_emb_attr = fluid.ParamAttr(name="item_emb")
cat_emb_attr = fluid.ParamAttr(name="cat_emb") cat_emb_attr = fluid.ParamAttr(name="cat_emb")
hist_item_seq = fluid.data( hist_item_seq = fluid.layers.data(
name="hist_item_seq", shape=[None, seq_len], dtype="int64") name="hist_item_seq", shape=[max_len, 1], dtype="int64")
hist_cat_seq = fluid.data( hist_cat_seq = fluid.layers.data(
name="hist_cat_seq", shape=[None, seq_len], dtype="int64") name="hist_cat_seq", shape=[max_len, 1], dtype="int64")
target_item = fluid.data( target_item = fluid.layers.data(
name="target_item", shape=[None], dtype="int64") name="target_item", shape=[1], dtype="int64")
target_cat = fluid.data( target_cat = fluid.layers.data(
name="target_cat", shape=[None], dtype="int64") name="target_cat", shape=[1], dtype="int64")
label = fluid.data( label = fluid.layers.data(
name="label", shape=[None, 1], dtype="float32") name="label", shape=[1], dtype="float32")
mask = fluid.data( mask = fluid.layers.data(
name="mask", shape=[None, seq_len, 1], dtype="float32") name="mask", shape=[max_len, 1], dtype="float32")
target_item_seq = fluid.data( target_item_seq = fluid.layers.data(
name="target_item_seq", shape=[None, seq_len], dtype="int64") name="target_item_seq", shape=[max_len, 1], dtype="int64")
target_cat_seq = fluid.data( target_cat_seq = fluid.layers.data(
name="target_cat_seq", shape=[None, seq_len], dtype="int64") name="target_cat_seq", shape=[max_len, 1], dtype="int64", lod_level=0)
hist_item_emb = fluid.embedding( hist_item_emb = fluid.layers.embedding(
input=hist_item_seq, input=hist_item_seq,
size=[item_count, item_emb_size], size=[item_count, item_emb_size],
param_attr=item_emb_attr, param_attr=item_emb_attr,
is_sparse=is_sparse) is_sparse=is_sparse)
hist_cat_emb = fluid.embedding( hist_cat_emb = fluid.layers.embedding(
input=hist_cat_seq, input=hist_cat_seq,
size=[cat_count, cat_emb_size], size=[cat_count, cat_emb_size],
param_attr=cat_emb_attr, param_attr=cat_emb_attr,
is_sparse=is_sparse) is_sparse=is_sparse)
target_item_emb = fluid.embedding( target_item_emb = fluid.layers.embedding(
input=target_item, input=target_item,
size=[item_count, item_emb_size], size=[item_count, item_emb_size],
param_attr=item_emb_attr, param_attr=item_emb_attr,
is_sparse=is_sparse) is_sparse=is_sparse)
target_cat_emb = fluid.embedding( target_cat_emb = fluid.layers.embedding(
input=target_cat, input=target_cat,
size=[cat_count, cat_emb_size], size=[cat_count, cat_emb_size],
param_attr=cat_emb_attr, param_attr=cat_emb_attr,
is_sparse=is_sparse) is_sparse=is_sparse)
target_item_seq_emb = fluid.embedding( target_item_seq_emb = fluid.layers.embedding(
input=target_item_seq, input=target_item_seq,
size=[item_count, item_emb_size], size=[item_count, item_emb_size],
param_attr=item_emb_attr, param_attr=item_emb_attr,
is_sparse=is_sparse) is_sparse=is_sparse)
target_cat_seq_emb = fluid.embedding( target_cat_seq_emb = fluid.layers.embedding(
input=target_cat_seq, input=target_cat_seq,
size=[cat_count, cat_emb_size], size=[cat_count, cat_emb_size],
param_attr=cat_emb_attr, param_attr=cat_emb_attr,
is_sparse=is_sparse) is_sparse=is_sparse)
item_b = fluid.embedding( item_b = fluid.layers.embedding(
input=target_item, input=target_item,
size=[item_count, 1], size=[item_count, 1],
param_attr=fluid.initializer.Constant(value=0.0)) param_attr=fluid.initializer.Constant(value=0.0))
...@@ -121,7 +120,7 @@ def network(item_count, cat_count): ...@@ -121,7 +120,7 @@ def network(item_count, cat_count):
target_concat = fluid.layers.concat( target_concat = fluid.layers.concat(
[target_item_emb, target_cat_emb], axis=1) [target_item_emb, target_cat_emb], axis=1)
out = din_attention(hist_seq_concat, target_seq_concat, mask) out = din_attention(hist_seq_concat, target_seq_concat, max_len, mask)
out_fc = fluid.layers.fc(name="out_fc", out_fc = fluid.layers.fc(name="out_fc",
input=out, input=out,
size=item_emb_size + cat_emb_size, size=item_emb_size + cat_emb_size,
......
...@@ -20,7 +20,7 @@ import pickle ...@@ -20,7 +20,7 @@ import pickle
def pad_batch_data(input, max_len): def pad_batch_data(input, max_len):
res = np.array([x + [0] * (max_len - len(x)) for x in input]) res = np.array([x + [0] * (max_len - len(x)) for x in input])
res = res.astype("int64").reshape([-1, max_len]) res = res.astype("int64").reshape([-1, max_len, 1])
return res return res
...@@ -34,10 +34,10 @@ def make_data(b): ...@@ -34,10 +34,10 @@ def make_data(b):
[-1, max_len, 1]) [-1, max_len, 1])
target_item_seq = np.array( target_item_seq = np.array(
[[x[2]] * max_len for x in b]).astype("int64").reshape( [[x[2]] * max_len for x in b]).astype("int64").reshape(
[-1, max_len]) [-1, max_len, 1])
target_cat_seq = np.array( target_cat_seq = np.array(
[[x[3]] * max_len for x in b]).astype("int64").reshape( [[x[3]] * max_len for x in b]).astype("int64").reshape(
[-1, max_len]) [-1, max_len, 1])
res = [] res = []
for i in range(len(b)): for i in range(len(b)):
res.append([ res.append([
......
...@@ -78,7 +78,7 @@ def train(): ...@@ -78,7 +78,7 @@ def train():
args.num_devices) args.num_devices)
logger.info("reading data completes") logger.info("reading data completes")
avg_cost, pred = network.network(item_count, cat_count) avg_cost, pred = network.network(item_count, cat_count, max_len)
fluid.clip.set_gradient_clip(clip=fluid.clip.GradientClipByGlobalNorm( fluid.clip.set_gradient_clip(clip=fluid.clip.GradientClipByGlobalNorm(
clip_norm=5.0)) clip_norm=5.0))
base_lr = args.base_lr base_lr = args.base_lr
...@@ -173,21 +173,6 @@ def get_cards(args): ...@@ -173,21 +173,6 @@ def get_cards(args):
else: else:
return args.num_devices return args.num_devices
def check_version():
    """
    Log an error and exit when the installed version of PaddlePaddle
    does not satisfy the 1.6 requirement.
    """
    # BUGFIX: dropped the trailing backslash after the last string piece —
    # it continued the assignment onto the `try:` line, breaking the file
    # with a SyntaxError.
    err = "PaddlePaddle version 1.6 or higher is required, " \
          "or a suitable develop version is satisfied as well. \n" \
          "Please make sure the version is good with your code."
    try:
        # Raises when the installed paddle is older than 1.6.0.
        fluid.require_version('1.6.0')
    except Exception:
        logger.error(err)
        sys.exit(1)
if __name__ == "__main__": if __name__ == "__main__":
check_version()
train() train()
...@@ -31,7 +31,6 @@ SR-GNN模型的介绍可以参阅论文[Session-based Recommendation with Graph ...@@ -31,7 +31,6 @@ SR-GNN模型的介绍可以参阅论文[Session-based Recommendation with Graph
我们复现了论文效果,在DIGINETICA数据集上P@20可以达到50.7 我们复现了论文效果,在DIGINETICA数据集上P@20可以达到50.7
**目前模型库下模型均要求使用PaddlePaddle 1.6及以上版本或适当的develop版本。**
同时推荐用户参考[ IPython Notebook demo](https://aistudio.baidu.com/aistudio/projectDetail/124382) 同时推荐用户参考[ IPython Notebook demo](https://aistudio.baidu.com/aistudio/projectDetail/124382)
......
...@@ -20,7 +20,6 @@ import paddle ...@@ -20,7 +20,6 @@ import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import reader import reader
import network import network
import sys
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid") logger = logging.getLogger("fluid")
...@@ -56,7 +55,7 @@ def infer(args): ...@@ -56,7 +55,7 @@ def infer(args):
test_data = reader.Data(args.test_path, False) test_data = reader.Data(args.test_path, False)
place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace() place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size, args.step, batch_size) loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size, args.step)
exe.run(fluid.default_startup_program()) exe.run(fluid.default_startup_program())
infer_program = fluid.default_main_program().clone(for_test=True) infer_program = fluid.default_main_program().clone(for_test=True)
...@@ -88,22 +87,6 @@ def infer(args): ...@@ -88,22 +87,6 @@ def infer(args):
logger.info("TEST --> error: there is no model in " + model_path) logger.info("TEST --> error: there is no model in " + model_path)
def check_version():
    """
    Log an error and exit when the installed version of PaddlePaddle
    does not satisfy the 1.6 requirement.
    """
    # BUGFIX: the final string fragment ended with a line-continuation
    # backslash, so the assignment swallowed the following `try:` line and
    # the module failed to parse. The backslash is removed.
    err = "PaddlePaddle version 1.6 or higher is required, " \
          "or a suitable develop version is satisfied as well. \n" \
          "Please make sure the version is good with your code."
    try:
        # Raises when the installed paddle is older than 1.6.0.
        fluid.require_version('1.6.0')
    except Exception:
        logger.error(err)
        sys.exit(1)
if __name__ == "__main__": if __name__ == "__main__":
check_version()
args = parse_args() args = parse_args()
infer(args) infer(args)
...@@ -19,36 +19,36 @@ import paddle.fluid as fluid ...@@ -19,36 +19,36 @@ import paddle.fluid as fluid
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
def network(items_num, hidden_size, step, bs): def network(items_num, hidden_size, step):
stdv = 1.0 / math.sqrt(hidden_size) stdv = 1.0 / math.sqrt(hidden_size)
items = fluid.data( items = layers.data(
name="items", name="items",
shape=[bs, -1], shape=[1, 1],
dtype="int64") #[batch_size, uniq_max] dtype="int64") #[batch_size, uniq_max, 1]
seq_index = fluid.data( seq_index = layers.data(
name="seq_index", name="seq_index",
shape=[bs, -1, 2], shape=[1],
dtype="int32") #[batch_size, seq_max, 2] dtype="int32") #[batch_size, seq_max]
last_index = fluid.data( last_index = layers.data(
name="last_index", name="last_index",
shape=[bs, 2], shape=[1],
dtype="int32") #[batch_size, 2] dtype="int32") #[batch_size, 1]
adj_in = fluid.data( adj_in = layers.data(
name="adj_in", name="adj_in",
shape=[bs, -1, -1], shape=[1,1],
dtype="float32") #[batch_size, seq_max, seq_max] dtype="float32") #[batch_size, seq_max, seq_max]
adj_out = fluid.data( adj_out = layers.data(
name="adj_out", name="adj_out",
shape=[bs, -1, -1], shape=[1,1],
dtype="float32") #[batch_size, seq_max, seq_max] dtype="float32") #[batch_size, seq_max, seq_max]
mask = fluid.data( mask = layers.data(
name="mask", name="mask",
shape=[bs, -1, 1], shape=[1, 1],
dtype="float32") #[batch_size, seq_max, 1] dtype="float32") #[batch_size, seq_max, 1]
label = fluid.data( label = layers.data(
name="label", name="label",
shape=[bs, 1], shape=[1],
dtype="int64") #[batch_size, 1] dtype="int64") #[batch_size, 1]
datas = [items, seq_index, last_index, adj_in, adj_out, mask, label] datas = [items, seq_index, last_index, adj_in, adj_out, mask, label]
...@@ -57,17 +57,19 @@ def network(items_num, hidden_size, step, bs): ...@@ -57,17 +57,19 @@ def network(items_num, hidden_size, step, bs):
feed_datas = fluid.layers.read_file(py_reader) feed_datas = fluid.layers.read_file(py_reader)
items, seq_index, last_index, adj_in, adj_out, mask, label = feed_datas items, seq_index, last_index, adj_in, adj_out, mask, label = feed_datas
items_emb = fluid.embedding( items_emb = layers.embedding(
input=items, input=items,
param_attr=fluid.ParamAttr( param_attr=fluid.ParamAttr(
name="emb", name="emb",
initializer=fluid.initializer.Uniform( initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv)), low=-stdv, high=stdv)),
size=[items_num, hidden_size]) #[batch_size, uniq_max, h] size=[items_num, hidden_size]) #[batch_size, uniq_max, h]
items_emb_shape = layers.shape(items_emb)
pre_state = items_emb pre_state = items_emb
for i in range(step): for i in range(step):
pre_state = layers.reshape(x=pre_state, shape=[bs, -1, hidden_size]) pre_state = layers.reshape(
x=pre_state, shape=[-1, 1, hidden_size], actual_shape=items_emb_shape)
state_in = layers.fc( state_in = layers.fc(
input=pre_state, input=pre_state,
name="state_in", name="state_in",
...@@ -102,12 +104,24 @@ def network(items_num, hidden_size, step, bs): ...@@ -102,12 +104,24 @@ def network(items_num, hidden_size, step, bs):
bias_attr=False) bias_attr=False)
pre_state, _, _ = fluid.layers.gru_unit( pre_state, _, _ = fluid.layers.gru_unit(
input=gru_fc, input=gru_fc,
hidden=layers.reshape(x=pre_state, shape=[-1, hidden_size]), hidden=layers.reshape(
x=pre_state, shape=[-1, hidden_size]),
size=3 * hidden_size) size=3 * hidden_size)
final_state = layers.reshape(pre_state, shape=[bs, -1, hidden_size]) final_state = pre_state #[batch_size * uniq_max, h]
seq = layers.gather_nd(final_state, seq_index)
last = layers.gather_nd(final_state, last_index) seq_origin_shape = layers.assign(np.array([0,0,hidden_size-1]).astype("int32"))
seq_origin_shape += layers.shape(layers.unsqueeze(seq_index,[2])) #value: [batch_size, seq_max, h]
seq_origin_shape.stop_gradient = True
seq_index = layers.reshape(seq_index, shape=[-1])
seq = layers.gather(final_state, seq_index) #[batch_size * seq_max, h]
last = layers.gather(final_state, last_index) #[batch_size, h]
seq = layers.reshape(
seq, shape=[-1, 1, hidden_size], actual_shape=seq_origin_shape) #[batch_size, seq_max, h]
last = layers.reshape(
last, shape=[-1, hidden_size]) #[batch_size, h]
seq_fc = layers.fc( seq_fc = layers.fc(
input=seq, input=seq,
...@@ -170,13 +184,13 @@ def network(items_num, hidden_size, step, bs): ...@@ -170,13 +184,13 @@ def network(items_num, hidden_size, step, bs):
low=-stdv, high=stdv))) #[batch_size, h] low=-stdv, high=stdv))) #[batch_size, h]
all_vocab = layers.create_global_var( all_vocab = layers.create_global_var(
shape=[items_num - 1], shape=[items_num - 1, 1],
value=0, value=0,
dtype="int64", dtype="int64",
persistable=True, persistable=True,
name="all_vocab") name="all_vocab")
all_emb = fluid.embedding( all_emb = layers.embedding(
input=all_vocab, input=all_vocab,
param_attr=fluid.ParamAttr( param_attr=fluid.ParamAttr(
name="emb", name="emb",
......
...@@ -64,19 +64,19 @@ class Data(): ...@@ -64,19 +64,19 @@ class Data():
adj_out.append(np.divide(adj.transpose(), u_deg_out).transpose()) adj_out.append(np.divide(adj.transpose(), u_deg_out).transpose())
seq_index.append( seq_index.append(
[[id, np.where(node == i)[0][0]] for i in e[0]]) [np.where(node == i)[0][0] + id * max_uniq_len for i in e[0]])
last_index.append( last_index.append(
[id, np.where(node == e[0][last_id[id]])[0][0]]) np.where(node == e[0][last_id[id]])[0][0] + id * max_uniq_len)
label.append(e[1] - 1) label.append(e[1] - 1)
mask.append([[1] * (last_id[id] + 1) + [0] * mask.append([[1] * (last_id[id] + 1) + [0] *
(max_seq_len - last_id[id] - 1)]) (max_seq_len - last_id[id] - 1)])
id += 1 id += 1
items = np.array(items).astype("int64").reshape((batch_size, -1)) items = np.array(items).astype("int64").reshape((batch_size, -1, 1))
seq_index = np.array(seq_index).astype("int32").reshape( seq_index = np.array(seq_index).astype("int32").reshape(
(batch_size, -1, 2)) (batch_size, -1))
last_index = np.array(last_index).astype("int32").reshape( last_index = np.array(last_index).astype("int32").reshape(
(batch_size, 2)) (batch_size))
adj_in = np.array(adj_in).astype("float32").reshape( adj_in = np.array(adj_in).astype("float32").reshape(
(batch_size, max_uniq_len, max_uniq_len)) (batch_size, max_uniq_len, max_uniq_len))
adj_out = np.array(adj_out).astype("float32").reshape( adj_out = np.array(adj_out).astype("float32").reshape(
...@@ -110,10 +110,8 @@ class Data(): ...@@ -110,10 +110,8 @@ class Data():
cur_batch = remain_data[i:i + batch_size] cur_batch = remain_data[i:i + batch_size]
yield self.make_data(cur_batch, batch_size) yield self.make_data(cur_batch, batch_size)
else: else:
# Due to fixed batch_size, discard the remaining ins cur_batch = remain_data[i:]
return yield self.make_data(cur_batch, group_remain % batch_size)
#cur_batch = remain_data[i:]
#yield self.make_data(cur_batch, group_remain % batch_size)
return _reader return _reader
......
...@@ -72,7 +72,7 @@ def train(): ...@@ -72,7 +72,7 @@ def train():
batch_size = args.batch_size batch_size = args.batch_size
items_num = reader.read_config(args.config_path) items_num = reader.read_config(args.config_path)
loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size, loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size,
args.step, batch_size) args.step)
data_reader = reader.Data(args.train_path, True) data_reader = reader.Data(args.train_path, True)
logger.info("load data complete") logger.info("load data complete")
...@@ -96,7 +96,7 @@ def train(): ...@@ -96,7 +96,7 @@ def train():
all_vocab = fluid.global_scope().var("all_vocab").get_tensor() all_vocab = fluid.global_scope().var("all_vocab").get_tensor()
all_vocab.set( all_vocab.set(
np.arange(1, items_num).astype("int64").reshape((-1)), place) np.arange(1, items_num).astype("int64").reshape((-1, 1)), place)
feed_list = [e.name for e in feed_datas] feed_list = [e.name for e in feed_datas]
...@@ -170,21 +170,6 @@ def get_cards(args): ...@@ -170,21 +170,6 @@ def get_cards(args):
num = len(cards.split(",")) num = len(cards.split(","))
return num return num
def check_version():
    """
    Log an error and exit when the installed version of PaddlePaddle
    does not satisfy the 1.6 requirement.
    """
    # BUGFIX: removed the spurious trailing backslash on the last string
    # line; with it present the assignment continued into `try:` and the
    # module raised a SyntaxError on import.
    err = "PaddlePaddle version 1.6 or higher is required, " \
          "or a suitable develop version is satisfied as well. \n" \
          "Please make sure the version is good with your code."
    try:
        # Raises when the installed paddle is older than 1.6.0.
        fluid.require_version('1.6.0')
    except Exception:
        logger.error(err)
        sys.exit(1)
if __name__ == "__main__": if __name__ == "__main__":
check_version()
train() train()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册