Commit 1c9d0635 authored by chengmo

merge

@@ -78,6 +78,7 @@ class Reader(dg.MultiSlotDataGenerator):
                 v = i[1]
                 for j in v:
                     s += " " + k + ":" + str(j)
+                print(s.strip())
                 yield None
         return data_iter
@@ -92,7 +92,7 @@ class Reader(dg.MultiSlotDataGenerator):
                 v = i[1]
                 for j in v:
                     s += " " + k + ":" + str(j)
-                print s.strip()
+                print(s.strip())
                 yield None
         return data_iter
@@ -79,7 +79,7 @@ class Reader(dg.MultiSlotDataGenerator):
                 v = i[1]
                 for j in v:
                     s += " " + k + ":" + str(j)
-                print s.strip()
+                print(s.strip())
                 yield None
         return data_iter
@@ -185,7 +185,7 @@ inputs = [dense_input] + sparse_input_ids + [label]
### Building the CTR-DNN Model
-The CTR-DNN network is fairly straightforward: it is essentially a binary classification task; see `network_conf.py` for the code. The model consists of an `Embedding` layer, three `FC` layers, and the corresponding loss and AUC computations for the classification task.
+The CTR-DNN network is fairly straightforward: it is essentially a binary classification task; see `model.py` for the code. The model consists of an `Embedding` layer, three `FC` layers, and the corresponding loss and AUC computations for the classification task.
#### Embedding Layer
First, how the Embedding layer is built: the input of the `Embedding` layer is `sparse_input`, and its shape is defined by the hyperparameters `sparse_feature_dim` and `embedding_size`. The `is_sparse` argument deserves special mention: when we set `is_sparse=True`, the computation graph treats the parameter as a sparse parameter, so both the backward updates and the distributed communication are carried out sparsely, which greatly improves runtime efficiency while keeping the results identical.
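A minimal sketch of such an embedding lookup with the fluid API is shown below; the slot name, parameter name, and hyperparameter values here are illustrative assumptions rather than values taken from the repository:

```python
import paddle.fluid as fluid

# Illustrative hyperparameters -- in the repository these come from the config.
sparse_feature_dim = 1000001   # assumed vocabulary size of the sparse ids
embedding_size = 10            # assumed embedding width

# One sparse slot; lod_level=1 marks it as a variable-length sequence of ids.
sparse_input = fluid.layers.data(
    name="C1", shape=[1], dtype="int64", lod_level=1)

emb = fluid.layers.embedding(
    input=sparse_input,
    size=[sparse_feature_dim, embedding_size],
    is_sparse=True,   # treat the table as a sparse parameter (sparse update/communication)
    param_attr=fluid.ParamAttr(name="SparseFeatFactors"))  # assumed parameter name

# Pool the id vectors of the slot into one fixed-size vector per sample.
emb_sum = fluid.layers.sequence_pool(input=emb, pool_type="sum")
```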
@@ -235,7 +235,7 @@ fc3 = fluid.layers.fc(
)
```
#### Loss and AUC Computation
- The prediction is produced by an FC layer whose output shape is 2; the softmax activation of this FC layer gives, for every sample, the probabilities of belonging to the positive and negative classes.
- The per-sample loss is given by the cross entropy; the cross-entropy input has shape [batch_size, 2] with float dtype, and the label input has shape [batch_size, 1] with int dtype.
- The loss of the batch, `avg_cost`, is the sum of the per-sample losses.
- We also compute the AUC of the predictions. The AUC is given by `fluid.layers.auc()`, which returns three values: the global AUC `auc_var`, the AUC of the current batch `batch_auc_var`, and the AUC states `auc_states`, where `auc_states` contains `batch_stat_pos, batch_stat_neg, stat_pos, stat_neg`. For `batch_auc` we take the average over the last 20 batches, specified by `slide_steps=20`; the number of thresholds used to discretize the ROC curve is set to 4096, specified by `num_thresholds=2**12`. A minimal sketch of this wiring follows the list.
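Below is a minimal sketch of the loss and AUC wiring described above; `fc3` and `label` stand in for the tensors built earlier in the network and are assumptions made for the sake of the example:

```python
import paddle.fluid as fluid

# `fc3` is the last hidden FC output and `label` the int64 label tensor
# built earlier in the network; both are assumed here for illustration.
predict = fluid.layers.fc(input=fc3, size=2, act="softmax")    # per-sample class probabilities
cost = fluid.layers.cross_entropy(input=predict, label=label)  # shape [batch_size, 1]
avg_cost = fluid.layers.reduce_sum(cost)                       # loss of the whole batch

auc_var, batch_auc_var, auc_states = fluid.layers.auc(
    input=predict,
    label=label,
    num_thresholds=2**12,  # 4096 thresholds to discretize the ROC curve
    slide_steps=20)        # batch AUC averaged over the last 20 batches
```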
@@ -61,7 +61,7 @@ class CriteoDataset(dg.MultiSlotDataGenerator):
                 s += " dense_feature:" + str(i)
             for i in range(1, 1 + len(categorical_range_)):
                 s += " " + str(i) + ":" + str(sparse_feature[i - 1][0])
-            print s.strip()
+            print(s.strip())
             yield None
         return reader
@@ -79,6 +79,7 @@ class Reader(dg.MultiSlotDataGenerator):
                 v = i[1]
                 for j in v:
                     s += " " + k + ":" + str(j)
+                print(s.strip())
                 yield None
         return data_iter
@@ -78,6 +78,7 @@ class Reader(dg.MultiSlotDataGenerator):
                 v = i[1]
                 for j in v:
                     s += " " + k + ":" + str(j)
+                print(s.strip())
                 yield None
         return data_iter
@@ -50,6 +50,7 @@ class Reader(dg.MultiSlotDataGenerator):
                 v = i[1]
                 for j in v:
                     s += " " + k + ":" + str(j)
+                print(s.strip())
                 yield None
         return data_iter
@@ -49,7 +49,7 @@ class Reader(dg.MultiSlotDataGenerator):
                 v = i[1]
                 for j in v:
                     s += " " + k + ":" + str(j)
-                print s.strip()
+                print(s.strip())
                 yield None
         return data_iter
@@ -95,7 +95,8 @@ class Reader(ReaderBase):
                 (batch_size, max_uniq_len, max_uniq_len))
         mask = np.array(mask).astype("float32").reshape((batch_size, -1, 1))
         label = np.array(label).astype("int64").reshape((batch_size, 1))
-        return zip(items, seq_index, last_index, adj_in, adj_out, mask, label)
+        return list(
+            zip(items, seq_index, last_index, adj_in, adj_out, mask, label))

     def batch_reader(self, batch_size, batch_group_size, train=True):
         def _reader():
@@ -94,7 +94,8 @@ class Reader(ReaderBase):
                 (batch_size, max_uniq_len, max_uniq_len))
         mask = np.array(mask).astype("float32").reshape((batch_size, -1, 1))
         label = np.array(label).astype("int64").reshape((batch_size, 1))
-        return zip(items, seq_index, last_index, adj_in, adj_out, mask, label)
+        return list(
+            zip(items, seq_index, last_index, adj_in, adj_out, mask, label))

     def batch_reader(self, batch_size, batch_group_size, train=True):
         def _reader():
@@ -37,6 +37,6 @@ class Reader(ReaderBase):
             trg_seq = l[1:]
             trg_seq = [int(e) for e in trg_seq]
             feature_name = ["src_wordseq", "dst_wordseq"]
-            yield zip(feature_name, [src_seq] + [trg_seq])
+            yield list(zip(feature_name, [src_seq] + [trg_seq]))
         return reader
@@ -35,7 +35,7 @@ class Reader(ReaderBase):
             features = line.strip().split(',')
             feature_name = ["user_input", "item_input"]
-            yield zip(feature_name,
-                      [[int(features[0])]] + [[int(features[1])]])
+            yield list(
+                zip(feature_name, [[int(features[0])]] + [[int(features[1])]]))
         return reader
@@ -35,7 +35,8 @@ class Reader(ReaderBase):
             features = line.strip().split(',')
             feature_name = ["user_input", "item_input", "label"]
-            yield zip(feature_name, [[int(features[0])]] +
-                      [[int(features[1])]] + [[int(features[2])]])
+            yield list(
+                zip(feature_name, [[int(features[0])]] + [[int(features[1])]] +
+                    [[int(features[2])]]))
         return reader
@@ -40,9 +40,9 @@ class Reader(ReaderBase):
             src = conv_ids[:boundary]
             pos_tgt = [conv_ids[boundary]]
             feature_name = ["user", "all_item", "p_item"]
-            yield zip(
-                feature_name,
-                [src] + [np.arange(self.vocab_size).astype("int64").tolist()] +
-                [pos_tgt])
+            yield list(
+                zip(feature_name, [src] + [
+                    np.arange(self.vocab_size).astype("int64").tolist()
+                ] + [pos_tgt]))
         return reader
@@ -42,6 +42,6 @@ class Reader(ReaderBase):
             pos_tgt = [conv_ids[boundary]]
             neg_tgt = [self.sample_neg_from_seq(src)]
             feature_name = ["user", "p_item", "n_item"]
-            yield zip(feature_name, [src] + [pos_tgt] + [neg_tgt])
+            yield list(zip(feature_name, [src] + [pos_tgt] + [neg_tgt]))
         return reader
@@ -41,10 +41,11 @@ class Reader(ReaderBase):
             """
             feature_name = ["watch_vec", "search_vec", "other_feat", "label"]
-            yield zip(feature_name,
-                      [np.random.rand(self.watch_vec_size).tolist()] +
-                      [np.random.rand(self.search_vec_size).tolist()] +
-                      [np.random.rand(self.other_feat_size).tolist()] +
-                      [[np.random.randint(self.output_size)]])
+            yield list(
+                zip(feature_name, [
+                    np.random.rand(self.watch_vec_size).tolist()
+                ] + [np.random.rand(self.search_vec_size).tolist()] + [
+                    np.random.rand(self.other_feat_size).tolist()
+                ] + [[np.random.randint(self.output_size)]]))
         return reader
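The `list(zip(...))` wrappers introduced in the reader hunks above are Python 3 compatibility fixes: under Python 3, `zip` returns a lazy iterator instead of a list, so a result that is handed back to the framework or consumed more than once has to be materialized explicitly. A small, framework-independent illustration (the feature names and values are made up):

```python
feature_name = ["user_input", "item_input"]
values = [[1], [42]]

pairs = zip(feature_name, values)
print(list(pairs))  # [('user_input', [1]), ('item_input', [42])]
print(list(pairs))  # [] -- the iterator is already exhausted under Python 3

# Wrapping the call in list() materializes the pairs so they can be reused safely.
pairs = list(zip(feature_name, values))
print(pairs)        # [('user_input', [1]), ('item_input', [42])]
```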