From be02448517d4aa1c42928f673b0679aab4013c40 Mon Sep 17 00:00:00 2001
From: xjqbest <173596896@qq.com>
Date: Mon, 1 Jun 2020 22:51:04 +0800
Subject: [PATCH] fix

---
 models/contentunderstanding/classification/config.yaml | 1 +
 models/contentunderstanding/classification/model.py    | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/models/contentunderstanding/classification/config.yaml b/models/contentunderstanding/classification/config.yaml
index d8ec7780..9e0bdd1e 100644
--- a/models/contentunderstanding/classification/config.yaml
+++ b/models/contentunderstanding/classification/config.yaml
@@ -25,6 +25,7 @@ hyper_parameters:
   optimizer:
     class: Adagrad
     learning_rate: 0.001
+  is_sparse: False
 
 mode: runner1
 
diff --git a/models/contentunderstanding/classification/model.py b/models/contentunderstanding/classification/model.py
index f7e6b6c3..45c50b9b 100644
--- a/models/contentunderstanding/classification/model.py
+++ b/models/contentunderstanding/classification/model.py
@@ -27,6 +27,7 @@ class Model(ModelBase):
         self.emb_dim = 8
         self.hid_dim = 128
         self.class_dim = 2
+        self.is_sparse = envs.get_global_env("hyper_parameters.is_sparse", False)
 
     def input_data(self, is_infer=False, **kwargs):
         data = fluid.data(
@@ -42,7 +43,7 @@ class Model(ModelBase):
         seq_len = input[2]
 
         # embedding layer
-        emb = fluid.embedding(input=data, size=[self.dict_dim, self.emb_dim])
+        emb = fluid.embedding(input=data, size=[self.dict_dim, self.emb_dim], is_sparse=self.is_sparse)
         emb = fluid.layers.sequence_unpad(emb, length=seq_len)
         # convolution layer
         conv = fluid.nets.sequence_conv_pool(
-- 
GitLab
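
Note (not part of the patch): a minimal sketch of what the new hyper_parameters.is_sparse flag controls, assuming the Paddle 1.x fluid API already used by this model. The dict_dim/emb_dim values and the program scaffolding below are illustrative, not taken from the repo; only fluid.embedding's is_sparse argument is what the patch actually wires through from config.yaml.

    import paddle.fluid as fluid

    dict_dim, emb_dim = 100, 8   # illustrative sizes, not the repo's values
    is_sparse = False            # mirrors the default added in config.yaml

    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # int64 token ids, padded to a fixed length as in the model's input_data()
        data = fluid.data(name="input", shape=[None, 10], dtype="int64")
        # is_sparse only changes how the embedding table's gradient is
        # represented (sparse rows vs. a dense tensor); the forward lookup
        # and the output shape are identical either way.
        emb = fluid.embedding(input=data,
                              size=[dict_dim, emb_dim],
                              is_sparse=is_sparse)

With is_sparse: True, gradient updates touch only the rows looked up in a batch, which the configured Adagrad optimizer can apply row-wise; this mainly matters for large vocabularies or distributed training.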