diff --git a/models/contentunderstanding/classification/config.yaml b/models/contentunderstanding/classification/config.yaml
index d8ec77807ac4cd4f1b1584160fce38f3596d36e7..9e0bdd1e851ada704eb2377efe0a82154fd2b371 100644
--- a/models/contentunderstanding/classification/config.yaml
+++ b/models/contentunderstanding/classification/config.yaml
@@ -25,6 +25,7 @@ hyper_parameters:
   optimizer:
     class: Adagrad
     learning_rate: 0.001
+  is_sparse: False
 
 mode: runner1
 
diff --git a/models/contentunderstanding/classification/model.py b/models/contentunderstanding/classification/model.py
index f7e6b6c344c913bf082be501ae16470488758160..45c50b9bd746200ee51391a32d6fb59cd9876828 100644
--- a/models/contentunderstanding/classification/model.py
+++ b/models/contentunderstanding/classification/model.py
@@ -27,6 +27,7 @@ class Model(ModelBase):
         self.emb_dim = 8
         self.hid_dim = 128
         self.class_dim = 2
+        self.is_sparse = envs.get_global_env("hyper_parameters.is_sparse", False)
 
     def input_data(self, is_infer=False, **kwargs):
         data = fluid.data(
@@ -42,7 +43,7 @@ class Model(ModelBase):
         seq_len = input[2]
 
         # embedding layer
-        emb = fluid.embedding(input=data, size=[self.dict_dim, self.emb_dim])
+        emb = fluid.embedding(input=data, size=[self.dict_dim, self.emb_dim], is_sparse=self.is_sparse)
         emb = fluid.layers.sequence_unpad(emb, length=seq_len)
         # convolution layer
         conv = fluid.nets.sequence_conv_pool(