diff --git a/core/model.py b/core/model.py
index 2771c26e9b13c9addc07e27434685da091bef8a4..212db44c8dc60a20f6e5ed3f7c338b5336f41e2a 100755
--- a/core/model.py
+++ b/core/model.py
@@ -37,7 +37,6 @@ class Model(object):
         self._fetch_interval = 20
         self._namespace = "train.model"
         self._platform = envs.get_platform()
-        self._init_slots()
 
     def _init_slots(self):
         sparse_slots = envs.get_global_env("sparse_slots", None, "train.reader")
diff --git a/core/trainers/transpiler_trainer.py b/core/trainers/transpiler_trainer.py
index 2fc2e19d6f3d8e426f3f01aa2689be37d513f670..a67d4759be7ae27c4a8c57eb43409102a8400c53 100755
--- a/core/trainers/transpiler_trainer.py
+++ b/core/trainers/transpiler_trainer.py
@@ -234,7 +234,6 @@ class TranspileTrainer(Trainer):
         startup_program = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(infer_program, startup_program):
-                self.model._init_slots()
                 self.model.infer_net()
 
         if self.model._infer_data_loader is None:
diff --git a/models/rank/dcn/model.py b/models/rank/dcn/model.py
index 930646428714d1f77df9e6539dc4db231d8fc0a3..bf3e3051ca3ac92d1e354c6f59313ce496ff2921 100755
--- a/models/rank/dcn/model.py
+++ b/models/rank/dcn/model.py
@@ -112,6 +112,7 @@ class Model(ModelBase):
         return fluid.layers.reduce_sum(fluid.layers.square(w))
 
     def train_net(self):
+        self.model._init_slots()
         self.init_network()
 
         self.net_input = self._create_embedding_input()
@@ -149,4 +150,5 @@ class Model(ModelBase):
         return optimizer
 
     def infer_net(self, parameter_list):
+        self.model._init_slots()
         self.deepfm_net()
diff --git a/models/rank/deepfm/model.py b/models/rank/deepfm/model.py
index 3addd00025e1f821e757926fd4f4592b7a824e18..bfda02a21dcc7949b487ef074a783d2f24bcd1f5 100755
--- a/models/rank/deepfm/model.py
+++ b/models/rank/deepfm/model.py
@@ -124,6 +124,7 @@ class Model(ModelBase):
         self.predict = fluid.layers.sigmoid(y_first_order + y_second_order + y_dnn)
 
     def train_net(self):
+        self.model._init_slots()
         self.deepfm_net()
 
         # ------------------------- Cost(logloss) --------------------------
@@ -149,4 +150,5 @@ class Model(ModelBase):
         return optimizer
 
     def infer_net(self, parameter_list):
+        self.model._init_slots()
         self.deepfm_net()
diff --git a/models/rank/dnn/model.py b/models/rank/dnn/model.py
index 85986ae4742b89c3bf42a8331720b86cd993d228..3a61d288b40545619a49e81df1f6160670a6a0c1 100755
--- a/models/rank/dnn/model.py
+++ b/models/rank/dnn/model.py
@@ -88,6 +88,7 @@ class Model(ModelBase):
         self._metrics["BATCH_AUC"] = batch_auc
 
     def train_net(self):
+        self.model._init_slots()
         self.input()
         self.net()
         self.avg_loss()
@@ -99,5 +100,6 @@ class Model(ModelBase):
         return optimizer
 
     def infer_net(self):
+        self.model._init_slots()
         self.input()
         self.net()
diff --git a/models/rank/wide_deep/model.py b/models/rank/wide_deep/model.py
index d1901ce495e29a4a0c55a619ef76d3c0d59cdbca..27eb5e1f0c1588d7634407a3dcd250726dea28bb 100755
--- a/models/rank/wide_deep/model.py
+++ b/models/rank/wide_deep/model.py
@@ -57,6 +57,7 @@ class Model(ModelBase):
         return l3
 
     def train_net(self):
+        self.model._init_slots()
         wide_input = self._dense_data_var[0]
         deep_input = self._dense_data_var[1]
         label = self._sparse_data_var[0]
@@ -102,4 +103,5 @@ class Model(ModelBase):
         return optimizer
 
     def infer_net(self, parameter_list):
+        self.model._init_slots()
         self.deepfm_net()
diff --git a/models/rank/xdeepfm/model.py b/models/rank/xdeepfm/model.py
index c83a6f5bf5e4bcfba9b1eeb816cb4e0d43f677df..059e83d40290d713e9bc5f25f59fbb285e5a855a 100755
--- a/models/rank/xdeepfm/model.py
+++ b/models/rank/xdeepfm/model.py
@@ -145,6 +145,7 @@ class Model(ModelBase):
         self.predict = fluid.layers.sigmoid(y_linear + y_cin + y_dnn)
 
     def train_net(self):
+        self.model._init_slots()
         self.xdeepfm_net()
 
         cost = fluid.layers.log_loss(input=self.predict, label=fluid.layers.cast(self.label, "float32"), epsilon=0.0000001)
@@ -166,4 +167,5 @@ class Model(ModelBase):
         return optimizer
 
     def infer_net(self, parameter_list):
+        self.model._init_slots()
         self.xdeepfm_net()