diff --git a/demo/ctr-dnn_train.yaml b/demo/ctr-dnn_train.yaml
index 0f49138c0825a3f82db3779b678347f94db3a7db..66f5053a0f2364a9038e6b9b5f3ccaad6481d6ca 100644
--- a/demo/ctr-dnn_train.yaml
+++ b/demo/ctr-dnn_train.yaml
@@ -18,14 +18,15 @@ train:
     strategy: "async"
 
   epochs: 10
+  workspace: "fleetrec.models.rank.dnn"
 
   reader:
     batch_size: 2
-    class: "fleetrec.models.rank.criteo_reader"
-    train_data_path: "fleetrec::models/rank/dnn/data/train"
+    class: "{workspace}/../criteo_reader.py"
+    train_data_path: "{workspace}/data/train"
 
   model:
-    models: "fleetrec.models.rank.dnn.model"
+    models: "{workspace}/model.py"
     hyper_parameters:
       sparse_inputs_slots: 27
       sparse_feature_number: 1000001
@@ -33,22 +34,14 @@ train:
       dense_input_dim: 13
       fc_sizes: [512, 256, 128, 32]
       learning_rate: 0.001
+      optimizer: adam
 
   save:
     increment:
-      dirname: "models_for_increment"
+      dirname: "increment"
       epoch_interval: 2
       save_last: True
     inference:
-      dirname: "models_for_inference"
+      dirname: "inference"
       epoch_interval: 4
-      feed_varnames: ["C1", "C2", "C3"]
-      fetch_varnames: "predict"
       save_last: True
-
-evaluate:
-  batch_size: 32
-  train_thread_num: 12
-  reader: "reader.py"
-
-
diff --git a/fleet_rec/core/factory.py b/fleet_rec/core/factory.py
index 631bb1f6bfbc9603fd796cdba12f573050994f54..78821dc27e929e2059d409cd4d1f92aa47b4a633 100644
--- a/fleet_rec/core/factory.py
+++ b/fleet_rec/core/factory.py
@@ -67,6 +67,8 @@ class TrainerFactory(object):
             raise ValueError("fleetrec's config only support yaml")
 
         envs.set_global_envs(_config)
+        envs.update_workspace()
+
         trainer = TrainerFactory._build_trainer(config)
         return trainer
diff --git a/fleet_rec/core/model.py b/fleet_rec/core/model.py
index 9f940b6c0a204a0d1270f9ea46933583dc6307e0..528be0bf66312eab9e0f9b0d96d43ae4a75ac672 100644
--- a/fleet_rec/core/model.py
+++ b/fleet_rec/core/model.py
@@ -1,6 +1,10 @@
 import abc
+
+import paddle.fluid as fluid
+
 from fleetrec.core.utils import envs
 
+
 class Model(object):
     """R
     """
@@ -33,11 +37,35 @@ class Model(object):
     def get_fetch_period(self):
         return self._fetch_interval
 
+    def _build_optimizer(self, name, lr):
+        name = name.upper()
+        optimizers = ["SGD", "ADAM", "ADAGRAD"]
+        if name not in optimizers:
+            raise ValueError("configured optimizer can only be SGD/Adam/Adagrad")
+
+        if name == "SGD":
+            optimizer_i = fluid.optimizer.SGD(lr)
+        elif name == "ADAM":
+            optimizer_i = fluid.optimizer.Adam(lr, lazy_mode=True)
+        elif name == "ADAGRAD":
+            optimizer_i = fluid.optimizer.Adagrad(lr)
+        else:
+            raise ValueError("configured optimizer can only be SGD/Adam/Adagrad")
+
+        return optimizer_i
+
+    def optimizer(self):
+        learning_rate = envs.get_global_env("hyper_parameters.learning_rate", None, self._namespace)
+        optimizer = envs.get_global_env("hyper_parameters.optimizer", None, self._namespace)
+
+        return self._build_optimizer(optimizer, learning_rate)
+
     @abc.abstractmethod
     def train_net(self):
         """R
         """
         pass
 
+    @abc.abstractmethod
     def infer_net(self):
         pass
diff --git a/fleet_rec/core/utils/envs.py b/fleet_rec/core/utils/envs.py
index 4ee08cf0730d67a471d0f1e0367d7c6cf290297a..d3452b88be6585ea623d7b35153c79f4d03f26a1 100644
--- a/fleet_rec/core/utils/envs.py
+++ b/fleet_rec/core/utils/envs.py
@@ -46,9 +46,11 @@ def set_runtime_environs(environs):
     for k, v in environs.items():
         os.environ[k] = str(v)
 
+
 def get_runtime_environ(key):
     return os.getenv(key, None)
 
+
 def get_trainer():
     train_mode = get_runtime_environ("train.trainer.trainer")
     return train_mode
@@ -83,6 +85,24 @@
     return global_envs
 
 
+def update_workspace():
+    workspace = global_envs.get("train.workspace", None)
+    if not workspace:
+        return
+
+    # is fleet inner models
+    if workspace.startswith("fleetrec."):
+        fleet_package = get_runtime_environ("PACKAGE_BASE")
+        workspace_dir = workspace.split("fleetrec.")[1].replace(".", "/")
+        path = os.path.join(fleet_package, workspace_dir)
+    else:
+        path = workspace
+
+    for name, value in global_envs.items():
+        if isinstance(value, str):
+            value = value.replace("{workspace}", path)
+            global_envs[name] = value
+
 def pretty_print_envs(envs, header=None):
     spacing = 5
     max_k = 45
diff --git a/models/rank/dnn/model.py b/models/rank/dnn/model.py
index a4e6cf6868ccac57e457c9c58cab2b8ee2310d94..71f88d627213d8ddab2f3ecaa97e105553d4a99a 100644
--- a/models/rank/dnn/model.py
+++ b/models/rank/dnn/model.py
@@ -63,12 +63,9 @@ class Model(ModelBase):
             feed_list=self._data_var, capacity=64, use_double_buffer=False, iterable=False)
 
     def net(self):
-        trainer = envs.get_trainer()
-
-        is_distributed = True if trainer == "CtrTrainer" else False
+        is_distributed = True if envs.get_trainer() == "CtrTrainer" else False
         sparse_feature_number = envs.get_global_env("hyper_parameters.sparse_feature_number", None, self._namespace)
         sparse_feature_dim = envs.get_global_env("hyper_parameters.sparse_feature_dim", None, self._namespace)
-        sparse_feature_dim = 9 if trainer == "CtrTrainer" else sparse_feature_dim
 
         def embedding_layer(input):
             emb = fluid.layers.embedding(
@@ -106,8 +103,7 @@ class Model(ModelBase):
             size=2,
             act="softmax",
             param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
-                scale=1 / math.sqrt(fcs[-1].shape[1]))),
-        )
+                scale=1 / math.sqrt(fcs[-1].shape[1]))))
 
         self.predict = predict
 