# run.py — PaddleRec command-line entry point.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

T
tangwei 已提交
15
import os
T
tangwei 已提交
16
import subprocess
X
test  
xjqbest 已提交
17
import sys
T
tangwei 已提交
18 19
import argparse
import tempfile
C
Chengmo 已提交
20

X
fix  
xjqbest 已提交
21
import copy
22 23 24
from paddlerec.core.factory import TrainerFactory
from paddlerec.core.utils import envs
from paddlerec.core.utils import util
X
test  
xjqbest 已提交
25
from paddlerec.core.utils import validation
T
tangwei 已提交
26

T
tangwei 已提交
27 28
# Registry filled in by engine_registry(): {backend: {engine_name: launcher}}.
engines = {}
# Device kinds the runner understands.
device = ["CPU", "GPU"]
# Engine names accepted for the yaml `runner.<mode>.class` field.
engine_choices = [
    "TRAIN",
    "SINGLE_TRAIN",
    "INFER",
    "SINGLE_INFER",
    "LOCAL_CLUSTER",
    "LOCAL_CLUSTER_TRAIN",
    "CLUSTER_TRAIN",
]
T
tangwei 已提交
33 34


T
tangwei 已提交
35
def engine_registry():
    """Fill the module-level ``engines`` registry.

    Maps a backend ("TRANSPILER" / "PSLIB") and an engine name to the
    launcher function that builds the corresponding runner.
    """
    engines["TRANSPILER"] = {
        "TRAIN": single_train_engine,
        "SINGLE_TRAIN": single_train_engine,
        "INFER": single_infer_engine,
        "SINGLE_INFER": single_infer_engine,
        "LOCAL_CLUSTER": local_cluster_engine,
        "LOCAL_CLUSTER_TRAIN": local_cluster_engine,
        "CLUSTER": cluster_engine,
        # BUG FIX: "CLUSTER_TRAIN" is an accepted engine choice (see
        # engine_choices) but had no TRANSPILER entry, so get_engine
        # returned None and launching crashed; alias it to cluster_engine,
        # mirroring the PSLIB table below.
        "CLUSTER_TRAIN": cluster_engine,
    }
    engines["PSLIB"] = {
        "SINGLE_TRAIN": local_mpi_engine,
        "TRAIN": local_mpi_engine,
        "LOCAL_CLUSTER_TRAIN": local_mpi_engine,
        "LOCAL_CLUSTER": local_mpi_engine,
        "CLUSTER_TRAIN": cluster_mpi_engine,
        "CLUSTER": cluster_mpi_engine,
    }
T
tangwei 已提交
52

T
tangwei 已提交
53

X
fix  
xjqbest 已提交
54
def get_inters_from_yaml(file, filters):
    """Load *file* as yaml, flatten it, and keep the entries whose dotted
    key starts with any prefix in *filters*.

    Args:
        file: path of the yaml config.
        filters: iterable of key prefixes to keep.

    Returns:
        dict: flattened key -> value for the matching entries.
    """
    flattened = envs.flatten_environs(envs.load_yaml(file))
    return {
        key: value
        for key, value in flattened.items()
        if any(key.startswith(prefix) for prefix in filters)
    }
T
tangwei 已提交
63 64


X
fix  
xjqbest 已提交
65
def get_all_inters_from_yaml(file, filters):
    """Flatten every nested yaml entry in *file* into dotted keys, expanding
    the named ``dataset``/``phase``/``runner`` lists by element name, and
    return the entries whose key starts with any prefix in *filters*.

    Raises:
        ValueError: when a dataset/phase/runner list element has no "name".
    """
    loaded = envs.load_yaml(file)
    all_flattens = {}

    def _flatten(path_parts, node):
        # Recursively walk dicts; named lists fan out per element name.
        for key, value in node.items():
            if isinstance(value, dict):
                _flatten(path_parts + [key], value)
            elif key in ("dataset", "phase",
                         "runner") and isinstance(value, list):
                for item in value:
                    if item.get("name") is None:
                        raise ValueError("name must be in dataset list. ",
                                         value)
                    _flatten(path_parts + [key, item["name"]], item)
            else:
                all_flattens[".".join(path_parts + [key])] = value

    _flatten([], loaded)
    return {
        key: value
        for key, value in all_flattens.items()
        if any(key.startswith(prefix) for prefix in filters)
    }


T
tangwei 已提交
97 98 99 100 101 102 103 104
def get_modes(running_config):
    """Return the list of runner modes declared in *running_config*.

    Args:
        running_config: flattened config dict containing a "mode" entry,
            which may be a single mode name (str) or a list of names.

    Returns:
        list: mode names to run, always as a list.

    Raises:
        ValueError: if *running_config* is not a dict or declares no mode.
    """
    if not isinstance(running_config, dict):
        raise ValueError("get_modes arguments must be [dict]")

    modes = running_config.get("mode")
    if not modes:
        # BUG FIX: message previously read "yaml mast have config: mode".
        raise ValueError("yaml must have config: mode")

    if isinstance(modes, str):
        modes = [modes]

    return modes


def get_engine(args, running_config, mode):
    """Pick the launcher function for *mode* from the engine registry.

    Args:
        args: parsed CLI namespace (unused here; kept for interface
            compatibility with callers).
        running_config: flattened yaml config ("runner.<mode>.*" keys).
        mode: runner name to launch.

    Returns:
        The launcher callable registered for the resolved engine, or None
        when the detected backend has no entry for it.

    Raises:
        ValueError: when the yaml declares no class for *mode*, or the
            declared engine is not a known choice.
    """
    transpiler = get_transpiler()

    engine_class = ".".join(["runner", mode, "class"])
    engine_device = ".".join(["runner", mode, "device"])
    # BUG FIX: selected_gpus is a sibling of `device` in the yaml, not a
    # child of it (matches selected_gpus_class in single_train_engine).
    device_gpu_choices = ".".join(["runner", mode, "selected_gpus"])

    engine = running_config.get(engine_class, None)
    if engine is None:
        # BUG FIX: the old message formatted `mode` into one placeholder
        # while also passing engine_class; report the missing key itself.
        raise ValueError(
            "not find {} in yaml, please check".format(engine_class))

    device = running_config.get(engine_device, None)
    if device is None:
        print("not find device be specified in yaml, set CPU as default")
        device = "CPU"

    if device.upper() == "GPU":
        selected_gpus = running_config.get(device_gpu_choices, None)
        if selected_gpus is None:
            print(
                "not find selected_gpus be specified in yaml, set `0` as default"
            )
            # BUG FIX: the default must be a string — the split(",") below
            # crashed with AttributeError on the old list default ["0"].
            selected_gpus = "0"
        else:
            print("selected_gpus {} will be specified for running".format(
                selected_gpus))

        # More than one card cannot run in single mode; promote the engine.
        if len(selected_gpus.split(",")) > 1:
            engine = "LOCAL_CLUSTER"

    engine = engine.upper()
    if engine not in engine_choices:
        raise ValueError("{} can not be chosen in {}".format(engine_class,
                                                             engine_choices))

    run_engine = engines[transpiler].get(engine, None)
    return run_engine


def get_transpiler():
    """Detect which fleet backend the installed paddle build provides.

    Spawns a python subprocess that touches a PSLIB-only Fleet API; exit
    status -11 (SIGSEGV) is taken to mean the PSLIB build, anything else
    means the regular transpiler build.

    Returns:
        str: "PSLIB" or "TRANSPILER".
    """
    cmd = [
        "python", "-c",
        "import paddle.fluid as fluid; fleet_ptr = fluid.core.Fleet(); [fleet_ptr.copy_table_by_feasign(10, 10, [2020, 1010])];"
    ]
    # BUG FIX: subprocess.DEVNULL replaces the old FNULL = open(os.devnull)
    # handle, which was never closed (file-descriptor leak per call).
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        cwd=os.getcwd())
    ret = proc.wait()
    if ret == -11:
        return "PSLIB"
    else:
        return "TRANSPILER"
T
tangwei 已提交
166 167


T
tangwei 已提交
168 169 170
def set_runtime_envs(cluster_envs, engine_yaml):
    """Push *cluster_envs* into the process runtime environment and echo
    every ``train.trainer.*`` entry for debugging.

    Args:
        cluster_envs: dict of runtime settings; None is treated as empty.
        engine_yaml: engine config path (unused here; kept for interface
            compatibility with callers).
    """
    if cluster_envs is None:
        cluster_envs = {}
    envs.set_runtime_environs(cluster_envs)

    visible = {
        key: value
        for key, value in os.environ.items()
        if key.startswith("train.trainer.")
    }
    print(envs.pretty_print_envs(visible, ("Runtime Envs", "Value")))
T
tangwei 已提交
180 181


C
Chengmo 已提交
182 183
def single_train_engine(args):
    """Build a single-process training engine from the ``args.model`` yaml.

    Args:
        args: parsed CLI namespace; only ``args.model`` (config path) is
            used.

    Returns:
        The Trainer instance created by ``TrainerFactory``.

    Raises:
        ValueError: when device is GPU and more than one card is selected.
    """
    # NOTE: the old code also did `_envs = envs.load_yaml(args.model)` and
    # never used it — removed.
    run_extras = get_all_inters_from_yaml(args.model, ["runner."])

    mode = envs.get_runtime_environ("mode")
    trainer_class = ".".join(["runner", mode, "trainer_class"])
    fleet_class = ".".join(["runner", mode, "fleet_mode"])
    device_class = ".".join(["runner", mode, "device"])
    selected_gpus_class = ".".join(["runner", mode, "selected_gpus"])

    trainer = run_extras.get(trainer_class, "GeneralTrainer")
    fleet_mode = run_extras.get(fleet_class, "ps")
    device = run_extras.get(device_class, "cpu")
    selected_gpus = run_extras.get(selected_gpus_class, "0")
    executor_mode = "train"

    single_envs = {}

    if device.upper() == "GPU":
        # Single mode drives exactly one card; multi-card setups must use
        # the local-cluster engine instead.
        selected_gpus_num = len(selected_gpus.split(","))
        if selected_gpus_num != 1:
            raise ValueError(
                "Single Mode Only Support One GPU, Set Local Cluster Mode to use Multi-GPUS"
            )

        # NOTE(review): "selsected_gpus" is a long-standing misspelled key;
        # kept byte-identical in case downstream readers depend on it.
        single_envs["selsected_gpus"] = selected_gpus
        single_envs["FLAGS_selected_gpus"] = selected_gpus

    single_envs["train.trainer.trainer"] = trainer
    single_envs["fleet_mode"] = fleet_mode
    single_envs["train.trainer.executor_mode"] = executor_mode
    single_envs["train.trainer.threads"] = "2"
    single_envs["train.trainer.platform"] = envs.get_platform()
    single_envs["train.trainer.engine"] = "single"

    set_runtime_envs(single_envs, args.model)
    trainer = TrainerFactory.create(args.model)
    return trainer
X
fix  
xjqbest 已提交
220

X
fix  
xjqbest 已提交
221 222

def single_infer_engine(args):
    """Build a single-process inference engine from the ``args.model`` yaml.

    Args:
        args: parsed CLI namespace; only ``args.model`` (config path) is
            used.

    Returns:
        The Trainer instance created by ``TrainerFactory``.

    Raises:
        ValueError: when device is GPU and more than one card is selected.
    """
    # NOTE: the old code also did `_envs = envs.load_yaml(args.model)` and
    # never used it — removed.
    run_extras = get_all_inters_from_yaml(args.model, ["runner."])

    mode = envs.get_runtime_environ("mode")
    trainer_class = ".".join(["runner", mode, "trainer_class"])
    fleet_class = ".".join(["runner", mode, "fleet_mode"])
    device_class = ".".join(["runner", mode, "device"])
    selected_gpus_class = ".".join(["runner", mode, "selected_gpus"])

    trainer = run_extras.get(trainer_class, "GeneralTrainer")
    fleet_mode = run_extras.get(fleet_class, "ps")
    device = run_extras.get(device_class, "cpu")
    selected_gpus = run_extras.get(selected_gpus_class, "0")
    executor_mode = "infer"

    single_envs = {}

    if device.upper() == "GPU":
        # Single mode drives exactly one card; multi-card setups must use
        # the local-cluster engine instead.
        selected_gpus_num = len(selected_gpus.split(","))
        if selected_gpus_num != 1:
            raise ValueError(
                "Single Mode Only Support One GPU, Set Local Cluster Mode to use Multi-GPUS"
            )

        # NOTE(review): "selsected_gpus" is a long-standing misspelled key;
        # kept byte-identical in case downstream readers depend on it.
        single_envs["selsected_gpus"] = selected_gpus
        single_envs["FLAGS_selected_gpus"] = selected_gpus

    single_envs["train.trainer.trainer"] = trainer
    single_envs["train.trainer.executor_mode"] = executor_mode
    single_envs["fleet_mode"] = fleet_mode
    single_envs["train.trainer.threads"] = "2"
    single_envs["train.trainer.platform"] = envs.get_platform()
    single_envs["train.trainer.engine"] = "single"

    set_runtime_envs(single_envs, args.model)
    trainer = TrainerFactory.create(args.model)
    return trainer
C
chengmo 已提交
260

X
fix  
xjqbest 已提交
261

T
tangwei 已提交
262
def cluster_engine(args):
    """Dispatch distributed execution by role.

    The MASTER role submits the job via ClusterEngine; the WORKER role
    builds and returns a Trainer. The role is read from the
    ``PADDLE_PADDLEREC_ROLE`` environment variable (default MASTER).
    """

    def master():
        from paddlerec.core.engine.cluster.cluster import ClusterEngine

        backend_envs = envs.load_yaml(args.backend)
        flattens = envs.flatten_environs(backend_envs, "_")
        flattens["engine_role"] = "MASTER"
        flattens["engine_run_config"] = args.model
        flattens["engine_temp_path"] = tempfile.mkdtemp()
        envs.set_runtime_environs(flattens)
        print(envs.pretty_print_envs(flattens, ("Submit Envs", "Value")))

        return ClusterEngine(None, args.model)

    def worker():
        model_envs = envs.load_yaml(args.model)
        run_extras = get_all_inters_from_yaml(args.model,
                                              ["train.", "runner."])
        runner_prefix = "runner." + model_envs["mode"] + "."

        trainer_class = run_extras.get(runner_prefix + "trainer_class", None)
        trainer = trainer_class if trainer_class else "GeneralTrainer"

        distributed_strategy = run_extras.get(
            runner_prefix + "distribute_strategy", "async")
        selected_gpus = run_extras.get(runner_prefix + "selected_gpus", "0")
        fleet_mode = run_extras.get(runner_prefix + "fleet_mode", "ps")

        cluster_envs = {
            "selected_gpus": selected_gpus,
            "fleet_mode": fleet_mode,
            "train.trainer.trainer": trainer,
            "train.trainer.executor_mode": "train",
            "train.trainer.engine": "cluster",
            "train.trainer.strategy": distributed_strategy,
            "train.trainer.threads": envs.get_runtime_environ("CPU_NUM"),
            "train.trainer.platform": envs.get_platform(),
        }
        print("launch {} engine with cluster to with model: {}".format(
            trainer, args.model))
        set_runtime_envs(cluster_envs, args.model)

        return TrainerFactory.create(args.model)

    if os.getenv("PADDLE_PADDLEREC_ROLE", "MASTER") == "WORKER":
        return worker()
    return master()
C
chengmo 已提交
323 324


T
tangwei 已提交
325
def cluster_mpi_engine(args):
    """Build an MPI cluster trainer for ``args.model`` and return it."""
    print("launch cluster engine with cluster to run model: {}".format(
        args.model))

    mpi_envs = {
        "train.trainer.trainer": "CtrCodingTrainer",
        "train.trainer.platform": envs.get_platform(),
    }
    set_runtime_envs(mpi_envs, args.model)

    return TrainerFactory.create(args.model)


def local_cluster_engine(args):
    """Build a LocalClusterEngine (multi-process on one host) from the
    ``args.model`` yaml and return it."""
    from paddlerec.core.engine.local_cluster import LocalClusterEngine

    model_envs = envs.load_yaml(args.model)
    run_extras = get_all_inters_from_yaml(args.model, ["train.", "runner."])
    runner_prefix = "runner." + model_envs["mode"] + "."

    trainer_class = run_extras.get(runner_prefix + "runner_class", None)
    trainer = trainer_class if trainer_class else "GeneralTrainer"

    distributed_strategy = run_extras.get(
        runner_prefix + "distribute_strategy", "async")
    worker_num = run_extras.get(runner_prefix + "worker_num", 1)
    server_num = run_extras.get(runner_prefix + "server_num", 1)
    selected_gpus = run_extras.get(runner_prefix + "selected_gpus", "0")

    fleet_mode = run_extras.get(runner_prefix + "fleet_mode", "")
    if fleet_mode == "":
        # Default fleet mode: collective only for multi-GPU, otherwise
        # parameter-server.
        device_name = run_extras.get(runner_prefix + "device", "cpu")
        multi_gpu = (len(selected_gpus.split(",")) > 1 and
                     device_name.upper() == "GPU")
        fleet_mode = "COLLECTIVE" if multi_gpu else "PS"

    cluster_envs = {
        "server_num": server_num,
        "worker_num": worker_num,
        "selected_gpus": selected_gpus,
        "start_port": envs.find_free_port(),
        "fleet_mode": fleet_mode,
        "log_dir": "logs",
        "train.trainer.trainer": trainer,
        "train.trainer.executor_mode": "train",
        "train.trainer.strategy": distributed_strategy,
        "train.trainer.threads": "2",
        "train.trainer.engine": "local_cluster",
        "train.trainer.platform": envs.get_platform(),
        "CPU_NUM": "2",
    }
    print("launch {} engine with cluster to run model: {}".format(trainer,
                                                                  args.model))

    set_runtime_envs(cluster_envs, args.model)
    return LocalClusterEngine(cluster_envs, args.model)


T
tangwei 已提交
392
def local_mpi_engine(args):
    """Build a LocalMPIEngine that runs 1x1 MPI training at localhost.

    Args:
        args: parsed CLI namespace; only ``args.model`` is used.

    Returns:
        The LocalMPIEngine launcher.

    Raises:
        RuntimeError: when no ``mpirun`` binary is found on PATH.
    """
    print("launch cluster engine with cluster to run model: {}".format(
        args.model))
    from paddlerec.core.engine.local_mpi import LocalMPIEngine

    print("use 1X1 MPI ClusterTraining at localhost to run model: {}".format(
        args.model))

    mpi = util.run_which("mpirun")
    if not mpi:
        raise RuntimeError("can not find mpirun, please check environment")

    _envs = envs.load_yaml(args.model)
    run_extras = get_all_inters_from_yaml(args.model, ["train.", "runner."])
    trainer_class = run_extras.get("runner." + _envs["mode"] + ".runner_class",
                                   None)
    executor_mode = "train"
    distributed_strategy = run_extras.get(
        "runner." + _envs["mode"] + ".distribute_strategy", "async")
    fleet_mode = run_extras.get("runner." + _envs["mode"] + ".fleet_mode",
                                "ps")

    if trainer_class:
        trainer = trainer_class
    else:
        trainer = "GeneralTrainer"

    cluster_envs = {}
    cluster_envs["mpirun"] = mpi
    cluster_envs["train.trainer.trainer"] = trainer
    cluster_envs["log_dir"] = "logs"
    # BUG FIX: "train.trainer.engine" used to be assigned twice with the
    # same value; the duplicate assignment was removed.
    cluster_envs["train.trainer.engine"] = "local_cluster"
    cluster_envs["train.trainer.executor_mode"] = executor_mode
    cluster_envs["fleet_mode"] = fleet_mode
    cluster_envs["train.trainer.strategy"] = distributed_strategy
    cluster_envs["train.trainer.threads"] = "2"
    cluster_envs["train.trainer.platform"] = envs.get_platform()

    set_runtime_envs(cluster_envs, args.model)
    launch = LocalMPIEngine(cluster_envs, args.model)
    return launch


T
tangwei 已提交
436
def get_abs_model(model):
    """Resolve *model* to a config-yaml path.

    Args:
        model: either a built-in model name ("paddlerec.<...>"), resolved
            to that package directory's ``config.yaml``, or a direct path
            to a yaml file.

    Returns:
        str: path of the config yaml.

    Raises:
        IOError: when *model* is a plain path that is not an existing file.
    """
    if model.startswith("paddlerec."):
        # Renamed local: the old name `dir` shadowed the builtin.
        base_dir = envs.path_adapter(model)
        return os.path.join(base_dir, "config.yaml")
    if not os.path.isfile(model):
        raise IOError("model config: {} invalid".format(model))
    return model


T
tangwei 已提交
447
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='paddle-rec run')
    parser.add_argument("-m", "--model", type=str)
    parser.add_argument("-b", "--backend", type=str, default=None)

    # Expose the package root so built-in model paths can be resolved.
    abs_dir = os.path.dirname(os.path.abspath(__file__))
    envs.set_runtime_environs({"PACKAGE_BASE": abs_dir})

    args = parser.parse_args()
    model_name = args.model.split('.')[-1]
    args.model = get_abs_model(args.model)

    if not validation.yaml_validation(args.model):
        sys.exit(-1)
    engine_registry()

    running_config = get_all_inters_from_yaml(args.model, ["mode", "runner."])
    modes = get_modes(running_config)

    # Launch each declared runner mode in turn.
    for mode in modes:
        envs.set_runtime_environs({"mode": mode})
        launcher = get_engine(args, running_config, mode)
        engine = launcher(args)
        engine.run()