{
  "train_batch_size": 8,
  "gradient_accumulation_steps": 1,
  "steps_per_print": 1,
  "zero_optimization": true,
  "disable_allgather": true,
  "optimizer": {
    "type": "Adam",
    "legacy_fusion": false,
    "params": {
      "lr": 0.00015,
      "max_grad_norm": 1.0
    }
  },
  "fp16": {
    "enabled": true,
    "loss_scale": 0,
    "loss_scale_window": 1000,
    "hysteresis": 2,
    "min_loss_scale": 1
  }
}