Commit 3e6d6a4d authored by: J junjun315

add dygraph models:resnet, test=develop

Parent 18a01039
@@ -29,7 +29,7 @@ env CUDA_VISIBLE_DEVICES=0 python train.py
## Output
Once training starts, you will see output similar to the following. Each training `batch` prints the current epoch, step, and loss value. The defaults are `epoch=10` and `batch_size=8`. You can adjust these parameters for better training results, at the cost of more memory (GPU memory) and longer training time.
```text
-0 0 [5.0672207]
-0 1 [5.5643945]
-0 2 [4.6319003]
+epoch id: 0, batch step: 0, loss: 4.951202
+epoch id: 0, batch step: 1, loss: 5.268410
+epoch id: 0, batch step: 2, loss: 5.123999
```
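As a side note, the first loss values hover around ln(102) ≈ 4.63, the expected starting cross-entropy of a randomly initialized classifier on `paddle.dataset.flowers` (the 102-category Oxford flowers set). A quick sanity check:

```python
import math

# Expected initial cross-entropy for a uniform prediction over
# num_classes categories is ln(num_classes).
num_classes = 102  # Oxford 102 Flowers, as served by paddle.dataset.flowers
print(math.log(num_classes))  # ~4.625, consistent with the first loss values above
```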
@@ -23,39 +23,9 @@ from paddle.fluid.dygraph.base import to_variable
batch_size = 8
epoch = 10
-train_parameters = {
-    "input_size": [3, 224, 224],
-    "input_mean": [0.485, 0.456, 0.406],
-    "input_std": [0.229, 0.224, 0.225],
-    "learning_strategy": {
-        "name": "piecewise_decay",
-        "batch_size": batch_size,
-        "epochs": [30, 60, 90],
-        "steps": [0.1, 0.01, 0.001, 0.0001]
-    },
-    "batch_size": batch_size,
-    "lr": 0.1,
-    "total_images": 1281164,
-}
-def optimizer_setting(params):
-    ls = params["learning_strategy"]
-    if ls["name"] == "piecewise_decay":
-        if "total_images" not in params:
-            total_images = 1281167
-        else:
-            total_images = params["total_images"]
-        batch_size = ls["batch_size"]
-        step = int(total_images / batch_size + 1)
-        bd = [step * e for e in ls["epochs"]]
-        base_lr = params["lr"]
-        lr = []
-        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
-        optimizer = fluid.optimizer.SGD(learning_rate=0.01)
-    return optimizer
+def optimizer_setting():
+    return fluid.optimizer.SGD(learning_rate=0.01)
class ConvBNLayer(fluid.dygraph.Layer):
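A detail worth noting: the removed `piecewise_decay` branch computed step boundaries and decayed learning rates but never used them; the returned optimizer was hardcoded to `learning_rate=0.01`, which the new two-line `optimizer_setting()` makes explicit. For reference, a standalone sketch of the schedule those removed lines calculated, using values from the old `train_parameters`:

```python
# Schedule computed (but never applied) by the removed optimizer_setting(params).
total_images = 1281167                      # fallback value from the old code
batch_size = 8
step = int(total_images / batch_size + 1)   # steps per epoch: 160146
bd = [step * e for e in [30, 60, 90]]       # decay boundaries, in steps
lr = [0.1 * (0.1 ** i) for i in range(len(bd) + 1)]
print(bd)  # [4804380, 9608760, 14413140]
print(lr)  # approximately [0.1, 0.01, 0.001, 0.0001]
```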
@@ -216,35 +186,21 @@ class ResNet(fluid.dygraph.Layer):
        return y
-class DygraphResnet():
-    def train(self):
-        batch_size = train_parameters["batch_size"]
batch_num = 10000
+def train_resnet():
    with fluid.dygraph.guard():
        resnet = ResNet("resnet")
-        optimizer = optimizer_setting(train_parameters)
+        optimizer = optimizer_setting()
        train_reader = paddle.batch(
-            paddle.dataset.flowers.train(use_xmap=False),
+            paddle.dataset.flowers.train(),
            batch_size=batch_size)
-        dy_param_init_value = {}
-        for param in resnet.parameters():
-            dy_param_init_value[param.name] = param.numpy()
        for eop in range(epoch):
            for batch_id, data in enumerate(train_reader()):
                if batch_id >= batch_num:
                    break
                dy_x_data = np.array(
-                    [x[0].reshape(3, 224, 224)
-                     for x in data]).astype('float32')
-                if len(np.array([x[1] for x in data]).astype(
-                        'int64')) != batch_size:
+                    [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
+                if len(np.array([x[1] for x in data]).astype('int64')) != batch_size:
                    continue
-                y_data = np.array(
-                    [x[1] for x in data]).astype('int64').reshape(
+                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
                    batch_size, 1)
                img = to_variable(dy_x_data)
@@ -261,9 +217,8 @@ class DygraphResnet():
                optimizer.minimize(avg_loss)
                resnet.clear_gradients()
-                print(eop, batch_id, dy_out)
+                print("epoch id: %d, batch step: %d, loss: %f" % (eop, batch_id, dy_out))
if __name__ == '__main__':
-    resnet = DygraphResnet()
-    resnet.train()
+    train_resnet()
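The `@@` hunks elide unchanged lines, including the forward and backward steps between `img = to_variable(dy_x_data)` and `optimizer.minimize(avg_loss)`. For orientation only, a minimal sketch of how that loop body typically looks in fluid's dygraph mode; these exact statements are an assumption based on the standard dygraph classification pattern, not the commit's hidden lines:

```python
# Hypothetical reconstruction of the elided loop body (an assumption,
# not taken from this diff).
label = to_variable(y_data)
label.stop_gradient = True   # labels carry no gradients
out = resnet(img)            # forward pass
loss = fluid.layers.cross_entropy(input=out, label=label)
avg_loss = fluid.layers.mean(x=loss)
dy_out = avg_loss.numpy()    # scalar printed in the log line
avg_loss.backward()          # dygraph autograd
```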