Set graph configuration to the IpuStrategy instance.
Set graph configuration to the IpuStrategy instance.
...
@@ -571,7 +571,7 @@ class IpuStrategy(object):
...
@@ -571,7 +571,7 @@ class IpuStrategy(object):
ipu_strategy = static.IpuStrategy()
ipu_strategy = static.IpuStrategy()
ipu_strategy.set_graph_config(num_ipus=1,
ipu_strategy.set_graph_config(num_ipus=1,
is_training=True,
is_training=True,
batch_size=1,
micro_batch_size=1,
enable_manual_shard=False)
enable_manual_shard=False)
"""
"""
if num_ipus == 1 and enable_manual_shard:
if num_ipus == 1 and enable_manual_shard:
...
@@ -581,7 +581,7 @@ class IpuStrategy(object):
...
@@ -581,7 +581,7 @@ class IpuStrategy(object):
options={
options={
'num_ipus': num_ipus,
'num_ipus': num_ipus,
'is_training': is_training,
'is_training': is_training,
'micro_batch_size': batch_size,
'micro_batch_size': micro_batch_size,
'enable_manual_shard': enable_manual_shard,
'enable_manual_shard': enable_manual_shard,
}
}
self.set_options(options)
self.set_options(options)
...
@@ -589,6 +589,7 @@ class IpuStrategy(object):
...
@@ -589,6 +589,7 @@ class IpuStrategy(object):
def set_pipelining_config(self,
def set_pipelining_config(self,
enable_pipelining=False,
enable_pipelining=False,
batches_per_step=1,
batches_per_step=1,
enable_gradient_accumulation=False,
accumulation_factor=1):
accumulation_factor=1):
"""
"""
Set pipelining configuration to the IpuStrategy instance. Used to optimize the throughput performance.
Set pipelining configuration to the IpuStrategy instance. Used to optimize the throughput performance.
...
@@ -598,6 +599,8 @@ class IpuStrategy(object):
...
@@ -598,6 +599,8 @@ class IpuStrategy(object):
Default False, which means disabled.
Default False, which means disabled.
batches_per_step (int, optional): Set the batches per run in data pipelining mode. Only if enable_pipelining=True, batches_per_step is able to be set > 1.
batches_per_step (int, optional): Set the batches per run in data pipelining mode. Only if enable_pipelining=True, batches_per_step is able to be set > 1.
Default 1, which means no data pipelining.
Default 1, which means no data pipelining.
enable_gradient_accumulation (bool, optional): Enable to accumulate gradients before updating the weights in training mode. Only if enable_pipelining=True,
enable_gradient_accumulation is able to be set True. Default False, which means no gradient accumulation.
accumulation_factor (int, optional): Specify the number of micro-batches to accumulate
accumulation_factor (int, optional): Specify the number of micro-batches to accumulate
before applying the varUpdate. Default 1, which means disable the accumulation.
before applying the varUpdate. Default 1, which means disable the accumulation.