import numpy as np
from . import core
from framework import Program, default_main_program, Parameter, Variable
import distribute_planner

__all__ = ['Executor', 'g_scope']

g_scope = core.Scope()


def as_numpy(tensor):
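    """Convert a LoDTensor, or a list of LoDTensors, to numpy ndarray(s).

    Tensors with a non-empty LoD are not supported yet; see the disabled
    code below.
    """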
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
    lod = tensor.lod()
    tensor_data = np.array(tensor)
    if len(lod) == 0:
        ans = tensor_data
    else:
        raise RuntimeError(
            "Converting a LoDTensor with a non-empty LoD to numpy is not "
            "supported yet; the code below lacks unit tests and is buggy")
    # elif len(lod) == 1:
    #     ans = []
    #     idx = 0
    #     while idx < len(lod) - 1:
    #         ans.append(tensor_data[lod[idx]:lod[idx + 1]])
    #         idx += 1
    # else:
    #     for l in reversed(lod):
    #         ans = []
    #         idx = 0
    #         while idx < len(l) - 1:
    #             ans.append(tensor_data[l[idx]:l[idx + 1]])
    #             idx += 1
    #         tensor_data = ans
    #     ans = tensor_data
    return ans


class Executor(object):
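    """Runs a Program on the given places (devices).

    Besides plain execution via `run`, this class also carries the
    experimental distributed-training helpers (`optimize`,
    `_optimize_distributed` and `get_pserver_program`) that split a
    program into trainer-side and parameter-server-side pieces.
    """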
    def __init__(self, places):
        if not isinstance(places, list) and not isinstance(places, tuple):
            places = [places]

        act_places = []
        for each in places:
            p = core.Place()
            p.set_place(each)
            act_places.append(p)

        self.executor = core.Executor(act_places)
        self.places = places

    def optimize(self, optimize_ops, params_grads, program=None, **kwargs):
        """
            optimize the program for different runtime environment

            :param optimize_ops: op list of optimization, should be the
                                 return value of Optimizer.minimize
            :type optimize_ops: list
            :param program: program to optimize, default default_main_program
            :param pservers: parameter server endpoints like "m1:6174,m2:6174"
            :type pservers: string

            :return: return a list of programs
        """
        if program is None:
            program = default_main_program()

        if "pservers" in kwargs:
            return self._optimize_distributed(optimize_ops, program,
                                              params_grads, **kwargs)

    def _optimize_distributed(self, optimize_ops, program, params_and_grads,
                              **kwargs):
        # Remove the optimize ops from main_program and add send ops instead.
        # FIXME(typhoonzero): delete_op only removes the first occurrence;
        # need to consider the case of multiple identical optimize ops.
        for op in optimize_ops:
            program.global_block().delete_op(op)
        if "split_method" in kwargs:
            split_method = kwargs["split_method"]
        else:
            split_method = distribute_planner.round_robin

        assert callable(split_method)
        pserver_endpoints = kwargs["pservers"].split(",")
        params = program.global_block().all_parameters()
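        # split_method is expected to shard the parameters across the pserver
        # endpoints and return a dict that maps each endpoint to
        # {"params": [...], "grads": [...]}; that is how param_grad_map is
        # used below.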
        self.param_grad_map = split_method(params, pserver_endpoints)

        for ep in pserver_endpoints:
            # FIXME(typhoonzero): sends to different servers could run in parallel.
            send_op = program.global_block().append_op(
                type="send",
                inputs={"X": self.param_grad_map[ep]["grads"]
                        },  # inputs is a list of tensors to be sent
                outputs={},
                attrs={"endpoint": ep})
        # -------------- generate optimize sub program --------------
        self.optimize_sub_program = Program()
        for opt_op in optimize_ops:
            self.optimize_sub_program.global_block().ops.append(opt_op)

    def get_pserver_program(self, endpoint):
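        """Build the program that the parameter server at `endpoint` runs.

        The returned program holds copies of the parameters assigned to this
        endpoint and a recv op that receives their gradients and applies the
        optimize sub-program.
        """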
        pserver_program = Program()
        for v in self.param_grad_map[endpoint]["params"]:
            assert isinstance(v, Parameter)
            new_p = Parameter(
                block=pserver_program.global_block(),
                shape=v.shape,
                dtype=v.dtype,
                type=v.type,
                lod_level=v.lod_level,
                stop_gradient=v.stop_gradient,
                trainable=v.trainable,
                optimize_attr=v.optimize_attr,
                regularizer=v.regularizer,
                name=v.name)
            pserver_program.global_block().vars[new_p.name] = new_p

        pserver_program.global_block().append_op(
            type="recv",
            inputs={"RX":
                    self.param_grad_map[endpoint]["grads"]},  # grads to recv
            outputs={},
            attrs={
                "OptimizeProgram": self.optimize_sub_program.to_string(True),
                "endpoint": endpoint,
                "ParamList": self.param_grad_map[endpoint]["params"],
                "GradList": self.param_grad_map[endpoint]["grads"]
            })
        return pserver_program

    def aslodtensor(self, data):
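        """Convert feed data (a numpy array or nested lists) to a LoDTensor.

        Only the plain-tensor case is supported for now; the nested-list
        (LoDTensor) path below is disabled because it lacks unit tests.
        """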
        def accumulate(data):
            if not isinstance(data, list):
                return 1
            return sum([accumulate(sub) for sub in data])

        def parselod(data):
            seq_lens = [accumulate(seq) for seq in data]
            cur_len = 0
            lod = [cur_len]
            for l in seq_lens:
                cur_len += l
                lod.append(cur_len)
            return lod

        assert len(self.places) != 0
        if not isinstance(data, list):
            # pure tensor case
            tensor = core.LoDTensor()
            tensor.set(data, self.places[0])
            return tensor
        else:
            raise RuntimeError(
                "The nested-list (LoDTensor) feed path below lacks unit "
                "tests and is disabled")
            # lodtensor case
            lod = []
            if not isinstance(data[0], list):
                lod.append(parselod(data))
                flattened_data = np.concatenate(data, axis=0).astype("int64")
            else:
                while isinstance(data[0], list):
                    lod.append(parselod(data))
                    flattened_data = [item for seq in data for item in seq]
                    data = flattened_data
                flattened_data = np.concatenate(data, axis=0).astype("int64")
            flattened_data = flattened_data.reshape([len(flattened_data), 1])
            tensor = core.LoDTensor()
            tensor.set(flattened_data, self.places[0])
            tensor.set_lod(lod)
            return tensor

    def run(self,
            program=None,
            feed=None,
            fetch_list=None,
            feed_var_name='feed',
            fetch_var_name='fetch',
            scope=None,
            return_numpy=True):
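        """Run `program` (default_main_program when None) within `scope`.

        `feed` maps variable names to their input values (numpy arrays,
        nested lists, or LoDTensors); `fetch_list` lists the variables whose
        values are returned, converted to numpy arrays when `return_numpy`
        is True. A rough usage sketch (the variable names and the use of
        core.CPUPlace() are illustrative, not taken from this file)::

            exe = Executor(core.CPUPlace())
            outs = exe.run(feed={'x': x_data}, fetch_list=[loss])
        """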
        if feed is None:
            feed = {}
        if fetch_list is None:
            fetch_list = []

        if program is None:
            program = default_main_program()

        if not isinstance(program, Program):
            raise TypeError("Executor.run expects a Program, got %s" %
                            type(program))

        if scope is None:
            scope = g_scope

        program = program.clone()
        global_block = program.global_block()
        feed_var = global_block.create_var(
            name=feed_var_name,
            type=core.VarDesc.VarType.FEED_MINIBATCH,
            persistable=True)

        for i, name in enumerate(feed):
            out = global_block.var(name)
            global_block.prepend_op(
                'feed',
                inputs={'X': [feed_var]},
                outputs={'Out': [out]},
                attrs={'col': i})
            cur_feed = feed[name]
            if not isinstance(cur_feed, core.LoDTensor):
                cur_feed = self.aslodtensor(cur_feed)
            core.set_feed_variable(scope, cur_feed, feed_var.name, i)

        fetch_var = global_block.create_var(
            name=fetch_var_name,
            type=core.VarDesc.VarType.FETCH_LIST,
            persistable=True)
        for i, var in enumerate(fetch_list):
            global_block.append_op(
                type='fetch',
                inputs={'X': [var]},
                outputs={'Out': [fetch_var]},
                attrs={'col': i})

        self.executor.run(program.desc, scope, 0, True)
        outs = [
            core.get_fetch_variable(scope, fetch_var_name, i)
            for i in xrange(len(fetch_list))
        ]

        if return_numpy:
            outs = as_numpy(outs)
        return outs