#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Huawei Technologies Co., Ltd.
# A-Tune is licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#     http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# Create: 2019-10-29

"""
This class is used to find optimal settings and generate optimized profile.
"""

import logging
import numbers
import multiprocessing
import collections.abc
import numpy as np
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler

from skopt import Optimizer as baseOpt
from skopt.utils import normalize_dimensions
from skopt.utils import cook_estimator

from analysis.optimizer.abtest_tuning_manager import ABtestTuningManager
from analysis.optimizer.knob_sampling_manager import KnobSamplingManager
from analysis.optimizer.tpe_optimizer import TPEOptimizer
from analysis.optimizer.weighted_ensemble_feature_selector import WeightedEnsembleFeatureSelector

LOGGER = logging.getLogger(__name__)


class Optimizer(multiprocessing.Process):
    """find optimal settings and generate optimized profile"""

    def __init__(self, name, params, child_conn, engine="bayes", max_eval=50,
                 x0=None, y0=None, n_random_starts=20, split_count=5, noise=0.00001 ** 2):
        super().__init__(name=name)
        self.knobs = params
        self.child_conn = child_conn
        self.engine = engine
        self.noise = noise
        self.max_eval = int(max_eval)
        self.split_count = split_count
        self.x_ref = x0
        self.y_ref = y0
        if self.x_ref is not None and len(self.x_ref) == 1:
            ref_x, _ = self.transfer()
            self.ref = ref_x[0]
        else:
            self.ref = []
        self._n_random_starts = 20 if n_random_starts is None else n_random_starts

    def build_space(self):
        """build the parameter space: candidate-item lists for discrete
        knobs and (low, high) tuples for continuous knobs"""
        objective_params_list = []
        for i, p_nob in enumerate(self.knobs):
            if p_nob['type'] == 'discrete':
                items = self.handle_discrete_data(p_nob, i)
                objective_params_list.append(items)
            elif p_nob['type'] == 'continuous':
                r_range = p_nob['range']
                if r_range is None or len(r_range) != 2:
                    raise ValueError("the range of {} must contain exactly 2 values"
                                     .format(p_nob['name']))
                if p_nob['dtype'] == 'int':
                    try:
                        r_range[0] = int(r_range[0])
                        r_range[1] = int(r_range[1])
                    except ValueError:
                        raise ValueError("the range value of {} is not an integer value"
                                         .format(p_nob['name']))
                elif p_nob['dtype'] == 'float':
                    try:
                        r_range[0] = float(r_range[0])
                        r_range[1] = float(r_range[1])
                    except ValueError:
                        raise ValueError("the range value of {} is not a float value"
                                         .format(p_nob['name']))

                if len(self.ref) > 0:
                    if self.ref[i] < r_range[0] or self.ref[i] > r_range[1]:
                        raise ValueError("the ref value of {} is out of range"
                                         .format(p_nob['name']))
                objective_params_list.append((r_range[0], r_range[1]))
            else:
                raise ValueError("the type of {} is not supported".format(p_nob['name']))
        return objective_params_list
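
    # Worked example (hypothetical knobs): one discrete int knob resolving
    # to items [1, 2, 3] plus one continuous float knob with range
    # [0.0, 1.0] produce the space [[1, 2, 3], (0.0, 1.0)].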

    def handle_discrete_data(self, p_nob, index):
        """build the candidate item list for a discrete knob, expanding
        ranges by step and including the ref value if it is missing"""
        if p_nob['dtype'] == 'int':
            items = p_nob['items']
            if items is None:
                items = []
            r_range = p_nob['range']
            step = 1
            if 'step' in p_nob.keys():
                step = 1 if p_nob['step'] < 1 else p_nob['step']
            if r_range is not None:
                length = len(r_range) if len(r_range) % 2 == 0 else len(r_range) - 1
                for i in range(0, length, 2):
                    items.extend(list(np.arange(r_range[i], r_range[i + 1] + 1, step=step)))
            items = list(set(items))
            if len(self.ref) > 0:
                try:
                    ref_value = int(self.ref[index])
                except ValueError:
                    raise ValueError("the ref value of {} is not an integer value"
                                     .format(p_nob['name']))
                if ref_value not in items:
                    items.append(ref_value)
            return items
        if p_nob['dtype'] == 'float':
            items = p_nob['items']
            if items is None:
                items = []
            r_range = p_nob['range']
            step = 0.1
            if 'step' in p_nob.keys():
                step = 0.1 if p_nob['step'] <= 0 else p_nob['step']
            if r_range is not None:
                length = len(r_range) if len(r_range) % 2 == 0 else len(r_range) - 1
                for i in range(0, length, 2):
                    items.extend(list(np.arange(r_range[i], r_range[i + 1], step=step)))
            items = list(set(items))
            if len(self.ref) > 0:
                try:
                    ref_value = float(self.ref[index])
                except ValueError:
                    raise ValueError("the ref value of {} is not a float value"
                                     .format(p_nob['name']))
                if ref_value not in items:
                    items.append(ref_value)
            return items
        if p_nob['dtype'] == 'string':
            items = p_nob['options']
            if len(self.ref) > 0:
                try:
                    ref_value = str(self.ref[index])
                except ValueError:
                    raise ValueError("the ref value of {} is not a string value"
                                     .format(p_nob['name']))
                if ref_value not in items:
                    items.append(ref_value)
            return items
        raise ValueError("the dtype of {} is not supported".format(p_nob['name']))

    @staticmethod
    def feature_importance(options, performance, labels):
        """compute Lasso-based feature importance and return labels ranked
        by their share of the total absolute coefficient mass"""
        options = StandardScaler().fit_transform(options)
        lasso = Lasso()
        lasso.fit(options, performance)
        result = zip(lasso.coef_, labels)
        total_sum = sum(map(abs, lasso.coef_))
        if total_sum == 0:
            return ", ".join("%s: 0" % label for label in labels)
        result = sorted(result, key=lambda x: -np.abs(x[0]))
        rank = ", ".join("%s: %s%%" % (label, round(coef * 100 / total_sum, 2))
                         for coef, label in result)
        return rank
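
    # Example output (illustrative): with coefficients 0.5 and -0.3 the
    # rank string is "knob_a: 62.5%, knob_b: -37.5%", i.e. each label's
    # signed share of the total absolute coefficient mass.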

    def _get_value_from_knobs(self, kv):
        """convert one x0 record (name -> string value) into a typed value list"""
        x_each = []
        for p_nob in self.knobs:
            if p_nob['name'] not in kv.keys():
                raise ValueError("the param {} is not in the x0 ref".format(p_nob['name']))
            if p_nob['dtype'] == 'int':
                x_each.append(int(kv[p_nob['name']]))
            elif p_nob['dtype'] == 'float':
                x_each.append(float(kv[p_nob['name']]))
            else:
                x_each.append(kv[p_nob['name']])
        return x_each

    def transfer(self):
        """transfer ref x0 to typed values and y0 to float"""
        list_ref_x = []
        list_ref_y = []
        if self.x_ref is None or self.y_ref is None:
            return (list_ref_x, list_ref_y)

        for x_value in self.x_ref:
            kv = {}
            if len(x_value) != len(self.knobs):
                raise ValueError("x0 is not the same length as knobs")

            for val in x_value:
                params = val.split("=")
                if len(params) != 2:
                    raise ValueError("the param format of {} is not correct".format(params))
                kv[params[0]] = params[1]

            ref_x = self._get_value_from_knobs(kv)
            if len(ref_x) != len(self.knobs):
                raise ValueError("tuning parameter is not the same length with knobs")
            list_ref_x.append(ref_x)
        list_ref_y = [float(y) for y in self.y_ref]
        return (list_ref_x, list_ref_y)
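
    # Example (hypothetical values): with int knobs "vm.swappiness" and
    # "kernel.sched_autogroup_enabled",
    #     x0 = [["vm.swappiness=60", "kernel.sched_autogroup_enabled=1"]]
    #     y0 = ["123.4"]
    # transfers to ([[60, 1]], [123.4]).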

    def run(self):
        """start the tuning process"""

        def objective(var):
            """send the next parameters and receive the benchmark result"""
            iter_result = {}
            option = []
            for i, knob in enumerate(self.knobs):
                params[knob['name']] = var[i]
                if knob['dtype'] == 'string':
                    option.append(knob['options'].index(var[i]))
                else:
                    option.append(var[i])

            iter_result["param"] = params
            self.child_conn.send(iter_result)
            result = self.child_conn.recv()
            x_num = 0.0
            eval_list = result.split(',')
            for value in eval_list:
                num = float(value)
                x_num = x_num + num
            options.append(option)
            performance.append(x_num)
            return x_num
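
        # Note (protocol): the peer replies with a comma-separated string of
        # benchmark values, e.g. "12.3,4.5", which objective sums to 16.8.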

        params = {}
        options = []
        performance = []
        labels = []
        estimator = None
        try:
            if self.engine in ('random', 'forest', 'gbrt', 'bayes', 'extraTrees'):
                params_space = self.build_space()
                ref_x, ref_y = self.transfer()
                if len(ref_x) == 0:
                    if len(self.ref) == 0:
                        ref_x = None
                    else:
                        ref_x = self.ref
                    ref_y = None
                if ref_x is not None and not isinstance(ref_x[0], (list, tuple)):
                    ref_x = [ref_x]
                LOGGER.info('x0: %s', ref_x)
                LOGGER.info('y0: %s', ref_y)

                if ref_x is not None and isinstance(ref_x[0], (list, tuple)):
                    self._n_random_starts = 0 if len(ref_x) >= self._n_random_starts \
                        else self._n_random_starts - len(ref_x) + 1

                LOGGER.info('n_random_starts parameter is: %d', self._n_random_starts)
                LOGGER.info("Running performance evaluation.......")
                if self.engine == 'random':
                    estimator = 'dummy'
                elif self.engine == 'forest':
                    estimator = 'RF'
                elif self.engine == 'extraTrees':
                    estimator = 'ET'
                elif self.engine == 'gbrt':
                    estimator = 'GBRT'
                elif self.engine == 'bayes':
                    params_space = normalize_dimensions(params_space)
                    estimator = cook_estimator("GP", space=params_space, noise=self.noise)

                LOGGER.info("base_estimator is: %s", estimator)
                optimizer = baseOpt(
                    dimensions=params_space,
                    n_random_starts=self._n_random_starts,
                    random_state=1,
                    base_estimator=estimator
                )
                n_calls = self.max_eval
                # User suggested points at which to evaluate the objective first
                if ref_x and ref_y is None:
                    ref_y = list(map(objective, ref_x))
                    LOGGER.info("ref_y is: %s", ref_y)
                    n_calls -= len(ref_y)

                # Pass user suggested initialisation points to the optimizer
                if ref_x:
                    if not isinstance(ref_y, (collections.abc.Iterable, numbers.Number)):
                        raise ValueError("`ref_y` should be an iterable or a scalar, "
                                         "got %s" % type(ref_y))
                    if len(ref_x) != len(ref_y):
                        raise ValueError("`ref_x` and `ref_y` should "
                                         "have the same length")
                    LOGGER.info("ref_x: %s", ref_x)
                    LOGGER.info("ref_y: %s", ref_y)
                    ret = optimizer.tell(ref_x, ref_y)

                for i in range(n_calls):
                    next_x = optimizer.ask()
                    LOGGER.info("next_x: %s", next_x)
                    LOGGER.info("Running performance evaluation.......")
                    next_y = objective(next_x)
                    LOGGER.info("next_y: %s", next_y)
                    ret = optimizer.tell(next_x, next_y)
                    LOGGER.info("finish (ref_x, ref_y) tell")
            elif self.engine == 'abtest':
                abtuning_manager = ABtestTuningManager(self.knobs, self.child_conn,
                                                       self.split_count)
                options, performance = abtuning_manager.do_abtest_tuning_abtest()
                params = abtuning_manager.get_best_params()
                # convert string option into index
                options = abtuning_manager.get_options_index(options)
            elif self.engine == 'lhs':
                knobsampling_manager = KnobSamplingManager(self.knobs, self.child_conn,
                                                           self.max_eval, self.split_count)
                options = knobsampling_manager.get_knob_samples()
                performance = knobsampling_manager.do_knob_sampling_test(options)
                params = knobsampling_manager.get_best_params(options, performance)
                options = knobsampling_manager.get_options_index(options)
            elif self.engine == 'tpe':
                tpe_opt = TPEOptimizer(self.knobs, self.child_conn, self.max_eval)
                best_params = tpe_opt.tpe_minimize_tuning()
                final_param = {}
                final_param["finished"] = True
                final_param["param"] = best_params
                self.child_conn.send(final_param)
                return best_params
            LOGGER.info("Minimization procedure has been completed.")
        except ValueError as value_error:
            LOGGER.error('Value Error: %s', repr(value_error))
            self.child_conn.send(value_error)
            return None
        except RuntimeError as runtime_error:
            LOGGER.error('Runtime Error: %s', repr(runtime_error))
            self.child_conn.send(runtime_error)
            return None
        except Exception as err:
            LOGGER.error('Unexpected Error: %s', repr(err))
            self.child_conn.send(Exception("Unexpected Error:", repr(err)))
            return None

        for i, knob in enumerate(self.knobs):
            if estimator is not None:
                params[knob['name']] = ret.x[i]
            labels.append(knob['name'])

        LOGGER.info("Optimized result: %s", params)
        LOGGER.info("The optimized profile has been generated.")
        final_param = {}
        wefs = WeightedEnsembleFeatureSelector()
        rank = wefs.get_ensemble_feature_importance(options, performance, labels)

        final_param["param"] = params
        final_param["rank"] = rank
        final_param["finished"] = True
        self.child_conn.send(final_param)
        LOGGER.info("The feature importances of current evaluation are: %s", rank)
        return params

    def stop_process(self):
        """stop process"""
        self.child_conn.close()
        self.terminate()