#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is definition of dataset class, which is high performance IO."""

import paddle
from paddle.fluid.proto import data_feed_pb2
from google.protobuf import text_format
import paddle.fluid.core as core


class DatasetBase(object):
    """ Base dataset class. """

    def __init__(self):
        """ Init. """
        # define class name here
        # to decide whether we need create in memory instance
        self.proto_desc = data_feed_pb2.DataFeedDesc()
        self.proto_desc.pipe_command = "cat"
        self.dataset = core.Dataset("MultiSlotDataset")
        self.thread_num = 1
        self.filelist = []

    def init(self,
             batch_size=1,
             thread_num=1,
             use_var=[],
             pipe_command="cat",
             input_type=0,
             fs_name="",
             fs_ugi="",
             download_cmd="cat"):
        """
        Should be called only once in a user's Python script to initialize the
        settings of a dataset instance. Normally, it is called by
        InMemoryDataset or QueueDataset.

        Args:
            batch_size(int): batch size. It will be effective during training. default is 1.
            thread_num(int): thread num, i.e. the number of readers. default is 1.
            use_var(list): list of variables that will be used. default is [].
            pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command used to preprocess the input data. default is "cat".
            input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0.
            fs_name(str): fs name. default is "".
            fs_ugi(str): fs ugi. default is "".
            download_cmd(str): customized download command. default is "cat".
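
        Examples:
            A minimal sketch; the argument values below are illustrative only:

            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.DatasetBase()
              dataset.init(
                  batch_size=32,
                  thread_num=2,
                  pipe_command="cat")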

        """
        self._set_batch_size(batch_size)
        self._set_thread(thread_num)
        self._set_use_var(use_var)
        self._set_pipe_command(pipe_command)
        self._set_input_type(input_type)
        self._set_hdfs_config(fs_name, fs_ugi)
        self._set_download_cmd(download_cmd)

    def _set_pipe_command(self, pipe_command):
        """
        Set pipe command of current dataset.
        A pipe command is a UNIX pipeline command used to preprocess the input data.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.dataset.DatasetBase()
              dataset._set_pipe_command("python my_script.py")

        Args:
            pipe_command(str): pipe command

        """
        self.proto_desc.pipe_command = pipe_command

    def _set_batch_size(self, batch_size):
        """
        Set batch size. Will be effective during training

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.DatasetBase()
              dataset._set_batch_size(128)

        Args:
            batch_size(int): batch size

        """
        self.proto_desc.batch_size = batch_size

    def _set_thread(self, thread_num):
        """
        Set thread num, i.e. the number of readers.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.DatasetBase()
              dataset._set_thread(12)

        Args:
            thread_num(int): thread num
        """
        self.dataset.set_thread_num(thread_num)
        self.thread_num = thread_num

    def set_filelist(self, filelist):
        """
        Set file list in current worker.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.DatasetBase()
              dataset.set_filelist(['a.txt', 'b.txt'])

        Args:
            filelist(list): file list
        """
        self.dataset.set_filelist(filelist)
        self.filelist = filelist

    def _set_input_type(self, input_type):
        # input_type 0: feed one sample at a time; 1: feed one whole batch at a time
        self.proto_desc.input_type = input_type

    def _set_use_var(self, var_list):
        """
        Set Variables which you will use.

        Examples:
            .. code-block:: python

              import paddle
              import paddle.fluid as fluid
              data = fluid.data(name="data", shape=[None, 1], dtype="int64", lod_level=1)
              label = fluid.data(name="label", shape=[None, 1], dtype="int64", lod_level=1)
              dataset = paddle.distributed.fleet.DatasetBase()
              dataset._set_use_var([data, label])

        Args:
            var_list(list): variable list
        """
        multi_slot = self.proto_desc.multi_slot_desc
        for var in var_list:
            slot_var = multi_slot.slots.add()
            slot_var.is_used = True
            slot_var.name = var.name
            if var.lod_level == 0:
                slot_var.is_dense = True
                slot_var.shape.extend(var.shape)
            if var.dtype == core.VarDesc.VarType.FP32:
                slot_var.type = "float"
            elif var.dtype == core.VarDesc.VarType.INT64:
                slot_var.type = "uint64"
            else:
                raise ValueError(
                    "Currently, paddle.distributed.fleet.dataset only supports dtype=float32 and dtype=int64"
                )

    def _set_hdfs_config(self, fs_name, fs_ugi):
        """
        Set hdfs config: fs name and ugi

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.DatasetBase()
              dataset._set_hdfs_config("my_fs_name", "my_fs_ugi")

        Args:
            fs_name(str): fs name
            fs_ugi(str): fs ugi
        """
        self.dataset.set_hdfs_config(fs_name, fs_ugi)

    def _set_download_cmd(self, download_cmd):
        """
        Set customized download cmd: download_cmd

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.DatasetBase()
              dataset._set_download_cmd("./read_from_afs")

        Args:
            download_cmd(str): customized download command
        """
        self.dataset.set_download_cmd(download_cmd)

    def _prepare_to_run(self):
        """
        Set data_feed_desc before load or shuffle;
        users do not need to call this function.
        """
        # cap reader threads at the number of files so every reader has a file
        if self.thread_num > len(self.filelist):
            self.thread_num = len(self.filelist)
        self.dataset.set_thread_num(self.thread_num)
        self.dataset.set_data_feed_desc(self._desc())
        self.dataset.create_readers()

    def _finish_to_run(self):
        self.dataset.destroy_readers()

    def _desc(self):
        """
        Returns a text-format protobuf message for this DataFeedDesc

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.DatasetBase()
              print(dataset._desc())

        Returns:
            A string message
        """
        return text_format.MessageToString(self.proto_desc)

    def _dynamic_adjust_before_train(self, thread_num):
        pass

    def _dynamic_adjust_after_train(self):
        pass


class InMemoryDataset(DatasetBase):
    """
    InMemoryDataset, it will load data into memory
    and shuffle data before training.

    Examples:
        .. code-block:: python

          import paddle
          dataset = paddle.distributed.InMemoryDataset()
    """

    def __init__(self):
        """ Init. """
        super(InMemoryDataset, self).__init__()
        self.proto_desc.name = "MultiSlotInMemoryDataFeed"
        self.fleet_send_batch_size = None
        self.is_user_set_queue_num = False
        self.queue_num = None
        self.parse_ins_id = False
        self.parse_content = False
        self.parse_logkey = False
        self.merge_by_sid = True
        self.enable_pv_merge = False
        self.merge_by_lineid = False
        self.fleet_send_sleep_seconds = None

    def _init_distributed_settings(self, **kwargs):
        """
        Should be called only once in a user's Python script to initialize
        distributed-related settings of the dataset instance.

        Args:
            kwargs: Keyword arguments. Currently, we support following keys in **kwargs:

            merge_size(int): ins size to merge, if merge_size > 0, set merge by line id, 
                             instances of same line id will be merged after shuffle, 
                             you should parse line id in data generator. default is -1.
            parse_ins_id(bool): Set if Dataset need to parse ins_id. default is False.
            parse_content(bool): Set if Dataset need to parse content. default is False.
            fleet_send_batch_size(int): Set fleet send batch size in one rpc, default is 1024
            fleet_send_sleep_seconds(int): Set fleet send sleep time, default is 0
            fea_eval(bool): Set if Dataset need to do feature importance evaluation using slots shuffle.
                            default is False.
            candidate_size(int): if fea_eval is set True, set the candidate size used in slots shuffle.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset.init(
                    batch_size=1,
                    thread_num=2,
                    input_type=1,
                    pipe_command="cat",
                    use_var=[])
              dataset._init_distributed_settings(
                    parse_ins_id=True,
                    parse_content=True,
                    fea_eval=True,
                    candidate_size=10000)
              
        """
        merge_size = kwargs.get("merge_size", -1)
        if merge_size > 0:
            self._set_merge_by_lineid(merge_size)

        parse_ins_id = kwargs.get("parse_ins_id", False)
        self._set_parse_ins_id(parse_ins_id)

        parse_content = kwargs.get("parse_content", False)
        self._set_parse_content(parse_content)

        fleet_send_batch_size = kwargs.get("fleet_send_batch_size", None)
        if fleet_send_batch_size:
            self._set_fleet_send_batch_size(fleet_send_batch_size)

        fleet_send_sleep_seconds = kwargs.get("fleet_send_sleep_seconds", None)
        if fleet_send_sleep_seconds:
            self._set_fleet_send_sleep_seconds(fleet_send_sleep_seconds)

        fea_eval = kwargs.get("fea_eval", False)
        if fea_eval:
            candidate_size = kwargs.get("candidate_size", 10000)
            self._set_fea_eval(candidate_size, True)

    def update_settings(self, **kwargs):
        """
        Should be called in a user's Python script to update the settings of the
        dataset instance.

        Args:
            kwargs: Keyword arguments. Currently, we support following keys in **kwargs,
                    including single node settings and advanced distributed related settings:

            batch_size(int): batch size. It will be effective during training. default is 1.
            thread_num(int): thread num, it is the num of readers. default is 1.
            use_var(list): list of variables. Variables which you will use. default is [].
            input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0.
            fs_name(str): fs name. default is "".
            fs_ugi(str): fs ugi. default is "".
            pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command used to preprocess the input data. default is "cat".
            download_cmd(str): customized download command. default is "cat"
            data_feed_type(str): data feed type used in c++ code. default is "MultiSlotInMemoryDataFeed".
            queue_num(int): Dataset output queue num, training threads get data from queues. default is -1, which is set the same as the thread number in c++.

            merge_size(int): ins size to merge, if merge_size > 0, set merge by line id, 
                             instances of same line id will be merged after shuffle, 
                             you should parse line id in data generator. default is -1.
            parse_ins_id(bool): Set if Dataset need to parse ins_id. default is False.
            parse_content(bool): Set if Dataset need to parse content. default is False.
            fleet_send_batch_size(int): Set fleet send batch size in one rpc, default is 1024
            fleet_send_sleep_seconds(int): Set fleet send sleep time, default is 0
            fea_eval(bool): Set if Dataset need to do feature importance evaluation using slots shuffle.
                            default is False.
            candidate_size(int): if fea_eval is set True, set the candidate size used in slots shuffle.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset.init(
                    batch_size=1,
                    thread_num=2,
                    input_type=1,
                    pipe_command="cat",
                    use_var=[])
              dataset._init_distributed_settings(
                    parse_ins_id=True,
                    parse_content=True,
                    fea_eval=True,
                    candidate_size=10000)
              dataset.update_settings(batch_size=2)
            
        """
        for key in kwargs:
            if key == "pipe_command":
                self._set_pipe_command(kwargs[key])
            elif key == "batch_size":
                self._set_batch_size(kwargs[key])
            elif key == "thread_num":
                self._set_thread(kwargs[key])
            elif key == "use_var":
                self._set_use_var(kwargs[key])
            elif key == "input_type":
                self._set_input_type(kwargs[key])
            elif key == "fs_name" and "fs_ugi" in kwargs:
                self._set_hdfs_config(kwargs[key], kwargs["fs_ugi"])
            elif key == "download_cmd":
                self._set_download_cmd(kwargs[key])
            elif key == "merge_size" and kwargs.get("merge_size", -1) > 0:
                self._set_merge_by_lineid(kwargs[key])
            elif key == "parse_ins_id":
                self._set_parse_ins_id(kwargs[key])
            elif key == "parse_content":
                self._set_parse_content(kwargs[key])
            elif key == "fleet_send_batch_size":
                self._set_fleet_send_batch_size(kwargs[key])
            elif key == "fleet_send_sleep_seconds":
                self._set_fleet_send_sleep_seconds(kwargs[key])
            elif key == "fea_eval" and kwargs[key]:
                candidate_size = kwargs.get("candidate_size", 10000)
                self._set_fea_eval(candidate_size, True)

    def init(self, **kwargs):
        """
        Should be called only once in a user's Python script to initialize the
        settings of the dataset instance.

        Args:
            kwargs: Keyword arguments. Currently, we support following keys in **kwargs:
            
            batch_size(int): batch size. It will be effective during training. default is 1.
            thread_num(int): thread num, it is the num of readers. default is 1.
            use_var(list): list of variables. Variables which you will use. default is [].
            input_type(int): the input type of generated input. 0 is for one sample, 1 is for one batch. default is 0.
            fs_name(str): fs name. default is "".
            fs_ugi(str): fs ugi. default is "".
            pipe_command(str): pipe command of current dataset. A pipe command is a UNIX pipeline command used to preprocess the input data. default is "cat".
            download_cmd(str): customized download command. default is "cat"
            data_feed_type(str): data feed type used in c++ code. default is "MultiSlotInMemoryDataFeed".
            queue_num(int): Dataset output queue num, training threads get data from queues. default is -1, which is set same as thread number in c++.

        Examples:
            .. code-block:: python

                import os
                import paddle
                import paddle.fluid as fluid
                import paddle.fluid.core as core
                with open("test_queue_dataset_run_a.txt", "w") as f:
                    data = "2 1 2 2 5 4 2 2 7 2 1 3\n"
                    data += "2 6 2 2 1 4 2 2 4 2 2 3\n"
                    data += "2 5 2 2 9 9 2 2 7 2 1 3\n"
                    data += "2 7 2 2 1 9 2 3 7 2 5 3\n"
                    f.write(data)
                with open("test_queue_dataset_run_b.txt", "w") as f:
                    data = "2 1 2 2 5 4 2 2 7 2 1 3\n"
                    data += "2 6 2 2 1 4 2 2 4 2 2 3\n"
                    data += "2 5 2 2 9 9 2 2 7 2 1 3\n"
                    data += "2 7 2 2 1 9 2 3 7 2 5 3\n"
                    f.write(data)

                slots = ["slot1", "slot2", "slot3", "slot4"]
                slots_vars = []
                for slot in slots:
                    var = fluid.data(
                        name=slot, shape=[None, 1], dtype="int64", lod_level=1)
                    slots_vars.append(var)

                dataset = paddle.distributed.InMemoryDataset()
                dataset.init(
                    batch_size=1,
                    thread_num=2,
                    input_type=1,
                    pipe_command="cat",
                    use_var=slots_vars)
                dataset.set_filelist(
                    ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"])
                dataset.load_into_memory()

                exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda(
                ) else fluid.CUDAPlace(0))
                exe.run(fluid.default_startup_program())
                exe.train_from_dataset(fluid.default_main_program(),
                                           dataset)
                os.remove("./test_queue_dataset_run_a.txt")
                os.remove("./test_queue_dataset_run_b.txt")
        """
        batch_size = kwargs.get("batch_size", 1)
        thread_num = kwargs.get("thread_num", 1)
        use_var = kwargs.get("use_var", [])
        input_type = kwargs.get("input_type", 0)
        fs_name = kwargs.get("fs_name", "")
        fs_ugi = kwargs.get("fs_ugi", "")
        pipe_command = kwargs.get("pipe_command", "cat")
        download_cmd = kwargs.get("download_cmd", "cat")

        super(InMemoryDataset, self).init(
            batch_size=batch_size,
            thread_num=thread_num,
            use_var=use_var,
            pipe_command=pipe_command,
            input_type=input_type,
            fs_name=fs_name,
            fs_ugi=fs_ugi,
            download_cmd=download_cmd)

        data_feed_type = kwargs.get("data_feed_type",
                                    "MultiSlotInMemoryDataFeed")
        self._set_feed_type(data_feed_type)

        if kwargs.get("queue_num", -1) > 0:
            queue_num = kwargs.get("queue_num", -1)
            self._set_queue_num(queue_num)

    def _set_feed_type(self, data_feed_type):
        """
        Set the data feed type, i.e. the name of the DataFeedDesc
        """
        self.proto_desc.name = data_feed_type

    def _prepare_to_run(self):
        """
        Set data_feed_desc before load or shuffle;
        users do not need to call this function.
        """
        if self.thread_num <= 0:
            self.thread_num = 1
        self.dataset.set_thread_num(self.thread_num)
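        # default the number of output queues to the number of reader threads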
        if self.queue_num is None:
            self.queue_num = self.thread_num
        self.dataset.set_queue_num(self.queue_num)
        self.dataset.set_parse_ins_id(self.parse_ins_id)
        self.dataset.set_parse_content(self.parse_content)
        self.dataset.set_parse_logkey(self.parse_logkey)
        self.dataset.set_merge_by_sid(self.merge_by_sid)
        self.dataset.set_enable_pv_merge(self.enable_pv_merge)
        self.dataset.set_data_feed_desc(self._desc())
        self.dataset.create_channel()
        self.dataset.create_readers()

    def _dynamic_adjust_before_train(self, thread_num):
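        # unless the user explicitly set queue_num, resize the channels to
        # match the requested thread number before training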
        if not self.is_user_set_queue_num:
            self.dataset.dynamic_adjust_channel_num(thread_num, False)
        self.dataset.dynamic_adjust_readers_num(thread_num)

    def _dynamic_adjust_after_train(self):
        if not self.is_user_set_queue_num:
            self.dataset.dynamic_adjust_channel_num(self.thread_num, False)
        self.dataset.dynamic_adjust_readers_num(self.thread_num)

    def _set_queue_num(self, queue_num):
        """
        Set Dataset output queue num, training threads get data from queues

        Args:
            queue_num(int): dataset output queue num

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset._set_queue_num(12)

        """
        self.is_user_set_queue_num = True
        self.queue_num = queue_num

    def _set_parse_ins_id(self, parse_ins_id):
        """
        Set if Dataset needs to parse ins_id

        Args:
            parse_ins_id(bool): if parse ins_id or not

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset._set_parse_ins_id(True)

        """
        self.parse_ins_id = parse_ins_id

    def _set_parse_content(self, parse_content):
        """
        Set if Dataset needs to parse content

        Args:
            parse_content(bool): if parse content or not

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset._set_parse_content(True)

        """
        self.parse_content = parse_content

    def _set_fleet_send_batch_size(self, fleet_send_batch_size=1024):
        """
        Set fleet send batch size, default is 1024

        Args:
            fleet_send_batch_size(int): fleet send batch size

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset._set_fleet_send_batch_size(800)

        """
        self.fleet_send_batch_size = fleet_send_batch_size

    def _set_fleet_send_sleep_seconds(self, fleet_send_sleep_seconds=0):
        """
        Set fleet send sleep time, default is 0

        Args:
            fleet_send_sleep_seconds(int): fleet send sleep time

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset._set_fleet_send_sleep_seconds(2)

        """
        self.fleet_send_sleep_seconds = fleet_send_sleep_seconds

    def _set_merge_by_lineid(self, merge_size=2):
        """
        Set merge by line id; instances with the same line id will be merged
        after shuffle. You should parse the line id in your data generator.

        Args:
            merge_size(int): ins size to merge. default is 2.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset._set_merge_by_lineid()

        """
        self.dataset.set_merge_by_lineid(merge_size)
        self.merge_by_lineid = True
        self.parse_ins_id = True

    def _set_generate_unique_feasigns(self, generate_uni_feasigns, shard_num):
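        # enable local generation of unique feature signs, using shard_num local shards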
        self.dataset.set_generate_unique_feasigns(generate_uni_feasigns)
        self.gen_uni_feasigns = generate_uni_feasigns
        self.local_shard_num = shard_num

    def _generate_local_tables_unlock(self, table_id, fea_dim, read_thread_num,
                                      consume_thread_num, shard_num):
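        # forward the local-table generation parameters to the underlying C++ dataset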
        self.dataset.generate_local_tables_unlock(
            table_id, fea_dim, read_thread_num, consume_thread_num, shard_num)

    def load_into_memory(self):
        """
        Load data into memory

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
        """
        self._prepare_to_run()
        self.dataset.load_into_memory()

    def preload_into_memory(self, thread_num=None):
        """
        Load data into memory in async mode

        Args:
            thread_num(int): preload thread num

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.preload_into_memory()
              dataset.wait_preload_done()
        """
        self._prepare_to_run()
        if thread_num is None:
            thread_num = self.thread_num
        self.dataset.set_preload_thread_num(thread_num)
        self.dataset.create_preload_readers()
        self.dataset.preload_into_memory()

    def wait_preload_done(self):
        """
        Wait preload_into_memory done

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.preload_into_memory()
              dataset.wait_preload_done()
        """
        self.dataset.wait_preload_done()
        self.dataset.destroy_preload_readers()

    def local_shuffle(self):
        """
        Local shuffle

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.local_shuffle()
        """
        self.dataset.local_shuffle()

    def global_shuffle(self, fleet=None, thread_num=12):
        """
        Global shuffle.
        Global shuffle can be used only in distributed mode. i.e. multiple
        processes on single machine or multiple machines training together.
        If you run in distributed mode, you should pass fleet instead of None.

        Examples:
            .. code-block:: python

              import paddle
              from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
              dataset = paddle.distributed.InMemoryDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.global_shuffle(fleet)

        Args:
            fleet(Fleet): fleet singleton. Default None.
            thread_num(int): shuffle thread num. Default is 12.

        """
        trainer_num = 1
        if fleet is not None:
            # synchronize all workers before starting the shuffle
            fleet._role_maker.barrier_worker()
            trainer_num = fleet.worker_num()
        if self.fleet_send_batch_size is None:
            self.fleet_send_batch_size = 1024
        if self.fleet_send_sleep_seconds is None:
            self.fleet_send_sleep_seconds = 0
        self.dataset.register_client2client_msg_handler()
        self.dataset.set_trainer_num(trainer_num)
        self.dataset.set_fleet_send_batch_size(self.fleet_send_batch_size)
        self.dataset.set_fleet_send_sleep_seconds(self.fleet_send_sleep_seconds)
        if fleet is not None:
            fleet._role_maker.barrier_worker()
        # shuffle data globally across all trainers
        self.dataset.global_shuffle(thread_num)
        if fleet is not None:
            fleet._role_maker.barrier_worker()
        if self.merge_by_lineid:
            # merge instances sharing a line id after the shuffle
            self.dataset.merge_by_lineid()
        if fleet is not None:
            fleet._role_maker.barrier_worker()

    def release_memory(self):
        """
        :api_attr: Static Graph
        
        Release InMemoryDataset memory data when the data will not be used again.

        Examples:
            .. code-block:: python

              import paddle
              import paddle.fluid as fluid
              from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
              dataset = paddle.distributed.InMemoryDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.global_shuffle(fleet)
              exe = fluid.Executor(fluid.CPUPlace())
              exe.run(fluid.default_startup_program())
              exe.train_from_dataset(fluid.default_main_program(), dataset)
              dataset.release_memory()

        """
        self.dataset.release_memory()

    def get_memory_data_size(self, fleet=None):
        """
        Get memory data size. Users can call this function to know the number
        of ins in all workers after loading into memory.

        Note:
            This function may cause bad performance, because it has a barrier.

        Args:
            fleet(Fleet): Fleet Object.

        Returns:
            The size of memory data.

        Examples:
            .. code-block:: python

              import paddle
              from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
              dataset = paddle.distributed.InMemoryDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              print(dataset.get_memory_data_size(fleet))

        """
        import numpy as np
        local_data_size = self.dataset.get_memory_data_size()
        local_data_size = np.array([local_data_size])
        if fleet is not None:
            global_data_size = local_data_size * 0
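            # all-reduce the per-worker ins counts to get the global size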
            fleet._role_maker.all_reduce_worker(local_data_size,
                                                global_data_size)
            return global_data_size[0]
        return local_data_size[0]

    def get_shuffle_data_size(self, fleet=None):
        """
        Get shuffle data size. Users can call this function to know the number
        of ins in all workers after local/global shuffle.

        Note:
            This function may cause bad performance for local shuffle,
            because it has a barrier. It does not affect global shuffle.

        Args:
            fleet(Fleet): Fleet Object.

        Returns:
            The size of shuffle data.

        Examples:
            .. code-block:: python

              import paddle
              from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
              dataset = paddle.distributed.InMemoryDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.global_shuffle(fleet)
              print(dataset.get_shuffle_data_size(fleet))

        """
        import numpy as np
        local_data_size = self.dataset.get_shuffle_data_size()
        local_data_size = np.array([local_data_size])
        if fleet is not None:
            global_data_size = local_data_size * 0
            fleet._role_maker.all_reduce_worker(local_data_size,
                                                global_data_size)
            return global_data_size[0]
        return local_data_size[0]

    def _set_fea_eval(self, record_candidate_size, fea_eval=True):
        """
        Set fea eval mode for slots shuffle, to debug the importance level of
        slots (features). fea_eval needs to be set True to enable slots shuffle.
        
        Args:
            record_candidate_size(int): size of instances candidate to shuffle 
                                        one slot
            fea_eval(bool): whether enable fea eval mode to enable slots shuffle.
                            default is True.
            
        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset._set_fea_eval(1000000, True)

        """
        if fea_eval:
            self.dataset.set_fea_eval(fea_eval, record_candidate_size)
        self.fea_eval = fea_eval

    def slots_shuffle(self, slots):
        """
        Slots Shuffle
        Slots Shuffle is a shuffle method at the slot level, usually used for
        sparse features with a large number of instances. It compares a metric,
        e.g. AUC, computed with one or several slots shuffled against the
        baseline, to evaluate the importance level of slots (features).
        
        Args:
            slots(list[string]): the set of slots(string) to do slots shuffle.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.InMemoryDataset()
              dataset._set_merge_by_lineid()
              # suppose there is a slot 0
              dataset.slots_shuffle(['0'])
        """
        if self.fea_eval:
            slots_set = set(slots)
            self.dataset.slots_shuffle(slots_set)


class QueueDataset(DatasetBase):
    """
    QueueDataset, which processes data in a streaming fashion.

    Examples:
        .. code-block:: python

          import paddle
          dataset = paddle.distributed.QueueDataset()

    """

    def __init__(self):
        """
        Initialize QueueDataset
        """
        super(QueueDataset, self).__init__()
        self.proto_desc.name = "MultiSlotDataFeed"

    def init(self, **kwargs):
        """
        Should be called only once in a user's Python script to initialize the
        settings of the dataset instance.
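
        Examples:
            A minimal sketch; the keyword values are illustrative and are
            forwarded to DatasetBase.init:

            .. code-block:: python

              import paddle
              dataset = paddle.distributed.QueueDataset()
              dataset.init(batch_size=1, thread_num=2, pipe_command="cat")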
        """
        super(QueueDataset, self).init(**kwargs)

    def _prepare_to_run(self):
        """
        Set data_feed_desc/thread num/filelist before run;
        users do not need to call this function.
        """
        if self.thread_num > len(self.filelist):
            self.thread_num = len(self.filelist)
        if self.thread_num == 0:
            self.thread_num = 1
        self.dataset.set_thread_num(self.thread_num)
        self.dataset.set_filelist(self.filelist)
        self.dataset.set_data_feed_desc(self._desc())
        self.dataset.create_readers()


class FileInstantDataset(DatasetBase):
    """
    FileInstantDataset, which processes data in a streaming fashion.

    Examples:
        .. code-block:: python

          import paddle
          dataset = paddle.distributed.fleet.FileInstantDataset()
    """

    def __init__(self):
        """
        Initialize FileInstantDataset
        """
        super(FileInstantDataset, self).__init__()
        self.proto_desc.name = "MultiSlotFileInstantDataFeed"

    def init(self, **kwargs):
        """
        Should be called only once in a user's Python script to initialize the
        settings of the dataset instance.
        """
        super(FileInstantDataset, self).init(**kwargs)


class BoxPSDataset(InMemoryDataset):
    """
    BoxPSDataset: derived from InMemoryDataset.

    Examples:
        .. code-block:: python

          import paddle
          dataset = paddle.distributed.fleet.BoxPSDataset()
    """

    def __init__(self):
        """
        Initialize BoxPSDataset
        """
        super(BoxPSDataset, self).__init__()
        self.boxps = core.BoxPS(self.dataset)
        self.proto_desc.name = "PaddleBoxDataFeed"

    def init(self, **kwargs):
        """
        Should be called only once in a user's Python script to initialize the
        settings of the dataset instance.
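
        Examples:
            A minimal sketch; besides the base keys, the BoxPS-specific keys
            read by this method are shown with illustrative values:

            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset.init(
                  batch_size=1,
                  thread_num=2,
                  pipe_command="cat",
                  pv_batch_size=128,
                  parse_logkey=True,
                  merge_by_sid=True,
                  enable_pv_merge=True)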
        """
        super(BoxPSDataset, self).init(**kwargs)

        rank_offset = kwargs.get("rank_offset", "")
        self._set_rank_offset(rank_offset)
        pv_batch_size = kwargs.get("pv_batch_size", 1)
        self._set_pv_batch_size(pv_batch_size)
        parse_logkey = kwargs.get("parse_logkey", False)
        self._set_parse_logkey(parse_logkey)
        merge_by_sid = kwargs.get("merge_by_sid", False)
        self._set_merge_by_sid(merge_by_sid)
        enable_pv_merge = kwargs.get("enable_pv_merge", False)
        self._set_enable_pv_merge(enable_pv_merge)

    def _set_rank_offset(self, rank_offset):
        """
        Set rank_offset for merge_pv. It sets the message of Pv.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset._set_rank_offset("rank_offset")

        Args:
            rank_offset(str): rank_offset's name

        """
        self.proto_desc.rank_offset = rank_offset

    def _set_pv_batch_size(self, pv_batch_size):
        """
        Set pv batch size. It takes effect when enable_pv_merge is set.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset._set_pv_batch_size(128)

        Args:
            pv_batch_size(int): pv batch size

        """
        self.proto_desc.pv_batch_size = pv_batch_size

    def _set_parse_logkey(self, parse_logkey):
        """
        Set if Dataset need to parse logkey

        Args:
            parse_logkey(bool): if parse logkey or not

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset._set_parse_logkey(True)

        """
        self.parse_logkey = parse_logkey

    def _set_merge_by_sid(self, merge_by_sid):
        """
        Set if Dataset needs to merge sid. If not, one ins means one Pv.

        Args:
            merge_by_sid(bool): if merge sid or not

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset._set_merge_by_sid(True)

        """
        self.merge_by_sid = merge_by_sid

    def _set_enable_pv_merge(self, enable_pv_merge):
        """
        Set if Dataset needs to merge pv.

        Args:
            enable_pv_merge(bool): if enable_pv_merge or not

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset._set_enable_pv_merge(True)

        """
        self.enable_pv_merge = enable_pv_merge

    def set_date(self, date):
        """
        Set the date of the current pass for BoxPS. `date` is a string in
        YYYYMMDD format.
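
        Examples:
            A minimal sketch with an illustrative YYYYMMDD date string:

            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset.set_date("20200720")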
        """
        year = int(date[:4])
        month = int(date[4:6])
        day = int(date[6:])
        self.boxps.set_date(year, month, day)

    def begin_pass(self):
        """
        Begin Pass
        Notify BoxPS to load sparse parameters of next pass to GPU Memory 

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset.begin_pass()
        """
        self.boxps.begin_pass()

    def end_pass(self, need_save_delta):
        """
        End Pass
        Notify BoxPS that the current pass has ended.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset.end_pass(True)
        """
        self.boxps.end_pass(need_save_delta)

    def wait_preload_done(self):
        """
        Wait for async preload to finish,
        i.e. wait until the feed pass is done.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.preload_into_memory()
              dataset.wait_preload_done()
        """
        self.boxps.wait_feed_pass_done()

    def load_into_memory(self):
        """
        Load the next pass into memory and notify boxps to fetch its embeddings from SSD.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
        """
        self._prepare_to_run()
        self.boxps.load_into_memory()

    def preload_into_memory(self):
        """
        Begin async preload of the next pass while the current pass may be training.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.preload_into_memory()
        """
        self._prepare_to_run()
        self.boxps.preload_into_memory()

    def _dynamic_adjust_before_train(self, thread_num):
        if not self.is_user_set_queue_num:
            self.dataset.dynamic_adjust_channel_num(thread_num, True)
        self.dataset.dynamic_adjust_readers_num(thread_num)

    def _dynamic_adjust_after_train(self):
        pass

    def slots_shuffle(self, slots):
        """
        Slots Shuffle
        Slots Shuffle is a shuffle method at the slot level, usually used for
        sparse features with a large number of instances. It compares a metric,
        e.g. AUC, computed with one or several slots shuffled against the
        baseline, to evaluate the importance level of slots (features).
        
        Args:
            slots(list[string]): the set of slots(string) to do slots shuffle.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              dataset._set_merge_by_lineid()
              # suppose there is a slot 0
              dataset.slots_shuffle(['0'])
        """
        slots_set = set(slots)
        self.boxps.slots_shuffle(slots_set)

    def set_current_phase(self, current_phase):
        """
        Set current phase in training. It is useful for unit tests.
        current_phase: 1 for join, 0 for update.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.set_current_phase(1)

        """
        self.dataset.set_current_phase(current_phase)

    def get_pv_data_size(self):
        """
        Get memory data size of Pv. Users can call this function to know the
        number of Pv ins in all workers after loading into memory.

        Note:
            This function may cause bad performance, because it has a barrier.

        Returns:
            The size of memory pv data.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              print(dataset.get_pv_data_size())

        """
        return self.dataset.get_pv_data_size()

    def preprocess_instance(self):
        """
        Merge pv instance and convey it from input_channel to input_pv_channel. 
        It will be effective when enable_pv_merge_ is True.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.preprocess_instance()

        """
        self.dataset.preprocess_instance()

    def postprocess_instance(self):
        """
        Divide pv instance and convey it to input_channel.

        Examples:
            .. code-block:: python

              import paddle
              dataset = paddle.distributed.fleet.BoxPSDataset()
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.preprocess_instance()
              exe.train_from_dataset(dataset)
              dataset.postprocess_instance()

        """
        self.dataset.postprocess_instance()