# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# workspace
workspace: "paddlerec.models.rank.flen"

# list of dataset
dataset:
- name: dataloader_train # name of dataset to distinguish different datasets
  batch_size: 2
  type: QueueDataset
  data_path: "{workspace}/data/sample_data/train"
  sparse_slots: "click user_0 user_1 user_2 user_3 user_4 user_5 user_6 user_7 user_8 user_9 user_10 user_11 item_0 item_1 item_2 contex_0 contex_1 contex_2 contex_3 contex_4 contex_5"
  dense_slots: ""
- name: dataset_infer # name of dataset used by the infer phase
  batch_size: 2
  type: QueueDataset
  data_path: "{workspace}/data/sample_data/train"
  sparse_slots: "click user_0 user_1 user_2 user_3 user_4 user_5 user_6 user_7 user_8 user_9 user_10 user_11 item_0 item_1 item_2 contex_0 contex_1 contex_2 contex_3 contex_4 contex_5"
  dense_slots: ""

# hyper parameters of user-defined network
hyper_parameters:
  # optimizer config
  optimizer:
    class: Adam
    learning_rate: 0.001
    strategy: async
  # user-defined <key, value> pairs
  sparse_inputs_slots: 21
  sparse_feature_number: 100
  sparse_feature_dim: 8
  dense_input_dim: 1
  dropout_rate: 0.5

# select runner by name
mode: [single_cpu_train, single_cpu_infer]
# config of each runner.
# runner is a kind of paddle training class, which wraps the train/infer process.
runner:
- name: single_cpu_train
  class: train
  # num of epochs
  epochs: 1
  # device to run training or infer
  device: cpu
  save_checkpoint_interval: 1 # save checkpoint interval of epochs
  save_inference_interval: 4 # save inference model interval of epochs
  save_checkpoint_path: "increment_model" # save checkpoint path
  save_inference_path: "inference" # save inference path
  save_inference_feed_varnames: [] # feed vars of save inference
  save_inference_fetch_varnames: [] # fetch vars of save inference
  init_model_path: "" # load model path
  print_interval: 2
  phases: [phase1]
- name: single_gpu_train
  class: train
  # num of epochs
  epochs: 1
  # device to run training or infer
  device: gpu
  save_checkpoint_interval: 1 # save checkpoint interval of epochs
  save_inference_interval: 4 # save inference model interval of epochs
  save_checkpoint_path: "increment_model" # save checkpoint path
  save_inference_path: "inference" # save inference path
  save_inference_feed_varnames: [] # feed vars of save inference
  save_inference_fetch_varnames: [] # fetch vars of save inference
  init_model_path: "" # load model path
  print_interval: 2
  phases: [phase1]
- name: single_cpu_infer
  class: infer
  # device to run training or infer
  device: cpu
  init_model_path: "increment_model" # load model path
  phases: [phase2]
- name: single_gpu_infer
  class: infer
  # device to run training or infer
  device: gpu
  init_model_path: "increment_model" # load model path
  phases: [phase2]

# runner will run all the phases in each epoch
phase:
- name: phase1
  model: "{workspace}/model.py" # user-defined model
  dataset_name: dataloader_train # select dataset by name
  thread_num: 2
- name: phase2
  model: "{workspace}/model.py" # user-defined model
  dataset_name: dataset_infer # select dataset by name
  thread_num: 2