Commit eb04ccbf authored by Yancey1989

Merge branch 'develop' of github.com:PaddlePaddle/Paddle into prefetch_on_server

repos:
- repo: https://github.com/Lucas-C/pre-commit-hooks.git
sha: v1.0.1
hooks:
......@@ -25,6 +26,14 @@
entry: bash ./.clang_format.hook -i
language: system
files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$
- repo: local
hooks:
- id: cpplint-cpp-source
name: cpplint
description: Check C++ code style using cpplint.py.
entry: bash ./tools/codestyle/cpplint_pre_commit.hook
language: system
files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx)$
- repo: https://github.com/PaddlePaddle/pre-commit-golang
sha: 8337620115c25ff8333f1b1a493bd031049bd7c0
hooks:
......
......@@ -34,7 +34,7 @@ addons:
- automake
- libtool
- ccache
ssh_known_hosts: 52.76.173.135
ssh_known_hosts: 13.229.163.131
before_install:
- if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
# Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
......
add_custom_target(paddle_apis ALL
DEPENDS paddle_v2_apis paddle_fluid_apis)
add_custom_target(paddle_docs ALL
DEPENDS paddle_v2_docs paddle_v2_docs_cn
paddle_fluid_docs paddle_fluid_docs_cn)
add_subdirectory(v2)
add_subdirectory(fluid)
......@@ -27,6 +27,8 @@ sphinx_add_target(paddle_fluid_docs
${CMAKE_CURRENT_SOURCE_DIR}
${SPHINX_HTML_DIR_EN})
add_dependencies(paddle_fluid_docs gen_proto_py)
# configured documentation tools and intermediate build results
set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build")
......@@ -47,3 +49,7 @@ sphinx_add_target(paddle_fluid_docs_cn
${SPHINX_CACHE_DIR_CN}
${CMAKE_CURRENT_SOURCE_DIR}
${SPHINX_HTML_DIR_CN})
add_dependencies(paddle_fluid_docs_cn gen_proto_py)
add_subdirectory(api)
# configured documentation tools and intermediate build results
set(BINARY_BUILD_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_build")
# Sphinx cache with pickled ReST documents
set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees")
# HTML output directory
set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html")
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/../../templates/conf.py.en.in"
"${BINARY_BUILD_DIR_EN}/conf.py"
@ONLY)
sphinx_add_target(paddle_fluid_apis
html
${BINARY_BUILD_DIR_EN}
${SPHINX_CACHE_DIR_EN}
${CMAKE_CURRENT_SOURCE_DIR}
${SPHINX_HTML_DIR_EN})
add_dependencies(paddle_fluid_apis gen_proto_py framework_py_proto copy_paddle_pybind)
# API Doc Standard
- [API Doc Structure](#api-doc-structure)
- [Format and Examples](#format-and-examples)
- [Complete Example](#complete-example)
## API Doc Structure
API Doc should contain the following parts (please write them in order):
- Python API Definition
The definition of the API.
- Function Description
Description of the API's function.
The description covers the API's meaning, purpose, the operation performed on its input, a reference and corresponding link (if any), the formula (if necessary), and explanations of the key variables in the formula.
- Args Description
Description of the API's parameters.
Introduce the parameters one by one, following their order in the API definition.
Each introduction covers the data type, default value (if any), meaning, etc.
- Returns
Description of the API's return value.
Explain the meaning of the return value and provide the corresponding format if necessary.
If the return value is a tuple containing multiple elements, introduce them one by one in order.
- Raises (if any)
Exceptions or errors that may occur, and their possible causes. If more than one exception or error is possible, list them in order.
- Note (if any)
Matters needing attention. If there is more than one, list them in order.
- Examples
Examples of how to use the API.
## Format and Examples
API documentation must follow the reStructuredText format; please refer to [here](http://sphinx-doc-zh.readthedocs.io/en/latest/rest.html).
The format and examples for each part of the API documentation are as follows (taking fc as an example):
- Python API Definition
- Format
[Python API Definition]
- Example
```
fc(input,
size,
num_flatten_dims=1,
param_attr=None,
bias_attr=None,
act=None,
name=None,
main_program=None,
startup_program=None)
```
- Function Description
- Format
This part contains (please write them in order):
[Function Description]
[Formula]
[Symbols' Descriptions if necessary]
[References if necessary]
- Example
[Function Description]
```
**Fully Connected Layer**
The fully connected layer can take multiple tensors as its inputs. It
creates a variable called weights for each input tensor, which represents
a fully connected weight matrix from each input unit to each output unit.
The fully connected layer multiplies each input tensor with its corresponding
weight to produce an output Tensor. If multiple input tensors are given,
the results of the multiplications will be summed up. If bias_attr is
not None, a bias variable will be created and added to the output. Finally,
if activation is not None, it will be applied to the output as well.
```
[Formula]
```
This process can be formulated as follows:
.. math::
Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})
```
[Symbols' Descriptions if necessary]
```
In the above equation:
* :math:`N`: Number of the input.
* :math:`X_i`: The input tensor.
* :math:`W`: The weights created by this layer.
* :math:`b`: The bias parameter created by this layer (if needed).
* :math:`Act`: The activation function.
* :math:`Out`: The output tensor.
```
[References if necessary]
Since fc needs no reference, we omit it here. In other cases, please provide an explicit reference and link; take layer_norm as an example:
```
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_ for more details.
```
- Args Description
- Format
\[Arg's Name\][(Data Type, Default Value)][Description]
- Example
Some of fc's parameters are described as follows:
```
Args:
input (Variable|list of Variable): The input tensor(s) of this layer, and the dimension of
the input tensor(s) is at least 2.
param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable
parameters/weights of this layer.
name (str, default None): The name of this layer.
```
- Returns
- Format
[Name][Shape]
- Example
```
Returns:
A tensor variable storing the transformation result.
```
When the return value is a tuple containing multiple elements, please introduce each of them in order; take dynamic_lstm as an example:
```
Returns:
A tuple containing:
The hidden state of LSTM whose shape is (T X D).
The cell state of LSTM whose shape is (T X D).
```
- Raises
- Format
[Exception Type][Condition]
- Example
```
Raises:
ValueError: If the rank of the input is less than 2.
```
- Note
- Format
[Note]
- Example
fc has no Note, so we omit this part. If there is any note, please write it clearly. If there is more than one, list them in order. Take scaled\_dot\_product\_attention as an example:
```
Note:
1. When num_heads > 1, three linear projections are learned respectively
to map input queries, keys and values into queries', keys' and values'.
queries', keys' and values' have the same shapes with queries, keys
and values.
2. When num_heads == 1, scaled_dot_product_attention has no learnable
parameters.
```
- Examples
- Format
\[Python Code Snippet\]
- Example
```
Examples:
.. code-block:: python
data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
fc = fluid.layers.fc(input=data, size=1000, act="tanh")
```
## Complete Example
For the complete example of fc, please see [here](src/fc.py); a condensed sketch is shown below.
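For quick reference, the following is a condensed, illustrative sketch that assembles the fragments above into a single docstring. It is not a copy of src/fc.py; the wording and the parameter descriptions are abridged for illustration.
```
def fc(input,
       size,
       num_flatten_dims=1,
       param_attr=None,
       bias_attr=None,
       act=None,
       name=None,
       main_program=None,
       startup_program=None):
    r"""
    **Fully Connected Layer**

    The fully connected layer multiplies each input tensor with its
    corresponding weight to produce an output Tensor. If multiple input
    tensors are given, the results are summed up. If bias_attr is not None,
    a bias is added to the output; if act is not None, it is applied as well.

    This process can be formulated as follows:

    .. math::

        Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})

    Args:
        input (Variable|list of Variable): The input tensor(s) of this layer,
            and the dimension of the input tensor(s) is at least 2.
        size (int): The number of output units of this layer.
        param_attr (ParamAttr|list of ParamAttr, default None): The parameter
            attribute for learnable parameters/weights of this layer.
        name (str, default None): The name of this layer.

    Returns:
        A tensor variable storing the transformation result.

    Raises:
        ValueError: If the rank of the input is less than 2.

    Examples:
        .. code-block:: python

            data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
            fc = fluid.layers.fc(input=data, size=1000, act="tanh")
    """
```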
......@@ -20,13 +20,15 @@ configure_file(
"${BINARY_BUILD_DIR_EN}/conf.py"
@ONLY)
sphinx_add_target(paddle_docs
sphinx_add_target(paddle_v2_docs
html
${BINARY_BUILD_DIR_EN}
${SPHINX_CACHE_DIR_EN}
${CMAKE_CURRENT_SOURCE_DIR}
${SPHINX_HTML_DIR_EN})
add_dependencies(paddle_v2_docs gen_proto_py)
# configured documentation tools and intermediate build results
set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build")
......@@ -41,11 +43,13 @@ configure_file(
"${BINARY_BUILD_DIR_CN}/conf.py"
@ONLY)
sphinx_add_target(paddle_docs_cn
sphinx_add_target(paddle_v2_docs_cn
html
${BINARY_BUILD_DIR_CN}
${SPHINX_CACHE_DIR_CN}
${CMAKE_CURRENT_SOURCE_DIR}
${SPHINX_HTML_DIR_CN})
add_dependencies(paddle_v2_docs_cn gen_proto_py)
add_subdirectory(api)
......@@ -12,9 +12,11 @@ configure_file(
"${BINARY_BUILD_DIR_EN}/conf.py"
@ONLY)
sphinx_add_target(paddle_api_docs
sphinx_add_target(paddle_v2_apis
html
${BINARY_BUILD_DIR_EN}
${SPHINX_CACHE_DIR_EN}
${CMAKE_CURRENT_SOURCE_DIR}
${SPHINX_HTML_DIR_EN})
add_dependencies(paddle_v2_apis gen_proto_py framework_py_proto copy_paddle_pybind)
......@@ -2,10 +2,25 @@
Set Command-line Parameters
===========================
The implementation of deep learning algorithms varies in many respects, such as the running environment, running stage, model structure, and training strategy. PaddlePaddle lets users set a variety of command-line parameters flexibly, which helps control the model training or prediction process.
In this part, we first take several real scenarios as examples and show how some command-line parameters are used:
.. toctree::
:maxdepth: 1
use_case_en.md
Then, we summarize and classify the use of all command-line parameters:
.. toctree::
:maxdepth: 1
arguments_en.md
Finally, detailed descriptions are given, explaining the properties and significance of these command-line parameters:
.. toctree::
:maxdepth: 1
detail_introduction_en.md
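These parameters are normally passed on the command line; as a minimal illustrative sketch (assuming the v2 Python API, with example flags only), they can also be set programmatically when initializing PaddlePaddle:
```
import paddle.v2 as paddle

# Equivalent to passing --use_gpu=false --trainer_count=4 on the command line.
paddle.init(use_gpu=False, trainer_count=4)
```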
......@@ -104,7 +104,7 @@ cc_test(init_test SRCS init_test.cc DEPS init)
cc_test(op_kernel_type_test SRCS op_kernel_type_test.cc DEPS place device_context framework_proto)
cc_test(cow_ptr_tests SRCS details/cow_ptr_test.cc)
# cc_test(channel_test SRCS channel_test.cc)
cc_test(channel_test SRCS channel_test.cc)
cc_test(tuple_test SRCS tuple_test.cc )
cc_test(concurrency_test SRCS concurrency_test.cc DEPS go_op channel_close_op channel_create_op
channel_send_op channel_recv_op sum_op select_op elementwise_add_op compare_op
......
......@@ -138,8 +138,8 @@ void ChannelImpl<T>::Send(T *item) {
// If channel is closed, throw exception
if (closed_) {
lock.unlock();
send_return();
lock.unlock();
PADDLE_THROW("Cannot send on closed channel");
}
......@@ -152,11 +152,9 @@ void ChannelImpl<T>::Send(T *item) {
if (m != nullptr) {
*(m->data) = std::move(*item);
m->Notify();
lock.unlock();
send_return();
return;
} else {
lock.unlock();
Send(item);
send_return();
return;
......@@ -169,8 +167,6 @@ void ChannelImpl<T>::Send(T *item) {
if (buf_.size() < cap_) {
// Copy to buffer
buf_.push_back(std::move(*item));
// Release lock and return true
lock.unlock();
send_return();
return;
}
......@@ -181,8 +177,8 @@ void ChannelImpl<T>::Send(T *item) {
sendq.push_back(m);
m->Wait(lock);
if (m->chan_closed) {
lock.unlock();
send_return();
lock.unlock();
PADDLE_THROW("Cannot send on closed channel");
}
send_return();
......@@ -195,10 +191,7 @@ bool ChannelImpl<T>::Receive(T *item) {
// If channel is closed and buffer is empty or
// channel is unbuffered
if (closed_ && buf_.empty()) {
lock.unlock();
return recv_return(false);
}
if (closed_ && buf_.empty()) return recv_return(false);
// If there is a sender, directly receive the value we want
// from the sender. In case of a buffered channel, read from
......@@ -229,7 +222,6 @@ bool ChannelImpl<T>::Receive(T *item) {
} else
return recv_return(Receive(item));
}
lock.unlock();
return recv_return(true);
}
......@@ -238,8 +230,7 @@ bool ChannelImpl<T>::Receive(T *item) {
// Directly read from buffer
*item = std::move(buf_.front());
buf_.pop_front();
// Release lock and return true
lock.unlock();
// return true
return recv_return(true);
}
......
......@@ -2,8 +2,8 @@ if(WITH_DISTRIBUTE)
grpc_library(sendrecvop_grpc SRCS bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc
grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows)
set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
set_source_files_properties(test_serde.cc grpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
cc_test(serde_test SRCS test_serde.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr
set_source_files_properties(serde_test.cc grpc_server_test PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr
cares zlib protobuf sendrecvop_grpc)
cc_test(grpc_server_test SRCS grpc_server_test.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf)
endif()
......@@ -221,13 +221,13 @@ void AsyncGRPCServer::ShutdownQueue() {
std::unique_lock<std::mutex> lock(cq_mutex_);
cq_send_->Shutdown();
cq_get_->Shutdown();
is_shut_down_ = true;
}
// This URL explains why shutdown is complicate:
void AsyncGRPCServer::ShutDown() {
server_->Shutdown();
is_shut_down_ = true;
ShutdownQueue();
server_->Shutdown();
}
void AsyncGRPCServer::TryToRegisterNewSendOne() {
......@@ -272,14 +272,14 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq,
bool ok = false;
while (true) {
if (!cq->Next(&tag, &ok)) {
LOG(INFO) << cq_name << " get CompletionQueue shutdown!";
LOG(INFO) << cq_name << " CompletionQueue shutdown!";
break;
}
PADDLE_ENFORCE(tag);
// FIXME(typhoonzero): de-couple the barriers with recv_op
if (cq_name == "cq_get") WaitCond(1);
if (cq_name == "cq_send") WaitCond(0);
if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1);
if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0);
RequestBase* base = (RequestBase*)tag;
// reference:
......
......@@ -11,9 +11,10 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <random>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
......
......@@ -13,8 +13,10 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <unistd.h>
#include <string>
#include <thread>
#include <thread> // NOLINT
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
......@@ -30,9 +32,9 @@ namespace m = paddle::operators::math;
USE_OP(dropout);
void Compare(f::Scope& scope, p::DeviceContext& ctx) {
void Compare(f::Scope* scope, const p::DeviceContext& ctx) {
// init
auto var = scope.Var("X");
auto var = scope->Var("X");
auto tensor = var->GetMutable<f::LoDTensor>();
tensor->Resize({10, 10});
......@@ -44,12 +46,12 @@ void Compare(f::Scope& scope, p::DeviceContext& ctx) {
TensorFromVector(init, ctx, tensor);
auto place = ctx.GetPlace();
auto out_var = scope.Var("Out");
auto out_var = scope->Var("Out");
auto out_tensor = out_var->GetMutable<f::LoDTensor>();
out_tensor->Resize({10, 10});
out_tensor->mutable_data<float>(place); // allocate
auto mask_var = scope.Var("Mask");
auto mask_var = scope->Var("Mask");
auto mask_tensor = mask_var->GetMutable<f::LoDTensor>();
mask_tensor->Resize({10, 10});
mask_tensor->mutable_data<float>(place); // allocate
......@@ -63,7 +65,7 @@ void Compare(f::Scope& scope, p::DeviceContext& ctx) {
auto dropout_op = f::OpRegistry::CreateOp(
"dropout", {{"X", {"X"}}}, {{"Out", {"Out"}}, {"Mask", {"Mask"}}}, attrs);
dropout_op->Run(scope, place);
dropout_op->Run(*scope, place);
std::vector<float> out_vec;
TensorToVector(*out_tensor, ctx, &out_vec);
......@@ -81,6 +83,11 @@ void Compare(f::Scope& scope, p::DeviceContext& ctx) {
}
}
// TODO(wyi): Due to
// https://github.com/PaddlePaddle/Paddle/issues/9507, I temporarily
// disable this test to remove the prevention of the merge of
// unrelated PRs.
/*
TEST(Dropout, CPUDense) {
f::Scope scope;
p::CPUPlace place;
......@@ -94,3 +101,4 @@ TEST(Dropout, GPUDense) {
p::CUDADeviceContext ctx(place);
Compare(scope, ctx);
}
*/
......@@ -88,7 +88,6 @@ class ListenAndServOp : public framework::OperatorBase {
void Stop() override {
rpc_service_->Push(LISTEN_TERMINATE_MESSAGE);
rpc_service_->ShutDown();
server_thread_->join();
}
......
......@@ -214,7 +214,10 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker {
"Defaults to \"NHWC\". Specify the data format of the output data, "
"the input will be transformed automatically. ")
.SetDefault("AnyLayout");
AddAttr<bool>("is_test", "").SetDefault(false);
AddAttr<bool>("is_test",
"Turns on memory optimization that optimizes away "
"unnecessary memory allocations. Used by MKLDNN.")
.SetDefault(false);
AddComment(R"DOC(
Local Response Normalization Operator.
......
......@@ -121,6 +121,10 @@ class LRNGradKernel : public framework::OpKernel<T> {
T alpha = ctx.Attr<T>("alpha");
T beta = ctx.Attr<T>("beta");
PADDLE_ENFORCE(
!ctx.Attr<bool>("is_test"),
"is_test attribute should be set to False in training phase.");
LRNGradFunctor<DeviceContext, T> f;
f(ctx, x, out, mid, x_g, out_g, N, C, H, W, n, alpha, beta);
}
......
......@@ -122,7 +122,8 @@ void StartServerNet(bool is_sparse) {
// sub program run in listen_and_serv_op, for simple test we use sum
f::ProgramDesc program;
f::BlockDesc *optimize_block = program.MutableBlock(0);
const auto &root_block = program.Block(0);
auto *optimize_block = program.AppendBlock(root_block);
// X for server side tensors, RX for received tensers, must be of same shape.
AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block);
......
......@@ -125,9 +125,8 @@ EOF
-DWITH_AVX=${WITH_AVX:-ON} \
-DWITH_SWIG_PY=ON \
-DWITH_STYLE_CHECK=OFF
make -j `nproc` gen_proto_py framework_py_proto
make -j `nproc` copy_paddle_pybind
make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs
make -j `nproc` paddle_docs paddle_apis
popd
fi
......
......@@ -7,9 +7,8 @@ cd $TRAVIS_BUILD_DIR/build
# Compile Documentation only.
cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON -DWITH_STYLE_CHECK=OFF
make -j `nproc` gen_proto_py framework_py_proto
make -j `nproc` copy_paddle_pybind
make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs
make -j `nproc` paddle_docs paddle_apis
# check websites for broken links
linkchecker doc/v2/en/html/index.html
......
......@@ -73,12 +73,13 @@ add_custom_target(paddle_python ALL DEPENDS ${paddle_python_deps})
set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/)
if (WITH_TESTING)
add_subdirectory(paddle/reader/tests)
add_subdirectory(paddle/dataset/tests)
if(NOT WITH_FLUID_ONLY)
add_subdirectory(paddle/trainer_config_helpers/tests)
if (WITH_SWIG_PY)
# enable v2 API unittest only when paddle swig api is compiled
add_subdirectory(paddle/v2/tests)
add_subdirectory(paddle/v2/reader/tests)
add_subdirectory(paddle/v2/plot/tests)
endif()
endif()
......
......@@ -14,8 +14,14 @@
try:
from version import full_version as __version__
from version import commit as __git_commit__
except ImportError:
import sys
sys.stderr.write('''Warning with import paddle: you should not
import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
)
import reader
import dataset
import batch
batch = batch.batch
......@@ -28,6 +28,7 @@ import wmt16
import mq2007
import flowers
import voc2012
import image
__all__ = [
'mnist',
......@@ -43,4 +44,5 @@ __all__ = [
'mq2007',
'flowers',
'voc2012',
'image',
]
......@@ -31,7 +31,7 @@ images per class.
import cPickle
import itertools
import numpy
import paddle.v2.dataset.common
import paddle.dataset.common
import tarfile
__all__ = ['train100', 'test100', 'train10', 'test10', 'convert']
......@@ -75,7 +75,7 @@ def train100():
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5),
paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5),
'train')
......@@ -90,7 +90,7 @@ def test100():
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5),
paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5),
'test')
......@@ -105,7 +105,7 @@ def train10():
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5),
paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5),
'data_batch')
......@@ -120,20 +120,20 @@ def test10():
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5),
paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5),
'test_batch')
def fetch():
paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5)
paddle.v2.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5)
paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5)
paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.v2.dataset.common.convert(path, train100(), 1000, "cifar_train100")
paddle.v2.dataset.common.convert(path, test100(), 1000, "cifar_test100")
paddle.v2.dataset.common.convert(path, train10(), 1000, "cifar_train10")
paddle.v2.dataset.common.convert(path, test10(), 1000, "cifar_test10")
paddle.dataset.common.convert(path, train100(), 1000, "cifar_train100")
paddle.dataset.common.convert(path, test100(), 1000, "cifar_test100")
paddle.dataset.common.convert(path, train10(), 1000, "cifar_train10")
paddle.dataset.common.convert(path, test10(), 1000, "cifar_test10")
......@@ -19,7 +19,7 @@ import errno
import shutil
import sys
import importlib
import paddle.v2.dataset
import paddle.dataset
import cPickle
import glob
import cPickle as pickle
......@@ -105,24 +105,24 @@ def download(url, module_name, md5sum, save_name=None):
def fetch_all():
for module_name in filter(lambda x: not x.startswith("__"),
dir(paddle.v2.dataset)):
dir(paddle.dataset)):
if "fetch" in dir(
importlib.import_module("paddle.v2.dataset.%s" % module_name)):
importlib.import_module("paddle.dataset.%s" % module_name)):
getattr(
importlib.import_module("paddle.v2.dataset.%s" % module_name),
importlib.import_module("paddle.dataset.%s" % module_name),
"fetch")()
def fetch_all_recordio(path):
for module_name in filter(lambda x: not x.startswith("__"),
dir(paddle.v2.dataset)):
dir(paddle.dataset)):
if "convert" in dir(
importlib.import_module("paddle.v2.dataset.%s" % module_name)) and \
importlib.import_module("paddle.dataset.%s" % module_name)) and \
not module_name == "common":
ds_path = os.path.join(path, module_name)
must_mkdirs(ds_path)
getattr(
importlib.import_module("paddle.v2.dataset.%s" % module_name),
importlib.import_module("paddle.dataset.%s" % module_name),
"convert")(ds_path)
......@@ -130,7 +130,7 @@ def split(reader, line_count, suffix="%05d.pickle", dumper=cPickle.dump):
"""
you can call the function as:
split(paddle.v2.dataset.cifar.train10(), line_count=1000,
split(paddle.dataset.cifar.train10(), line_count=1000,
suffix="imikolov-train-%05d.pickle")
the output files as:
......
......@@ -23,7 +23,7 @@ to initialize SRL model.
import tarfile
import gzip
import itertools
import paddle.v2.dataset.common
import paddle.dataset.common
__all__ = ['test, get_dict', 'get_embedding', 'convert']
......@@ -203,14 +203,11 @@ def get_dict():
Get the word, verb and label dictionary of Wikipedia corpus.
"""
word_dict = load_dict(
paddle.v2.dataset.common.download(WORDDICT_URL, 'conll05st',
WORDDICT_MD5))
paddle.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5))
verb_dict = load_dict(
paddle.v2.dataset.common.download(VERBDICT_URL, 'conll05st',
VERBDICT_MD5))
paddle.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5))
label_dict = load_label_dict(
paddle.v2.dataset.common.download(TRGDICT_URL, 'conll05st',
TRGDICT_MD5))
paddle.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5))
return word_dict, verb_dict, label_dict
......@@ -218,7 +215,7 @@ def get_embedding():
"""
Get the trained word vector based on Wikipedia corpus.
"""
return paddle.v2.dataset.common.download(EMB_URL, 'conll05st', EMB_MD5)
return paddle.dataset.common.download(EMB_URL, 'conll05st', EMB_MD5)
def test():
......@@ -235,23 +232,23 @@ def test():
"""
word_dict, verb_dict, label_dict = get_dict()
reader = corpus_reader(
paddle.v2.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5),
paddle.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5),
words_name='conll05st-release/test.wsj/words/test.wsj.words.gz',
props_name='conll05st-release/test.wsj/props/test.wsj.props.gz')
return reader_creator(reader, word_dict, verb_dict, label_dict)
def fetch():
paddle.v2.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5)
paddle.v2.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5)
paddle.v2.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5)
paddle.v2.dataset.common.download(EMB_URL, 'conll05st', EMB_MD5)
paddle.v2.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5)
paddle.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5)
paddle.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5)
paddle.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5)
paddle.dataset.common.download(EMB_URL, 'conll05st', EMB_MD5)
paddle.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.v2.dataset.common.convert(path, test(), 1000, "conl105_train")
paddle.v2.dataset.common.convert(path, test(), 1000, "conl105_test")
paddle.dataset.common.convert(path, test(), 1000, "conl105_train")
paddle.dataset.common.convert(path, test(), 1000, "conl105_test")
......@@ -34,8 +34,8 @@ import functools
from common import download
import tarfile
import scipy.io as scio
from paddle.v2.image import *
from paddle.v2.reader import *
from paddle.dataset.image import *
from paddle.reader import *
import os
import numpy as np
from multiprocessing import cpu_count
......
......@@ -20,7 +20,7 @@ of 25,000 highly polar movie reviews for training, and 25,000 for testing.
Besides, this module also provides API for building dictionary.
"""
import paddle.v2.dataset.common
import paddle.dataset.common
import collections
import tarfile
import re
......@@ -37,8 +37,7 @@ def tokenize(pattern):
Read files that match the given pattern. Tokenize and yield each file.
"""
with tarfile.open(paddle.v2.dataset.common.download(URL, 'imdb',
MD5)) as tarf:
with tarfile.open(paddle.dataset.common.download(URL, 'imdb', MD5)) as tarf:
# Note that we should use tarfile.next(), which does
# sequential access of member files, other than
# tarfile.extractfile, which does random access and might
......@@ -136,7 +135,7 @@ def word_dict():
def fetch():
paddle.v2.dataset.common.download(URL, 'imdb', MD5)
paddle.dataset.common.download(URL, 'imdb', MD5)
def convert(path):
......@@ -144,5 +143,5 @@ def convert(path):
Converts dataset to recordio format
"""
w = word_dict()
paddle.v2.dataset.common.convert(path, lambda: train(w), 1000, "imdb_train")
paddle.v2.dataset.common.convert(path, lambda: test(w), 1000, "imdb_test")
paddle.dataset.common.convert(path, lambda: train(w), 1000, "imdb_train")
paddle.dataset.common.convert(path, lambda: test(w), 1000, "imdb_test")
......@@ -18,7 +18,7 @@ This module will download dataset from
http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse training set and test set
into paddle reader creators.
"""
import paddle.v2.dataset.common
import paddle.dataset.common
import collections
import tarfile
......@@ -54,9 +54,9 @@ def build_dict(min_word_freq=50):
train_filename = './simple-examples/data/ptb.train.txt'
test_filename = './simple-examples/data/ptb.valid.txt'
with tarfile.open(
paddle.v2.dataset.common.download(
paddle.v2.dataset.imikolov.URL, 'imikolov',
paddle.v2.dataset.imikolov.MD5)) as tf:
paddle.dataset.common.download(paddle.dataset.imikolov.URL,
'imikolov',
paddle.dataset.imikolov.MD5)) as tf:
trainf = tf.extractfile(train_filename)
testf = tf.extractfile(test_filename)
word_freq = word_count(testf, word_count(trainf))
......@@ -77,9 +77,9 @@ def build_dict(min_word_freq=50):
def reader_creator(filename, word_idx, n, data_type):
def reader():
with tarfile.open(
paddle.v2.dataset.common.download(
paddle.v2.dataset.imikolov.URL, 'imikolov',
paddle.v2.dataset.imikolov.MD5)) as tf:
paddle.dataset.common.download(
paddle.dataset.imikolov.URL, 'imikolov',
paddle.dataset.imikolov.MD5)) as tf:
f = tf.extractfile(filename)
UNK = word_idx['<unk>']
......@@ -145,7 +145,7 @@ def test(word_idx, n, data_type=DataType.NGRAM):
def fetch():
paddle.v2.dataset.common.download(URL, "imikolov", MD5)
paddle.dataset.common.download(URL, "imikolov", MD5)
def convert(path):
......@@ -154,8 +154,7 @@ def convert(path):
"""
N = 5
word_dict = build_dict()
paddle.v2.dataset.common.convert(path,
train(word_dict, N), 1000,
"imikolov_train")
paddle.v2.dataset.common.convert(path,
test(word_dict, N), 1000, "imikolov_test")
paddle.dataset.common.convert(path,
train(word_dict, N), 1000, "imikolov_train")
paddle.dataset.common.convert(path,
test(word_dict, N), 1000, "imikolov_test")
......@@ -17,7 +17,7 @@ MNIST dataset.
This module will download dataset from http://yann.lecun.com/exdb/mnist/ and
parse training set and test set into paddle reader creators.
"""
import paddle.v2.dataset.common
import paddle.dataset.common
import subprocess
import numpy
import platform
......@@ -85,10 +85,10 @@ def train():
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(TRAIN_IMAGE_URL, 'mnist',
TRAIN_IMAGE_MD5),
paddle.v2.dataset.common.download(TRAIN_LABEL_URL, 'mnist',
TRAIN_LABEL_MD5), 100)
paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist',
TRAIN_IMAGE_MD5),
paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist',
TRAIN_LABEL_MD5), 100)
def test():
......@@ -102,22 +102,21 @@ def test():
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(TEST_IMAGE_URL, 'mnist',
TEST_IMAGE_MD5),
paddle.v2.dataset.common.download(TEST_LABEL_URL, 'mnist',
TEST_LABEL_MD5), 100)
paddle.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5),
paddle.dataset.common.download(TEST_LABEL_URL, 'mnist', TEST_LABEL_MD5),
100)
def fetch():
paddle.v2.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', TRAIN_IMAGE_MD5)
paddle.v2.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5)
paddle.v2.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5)
paddle.v2.dataset.common.download(TEST_LABEL_URL, 'mnist', TRAIN_LABEL_MD5)
paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', TRAIN_IMAGE_MD5)
paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5)
paddle.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5)
paddle.dataset.common.download(TEST_LABEL_URL, 'mnist', TRAIN_LABEL_MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.v2.dataset.common.convert(path, train(), 1000, "minist_train")
paddle.v2.dataset.common.convert(path, test(), 1000, "minist_test")
paddle.dataset.common.convert(path, train(), 1000, "minist_train")
paddle.dataset.common.convert(path, test(), 1000, "minist_test")
......@@ -23,7 +23,7 @@ set and test set into paddle reader creators.
"""
import zipfile
import paddle.v2.dataset.common
import paddle.dataset.common
import re
import random
import functools
......@@ -100,7 +100,7 @@ USER_INFO = None
def __initialize_meta_info__():
fn = paddle.v2.dataset.common.download(URL, "movielens", MD5)
fn = paddle.dataset.common.download(URL, "movielens", MD5)
global MOVIE_INFO
if MOVIE_INFO is None:
pattern = re.compile(r'^(.*)\((\d+)\)$')
......@@ -247,15 +247,15 @@ def unittest():
def fetch():
paddle.v2.dataset.common.download(URL, "movielens", MD5)
paddle.dataset.common.download(URL, "movielens", MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.v2.dataset.common.convert(path, train(), 1000, "movielens_train")
paddle.v2.dataset.common.convert(path, test(), 1000, "movielens_test")
paddle.dataset.common.convert(path, train(), 1000, "movielens_train")
paddle.dataset.common.convert(path, test(), 1000, "movielens_test")
if __name__ == '__main__':
......
......@@ -26,7 +26,7 @@ from itertools import chain
import nltk
from nltk.corpus import movie_reviews
import paddle.v2.dataset.common
import paddle.dataset.common
__all__ = ['train', 'test', 'get_word_dict', 'convert']
NUM_TRAINING_INSTANCES = 1600
......@@ -39,13 +39,13 @@ def download_data_if_not_yet():
"""
try:
# make sure that nltk can find the data
if paddle.v2.dataset.common.DATA_HOME not in nltk.data.path:
nltk.data.path.append(paddle.v2.dataset.common.DATA_HOME)
if paddle.dataset.common.DATA_HOME not in nltk.data.path:
nltk.data.path.append(paddle.dataset.common.DATA_HOME)
movie_reviews.categories()
except LookupError:
print "Downloading movie_reviews data set, please wait....."
nltk.download(
'movie_reviews', download_dir=paddle.v2.dataset.common.DATA_HOME)
'movie_reviews', download_dir=paddle.dataset.common.DATA_HOME)
print "Download data set success....."
print "Path is " + nltk.data.find('corpora/movie_reviews').path
......@@ -129,13 +129,12 @@ def test():
def fetch():
nltk.download(
'movie_reviews', download_dir=paddle.v2.dataset.common.DATA_HOME)
nltk.download('movie_reviews', download_dir=paddle.dataset.common.DATA_HOME)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.v2.dataset.common.convert(path, train, 1000, "sentiment_train")
paddle.v2.dataset.common.convert(path, test, 1000, "sentiment_test")
paddle.dataset.common.convert(path, train, 1000, "sentiment_train")
paddle.dataset.common.convert(path, test, 1000, "sentiment_test")
py_test(test_image SRCS test_image.py)
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.cifar
import paddle.dataset.cifar
import unittest
......@@ -29,25 +29,25 @@ class TestCIFAR(unittest.TestCase):
def test_test10(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.test10())
paddle.dataset.cifar.test10())
self.assertEqual(instances, 10000)
self.assertEqual(max_label_value, 9)
def test_train10(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.train10())
paddle.dataset.cifar.train10())
self.assertEqual(instances, 50000)
self.assertEqual(max_label_value, 9)
def test_test100(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.test100())
paddle.dataset.cifar.test100())
self.assertEqual(instances, 10000)
self.assertEqual(max_label_value, 99)
def test_train100(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.train100())
paddle.dataset.cifar.train100())
self.assertEqual(instances, 50000)
self.assertEqual(max_label_value, 99)
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.common
import paddle.dataset.common
import unittest
import tempfile
import glob
......@@ -24,14 +24,14 @@ class TestCommon(unittest.TestCase):
with open(temp_path, 'w') as f:
f.write("Hello\n")
self.assertEqual('09f7e02f1290be211da707a266f153b3',
paddle.v2.dataset.common.md5file(temp_path))
paddle.dataset.common.md5file(temp_path))
def test_download(self):
yi_avatar = 'https://avatars0.githubusercontent.com/u/1548775?v=3&s=460'
self.assertEqual(
paddle.v2.dataset.common.DATA_HOME + '/test/1548775?v=3&s=460',
paddle.v2.dataset.common.download(
yi_avatar, 'test', 'f75287202d6622414c706c36c16f8e0d'))
paddle.dataset.common.DATA_HOME + '/test/1548775?v=3&s=460',
paddle.dataset.common.download(yi_avatar, 'test',
'f75287202d6622414c706c36c16f8e0d'))
def test_split(self):
def test_reader():
......@@ -42,7 +42,7 @@ class TestCommon(unittest.TestCase):
return reader
_, temp_path = tempfile.mkstemp()
paddle.v2.dataset.common.split(
paddle.dataset.common.split(
test_reader(), 4, suffix=temp_path + '/test-%05d.pickle')
files = glob.glob(temp_path + '/test-%05d.pickle')
self.assertEqual(len(files), 3)
......@@ -52,7 +52,7 @@ class TestCommon(unittest.TestCase):
for x in xrange(5):
with open(temp_path + '/%05d.test' % x) as f:
f.write('%d\n' % x)
reader = paddle.v2.dataset.common.cluster_files_reader(
reader = paddle.dataset.common.cluster_files_reader(
temp_path + '/*.test', 5, 0)
for idx, e in enumerate(reader()):
self.assertEqual(e, str("0"))
......@@ -69,9 +69,9 @@ class TestCommon(unittest.TestCase):
return reader
path = tempfile.mkdtemp()
paddle.v2.dataset.common.convert(path,
test_reader(), num_shards,
'random_images')
paddle.dataset.common.convert(path,
test_reader(), num_shards,
'random_images')
files = glob.glob(path + '/random_images-*')
self.assertEqual(len(files), num_shards)
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.flowers
import paddle.dataset.flowers
import unittest
......@@ -30,19 +30,19 @@ class TestFlowers(unittest.TestCase):
def test_train(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.flowers.train())
paddle.dataset.flowers.train())
self.assertEqual(instances, 6149)
self.assertEqual(max_label_value, 102)
def test_test(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.flowers.test())
paddle.dataset.flowers.test())
self.assertEqual(instances, 1020)
self.assertEqual(max_label_value, 102)
def test_valid(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.flowers.valid())
paddle.dataset.flowers.valid())
self.assertEqual(instances, 1020)
self.assertEqual(max_label_value, 102)
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.imdb
import paddle.dataset.imdb
import unittest
import re
......@@ -30,15 +30,13 @@ class TestIMDB(unittest.TestCase):
def test_build_dict(self):
if self.word_idx == None:
self.word_idx = paddle.v2.dataset.imdb.build_dict(TRAIN_PATTERN,
150)
self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150)
self.assertEqual(len(self.word_idx), 7036)
def check_dataset(self, dataset, expected_size):
if self.word_idx == None:
self.word_idx = paddle.v2.dataset.imdb.build_dict(TRAIN_PATTERN,
150)
self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150)
sum = 0
for l in dataset(self.word_idx):
......@@ -47,10 +45,10 @@ class TestIMDB(unittest.TestCase):
self.assertEqual(sum, expected_size)
def test_train(self):
self.check_dataset(paddle.v2.dataset.imdb.train, 25000)
self.check_dataset(paddle.dataset.imdb.train, 25000)
def test_test(self):
self.check_dataset(paddle.v2.dataset.imdb.test, 25000)
self.check_dataset(paddle.dataset.imdb.test, 25000)
if __name__ == '__main__':
......
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.imikolov
import paddle.dataset.imikolov
import unittest
WORD_DICT = paddle.v2.dataset.imikolov.build_dict()
WORD_DICT = paddle.dataset.imikolov.build_dict()
class TestMikolov(unittest.TestCase):
......@@ -25,7 +25,7 @@ class TestMikolov(unittest.TestCase):
def test_train(self):
n = 5
self.check_reader(paddle.v2.dataset.imikolov.train(WORD_DICT, n), n)
self.check_reader(paddle.dataset.imikolov.train(WORD_DICT, n), n)
first_line = 'aer banknote berlitz calloway centrust cluett fromstein '\
'gitano guterman hydro-quebec ipo kia memotec mlx nahb punts '\
......@@ -34,16 +34,16 @@ class TestMikolov(unittest.TestCase):
WORD_DICT.get(ch, WORD_DICT['<unk>'])
for ch in first_line.split(' ')
]
for l in paddle.v2.dataset.imikolov.train(
for l in paddle.dataset.imikolov.train(
WORD_DICT, n=-1,
data_type=paddle.v2.dataset.imikolov.DataType.SEQ)():
data_type=paddle.dataset.imikolov.DataType.SEQ)():
read_line = l[0][1:]
break
self.assertEqual(first_line, read_line)
def test_test(self):
n = 5
self.check_reader(paddle.v2.dataset.imikolov.test(WORD_DICT, n), n)
self.check_reader(paddle.dataset.imikolov.test(WORD_DICT, n), n)
first_line = 'consumers may want to move their telephones a little '\
'closer to the tv set'
......@@ -51,9 +51,9 @@ class TestMikolov(unittest.TestCase):
WORD_DICT.get(ch, WORD_DICT['<unk>'])
for ch in first_line.split(' ')
]
for l in paddle.v2.dataset.imikolov.test(
for l in paddle.dataset.imikolov.test(
WORD_DICT, n=-1,
data_type=paddle.v2.dataset.imikolov.DataType.SEQ)():
data_type=paddle.dataset.imikolov.DataType.SEQ)():
read_line = l[0][1:]
break
self.assertEqual(first_line, read_line)
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.mnist
import paddle.dataset.mnist
import unittest
......@@ -29,13 +29,13 @@ class TestMNIST(unittest.TestCase):
def test_train(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.mnist.train())
paddle.dataset.mnist.train())
self.assertEqual(instances, 60000)
self.assertEqual(max_label_value, 9)
def test_test(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.mnist.test())
paddle.dataset.mnist.test())
self.assertEqual(instances, 10000)
self.assertEqual(max_label_value, 9)
......
......@@ -12,19 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.mq2007
import paddle.dataset.mq2007
import unittest
class TestMQ2007(unittest.TestCase):
def test_pairwise(self):
for label, query_left, query_right in paddle.v2.dataset.mq2007.test(
for label, query_left, query_right in paddle.dataset.mq2007.test(
format="pairwise"):
self.assertEqual(query_left.shape(), (46, ))
self.assertEqual(query_right.shape(), (46, ))
def test_listwise(self):
for label_array, query_array in paddle.v2.dataset.mq2007.test(
for label_array, query_array in paddle.dataset.mq2007.test(
format="listwise"):
self.assertEqual(len(label_array), len(query_array))
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
import paddle.v2.image as image
import paddle.dataset.image as image
class Image(unittest.TestCase):
......
......@@ -17,7 +17,7 @@
import unittest
import nltk
import paddle.v2.dataset.sentiment as st
import paddle.dataset.sentiment as st
from nltk.corpus import movie_reviews
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.voc2012
import paddle.dataset.voc2012
import unittest
......@@ -26,15 +26,15 @@ class TestVOC(unittest.TestCase):
return sum
def test_train(self):
count = self.check_reader(paddle.v2.dataset.voc_seg.train())
count = self.check_reader(paddle.dataset.voc_seg.train())
self.assertEqual(count, 2913)
def test_test(self):
count = self.check_reader(paddle.v2.dataset.voc_seg.test())
count = self.check_reader(paddle.dataset.voc_seg.test())
self.assertEqual(count, 1464)
def test_val(self):
count = self.check_reader(paddle.v2.dataset.voc_seg.val())
count = self.check_reader(paddle.dataset.voc_seg.val())
self.assertEqual(count, 1449)
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.wmt16
import paddle.dataset.wmt16
import unittest
......@@ -34,28 +34,28 @@ class TestWMT16(unittest.TestCase):
def test_train(self):
for idx, sample in enumerate(
paddle.v2.dataset.wmt16.train(
paddle.dataset.wmt16.train(
src_dict_size=100000, trg_dict_size=100000)()):
if idx >= 10: break
self.checkout_one_sample(sample)
def test_test(self):
for idx, sample in enumerate(
paddle.v2.dataset.wmt16.test(
paddle.dataset.wmt16.test(
src_dict_size=1000, trg_dict_size=1000)()):
if idx >= 10: break
self.checkout_one_sample(sample)
def test_val(self):
for idx, sample in enumerate(
paddle.v2.dataset.wmt16.validation(
paddle.dataset.wmt16.validation(
src_dict_size=1000, trg_dict_size=1000)()):
if idx >= 10: break
self.checkout_one_sample(sample)
def test_get_dict(self):
dict_size = 1000
word_dict = paddle.v2.dataset.wmt16.get_dict("en", dict_size, True)
word_dict = paddle.dataset.wmt16.get_dict("en", dict_size, True)
self.assertEqual(len(word_dict), dict_size)
self.assertEqual(word_dict[0], "<s>")
self.assertEqual(word_dict[1], "<e>")
......
......@@ -21,8 +21,7 @@ parse training set and test set into paddle reader creators.
import numpy as np
import os
import paddle.v2.dataset.common
from paddle.v2.parameters import Parameters
import paddle.dataset.common
__all__ = ['train', 'test']
......@@ -85,7 +84,7 @@ def train():
:rtype: callable
"""
global UCI_TRAIN_DATA
load_data(paddle.v2.dataset.common.download(URL, 'uci_housing', MD5))
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TRAIN_DATA:
......@@ -105,7 +104,7 @@ def test():
:rtype: callable
"""
global UCI_TEST_DATA
load_data(paddle.v2.dataset.common.download(URL, 'uci_housing', MD5))
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TEST_DATA:
......@@ -114,21 +113,13 @@ def test():
return reader
def model():
tar_file = paddle.v2.dataset.common.download(URL_MODEL, 'fit_a_line.tar',
MD5_MODEL)
with open(tar_file, 'r') as f:
parameters = Parameters.from_tar(f)
return parameters
def fetch():
paddle.v2.dataset.common.download(URL, 'uci_housing', MD5)
paddle.dataset.common.download(URL, 'uci_housing', MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.v2.dataset.common.convert(path, train(), 1000, "uci_housing_train")
paddle.v2.dataset.common.convert(path, test(), 1000, "uci_houseing_test")
paddle.dataset.common.convert(path, train(), 1000, "uci_housing_train")
paddle.dataset.common.convert(path, test(), 1000, "uci_houseing_test")
......@@ -22,8 +22,8 @@ with segmentation has been increased from 7,062 to 9,993.
import tarfile
import io
import numpy as np
from paddle.v2.dataset.common import download
from paddle.v2.image import *
from paddle.dataset.common import download
from paddle.dataset.image import *
from PIL import Image
__all__ = ['train', 'test', 'val']
......
......@@ -22,8 +22,7 @@ parse training set and test set into paddle reader creators.
import tarfile
import gzip
import paddle.v2.dataset.common
from paddle.v2.parameters import Parameters
import paddle.dataset.common
__all__ = [
'train',
......@@ -123,7 +122,7 @@ def train(dict_size):
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN),
paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN),
'train/train', dict_size)
......@@ -139,27 +138,20 @@ def test(dict_size):
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN),
paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN),
'test/test', dict_size)
def gen(dict_size):
return reader_creator(
paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN),
paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN),
'gen/gen', dict_size)
def model():
tar_file = paddle.v2.dataset.common.download(URL_MODEL, 'wmt14', MD5_MODEL)
with gzip.open(tar_file, 'r') as f:
parameters = Parameters.from_tar(f)
return parameters
def get_dict(dict_size, reverse=True):
# if reverse = False, return dict = {'a':'001', 'b':'002', ...}
# else reverse = true, return dict = {'001':'a', '002':'b', ...}
tar_file = paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN)
tar_file = paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN)
src_dict, trg_dict = __read_to_dict(tar_file, dict_size)
if reverse:
src_dict = {v: k for k, v in src_dict.items()}
......@@ -168,8 +160,8 @@ def get_dict(dict_size, reverse=True):
def fetch():
paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN)
paddle.v2.dataset.common.download(URL_MODEL, 'wmt14', MD5_MODEL)
paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN)
paddle.dataset.common.download(URL_MODEL, 'wmt14', MD5_MODEL)
def convert(path):
......@@ -177,6 +169,5 @@ def convert(path):
Converts dataset to recordio format
"""
dict_size = 30000
paddle.v2.dataset.common.convert(path,
train(dict_size), 1000, "wmt14_train")
paddle.v2.dataset.common.convert(path, test(dict_size), 1000, "wmt14_test")
paddle.dataset.common.convert(path, train(dict_size), 1000, "wmt14_train")
paddle.dataset.common.convert(path, test(dict_size), 1000, "wmt14_test")
......@@ -33,7 +33,7 @@ import tarfile
import gzip
from collections import defaultdict
import paddle.v2.dataset.common
import paddle.dataset.common
__all__ = [
"train",
......@@ -76,7 +76,7 @@ def __build_dict(tar_file, dict_size, save_path, lang):
def __load_dict(tar_file, dict_size, lang, reverse=False):
dict_path = os.path.join(paddle.v2.dataset.common.DATA_HOME,
dict_path = os.path.join(paddle.dataset.common.DATA_HOME,
"wmt16/%s_%d.dict" % (lang, dict_size))
if not os.path.exists(dict_path) or (
len(open(dict_path, "r").readlines()) != dict_size):
......@@ -178,8 +178,8 @@ def train(src_dict_size, trg_dict_size, src_lang="en"):
src_lang)
return reader_creator(
tar_file=paddle.v2.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
file_name="wmt16/train",
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
......@@ -227,8 +227,8 @@ def test(src_dict_size, trg_dict_size, src_lang="en"):
src_lang)
return reader_creator(
tar_file=paddle.v2.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
file_name="wmt16/test",
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
......@@ -274,8 +274,8 @@ def validation(src_dict_size, trg_dict_size, src_lang="en"):
src_lang)
return reader_creator(
tar_file=paddle.v2.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
file_name="wmt16/val",
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
......@@ -303,12 +303,12 @@ def get_dict(lang, dict_size, reverse=False):
if lang == "en": dict_size = min(dict_size, TOTAL_EN_WORDS)
else: dict_size = min(dict_size, TOTAL_DE_WORDS)
dict_path = os.path.join(paddle.v2.dataset.common.DATA_HOME,
dict_path = os.path.join(paddle.dataset.common.DATA_HOME,
"wmt16/%s_%d.dict" % (lang, dict_size))
assert os.path.exists(dict_path), "Word dictionary does not exist. "
"Please invoke paddle.dataset.wmt16.train/test/validation first "
"to build the dictionary."
tar_file = os.path.join(paddle.v2.dataset.common.DATA_HOME, "wmt16.tar.gz")
tar_file = os.path.join(paddle.dataset.common.DATA_HOME, "wmt16.tar.gz")
return __load_dict(tar_file, dict_size, lang, reverse)
......@@ -323,7 +323,7 @@ def convert(path, src_dict_size, trg_dict_size, src_lang):
"""Converts dataset to recordio format.
"""
paddle.v2.dataset.common.convert(
paddle.dataset.common.convert(
path,
train(
src_dict_size=src_dict_size,
......@@ -331,7 +331,7 @@ def convert(path, src_dict_size, trg_dict_size, src_lang):
src_lang=src_lang),
1000,
"wmt16_train")
paddle.v2.dataset.common.convert(
paddle.dataset.common.convert(
path,
test(
src_dict_size=src_dict_size,
......@@ -339,7 +339,7 @@ def convert(path, src_dict_size, trg_dict_size, src_lang):
src_lang=src_lang),
1000,
"wmt16_test")
paddle.v2.dataset.common.convert(
paddle.dataset.common.convert(
path,
validation(
src_dict_size=src_dict_size,
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import numpy as np
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import contextlib
import numpy
......
......@@ -14,7 +14,7 @@
from __future__ import print_function
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import contextlib
import math
......
......@@ -15,8 +15,8 @@
import math
import numpy as np
import paddle.v2 as paddle
import paddle.v2.dataset.conll05 as conll05
import paddle
import paddle.dataset.conll05 as conll05
import paddle.fluid as fluid
from paddle.fluid.initializer import init_on_cpu
import contextlib
......
......@@ -14,7 +14,7 @@
import contextlib
import numpy as np
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as pd
......
......@@ -14,7 +14,7 @@
from __future__ import print_function
import argparse
import paddle.fluid as fluid
import paddle.v2 as paddle
import paddle
import sys
import numpy
import unittest
......
......@@ -16,7 +16,7 @@ import math
import sys
import os
import numpy as np
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
......
......@@ -15,7 +15,7 @@ from __future__ import print_function
import unittest
import paddle.fluid as fluid
import paddle.v2 as paddle
import paddle
import contextlib
import math
import numpy as np
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import unittest
import os
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import numpy as np
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import math
import sys
......
......@@ -16,7 +16,7 @@ from __future__ import print_function
import sys
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import math
import sys
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import numpy as np
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
......
......@@ -19,7 +19,7 @@ import os
import matplotlib
import numpy
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
matplotlib.use('Agg')
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import numpy as np
import sys
......
......@@ -14,7 +14,7 @@
from __future__ import print_function
import numpy as np
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
BATCH_SIZE = 128
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import numpy as np
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
BATCH_SIZE = 128
......
......@@ -12,12 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard, default_main_program, default_startup_program
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import MomentumOptimizer
import paddle.fluid.core as core
import paddle.v2 as paddle
import unittest
import numpy as np
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import paddle.fluid as fluid
import paddle.v2 as paddle
import paddle
import unittest
import numpy
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import unittest
import paddle.v2 as paddle
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.backward import append_backward
......
......@@ -15,8 +15,8 @@
import unittest
import paddle.fluid as fluid
import paddle.v2 as paddle
import paddle.v2.dataset.mnist as mnist
import paddle
import paddle.dataset.mnist as mnist
class TestMultipleReader(unittest.TestCase):
......
......@@ -15,8 +15,8 @@
import unittest
import paddle.fluid as fluid
import paddle.v2 as paddle
import paddle.v2.dataset.mnist as mnist
import paddle
import paddle.dataset.mnist as mnist
from shutil import copyfile
......
......@@ -16,9 +16,9 @@ import numpy
import unittest
import paddle.fluid as fluid
import paddle.v2 as paddle
import paddle.v2.dataset.mnist as mnist
import paddle.v2.dataset.wmt16 as wmt16
import paddle
import paddle.dataset.mnist as mnist
import paddle.dataset.wmt16 as wmt16
def simple_fc_net():
......
......@@ -15,8 +15,8 @@
import unittest
import paddle.fluid as fluid
import paddle.v2 as paddle
import paddle.v2.dataset.mnist as mnist
import paddle
import paddle.dataset.mnist as mnist
class TestRecordIO(unittest.TestCase):
......
......@@ -16,7 +16,7 @@ Creator package contains some simple reader creator, which could
be used in user program.
"""
__all__ = ['np_array', 'text_file', 'recordio', 'cloud_reader']
__all__ = ['np_array', 'text_file', 'recordio']
def np_array(x):
......@@ -66,7 +66,7 @@ def recordio(paths, buf_size=100):
"""
import recordio as rec
import paddle.v2.reader.decorator as dec
import paddle.reader.decorator as dec
import cPickle as pickle
def reader():
......@@ -83,48 +83,3 @@ def recordio(paths, buf_size=100):
f.close()
return dec.buffered(reader, buf_size)
pass_num = 0
def cloud_reader(paths, etcd_endpoints, timeout_sec=5, buf_size=64):
"""
Create a data reader that yield a record one by one from
the paths:
:paths: path of recordio files, can be a string or a string list.
:etcd_endpoints: the endpoints for etcd cluster
:returns: data reader of recordio files.
.. code-block:: python
from paddle.v2.reader.creator import cloud_reader
etcd_endpoints = "http://127.0.0.1:2379"
trainer.train.(
reader=cloud_reader(["/work/dataset/uci_housing/uci_housing*"], etcd_endpoints),
)
"""
import os
import cPickle as pickle
import paddle.v2.master as master
c = master.client(etcd_endpoints, timeout_sec, buf_size)
if isinstance(paths, basestring):
path = [paths]
else:
path = paths
c.set_dataset(path)
def reader():
global pass_num
c.paddle_start_get_records(pass_num)
pass_num += 1
while True:
r, e = c.next_record()
if not r:
if e != -2:
print "get record error: ", e
break
yield pickle.loads(r)
return reader
......@@ -28,14 +28,14 @@
import os
import unittest
import numpy as np
import paddle.v2.reader.creator
import paddle.reader.creator
class TestNumpyArray(unittest.TestCase):
def test_numpy_array(self):
l = [[1, 2, 3], [4, 5, 6]]
x = np.array(l, np.int32)
reader = paddle.v2.reader.creator.np_array(x)
reader = paddle.reader.creator.np_array(x)
for idx, e in enumerate(reader()):
self.assertItemsEqual(e, l[idx])
......@@ -43,14 +43,14 @@ class TestNumpyArray(unittest.TestCase):
class TestTextFile(unittest.TestCase):
def test_text_file(self):
path = os.path.join(os.path.dirname(__file__), "test_data_creator.txt")
reader = paddle.v2.reader.creator.text_file(path)
reader = paddle.reader.creator.text_file(path)
for idx, e in enumerate(reader()):
self.assertEqual(e, str(idx * 2) + " " + str(idx * 2 + 1))
class TestRecordIO(unittest.TestCase):
def do_test(self, path):
reader = paddle.v2.reader.creator.recordio(path)
reader = paddle.reader.creator.recordio(path)
idx = 0
for e in reader():
if idx == 0:
......
......@@ -15,7 +15,7 @@
import time
import unittest
import paddle.v2.reader
import paddle.reader
def reader_creator_10(dur):
......@@ -39,7 +39,7 @@ class TestMap(unittest.TestCase):
yield "h"
yield "i"
r = paddle.v2.reader.map_readers(tokenize, read)
r = paddle.reader.map_readers(tokenize, read)
for i, e in enumerate(r()):
self.assertEqual(e, i)
......@@ -47,7 +47,7 @@ class TestMap(unittest.TestCase):
class TestBuffered(unittest.TestCase):
def test_read(self):
for size in range(20):
b = paddle.v2.reader.buffered(reader_creator_10(0), size)
b = paddle.reader.buffered(reader_creator_10(0), size)
c = 0
for i in b():
self.assertEqual(i, c)
......@@ -56,7 +56,7 @@ class TestBuffered(unittest.TestCase):
def test_buffering(self):
# read have 30ms delay.
b = paddle.v2.reader.buffered(reader_creator_10(0.03), 10)
b = paddle.reader.buffered(reader_creator_10(0.03), 10)
last_time = time.time()
for idx, i in enumerate(b()):
elapsed_time = time.time() - last_time
......@@ -70,17 +70,17 @@ class TestBuffered(unittest.TestCase):
class TestCompose(unittest.TestCase):
def test_compse(self):
reader = paddle.v2.reader.compose(
reader = paddle.reader.compose(
reader_creator_10(0), reader_creator_10(0))
for idx, e in enumerate(reader()):
self.assertEqual(e, (idx, idx))
def test_compose_not_aligned(self):
total = 0
reader = paddle.v2.reader.compose(
paddle.v2.reader.chain(reader_creator_10(0), reader_creator_10(0)),
reader = paddle.reader.compose(
paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)),
reader_creator_10(0))
with self.assertRaises(paddle.v2.reader.ComposeNotAligned):
with self.assertRaises(paddle.reader.ComposeNotAligned):
for e in reader():
total += 1
# expecting 10, not 20
......@@ -88,8 +88,8 @@ class TestCompose(unittest.TestCase):
def test_compose_not_aligned_no_check(self):
total = 0
reader = paddle.v2.reader.compose(
paddle.v2.reader.chain(reader_creator_10(0), reader_creator_10(0)),
reader = paddle.reader.compose(
paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)),
reader_creator_10(0),
check_alignment=False)
for e in reader():
......@@ -100,7 +100,7 @@ class TestCompose(unittest.TestCase):
class TestChain(unittest.TestCase):
def test_chain(self):
c = paddle.v2.reader.chain(reader_creator_10(0), reader_creator_10(0))
c = paddle.reader.chain(reader_creator_10(0), reader_creator_10(0))
idx = 0
for e in c():
self.assertEqual(e, idx % 10)
......@@ -113,7 +113,7 @@ class TestShuffle(unittest.TestCase):
case = [(0, True), (1, True), (10, False), (100, False)]
a = reader_creator_10(0)
for size, checkEq in case:
s = paddle.v2.reader.shuffle(a, size)
s = paddle.reader.shuffle(a, size)
total = 0
for idx, e in enumerate(s()):
if checkEq:
......@@ -133,9 +133,9 @@ class TestXmap(unittest.TestCase):
for order in orders:
for tNum in thread_nums:
for size in buffered_size:
reader = paddle.v2.reader.xmap_readers(mapper,
reader_creator_10(0),
tNum, size, order)
reader = paddle.reader.xmap_readers(mapper,
reader_creator_10(0),
tNum, size, order)
for n in xrange(3):
result = []
for i in reader():
......@@ -150,7 +150,7 @@ class TestPipeReader(unittest.TestCase):
def test_pipe_reader(self):
def example_reader(myfiles):
for f in myfiles:
pr = paddle.v2.reader.PipeReader("cat %s" % f, bufsize=128)
pr = paddle.reader.PipeReader("cat %s" % f, bufsize=128)
for l in pr.get_line():
yield l
......