diff --git a/demo/model_zoo/resnet/classify.py b/demo/model_zoo/resnet/classify.py
index e818995fa31a921956c04af67a86a9b4aa6fc2c6..fbc30d30e62247468b645c321b87e89b480663b5 100755
--- a/demo/model_zoo/resnet/classify.py
+++ b/demo/model_zoo/resnet/classify.py
@@ -33,6 +33,7 @@ logging.getLogger().setLevel(logging.INFO)
 
 class ImageClassifier():
     def __init__(self, train_conf, model_dir=None,
                  resize_dim=256, crop_dim=224,
+                 use_gpu=True,
                  mean_file=None, output_layer=None,
                  oversample=False, is_color=True):
@@ -76,9 +77,9 @@ class ImageClassifier():
         # this three mean value is calculated from ImageNet.
         self.transformer.set_mean(np.array([103.939,116.779,123.68]))
 
-        conf_args = "is_test=1,use_gpu=1,is_predict=1"
+        conf_args = "is_test=1,use_gpu=%d,is_predict=1" % (int(use_gpu))
         conf = parse_config(train_conf, conf_args)
-        swig_paddle.initPaddle("--use_gpu=1")
+        swig_paddle.initPaddle("--use_gpu=%d" % (int(use_gpu)))
         self.network = swig_paddle.GradientMachine.createFromConfigProto(conf.model_config)
         assert isinstance(self.network, swig_paddle.GradientMachine)
         self.network.loadParameters(self.model_dir)
@@ -236,6 +237,9 @@ def option_parser():
     parser.add_option("-w", "--model", action="store",
                       dest="model_path", default=None,
                       help="model path")
+    parser.add_option("-g", "--use_gpu", action="store",
+                      dest="use_gpu", default=True,
+                      help="Whether to use gpu mode.")
     parser.add_option("-o", "--output_dir", action="store",
                       dest="output_dir", default="output",
                       help="output path")
@@ -259,10 +263,11 @@ def main():
     """
 
     options, args = option_parser()
     obj = ImageClassifier(options.train_conf,
-                          options.model_path,
-                          mean_file=options.mean,
-                          output_layer=options.output_layer,
-                          oversample=options.multi_crop)
+                          options.model_path,
+                          use_gpu=options.use_gpu,
+                          mean_file=options.mean,
+                          output_layer=options.output_layer,
+                          oversample=options.multi_crop)
     if options.job_type == "predict":
         obj.predict(options.data_file)
diff --git a/demo/model_zoo/resnet/extract_fea_py.sh b/demo/model_zoo/resnet/extract_fea_py.sh
index b0ec748bb8f0f885ad880327445240ca86b7be2e..a70cef9a87e9337a4dacd4a98fb1e2cf53004221 100755
--- a/demo/model_zoo/resnet/extract_fea_py.sh
+++ b/demo/model_zoo/resnet/extract_fea_py.sh
@@ -14,11 +14,16 @@
 # limitations under the License.
 set -e
 
+# Note: if you use CPU mode, you need to set use_gpu=0 in classify.py, like this:
+# conf_args = "is_test=1,use_gpu=0,is_predict=1"
+# conf = parse_config(train_conf, conf_args)
+# swig_paddle.initPaddle("--use_gpu=0")
 python classify.py \
     --job=extract \
     --conf=resnet.py \
+    --use_gpu=1 \
     --mean=model/mean_meta_224/mean.meta \
     --model=model/resnet_50 \
     --data=./example/test.list \
     --output_layer="res5_3_branch2c_conv,res5_3_branch2c_bn" \
-    --output_dir=features
+    --output_dir=features
diff --git a/demo/model_zoo/resnet/net_diagram.sh b/demo/model_zoo/resnet/net_diagram.sh
index ec72432f0ad0260060bf0d6e9606bb0ec56166c4..a21ab4345bfb31e5586cb07625b44e34ec1f7ec6 100755
--- a/demo/model_zoo/resnet/net_diagram.sh
+++ b/demo/model_zoo/resnet/net_diagram.sh
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-set -e
 :'
 Visual deep residual network
 
@@ -23,6 +22,8 @@
 Usage:
 ./net_diagram.sh
 '
 
+set -e
+
 DIR="$( cd "$(dirname "$0")" ; pwd -P )"
 cd $DIR
diff --git a/demo/model_zoo/resnet/predict.sh b/demo/model_zoo/resnet/predict.sh
index 0375cd2e08c85ddb380b6040b589ea7e662bb8a2..55cf16e34a759c3538ef4216fcb7e724ffd83b9f 100755
--- a/demo/model_zoo/resnet/predict.sh
+++ b/demo/model_zoo/resnet/predict.sh
@@ -19,4 +19,5 @@ python classify.py \
     --conf=resnet.py\
     --model=model/resnet_50 \
     --multi_crop \
+    --use_gpu=1 \
     --data=./example/test.list
diff --git a/demo/sentiment/predict.sh b/demo/sentiment/predict.sh
index c3bfc1c8b61921811fe949e260335c347ccc52e7..053f23e491ab8d8af082e0b1ff1093be714a8268 100755
--- a/demo/sentiment/predict.sh
+++ b/demo/sentiment/predict.sh
@@ -14,8 +14,10 @@
 # limitations under the License.
 set -e
 
-config=trainer_config.py
+# Note: the default model is pass-00002; you should make sure the model path
+# exists or change the model path.
 model=model_output/pass-00002/
+config=trainer_config.py
 label=data/pre-imdb/labels.list
 python predict.py \
     -n $config\
diff --git a/doc/demo/image_classification/image_classification.md b/doc/demo/image_classification/image_classification.md
index 6da20da8a8559c2a86a63e0e7287d165af8f8d40..b12a4e8f92d49c4260908c3a926b6889231c897c 100644
--- a/doc/demo/image_classification/image_classification.md
+++ b/doc/demo/image_classification/image_classification.md
@@ -11,7 +11,7 @@ First, download CIFAR-10 dataset. CIFAR-10 dataset can be downloaded from its of
 
 We have prepared a script to download and process CIFAR-10 dataset.
 The script will download CIFAR-10 dataset from the official dataset.
-It will convert it to jpeg images and organize them into a directory with the required structure for the tutorial. Make sure that you have installed the python dependency (PIL).
+It will convert it to jpeg images and organize them into a directory with the required structure for the tutorial. Make sure that you have installed the python dependency (PIL). If not, you can install it with `pip install PIL`, provided that the `pip` package is installed.
 
 ```bash
 cd demo/image_classification/data/
diff --git a/doc/demo/imagenet_model/resnet_model.md b/doc/demo/imagenet_model/resnet_model.md
index 21c3a4cee53e16fb42ac0efdf4c7b9aa7593f335..76dddd1ec066e0d5dde77edd81f91bd56942c12a 100644
--- a/doc/demo/imagenet_model/resnet_model.md
+++ b/doc/demo/imagenet_model/resnet_model.md
@@ -223,6 +223,7 @@ extract_fea_py.sh:
 python classify.py \
     --job=extract \
     --conf=resnet.py\
+    --use_gpu=1 \
     --mean=model/mean_meta_224/mean.meta \
     --model=model/resnet_50 \
     --data=./example/test.list \
@@ -230,12 +231,15 @@ python classify.py \
     --output_dir=features
 ```
 
-* --job=extract: specify job mode to extract feature.
-* --conf=resnet.py: network configure.
-* --model=model/resnet_5: model path.
-* --data=./example/test.list: data list.
-* --output_layer="xxx,xxx": specify layers to extract features.
-* --output_dir=features: output diretcoty.
+* \--job=extract: specify job mode to extract feature.
+* \--conf=resnet.py: network config.
+* \--use_gpu=1: specify GPU mode.
+* \--model=model/resnet_50: model path.
+* \--data=./example/test.list: data list.
+* \--output_layer="xxx,xxx": specify layers to extract features.
+* \--output_dir=features: output directory.
+
+Note: the convolution layers in these ResNet models use the cuDNN implementation, which only supports GPU. CPU mode is not supported because of this compatibility issue; we will fix it later.
 
 If run successfully, you will see features saved in `features/batch_0`, this file is produced with cPickle.
 You can use `load_feature_py` interface in `load_feature.py` to open the file, and it returns a dictionary as follows:
@@ -265,13 +269,15 @@ python classify.py \
     --conf=resnet.py\
     --multi_crop \
     --model=model/resnet_50 \
+    --use_gpu=1 \
     --data=./example/test.list
 ```
 
-* --job=extract: speficy job mode to predict.
-* --conf=resnet.py: network configure.
-* --multi_crop: use 10 crops and average predicting probability.
-* --model=model/resnet_50: model path.
-* --data=./example/test.list: data list.
+* \--job=extract: specify job mode to predict.
+* \--conf=resnet.py: network config.
+* \--multi_crop: use 10 crops and average predicting probability.
+* \--use_gpu=1: specify GPU mode.
+* \--model=model/resnet_50: model path.
+* \--data=./example/test.list: data list.
 
 If run successfully, you will see following results, where 156 and 285 are labels of the images.
diff --git a/doc/demo/sentiment_analysis/sentiment_analysis.md b/doc/demo/sentiment_analysis/sentiment_analysis.md
index 957e85869820a8e8c2dc70f6d95d7602f305b40c..385f49891dcd840c525f7d1c3aaf7f08a7e4903f 100644
--- a/doc/demo/sentiment_analysis/sentiment_analysis.md
+++ b/doc/demo/sentiment_analysis/sentiment_analysis.md
@@ -204,15 +204,15 @@ paddle train --config=$config \
 2>&1 | tee 'train.log'
 ```
 
-* --config=$config: set network config.
-* --save\_dir=$output: set output path to save models.
-* --job=train: set job mode to train.
-* --use\_gpu=false: use CPU to train, set true, if you install GPU version of PaddlePaddle and want to use GPU to train.
-* --trainer\_count=4: set thread number (or GPU count).
-* --num\_passes=15: set pass number, one pass in PaddlePaddle means training all samples in dataset one time.
-* --log\_period=20: print log every 20 batches.
-* --show\_parameter\_stats\_period=100: show parameter statistic every 100 batches.
-* --test\_all_data\_in\_one\_period=1: test all data every testing.
+* \--config=$config: set network config.
+* \--save\_dir=$output: set output path to save models.
+* \--job=train: set job mode to train.
+* \--use\_gpu=false: use CPU to train; set to true if you installed the GPU version of PaddlePaddle and want to use GPU to train.
+* \--trainer\_count=4: set thread number (or GPU count).
+* \--num\_passes=15: set pass number, one pass in PaddlePaddle means training all samples in dataset one time.
+* \--log\_period=20: print log every 20 batches.
+* \--show\_parameter\_stats\_period=100: show parameter statistics every 100 batches.
+* \--test\_all_data\_in\_one\_period=1: test all data every testing.
 
 If the run succeeds, the output log is saved in path of `demo/sentiment/train.log`
 and model is saved in path of `demo/sentiment/model_output/`. The output log is explained as follows.
@@ -286,8 +286,10 @@ cd demo/sentiment
 predict.sh:
 
 ```
-config=trainer_config.py
+#Note: the default model is pass-00002; you should make sure the model path
+#exists or change the model path.
 model=model_output/pass-00002/
+config=trainer_config.py
 label=data/pre-imdb/labels.list
 python predict.py \
     -n $config\
@@ -304,6 +306,9 @@ python predict.py \
 * -d data/pre-imdb/dict.txt: set dictionary.
 * -i data/aclImdb/test/pos/10014_7.txt: set one example file to predict.
 
+Note: you should make sure the default model path `model_output/pass-00002`
+exists or change the model path.
+
 Predicting result of this example:
 
 ```
diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp
index 289260b42648e9df55766fabebd6e84fd03f5c84..7e1840076833feded5a4bdec4066f0ab1a6411ec 100644
--- a/paddle/math/Matrix.cpp
+++ b/paddle/math/Matrix.cpp
@@ -1157,7 +1157,8 @@ void CpuMatrix::copyFrom(const Matrix& src) {
     CHECK(elementCnt_ == src.getElementCnt());
     hl_memcpy_device2host(data_, const_cast<real*>(src.getData()),
                           sizeof(real) * elementCnt_);
-  } else if (typeid(src) == typeid(CpuMatrix)) {
+  } else if (typeid(src) == typeid(CpuMatrix) ||
+             typeid(src) == typeid(SharedCpuMatrix)) {
     CHECK(src.isContiguous());
     CHECK(elementCnt_ == src.getElementCnt());
    memcpy(data_, src.getData(), sizeof(real) * elementCnt_);
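
For context on the classify.py change above: optparse delivers --use_gpu as a string ("0" or "1") when it is given on the command line, while the default value stays the Python bool True. The int(use_gpu) conversion in the patch handles both of those cases, but it would raise ValueError on inputs such as "false". Below is a minimal illustrative sketch of a more tolerant conversion; the parse_bool_flag helper is hypothetical and is not part of this patch.

# Hypothetical helper, for illustration only (not part of the patch above).
def parse_bool_flag(value):
    """Normalize an optparse flag value (bool or string) to 0 or 1."""
    if isinstance(value, bool):
        return int(value)
    return int(str(value).strip().lower() in ("1", "true", "yes"))

# Usage mirroring the patched lines in classify.py:
use_gpu = parse_bool_flag("0")  # e.g. the value received from --use_gpu=0
conf_args = "is_test=1,use_gpu=%d,is_predict=1" % use_gpu
print(conf_args)  # prints: is_test=1,use_gpu=0,is_predict=1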