Commit b1a57c4c authored by Alexander Alekhin

fix 3.4 links

Parent 54c18009
@@ -116,7 +116,7 @@ swapRB = false;
needSoftmax = false;
// url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/classification_classes_ILSVRC2012.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
......
@@ -6,7 +6,7 @@
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel",
"configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_alexnet/deploy.prototxt"
},
@@ -16,7 +16,7 @@
"std": "0.007843",
"swapRB": "false",
"needSoftmax": "true",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "https://drive.google.com/open?id=0B7ubpZO7HnlCcHlfNmJkU2VPelE",
"configUrl": "https://raw.githubusercontent.com/shicai/DenseNet-Caffe/master/DenseNet_121.prototxt"
},
@@ -26,7 +26,7 @@
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel",
"configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_googlenet/deploy.prototxt"
},
@@ -36,7 +36,7 @@
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/squeezenet_v1.0.caffemodel",
"configUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/deploy.prototxt"
},
@@ -46,7 +46,7 @@
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel",
"configUrl": "https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/f02f8769e64494bcd3d7e97d5d747ac275825721/VGG_ILSVRC_19_layers_deploy.prototxt"
}
......
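Note on the fields above: `std` maps onto `blobFromImage`'s `scalefactor`, the (elided) `mean` is subtracted first, and `needSoftmax` tells the sample to normalize raw scores itself. A minimal Python sketch under those assumptions; values and file names are placeholders, not taken from the entries:

```python
import cv2
import numpy as np

# Placeholder preprocessing values in the spirit of one entry above; the
# real mean values are elided in this diff, so these are assumptions.
mean, std, swap_rb, need_softmax = (104, 117, 123), 1.0, False, True

net = cv2.dnn.readNetFromCaffe("deploy.prototxt", "model.caffemodel")
img = cv2.imread("input.jpg")

# "std" maps to blobFromImage's scalefactor; mean subtraction happens first.
blob = cv2.dnn.blobFromImage(img, scalefactor=std, size=(224, 224),
                             mean=mean, swapRB=swap_rb, crop=False)
net.setInput(blob)
scores = net.forward().flatten()

if need_softmax:  # e.g. the DenseNet-121 entry ships without a softmax layer
    scores = np.exp(scores - scores.max())
    scores /= scores.sum()
print("top-1 class id:", int(np.argmax(scores)))
```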
@@ -116,7 +116,7 @@ swapRB = false;
needSoftmax = false;
// url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/classification_classes_ILSVRC2012.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
......
@@ -94,7 +94,7 @@ nmsThreshold = 0.4;
outType = "SSD";
// url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/object_detection_classes_pascal_voc.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
......
@@ -7,7 +7,7 @@
"std": "0.007843",
"swapRB": "false",
"outType": "SSD",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/object_detection_classes_pascal_voc.txt",
"modelUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/mobilenet_iter_73000.caffemodel",
"configUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/deploy.prototxt"
},
@@ -18,7 +18,7 @@
"std": "1",
"swapRB": "false",
"outType": "SSD",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/object_detection_classes_pascal_voc.txt",
"modelUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download",
"configUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download"
}
@@ -31,7 +31,7 @@
"std": "0.00392",
"swapRB": "false",
"outType": "YOLO",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_yolov3.txt",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/object_detection_classes_yolov3.txt",
"modelUrl": "https://pjreddie.com/media/files/yolov2-tiny.weights",
"configUrl": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-tiny.cfg"
}
......
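For the SSD-style `outType` used above, the network returns a `[1, 1, N, 7]` tensor. A hedged Python sketch of the thresholding these samples perform, using the MobileNet-SSD preprocessing values from the first entry (file names are placeholders):

```python
import cv2

conf_threshold = 0.5  # mirrors the samples' confThreshold parameter
net = cv2.dnn.readNetFromCaffe("deploy.prototxt",
                               "mobilenet_iter_73000.caffemodel")
img = cv2.imread("input.jpg")
h, w = img.shape[:2]

# 0.007843 and 127.5 mirror the MobileNet-SSD "std"/mean convention above.
blob = cv2.dnn.blobFromImage(img, 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()  # shape: (1, 1, N, 7)

# Each row: [image_id, class_id, confidence, x1, y1, x2, y2], coords in [0, 1].
for det in detections[0, 0]:
    if float(det[2]) < conf_threshold:
        continue
    x1, y1, x2, y2 = det[3] * w, det[4] * h, det[5] * w, det[6] * h
    cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
cv2.imwrite("detections.jpg", img)
```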
@@ -94,7 +94,7 @@ nmsThreshold = 0.4;
outType = "SSD";
// url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/dnn/object_detection_classes_pascal_voc.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
......
@@ -333,7 +333,7 @@ function installDOM(){
### Execute it ###
- Save the file as `exampleNodeCanvasData.js`.
-- Make sure the files `aarcascade_frontalface_default.xml` and `haarcascade_eye.xml` are present in project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/master/data/haarcascades).
+- Make sure the files `aarcascade_frontalface_default.xml` and `haarcascade_eye.xml` are present in project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/3.4/data/haarcascades).
- Make sure a sample image file `lena.jpg` exists in project's directory. It should display people's faces for this example to make sense. The following image is known to work:
![image](lena.jpg)
......
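The tutorial itself runs under Node.js with OpenCV.js; for orientation, the same cascade files drive the classic Python API as in this sketch (not the tutorial's own code):

```python
import cv2

# Same cascade files as above; paths assume they sit next to the script.
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier("haarcascade_eye.xml")

img = cv2.imread("lena.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.3, 5):
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    roi = gray[y:y + h, x:x + w]  # search for eyes only inside each face
    for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi):
        cv2.rectangle(img, (x + ex, y + ey), (x + ex + ew, y + ey + eh),
                      (0, 255, 0), 2)
cv2.imwrite("result.jpg", img)
```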
@@ -4,7 +4,9 @@ Using OpenCV.js {#tutorial_js_usage}
Steps
-----
-In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page. You can get a copy of `opencv.js` from `opencv-{VERSION_NUMBER}-docs.zip` in each [release](https://github.com/opencv/opencv/releases), or simply download the prebuilt script from the online documentations at "https://docs.opencv.org/{VERSION_NUMBER}/opencv.js" (For example, [https://docs.opencv.org/3.4.0/opencv.js](https://docs.opencv.org/3.4.0/opencv.js). Use `master` if you want the latest build). You can also build your own copy by following the tutorial on Build Opencv.js.
+In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page.
+You can get a copy of `opencv.js` from `opencv-{VERSION_NUMBER}-docs.zip` in each [release](https://github.com/opencv/opencv/releases), or simply download the prebuilt script from the online documentations at "https://docs.opencv.org/{VERSION_NUMBER}/opencv.js" (For example, [https://docs.opencv.org/3.4.0/opencv.js](https://docs.opencv.org/3.4.0/opencv.js). Use `3.4` if you want the latest build).
+You can also build your own copy by following the tutorial @ref tutorial_js_setup.
### Create a web page
......
@@ -95,7 +95,7 @@ QR faster than SVD, but potentially less precise
- *camera_resolution*: resolution of camera which is used for calibration
**Note:** *charuco_dict*, *charuco_square_length* and *charuco_marker_size* are used for chAruco pattern generation
-(see Aruco module description for details: [Aruco tutorials](https://github.com/opencv/opencv_contrib/tree/master/modules/aruco/tutorials))
+(see Aruco module description for details: [Aruco tutorials](https://github.com/opencv/opencv_contrib/tree/3.4/modules/aruco/tutorials))
Default chAruco pattern:
......
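As a hedged illustration of the pattern-generation parameters: with the opencv-contrib aruco module (whose Python bindings up to OpenCV 4.6 expose `CharucoBoard_create`), a board can be generated like this; all values are placeholders:

```python
import cv2

# Placeholder values; the calibration tool reads charuco_dict,
# charuco_square_length and charuco_marker_size from its config instead.
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
board = cv2.aruco.CharucoBoard_create(5, 7, 40, 30, dictionary)  # 5x7 squares
pattern = board.draw((500, 700))  # output size in pixels
cv2.imwrite("charuco_pattern.png", pattern)
```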
@@ -23,7 +23,7 @@ Explanation
-----------
-# Firstly, download GoogLeNet model files:
-[bvlc_googlenet.prototxt ](https://github.com/opencv/opencv_extra/blob/master/testdata/dnn/bvlc_googlenet.prototxt) and
+[bvlc_googlenet.prototxt](https://github.com/opencv/opencv_extra/blob/3.4/testdata/dnn/bvlc_googlenet.prototxt) and
[bvlc_googlenet.caffemodel](http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel)
Also you need file with names of [ILSVRC2012](http://image-net.org/challenges/LSVRC/2012/browse-synsets) classes:
......
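For orientation, the downloaded files plug together as in this Python sketch (the tutorial's own code is C++; the mean values are the ones commonly quoted for GoogLeNet and should be treated as an assumption):

```python
import cv2
import numpy as np

net = cv2.dnn.readNetFromCaffe("bvlc_googlenet.prototxt",
                               "bvlc_googlenet.caffemodel")
with open("classification_classes_ILSVRC2012.txt") as f:
    classes = [line.strip() for line in f]

img = cv2.imread("space_shuttle.jpg")  # placeholder input image
# (104, 117, 123) is the mean commonly used for this model; an assumption here.
blob = cv2.dnn.blobFromImage(img, 1.0, (224, 224), (104, 117, 123))
net.setInput(blob)
prob = net.forward().flatten()
top = int(np.argmax(prob))
print(classes[top], float(prob[top]))
```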
@@ -38,7 +38,7 @@ correspondingly. In example, for variable `x` in range `[0, 10)` directive
`split: { x: 2 }` gives new ones `xo` in range `[0, 5)` and `xi` in range `[0, 2)`.
Variable name `x` is no longer available in the same scheduling node.
-You can find scheduling examples at [opencv_extra/testdata/dnn](https://github.com/opencv/opencv_extra/tree/master/testdata/dnn)
+You can find scheduling examples at [opencv_extra/testdata/dnn](https://github.com/opencv/opencv_extra/tree/3.4/testdata/dnn)
and use it for schedule your networks.
## Layers fusing
......
@@ -273,7 +273,7 @@ Results
Compile the code above and execute it (or run the script if using python) with an image as argument.
If you do not provide an image as argument the default sample image
-([LinuxLogo.jpg](https://github.com/opencv/opencv/tree/master/samples/data/LinuxLogo.jpg)) will be used.
+([LinuxLogo.jpg](https://github.com/opencv/opencv/tree/3.4/samples/data/LinuxLogo.jpg)) will be used.
For instance, using this image:
......
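The morphology at the heart of that demo reduces to a few lines of Python (a sketch; the tutorial's own program also wires up interactive trackbars):

```python
import cv2

img = cv2.imread("LinuxLogo.jpg")  # the default sample image mentioned above
# 5x5 rectangular structuring element; the tutorial lets you vary the shape.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

eroded = cv2.erode(img, kernel)
dilated = cv2.dilate(img, kernel)
cv2.imwrite("eroded.jpg", eroded)
cv2.imwrite("dilated.jpg", dilated)
```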
@@ -23,7 +23,7 @@ Code
to populate our image with a big number of geometric figures. Since we will be initializing them
in a random fashion, this process will be automatic and made by using *loops* .
- This code is in your OpenCV sample folder. Otherwise you can grab it from
-[here](http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp)
+[here](https://github.com/opencv/opencv/blob/3.4/samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_2.cpp)
Explanation
-----------
......
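The core idea, random geometric primitives drawn in a loop, looks like this in Python (a sketch, not the sample itself):

```python
import cv2
import numpy as np

canvas = np.zeros((400, 600, 3), dtype=np.uint8)
rng = np.random.default_rng()

# Draw 50 random lines with random colors, mimicking the sample's loops.
for _ in range(50):
    p1 = tuple(int(v) for v in rng.integers(0, [600, 400]))
    p2 = tuple(int(v) for v in rng.integers(0, [600, 400]))
    color = tuple(int(c) for c in rng.integers(0, 256, size=3))
    cv2.line(canvas, p1, p2, color, thickness=2)

cv2.imwrite("random_drawing.png", canvas)
```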
@@ -18,7 +18,7 @@ parser = argparse.ArgumentParser(description="Use this script to create TensorFl
"with weights from OpenCV's face detection network. "
"Only backbone part of SSD model is converted this way. "
"Look for .pbtxt configuration file at "
"https://github.com/opencv/opencv_extra/tree/master/testdata/dnn/opencv_face_detector.pbtxt")
"https://github.com/opencv/opencv_extra/tree/3.4/testdata/dnn/opencv_face_detector.pbtxt")
parser.add_argument('--model', help='Path to .caffemodel weights', required=True)
parser.add_argument('--proto', help='Path to .prototxt Caffe model definition', required=True)
parser.add_argument('--pb', help='Path to output .pb TensorFlow model', required=True)
......
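Once the script has produced a `.pb`, it loads together with the `.pbtxt` mentioned in the help string above; a hedged sketch with placeholder file names and the mean values usually quoted for this detector:

```python
import cv2

# Placeholder file names; (104, 177, 123) is the mean usually quoted for
# OpenCV's face detector and is an assumption, not taken from this script.
net = cv2.dnn.readNetFromTensorflow("opencv_face_detector_uint8.pb",
                                    "opencv_face_detector.pbtxt")
img = cv2.imread("input.jpg")
blob = cv2.dnn.blobFromImage(img, 1.0, (300, 300), (104, 177, 123))
net.setInput(blob)
detections = net.forward()  # SSD-style (1, 1, N, 7) output
```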
@@ -58,7 +58,7 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--imgs_dir", help="path to ImageNet validation subset images dir, ILSVRC2012_img_val dir")
parser.add_argument("--img_cls_file", help="path to file with classes ids for images, download it here:"
"https://github.com/opencv/opencv_extra/tree/master/testdata/dnn/img_classes_inception.txt")
"https://github.com/opencv/opencv_extra/tree/3.4/testdata/dnn/img_classes_inception.txt")
parser.add_argument("--model", help="path to tensorflow model, download it here:"
"https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip")
parser.add_argument("--log", help="path to logging file")
......
@@ -24,7 +24,7 @@ class NewOpenCVTests(unittest.TestCase):
repoPath = None
extraTestDataPath = None
# github repository url
-repoUrl = 'https://raw.github.com/opencv/opencv/master'
+repoUrl = 'https://raw.github.com/opencv/opencv/3.4'
def find_file(self, filename, searchPaths=[], required=True):
searchPaths = searchPaths if searchPaths else [self.repoPath, self.extraTestDataPath]
......
This folder contains toolchains and additional files that are needed for cross compilation.
For more information see introduction tutorials for target platform in documentation:
-https://docs.opencv.org/master/df/d65/tutorial_table_of_content_introduction.html
+https://docs.opencv.org/3.4/df/d65/tutorial_table_of_content_introduction.html
......
@@ -7,7 +7,7 @@ Check [a wiki](https://github.com/opencv/opencv/wiki/Deep-Learning-in-OpenCV) fo
If OpenCV is built with [Intel's Inference Engine support](https://github.com/opencv/opencv/wiki/Intel%27s-Deep-Learning-Inference-Engine-backend) you can use [Intel's pre-trained](https://github.com/opencv/open_model_zoo) models.
There are different preprocessing parameters such mean subtraction or scale factors for different models.
-You may check the most popular models and their parameters at [models.yml](https://github.com/opencv/opencv/blob/master/samples/dnn/models.yml) configuration file. It might be also used for aliasing samples parameters. In example,
+You may check the most popular models and their parameters at [models.yml](https://github.com/opencv/opencv/blob/3.4/samples/dnn/models.yml) configuration file. It might be also used for aliasing samples parameters. In example,
```bash
python object_detection.py opencv_fd --model /path/to/caffemodel --config /path/to/prototxt
@@ -27,7 +27,7 @@ You can download sample models using ```download_models.py```. For example, the
python download_models.py --save_dir FaceDetector opencv_fd
```
-You can use default configuration files adopted for OpenCV from [here](https://github.com/opencv/opencv_extra/tree/master/testdata/dnn).
+You can use default configuration files adopted for OpenCV from [here](https://github.com/opencv/opencv_extra/tree/3.4/testdata/dnn).
You also can use the script to download necessary files from your code. Assume you have the following code inside ```your_script.py```:
@@ -79,6 +79,6 @@ AR @[ IoU=0.50:0.95 | area= large | maxDets=100 ] | 0.528 | 0.528 |
## References
* [Models downloading script](https://github.com/opencv/opencv/samples/dnn/download_models.py)
-* [Configuration files adopted for OpenCV](https://github.com/opencv/opencv_extra/tree/master/testdata/dnn)
+* [Configuration files adopted for OpenCV](https://github.com/opencv/opencv_extra/tree/3.4/testdata/dnn)
* [How to import models from TensorFlow Object Detection API](https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API)
* [Names of classes from different datasets](https://github.com/opencv/opencv/tree/3.4/samples/data/dnn)
......
@@ -3,11 +3,11 @@
//
// it can be used for body pose detection, using either the COCO model(18 parts):
// http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/coco/pose_iter_440000.caffemodel
-// https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/openpose_pose_coco.prototxt
+// https://raw.githubusercontent.com/opencv/opencv_extra/3.4/testdata/dnn/openpose_pose_coco.prototxt
//
// or the MPI model(16 parts):
// http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel
-// https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/openpose_pose_mpi_faster_4_stages.prototxt
+// https://raw.githubusercontent.com/opencv/opencv_extra/3.4/testdata/dnn/openpose_pose_mpi_faster_4_stages.prototxt
//
// (to simplify this sample, the body models are restricted to a single person.)
//
......
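A rough Python counterpart of what this C++ sample does: the network emits one heatmap per body part, and each part sits at its heatmap's maximum. Input size, threshold, and file names below are illustrative assumptions:

```python
import cv2

net = cv2.dnn.readNetFromCaffe("openpose_pose_coco.prototxt",
                               "pose_iter_440000.caffemodel")
img = cv2.imread("person.jpg")
h, w = img.shape[:2]

# 368x368 input and 1/255 scaling are the values usually used with these models.
blob = cv2.dnn.blobFromImage(img, 1.0 / 255, (368, 368), (0, 0, 0))
net.setInput(blob)
out = net.forward()  # (1, channels, H', W'); first 18 channels = COCO parts

points = []
for i in range(18):
    _, conf, _, (x, y) = cv2.minMaxLoc(out[0, i])
    # Rescale heatmap coordinates to the image; 0.1 is an illustrative threshold.
    points.append((int(x * w / out.shape[3]), int(y * h / out.shape[2]))
                  if conf > 0.1 else None)
print(points)
```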