Commit d01bedbc authored by Maksim Shabunin

Removed Sphinx documentation files

Parent 61991a33
function insertIframe (elementId, iframeSrc)
{
var iframe;
if (document.createElement && (iframe = document.createElement('iframe')))
{
iframe.src = unescape(iframeSrc);
iframe.width = "100%";
iframe.height = "511px";
var element = document.getElementById(elementId);
element.parentNode.replaceChild(iframe, element);
}
}
{#
basic/layout.html
~~~~~~~~~~~~~~~~~
Master layout template for Sphinx themes.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
{%- block doctype -%}
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{%- endblock %}
{% set script_files = script_files + [pathto("_static/insertIframe.js", 1)] %}
{%- set reldelim1 = reldelim1 is not defined and ' &raquo;' or reldelim1 %}
{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %}
{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and
(sidebars != []) %}
{%- set url_root = pathto('', 1) %}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-33108845-1']);
_gaq.push(['_setDomainName', 'opencv.org']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
{%- macro relbar() %}
<div class="related">
<h3>{{ _('Navigation') }}</h3>
<ul>
{%- for rellink in rellinks %}
<li class="right" {% if loop.first %}style="margin-right: 10px"{% endif %}>
<a href="{{ pathto(rellink[0]) }}" title="{{ rellink[1]|striptags|e }}"
{{ accesskey(rellink[2]) }}>{{ rellink[3] }}</a>
{%- if not loop.first %}{{ reldelim2 }}{% endif %}</li>
{%- endfor %}
{%- block rootrellink %}
<li><a href="{{ pathto(master_doc) }}">{{ shorttitle|e }}</a>{{ reldelim1 }}</li>
{%- endblock %}
{%- for parent in parents %}
<li><a href="{{ parent.link|e }}" {% if loop.last %}{{ accesskey("U") }}{% endif %}>{{ parent.title }}</a>{{ reldelim1 }}</li>
{%- endfor %}
{%- block relbaritems %} {% endblock %}
</ul>
</div>
{%- endmacro %}
{%- macro sidebar() %}
{%- if render_sidebar %}
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
{%- block sidebarlogo %}
{%- if logo %}
<p class="logo"><a href="{{ pathto(master_doc) }}">
<img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/>
</a></p>
{%- endif %}
{%- endblock %}
{%- if sidebars == None %}
{%- block sidebarsearch %}
{%- include "searchbox.html" %}
{%- endblock %}
{%- endif %}
{%- if sidebars != None %}
{#- new style sidebar: explicitly include/exclude templates #}
{%- for sidebartemplate in sidebars %}
{%- include sidebartemplate %}
{%- endfor %}
{%- else %}
{#- old style sidebars: using blocks -- should be deprecated #}
{%- block sidebartoc %}
{%- include "localtoc.html" %}
{%- endblock %}
{%- block sidebarrel %}
{%- include "relations.html" %}
{%- endblock %}
{%- block sidebarsourcelink %}
{%- include "sourcelink.html" %}
{%- endblock %}
{%- if customsidebar %}
{%- include customsidebar %}
{%- endif %}
{%- endif %}
</div>
</div>
{%- endif %}
{%- endmacro %}
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset={{ encoding }}" />
{{ metatags }}
{%- if not embedded and docstitle %}
{%- set titlesuffix = " &mdash; "|safe + docstitle|e %}
{%- else %}
{%- set titlesuffix = "" %}
{%- endif %}
{%- block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{%- endblock %}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
<link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" />
{%- for cssfile in css_files %}
<link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
{%- endfor %}
{%- if not embedded %}
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '{{ url_root }}',
VERSION: '{{ release|e }}',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '{{ '' if no_search_suffix else file_suffix }}',
HAS_SOURCE: {{ has_source|lower }}
};
</script>
{%- for scriptfile in script_files %}
<script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
{%- endfor %}
{%- if use_opensearch %}
<link rel="search" type="application/opensearchdescription+xml"
title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
href="{{ pathto('_static/opensearch.xml', 1) }}"/>
{%- endif %}
{%- if favicon %}
<link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
{%- endif %}
{%- endif %}
{%- block linktags %}
{%- if hasdoc('about') %}
<link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" />
{%- endif %}
{%- if hasdoc('genindex') %}
<link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" />
{%- endif %}
{%- if hasdoc('search') %}
<link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" />
{%- endif %}
{%- if hasdoc('copyright') %}
<link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" />
{%- endif %}
<link rel="top" title="{{ docstitle|e }}" href="{{ pathto('index') }}" />
{%- if parents %}
<link rel="up" title="{{ parents[-1].title|striptags|e }}" href="{{ parents[-1].link|e }}" />
{%- endif %}
{%- if next %}
<link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" />
{%- endif %}
{%- if prev %}
<link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" />
{%- endif %}
{%- endblock %}
{%- block extrahead %} {% endblock %}
</head>
<body>
{%- block header %}{% endblock %}
{%- block relbar1 %}{{ relbar() }}{% endblock %}
{%- block content %}
{%- block sidebar1 %} {# possible location for sidebar #} {% endblock %}
<div class="document">
{% block document %}
<div class="documentwrapper">
{%- if not embedded %}{% if not theme_nosidebar|tobool %}
<div class="bodywrapper">
{%- endif %}{% endif %}
<div class="body">
{% block body %} {% endblock %}
</div>
<div class="feedback">
<h2>Help and Feedback</h2>
You did not find what you were looking for?
<ul>
{% if theme_lang == 'c' %}
{% endif %}
{% if theme_lang == 'cpp' %}
<li>Try the <a href="http://docs.opencv.org/opencv_cheatsheet.pdf">Cheatsheet</a>.</li>
{% endif %}
{% if theme_lang == 'py' %}
<li>Try the <a href="cookbook.html">Cookbook</a>.</li>
{% endif %}
<li>Ask a question on the <a href="http://answers.opencv.org">Q&A forum</a>.</li>
<li>If you think something is missing or wrong in the documentation,
please file a <a href="http://code.opencv.org">bug report</a>.</li>
</ul>
</div>
{%- if not embedded %}{% if not theme_nosidebar|tobool %}
</div>
{%- endif %}{% endif %}
</div>
{% endblock %}
{%- block sidebar2 %}{{ sidebar() }}{% endblock %}
<div class="clearer"></div>
</div>
{%- endblock %}
{%- block relbar2 %}{{ relbar() }}{% endblock %}
{%- block footer %}
<div class="footer">
{%- if show_copyright %}
{%- if hasdoc('copyright') %}
{% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}
{%- else %}
{% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}.{% endtrans %}
{%- endif %}
{%- endif %}
{%- if last_updated %}
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
{%- endif %}
{%- if show_sphinx %}
{% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}
{%- endif %}
</div>
{%- endblock %}
</body>
</html>
{#
basic/searchbox.html
~~~~~~~~~~~~~~~~~~~~
Sphinx sidebar template: quick search box.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
{%- if pagename != "search" %}
<div id="searchbox" style="display: none">
<form class="search" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" size="18" />
<input type="submit" value="{{ _('Search') }}" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
{%- endif %}
/**
* Sphinx stylesheet -- default theme
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@import url("basic.css");
/* -- page layout ----------------------------------------------------------- */
body {
font-family: {{ theme_bodyfont }};
font-size: 100%;
background-color: {{ theme_footerbgcolor }};
color: #000;
margin: 0;
padding: 0;
}
img.logo {
width: 150px;
}
div.document {
background-color: {{ theme_sidebarbgcolor }};
}
div.documentwrapper {
float: left;
width: 100%;
}
div.bodywrapper {
margin: 0 0 0 270px;
}
div.body {
background-color: {{ theme_bgcolor }};
color: {{ theme_textcolor }};
padding: 0 20px 30px 20px;
}
div.feedback {
background-color: {{ theme_feedbackbgcolor }};
color: {{ theme_feedbacktextcolor }};
padding: 20px 20px 30px 20px;
}
div.feedback h2 {
margin: 10px 0 10px 0;
}
div.feedback a {
color: {{ theme_feedbacklinkcolor }};
font-weight: bold;
}
{%- if theme_rightsidebar|tobool %}
div.bodywrapper {
margin: 0 230px 0 0;
}
{%- endif %}
div.footer {
color: {{ theme_footertextcolor }};
width: 100%;
padding: 9px 0 9px 0;
text-align: center;
font-size: 75%;
}
div.footer a {
color: {{ theme_footertextcolor }};
text-decoration: underline;
}
div.related {
background-color: {{ theme_relbarbgcolor }};
line-height: 30px;
color: {{ theme_relbartextcolor }};
}
div.related a {
color: {{ theme_relbarlinkcolor }};
}
div.sphinxsidebar {
word-wrap: break-word;
width: 270px;
{%- if theme_stickysidebar|tobool %}
top: 30px;
margin: 0;
position: fixed;
overflow: auto;
height: 100%;
{%- endif %}
{%- if theme_rightsidebar|tobool %}
float: right;
{%- if theme_stickysidebar|tobool %}
right: 0;
{%- endif %}
{%- endif %}
}
{%- if theme_stickysidebar|tobool %}
/* this is nice, but it leads to hidden headings when jumping
   to an anchor */
/*
div.related {
position: fixed;
}
div.documentwrapper {
margin-top: 30px;
}
*/
{%- endif %}
div.sphinxsidebar h3 {
font-family: {{ theme_headfont }};
color: {{ theme_sidebartextcolor }};
font-size: 1.4em;
font-weight: normal;
margin: 0;
padding: 0;
}
div.sphinxsidebar h3 a {
color: {{ theme_sidebartextcolor }};
}
div.sphinxsidebar h4 {
font-family: {{ theme_headfont }};
color: {{ theme_sidebartextcolor }};
font-size: 1.3em;
font-weight: normal;
margin: 5px 0 0 0;
padding: 0;
}
div.sphinxsidebar p {
color: {{ theme_sidebartextcolor }};
}
div.sphinxsidebar p.topless {
margin: 5px 10px 10px 10px;
}
div.sphinxsidebar ul {
margin: 10px 0 10px 10px;
padding: 0;
color: {{ theme_sidebartextcolor }};
}
div.sphinxsidebar a {
color: {{ theme_sidebarlinkcolor }};
}
div.sphinxsidebar input {
border: 1px solid {{ theme_sidebarlinkcolor }};
font-family: sans-serif;
font-size: 1em;
}
/* -- body styles ----------------------------------------------------------- */
a {
color: {{ theme_linkcolor }};
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
div.body p, div.body dd, div.body li {
text-align: justify;
line-height: 130%;
margin-top: 1em;
margin-bottom: 1em;
}
div.toctree-wrapper li, ul.simple li {
margin:0;
}
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
font-family: {{ theme_headfont }};
background-color: {{ theme_headbgcolor }};
font-weight: normal;
color: {{ theme_headtextcolor }};
border-bottom: 1px solid #ccc;
margin: 20px -20px 10px -20px;
padding: 3px 0 3px 10px;
}
a.toc-backref, a.toc-backref:hover {
font-family: {{ theme_headfont }};
background-color: {{ theme_headbgcolor }};
font-weight: normal;
color: {{ theme_headtextcolor }};
text-decoration: none;
}
div.body h1 { margin-top: 0; font-size: 200%; }
div.body h2 { font-size: 160%; }
div.body h3 { font-size: 140%; }
div.body h4 { font-size: 120%; }
div.body h5 { font-size: 110%; }
div.body h6 { font-size: 100%; }
a.headerlink {
color: {{ theme_headlinkcolor }};
font-size: 0.8em;
padding: 0 4px 0 4px;
text-decoration: none;
}
a.headerlink:hover {
background-color: {{ theme_headlinkcolor }};
color: white;
}
div.body p, div.body dd, div.body li {
text-align: justify;
line-height: 130%;
}
div.admonition p.admonition-title + p {
display: inline;
}
div.note {
background-color: #eee;
border: 1px solid #ccc;
}
div.seealso {
background-color: #ffc;
border: 1px solid #ff6;
}
div.topic {
background-color: #eee;
}
div.warning {
background-color: #ffe4e4;
border: 1px solid #f66;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ":";
}
pre {
padding: 5px;
background-color: {{ theme_codebgcolor }};
color: {{ theme_codetextcolor }};
line-height: 120%;
border: 1px solid #ace;
border-left: none;
border-right: none;
}
tt {
color: {{ theme_headtextcolor }};
/*background-color: #ecf0f3;*/
padding: 0 1px 0 1px;
font-size: 1.2em;
}
tt.descname {
color: {{ theme_headtextcolor }};
/*background-color: #ecf0f3;*/
padding: 0 1px 0 1px;
font-size: 1.4em;
}
div.math p {
margin-top: 10px;
margin-bottom: 10px;
}
dl.function > dt:first-child {
margin-bottom: 7px;
}
dl.cfunction > dt:first-child {
margin-bottom: 7px;
color: #8080B0;
}
dl.cfunction > dt:first-child tt.descname
{
color: #8080B0;
}
dl.pyfunction > dt:first-child {
margin-bottom: 7px;
}
dl.jfunction > dt:first-child {
margin-bottom: 7px;
}
table.field-list {
margin-top: 20px;
}
/*ul.simple {
list-style: none;
}*/
em.menuselection, em.guilabel {
font-family: {{ theme_guifont }};
}
.enumeratevisibleitemswithsquare ul {
list-style: square;
margin-bottom: 0px;
margin-left: 0px;
margin-right: 0px;
margin-top: 0px;
}
.enumeratevisibleitemswithsquare li {
margin-bottom: 0.2em;
margin-left: 0px;
margin-right: 0px;
margin-top: 0.2em;
}
.enumeratevisibleitemswithsquare p {
margin-bottom: 0pt;
margin-top: 1pt;
}
.enumeratevisibleitemswithsquare dl{
margin-bottom: 0px;
margin-left: 0px;
margin-right: 0px;
margin-top: 0px;
}
.toctableopencv
{
width: 100%;
table-layout: fixed;
}
.toctableopencv colgroup col:first-child
{
width: 100pt !important;
max-width: 100pt !important;
min-width: 100pt !important;
}
.toctableopencv colgroup col:nth-child(2)
{
width: 100% !important;
}
div.body ul.search li {
text-align: left;
}
div.linenodiv {
min-width: 1em;
text-align: right;
}
div.sphinxsidebar #searchbox input[type="text"] {
width:auto;
}
div.sphinxsidebar #searchbox input[type="submit"] {
width:auto;
}
[theme]
inherit = basic
stylesheet = default.css
pygments_style = sphinx
[options]
rightsidebar = false
stickysidebar = false
footerbgcolor = #004068
footertextcolor = #ffffff
sidebarbgcolor = #006090
sidebartextcolor = #ffffff
sidebarlinkcolor = #cceeff
relbarbgcolor = #003048
relbartextcolor = #ffffff
relbarlinkcolor = #ffffff
bgcolor = #ffffff
textcolor = #000000
headbgcolor = #f2f2f2
headtextcolor = #003048
headlinkcolor = #65a136
linkcolor = #0090d9
codebgcolor = #e0f5ff
codetextcolor = #333333
feedbackbgcolor = #004068
feedbacktextcolor = #ffffff
feedbacklinkcolor = #ffffff
bodyfont = sans-serif
headfont = 'Trebuchet MS', sans-serif
guifont = "Lucida Sans","Lucida Sans Unicode","Lucida Grande",Verdana,Arial,Helvetica,sans-serif
lang = none
{#
basic/layout.html
~~~~~~~~~~~~~~~~~
Master layout template for Sphinx themes.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
{%- block doctype -%}
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{%- endblock %}
{% set script_files = script_files + [pathto("_static/insertIframe.js", 1)] %}
{%- set reldelim1 = reldelim1 is not defined and ' &raquo;' or reldelim1 %}
{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %}
{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and
(sidebars != []) %}
{%- set url_root = pathto('', 1) %}
{# XXX necessary? #}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
{%- if not embedded and docstitle %}
{%- set titlesuffix = " &mdash; "|safe + docstitle|e %}
{%- else %}
{%- set titlesuffix = "" %}
{%- endif %}
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-33108845-1']);
_gaq.push(['_setDomainName', 'opencv.org']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
{%- macro relbar() %}
<div class="related">
<h3>{{ _('Navigation') }}</h3>
<ul>
{%- for rellink in rellinks %}
<li class="right" {% if loop.first %}style="margin-right: 10px"{% endif %}>
<a href="{{ pathto(rellink[0]) }}" title="{{ rellink[1]|striptags|e }}"
{{ accesskey(rellink[2]) }}>{{ rellink[3] }}</a>
{%- if not loop.first %}{{ reldelim2 }}{% endif %}</li>
{%- endfor %}
{%- block rootrellink %}
<li><a href="{{ pathto(master_doc) }}">{{ shorttitle|e }}</a>{{ reldelim1 }}</li>
{%- endblock %}
{%- for parent in parents %}
<li><a href="{{ parent.link|e }}" {% if loop.last %}{{ accesskey("U") }}{% endif %}>{{ parent.title }}</a>{{ reldelim1 }}</li>
{%- endfor %}
{%- block relbaritems %} {% endblock %}
</ul>
</div>
{%- endmacro %}
{%- macro sidebar() %}
{%- if render_sidebar %}
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
{%- block sidebarlogo %}
{%- if logo %}
<p class="logo"><a href="{{ pathto(master_doc) }}">
<img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/>
</a></p>
{%- endif %}
{%- endblock %}
{%- if sidebars == None %}
{%- block sidebarsearch %}
{%- include "searchbox.html" %}
{%- endblock %}
{%- endif %}
{%- if sidebars != None %}
{#- new style sidebar: explicitly include/exclude templates #}
{%- for sidebartemplate in sidebars %}
{%- include sidebartemplate %}
{%- endfor %}
{%- else %}
{#- old style sidebars: using blocks -- should be deprecated #}
{%- block sidebartoc %}
{%- include "localtoc.html" %}
{%- endblock %}
{%- block sidebarrel %}
{%- include "relations.html" %}
{%- endblock %}
{%- if customsidebar %}
{%- include customsidebar %}
{%- endif %}
{%- endif %}
</div>
</div>
{%- endif %}
{%- endmacro %}
{%- macro script() %}
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '{{ url_root }}',
VERSION: '{{ release|e }}',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '{{ '' if no_search_suffix else file_suffix }}',
HAS_SOURCE: {{ has_source|lower }}
};
</script>
{%- for scriptfile in script_files %}
<script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
{%- endfor %}
{%- endmacro %}
{%- macro css() %}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
<link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" />
{%- for cssfile in css_files %}
<link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
{%- endfor %}
{%- endmacro %}
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset={{ encoding }}" />
{{ metatags }}
{%- block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{%- endblock %}
{{ css() }}
{%- if not embedded %}
{{ script() }}
{%- if use_opensearch %}
<link rel="search" type="application/opensearchdescription+xml"
title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
href="{{ pathto('_static/opensearch.xml', 1) }}"/>
{%- endif %}
{%- if favicon %}
<link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
{%- endif %}
{%- endif %}
{%- block linktags %}
{%- if hasdoc('about') %}
<link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" />
{%- endif %}
{%- if hasdoc('genindex') %}
<link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" />
{%- endif %}
{%- if hasdoc('search') %}
<link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" />
{%- endif %}
{%- if hasdoc('copyright') %}
<link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" />
{%- endif %}
<link rel="top" title="{{ docstitle|e }}" href="{{ pathto('index') }}" />
{%- if parents %}
<link rel="up" title="{{ parents[-1].title|striptags|e }}" href="{{ parents[-1].link|e }}" />
{%- endif %}
{%- if next %}
<link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" />
{%- endif %}
{%- if prev %}
<link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" />
{%- endif %}
{%- endblock %}
{%- block extrahead %}
<link href='http://fonts.googleapis.com/css?family=Open+Sans:300,400,700'
rel='stylesheet' type='text/css' />
{%- if not embedded %}
<style type="text/css">
table.right { float: right; margin-left: 20px; }
table.right td { border: 1px solid #ccc; }
</style>
<script type="text/javascript">
// intelligent scrolling of the sidebar content
$(window).scroll(function() {
var sb = $('.sphinxsidebarwrapper');
var win = $(window);
var sbh = sb.height();
var offset = $('.sphinxsidebar').position()['top'];
var wintop = win.scrollTop();
var winbot = wintop + win.innerHeight();
var curtop = sb.position()['top'];
var curbot = curtop + sbh;
// does sidebar fit in window?
if (sbh < win.innerHeight()) {
// yes: easy case -- always keep at the top
sb.css('top', $u.min([$u.max([0, wintop - offset - 10]),
$(document).height() - sbh - 200]));
} else {
// no: only scroll if top/bottom edge of sidebar is at
// top/bottom edge of window
if (curtop > wintop && curbot > winbot) {
sb.css('top', $u.max([wintop - offset - 10, 0]));
} else if (curtop < wintop && curbot < winbot) {
sb.css('top', $u.min([winbot - sbh - offset - 20,
$(document).height() - sbh - 200]));
}
}
});
</script>
{%- endif %}
{% endblock %}
</head>
{%- block header %}{% endblock %}
{%- block relbar1 %}{{ relbar() }}{% endblock %}
{%- block sidebar1 %} {# possible location for sidebar #} {% endblock %}
{%- block sidebar2 %}{{ sidebar() }}{% endblock %}
<body>
{%- block content %}
<div class="document">
{%- block document %}
<div class="documentwrapper">
{%- if render_sidebar %}
<div class="bodywrapper">
{%- endif %}
<div class="body">
{% block body %} {% endblock %}
</div>
<div class="feedback">
<h2>Help and Feedback</h2>
You did not find what you were looking for?
<ul>
{% if theme_lang == 'c' %}
{% endif %}
{% if theme_lang == 'cpp' %}
<li>Try the <a href="http://docs.opencv.org/opencv_cheatsheet.pdf">Cheatsheet</a>.</li>
{% endif %}
{% if theme_lang == 'py' %}
<li>Try the <a href="cookbook.html">Cookbook</a>.</li>
{% endif %}
<li>Ask a question on the <a href="http://answers.opencv.org">Q&A forum</a>.</li>
<li>If you think something is missing or wrong in the documentation,
please file a <a href="http://code.opencv.org">bug report</a>.</li>
</ul>
</div>
{%- if render_sidebar %}
</div>
{%- endif %}
</div>
{%- endblock %}
<div class="clearer"></div>
</div>
{%- endblock %}
{%- block relbar2 %}{{ relbar() }}{% endblock %}
{%- block footer %}
<div class="footer">
{%- if show_copyright %}
{%- if hasdoc('copyright') %}
{% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}
{%- else %}
{% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}.{% endtrans %}
{%- endif %}
{%- endif %}
{%- if last_updated %}
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
{%- endif %}
{%- if show_sphinx %}
{% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx-doc.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}
{%- endif %}
{%- if show_source and has_source and sourcename %}
<a href="{{ pathto('_sources/' + sourcename, true)|e }}" rel="nofollow">{{ _('Show this page source.') }}</a>
{%- endif %}
</div>
{%- endblock %}
</body>
</html>
{#
basic/searchbox.html
~~~~~~~~~~~~~~~~~~~~
Sphinx sidebar template: quick search box.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
{%- if pagename != "search" and builder != "singlehtml" %}
<div id="searchbox" style="display: none">
<h3>{{ _('Quick search') }}</h3>
<form class="search" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" />
<input type="submit" value="{{ _('Go') }}" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
{%- endif %}
This diff is suppressed by .gitattributes.
/*
* sphinxdoc.css_t
* ~~~~~~~~~~~~~~~
*
* Sphinx stylesheet -- sphinxdoc theme.
*
* :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@import url("basic.css");
/* -- page layout ----------------------------------------------------------- */
body {
font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif;
font-size: 14px;
text-align: center;
background-image: url(bodybg.png);
color: black;
padding: 0;
border-right: 1px solid #0a507a;
border-left: 1px solid #0a507a;
margin: 0 auto;
min-width: 780px;
max-width: 1080px;
}
div.document {
background-color: white;
text-align: left;
}
div.bodywrapper {
margin: 0 240px 0 0;
border-right: 1px solid #0a507a;
}
div.body {
margin: 0;
padding: 0.5em 20px 20px 20px;
}
div.related {
font-size: 1em;
color: white;
}
div.related ul {
background-image: url(relbg.png);
text-align: left;
border-top: 1px solid #002e50;
border-bottom: 1px solid #002e50;
}
div.related li + li {
display: inline;
}
div.related ul li.right {
float: right;
margin-right: 5px;
}
div.related ul li a {
margin: 0;
padding: 0 5px 0 5px;
line-height: 1.75em;
color: #f9f9f0;
text-shadow: 0px 0px 1px rgba(0, 0, 0, 0.5);
}
div.related ul li a:hover {
color: white;
text-shadow: 0px 0px 1px rgba(255, 255, 255, 0.5);
}
div.footer {
background-image: url(footerbg.png);
color: #ccc;
text-shadow: 0 0 .2px rgba(255, 255, 255, 0.8);
padding: 3px 8px 3px 0;
clear: both;
font-size: 0.8em;
text-align: center;
}
div.sphinxsidebarwrapper {
position: relative;
top: 0px;
padding: 0;
}
div.sphinxsidebar {
word-wrap: break-word;
margin: 0;
padding: 0 15px 15px 0;
width: 210px;
float: right;
font-size: 1em;
text-align: left;
}
div.sphinxsidebar .logo {
text-align: center;
}
div.sphinxsidebar .logo img {
width: 150px;
vertical-align: middle;
}
div.sphinxsidebar input {
border: 1px solid #aaa;
font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif;
font-size: 1em;
}
div.sphinxsidebar #searchbox input[type="text"] {
width: 160px;
}
div.sphinxsidebar #searchbox input[type="submit"] {
width: 40px;
}
div.sphinxsidebar h3 {
font-size: 1.5em;
border-top: 1px solid #0a507a;
margin-top: 1em;
margin-bottom: 0.5em;
padding-top: 0.5em;
}
div.sphinxsidebar h4 {
font-size: 1.2em;
margin-bottom: 0;
}
div.sphinxsidebar h3, div.sphinxsidebar h4 {
margin-right: -15px;
margin-left: -15px;
padding-right: 14px;
padding-left: 14px;
color: #333;
font-weight: 300;
/*text-shadow: 0px 0px 0.5px rgba(0, 0, 0, 0.4);*/
}
div.sphinxsidebarwrapper > h3:first-child {
margin-top: 0.5em;
border: none;
}
div.sphinxsidebar h3 a {
color: #333;
}
div.sphinxsidebar ul {
color: #444;
margin-top: 7px;
padding: 0;
line-height: 130%;
}
div.sphinxsidebar ul ul {
margin-left: 20px;
list-style-image: url(listitem.png);
}
/* -- body styles ----------------------------------------------------------- */
p {
margin: 0.8em 0 0.5em 0;
}
a, a tt {
color: #2878a2;
}
a:hover, a tt:hover {
color: #68b8c2;
}
a tt {
border: 0;
}
h1, h2, h3, h4, h5, h6 {
color: #0a507a;
background-color: #e5f5ff;
font-weight: 300;
}
h1 {
margin: 10px 0 0 0;
}
h2 {
margin: 1em 0 0.2em 0;
padding: 0;
}
h3 {
margin: 1em 0 -0.3em 0;
}
h1 { font-size: 200%; }
h2 { font-size: 160%; }
h3 { font-size: 140%; }
h4 { font-size: 120%; }
h5 { font-size: 110%; }
h6 { font-size: 100%; }
div a, h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
text-decoration: none;
}
div.body h1 a tt, div.body h2 a tt, div.body h3 a tt,
div.body h4 a tt, div.body h5 a tt, div.body h6 a tt {
color: #0a507a !important;
font-size: inherit !important;
}
a.headerlink {
color: #0a507a !important;
font-size: 12px;
margin-left: 6px;
padding: 0 4px 0 4px;
text-decoration: none !important;
float: right;
}
a.headerlink:hover {
background-color: #ccc;
color: white!important;
}
cite, code, tt {
font-family: 'Consolas', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono',
monospace;
font-size: 14px;
min-width: 780px;
max-width: 1080px;
}
tt {
color: #003048;
padding: 1px;
}
tt.descname, tt.descclassname, tt.xref {
font-size: 12px;
}
hr {
border: 1px solid #abc;
margin: 2em;
}
pre {
font-family: 'Consolas', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono',
monospace;
font-size: 13px;
letter-spacing: 0.015em;
line-height: 120%;
padding: 0.5em;
border: 1px solid #ccc;
border-radius: 2px;
background-color: #f8f8f8;
}
pre a {
color: inherit;
text-decoration: none;
}
td.linenos pre {
padding: 0.5em 0;
}
td.code pre {
max-width: 740px;
overflow: auto;
overflow-y: hidden; /* fixes display issues on Chrome browsers */
}
div.quotebar {
background-color: #f8f8f8;
max-width: 250px;
float: right;
padding: 0px 7px;
border: 1px solid #ccc;
margin-left: 1em;
}
div.topic {
background-color: #f8f8f8;
}
table {
border-collapse: collapse;
margin: 0 -0.5em 0 -0.5em;
}
table td, table th {
padding: 0.2em 0.5em 0.2em 0.5em;
}
div.note {
background-color: #eee;
border: 1px solid #ccc;
}
div.seealso {
background-color: #ffc;
border: 1px solid #ff6;
}
div.topic {
background-color: #eee;
}
div.warning {
background-color: #ffe4e4;
border: 1px solid #f66;
}
div.admonition ul li, div.warning ul li,
div.admonition ol li, div.warning ol li {
text-align: left;
}
div.admonition p.admonition-title + p {
display: inline;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ":";
}
/* ------------------ our styles ----------------*/
div.body p, div.body dd, div.body li {
text-align: justify;
line-height: 130%;
margin-top: 1em;
margin-bottom: 1em;
}
div.toctree-wrapper li, ul.simple li {
margin:0;
}
/*a.toc-backref {
}*/
div.feedback {
/*background-color: #;*/
/*color: #;*/
padding: 20px 20px 30px 20px;
border-top: 1px solid #002e50;
}
div.feedback h2 {
margin: 10px 0 10px 0;
}
div.feedback a {
/*color: #;*/
font-weight: bold;
}
div.math p {
margin-top: 10px;
margin-bottom: 10px;
}
dl.function > dt:first-child {
margin-bottom: 7px;
}
dl.cfunction > dt:first-child {
margin-bottom: 7px;
color: #8080B0;
}
dl.cfunction > dt:first-child tt.descname {
color: #8080B0;
}
dl.pyfunction > dt:first-child {
margin-bottom: 7px;
}
dl.jfunction > dt:first-child {
margin-bottom: 7px;
}
table.field-list {
margin-top: 20px;
}
em.menuselection, em.guilabel {
font-family: 'Lucida Sans', 'Lucida Sans Unicode', 'Lucida Grande', Verdana,
Arial, Helvetica, sans-serif;
}
.enumeratevisibleitemswithsquare ul {
list-style: square;
margin-bottom: 0px;
margin-left: 0px;
margin-right: 0px;
margin-top: 0px;
}
.enumeratevisibleitemswithsquare li {
margin-bottom: 0.2em;
margin-left: 0px;
margin-right: 0px;
margin-top: 0.2em;
}
.enumeratevisibleitemswithsquare p {
margin-bottom: 0pt;
margin-top: 1pt;
}
.enumeratevisibleitemswithsquare dl {
margin-bottom: 0px;
margin-left: 0px;
margin-right: 0px;
margin-top: 0px;
}
.toctableopencv {
width: 100%;
table-layout: fixed;
}
.toctableopencv colgroup col:first-child {
width: 100pt !important;
max-width: 100pt !important;
min-width: 100pt !important;
}
.toctableopencv colgroup col:nth-child(2) {
width: 100% !important;
}
div.body ul.search li {
text-align: left;
}
div.linenodiv {
min-width: 1em;
text-align: right;
}
This diff is suppressed by .gitattributes.
This diff is suppressed by .gitattributes.
This diff is suppressed by .gitattributes.
This diff is suppressed by .gitattributes.
[theme]
inherit = basic
stylesheet = default.css
pygments_style = sphinx
#!/usr/bin/env python
import sys, glob
sys.path.append("../modules/python/src2/")
import hdr_parser as hp
opencv_hdr_list = [
    "../modules/core/include/opencv2/core.hpp",
    "../modules/ml/include/opencv2/ml.hpp",
    "../modules/imgproc/include/opencv2/imgproc.hpp",
    "../modules/calib3d/include/opencv2/calib3d.hpp",
    "../modules/features2d/include/opencv2/features2d.hpp",
    "../modules/video/include/opencv2/video/tracking.hpp",
    "../modules/video/include/opencv2/video/background_segm.hpp",
    "../modules/objdetect/include/opencv2/objdetect.hpp",
    "../modules/imgcodecs/include/opencv2/imgcodecs.hpp",
    "../modules/videoio/include/opencv2/videoio.hpp",
    "../modules/highgui/include/opencv2/highgui.hpp",
]

opencv_module_list = [
    "core",
    "imgproc",
    "calib3d",
    "features2d",
    "video",
    "objdetect",
    "imgcodecs",
    "videoio",
    "highgui",
    "ml"
]
class RSTParser(object):
    def __init__(self):
        self.read_whitelist()

    # reads the file containing functions and classes that do not need to be documented
    def read_whitelist(self):
        self.whitelist = {}
        try:
            wf = open("check_docs_whitelist.txt", "rt")
        except IOError:
            return
        self.parser = hp.CppHeaderParser()
        for l in wf.readlines():
            cpos = l.find("#")
            if cpos >= 0:
                l = l[:cpos]
            l = l.strip()
            if not l:
                continue
            rst_decl = None
            if "(" in l:
                l = l.replace("cv::", "")
                rst_decl = self.parser.parse_func_decl_no_wrap(l)
                fname = rst_decl[0]
            else:
                fname = l.replace("::", ".")
            complist = fname.split(".")
            prefix = ""
            alreadyListed = False
            wl = []
            for c in complist:
                prefix = (prefix + "." + c).lstrip(".")
                wl = self.whitelist.get(prefix, [])
                if wl == "*":
                    break
            if wl == "*":
                continue
            if not rst_decl:
                self.whitelist[fname] = "*"
            else:
                wl.append(rst_decl)
                self.whitelist[fname] = wl
        wf.close()
    def process_rst(self, docname):
        df = open(docname, "rt")
        fdecl = ""
        balance = 0
        lineno = 0
        for l in df.readlines():
            lineno += 1
            ll = l.strip()
            if balance == 0:
                if not ll.startswith(".. c:function::") and \
                   not ll.startswith(".. cpp:function::") and \
                   not ll.startswith(".. ocv:function::") and \
                   not ll.startswith(".. ocv:cfunction::"):
                    continue
                fdecl = ll[ll.find("::") + 3:]
            elif balance > 0:
                fdecl += ll
            balance = fdecl.count("(") - fdecl.count(")")
            assert balance >= 0
            if balance > 0:
                continue
            rst_decl = self.parser.parse_func_decl_no_wrap(fdecl)
            fname = rst_decl[0]
            hdr_decls = self.fmap.get(fname, [])
            if not hdr_decls:
                fname = fname.replace("cv.", "")
                hdr_decls = self.fmap.get(fname, [])
            if not hdr_decls:
                print("Documented function %s (%s) in %s:%d is not in the headers" % (fdecl, rst_decl[0].replace(".", "::"), docname, lineno))
                continue
            decl_idx = 0
            for hd in hdr_decls:
                if len(hd[3]) != len(rst_decl[3]):
                    decl_idx += 1
                    continue
                idx = 0
                for a in hd[3]:
                    if a[0] != rst_decl[3][idx][0] and a[0].replace("cv::", "") != rst_decl[3][idx][0]:
                        break
                    idx += 1
                if idx == len(hd[3]):
                    break
                decl_idx += 1
            if decl_idx < len(hdr_decls):
                self.fmap[fname] = hdr_decls[:decl_idx] + hdr_decls[decl_idx+1:]
                continue
            print("Documented function %s in %s:%d does not have a match" % (fdecl, docname, lineno))
        df.close()
    def decl2str(self, decl):
        return "%s %s(%s)" % (decl[1], decl[0], ", ".join([a[0] + " " + a[1] for a in decl[3]]))

    def check_module_docs(self, name):
        self.parser = hp.CppHeaderParser()
        decls = []
        self.fmap = {}
        for hname in opencv_hdr_list:
            if hname.startswith("../modules/" + name):
                decls += self.parser.parse(hname, wmode=False)
        for d in decls:
            fname = d[0]
            if not fname.startswith("struct") and not fname.startswith("class") and not fname.startswith("const"):
                dlist = self.fmap.get(fname, [])
                dlist.append(d)
                self.fmap[fname] = dlist
        self.missing_docfunc_list = []
        doclist = glob.glob("../modules/" + name + "/doc/*.rst")
        for d in doclist:
            self.process_rst(d)
        print("\n\n########## The list of undocumented functions: ###########\n\n")
        misscount = 0
        fkeys = sorted(self.fmap.keys())
        for f in fkeys:
            # skip undocumented destructors
            if "~" in f:
                continue
            decls = self.fmap[f]
            fcomps = f.split(".")
            prefix = ""
            wlist_decls = []
            for c in fcomps:
                prefix = (prefix + "." + c).lstrip(".")
                wlist_decls = self.whitelist.get(prefix, [])
                if wlist_decls == "*":
                    break
            if wlist_decls == "*":
                continue
            wlist_decls = [self.decl2str(d) for d in wlist_decls]
            for d in decls:
                dstr = self.decl2str(d)
                # special hack for ML: skip old variants of the methods
                if name == "ml" and ("CvMat" in dstr):
                    continue
                if dstr not in wlist_decls:
                    misscount += 1
                    print("%s %s(%s)" % (d[1], d[0].replace(".", "::"), ", ".join([a[0] + " " + a[1] for a in d[3]])))
        print("\n\n\nundocumented functions in %s: %d" % (name, misscount))

p = RSTParser()
for m in opencv_module_list:
    print("\n\n*************************** " + m + " *************************\n")
    p.check_module_docs(m)
This diff has been collapsed.
# this is a list of functions, classes and methods
# that are not supposed to be documented in the near future,
# to make the output of check_docs.py script more sensible.
#
# Syntax:
# every line starting with # is a comment
# there can be empty lines
# each line includes either a class name (including all the necessary namespaces),
# or a function/method name
# or a full declaration of a function/method
# if a class name is in the whitelist, all the methods are considered "white-listed" too
# if a method/function name is listed, then all the overload variants are "white-listed".
# that is, to white list a particular overloaded variant of a function/method you need to put
# full declaration into the file
#
######################################### core #####################################
cv::Mat::MSize
cv::Mat::MStep
cv::MatConstIterator
cv::NAryMatIterator
cv::Algorithm
cv::_InputArray
cv::_OutputArray
######################################## imgproc ###################################
CvLSHOperations
cv::FilterEngine
cv::BaseFilter
cv::BaseRowFilter
cv::BaseColumnFilter
cv::Moments
###################################### features2d###################################
cv::BOWKMeansTrainer::cluster
cv::BOWTrainer::BOWTrainer
cv::BOWTrainer::clear
cv::AdjusterAdapter::clone
cv::MSER::MSER
cv::StarDetector::StarDetector
cv::SIFT::CommonParams::CommonParams
cv::SIFT::SIFT
cv::SURF::SURF
cv::SimpleBlobDetector::Params::Params
cv::FastFeatureDetector::read
cv::MserFeatureDetector::read
cv::StarFeatureDetector::read
cv::SurfFeatureDetector::read
cv::SiftFeatureDetector::read
cv::GoodFeaturesToTrackDetector::read
cv::OrbFeatureDetector::read
cv::FastFeatureDetector::write
cv::MserFeatureDetector::write
cv::StarFeatureDetector::write
cv::SurfFeatureDetector::write
cv::SiftFeatureDetector::write
cv::GoodFeaturesToTrackDetector::write
cv::OrbFeatureDetector::write
cv::DynamicAdaptedFeatureDetector::empty
cv::GridAdaptedFeatureDetector::empty
cv::PyramidAdaptedFeatureDetector::empty
cv::BriefDescriptorExtractor::descriptorSize
cv::SurfDescriptorExtractor::descriptorSize
cv::SiftDescriptorExtractor::descriptorSize
cv::OpponentColorDescriptorExtractor::descriptorSize
cv::OrbDescriptorExtractor::descriptorSize
cv::BriefDescriptorExtractor::descriptorType
cv::SurfDescriptorExtractor::descriptorType
cv::SiftDescriptorExtractor::descriptorType
cv::OpponentColorDescriptorExtractor::descriptorType
cv::OrbDescriptorExtractor::descriptorType
cv::SurfDescriptorExtractor::read
cv::SiftDescriptorExtractor::read
cv::OpponentColorDescriptorExtractor::read
cv::OrbDescriptorExtractor::read
cv::SurfDescriptorExtractor::write
cv::SiftDescriptorExtractor::write
cv::OpponentColorDescriptorExtractor::write
cv::OrbDescriptorExtractor::write
cv::OpponentColorDescriptorExtractor::empty
cv::FlannBasedMatcher::train
cv::FlannBasedMatcher::clear
cv::FlannBasedMatcher::clone
cv::FlannBasedMatcher::isMaskSupported
cv::GenericDescriptorMatcher::GenericDescriptorMatcher
cv::VectorDescriptorMatcher::clear
cv::FernDescriptorMatcher::clear
cv::OneWayDescriptorMatcher::clear
cv::VectorDescriptorMatcher::empty
cv::FernDescriptorMatcher::empty
cv::OneWayDescriptorMatcher::empty
cv::OneWayDescriptorMatcher::read
cv::VectorDescriptorMatcher::isMaskSupported
cv::FernDescriptorMatcher::isMaskSupported
cv::OneWayDescriptorMatcher::isMaskSupported
cv::VectorDescriptorMatcher::train
cv::FernDescriptorMatcher::train
cv::OneWayDescriptorMatcher::train
cv::VectorDescriptorMatcher::read
cv::FernDescriptorMatcher::read
cv::VectorDescriptorMatcher::write
cv::FernDescriptorMatcher::write
cv::OneWayDescriptorMatcher::write
cv::FastAdjuster::good
cv::StarAdjuster::good
cv::SurfAdjuster::good
cv::FastAdjuster::tooFew
cv::StarAdjuster::tooFew
cv::SurfAdjuster::tooFew
cv::FastAdjuster::tooMany
cv::StarAdjuster::tooMany
cv::SurfAdjuster::tooMany
cv::FastAdjuster::clone
cv::StarAdjuster::clone
cv::SurfAdjuster::clone
######################################## calib3d ###################################
CvLevMarq
Mat cv::findFundamentalMat( InputArray points1, InputArray points2, OutputArray mask, int method=FM_RANSAC, double param1=3., double param2=0.99)
Mat findHomography( InputArray srcPoints, InputArray dstPoints, OutputArray mask, int method=0, double ransacReprojThreshold=3);
########################################## ml ######################################
CvBoostTree
CvForestTree
CvSVMKernel
CvSVMSolver
CvDTreeTrainData
CvERTreeTrainData
CvKNearest::CvKNearest
CvKNearest::clear
CvDTreeNode::get_num_valid
CvDTreeNode::set_num_valid
CvDTree::CvDTree
CvDTree::clear
CvDTree::read
CvDTree::write
CvEM::CvEM
CvEM::clear
CvEM::read
CvEM::write
CvSVM::CvSVM
CvSVM::clear
CvSVM::read
CvSVM::write
CvMLData::CvMLData
CvRTrees::CvRTrees
CvRTrees::clear
CvRTrees::read
CvRTrees::write
CvBoost::CvBoost
CvBoost::clear
CvBoost::read
CvBoost::write
CvGBTrees::CvGBTrees
CvGBTrees::clear
CvGBTrees::read
CvGBTrees::write
CvNormalBayesClassifier::CvNormalBayesClassifier
CvNormalBayesClassifier::clear
CvNormalBayesClassifier::read
CvNormalBayesClassifier::write
CvANN_MLP::CvANN_MLP
CvANN_MLP::clear
CvANN_MLP::read
CvANN_MLP::write
CvTrainTestSplit
cvParamLattice
cvDefaultParamLattice
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
#!/usr/bin/env python
import sys
# Post-process a Sphinx-generated LaTeX file in place:
# - move the trailing "~const" qualifier into its proper brace group;
# - rewrite single-parameter "Parameters" sections as unlabeled itemize
#   environments so they render like multi-parameter ones.
f = open(sys.argv[1], "rt")
ll = list(f.readlines())
f.close()

f = open(sys.argv[1], "wt")
singleparam = False
for l in ll:
    l = l.replace("\\code{~const}}{}", "}{\\code{~const}}")
    if l.startswith("\\item[{Parameters}] \\leavevmode"):
        if not l.startswith("\\item[{Parameters}] \\leavevmode\\begin{itemize}"):
            singleparam = True
        l = "\\item[{Parameters}] \\leavevmode\\begin{itemize}[label=]\n"
        if singleparam:
            l += "\\item {}\n"
    elif singleparam and l.startswith("\\end{description}\\end{quote}"):
        l = "\\end{itemize}\n" + l
        singleparam = False
    f.write(l)
f.close()
.. _Bindings_Basics:
How OpenCV-Python Bindings Work
************************************
Goal
=====
Learn:

* How OpenCV-Python bindings are generated
* How to extend new OpenCV modules to Python
How are OpenCV-Python bindings generated?
==========================================
In OpenCV, all algorithms are implemented in C++, but these algorithms can be used from other languages like Python, Java etc. This is made possible by the bindings generators, which create a bridge between C++ and Python that enables users to call C++ functions from Python. To get a complete picture of what is happening in the background, a good knowledge of the Python/C API is required. A simple example of extending C++ functions to Python can be found in the official Python documentation [1]. Extending all the functions in OpenCV to Python by writing their wrapper functions manually would be a time-consuming task, so OpenCV does it in a more intelligent way: it generates these wrapper functions automatically from the C++ headers using some Python scripts located in ``modules/python/src2``. We will look into what they do.
First, ``modules/python/CMakeLists.txt`` is a CMake script which selects the modules to be extended to Python. It automatically checks all the modules to be extended and grabs their header files. These header files contain the list of all classes, functions, constants etc. for each particular module.
Second, these header files are passed to a Python script, ``modules/python/src2/gen2.py``, the bindings generator. It calls another Python script, ``modules/python/src2/hdr_parser.py``, the header parser. The header parser splits a complete header file into small Python lists, so these lists contain all the details about a particular function, class etc. For example, a function is parsed into a list containing the function name, return type, input arguments, argument types etc. The final list contains the details of all the functions, structs, classes etc. in that header file.
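As a quick illustration (a sketch, not part of the build; the header path and the ``wmode`` argument below mirror how the repository's own ``doc/check_docs.py`` script drives the parser)::

    import sys
    sys.path.append("../modules/python/src2/")
    import hdr_parser

    parser = hdr_parser.CppHeaderParser()
    # each entry is a Python list describing one declaration;
    # decl[0] is the name, decl[1] the return type, decl[3] the argument list
    decls = parser.parse("../modules/core/include/opencv2/core.hpp", wmode=False)
    for decl in decls[:5]:
        print(decl[0])  # dotted name of the parsed function or class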
But the header parser doesn't parse all the functions and classes in the header file. The developer has to specify which functions should be exported to Python. For that, certain macros are added to the beginning of these declarations, which enables the header parser to identify the functions to be parsed. These macros are added by the developer who programs the particular function; in short, the developer decides which functions are extended to Python and which are not. Details of those macros are given in the next section.
So the header parser returns one final big list of parsed functions. Our generator script (gen2.py) creates wrapper functions for all the functions, classes, enums and structs parsed by the header parser (you can find these header files during compilation in the ``build/modules/python/`` folder as ``pyopencv_generated_*.h`` files). But some basic OpenCV datatypes like Mat, Vec4i and Size need to be extended manually: for example, a Mat is mapped to a NumPy array, a Size to a tuple of two integers etc. Similarly, some complex structs, classes and functions need to be extended manually. All such manual wrapper functions are placed in ``modules/python/src2/pycv2.hpp``.
Now the only thing left is the compilation of these wrapper files, which gives us the **cv2** module. So when you call a function, say ``res = equalizeHist(img1, img2)``, in Python, you pass two NumPy arrays and you expect another NumPy array as the output. These NumPy arrays are converted to ``cv::Mat``, the ``equalizeHist()`` function in C++ is called, and the final result ``res`` is converted back into a NumPy array. In short, almost all operations are done in C++, which gives us almost the same speed as C++ itself.
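To make the round trip concrete, here is a minimal sketch (the gradient image is made up for illustration; ``cv2.equalizeHist`` takes a single 8-bit grayscale array and returns a new one)::

    import numpy as np
    import cv2

    # a synthetic 8-bit grayscale gradient image
    img = np.tile(np.arange(256, dtype=np.uint8), (256, 1))
    res = cv2.equalizeHist(img)    # array -> cv::Mat -> C++ -> cv::Mat -> array
    print(type(res), res.shape)    # numpy.ndarray, (256, 256)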
So this is the basic version of how OpenCV-Python bindings are generated.
How to extend new modules to Python?
=====================================
The header parser parses the header files based on wrapper macros added to the function declarations. Enumeration constants don't need any wrapper macros; they are wrapped automatically. But the remaining functions, classes etc. need wrapper macros.
Functions are extended using ``CV_EXPORTS_W`` macro. An example is shown below.
.. code-block:: cpp

    CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst );
The header parser can understand the input and output arguments from keywords like ``InputArray``, ``OutputArray`` etc. But sometimes we may need to mark inputs and outputs explicitly. For that, macros like ``CV_OUT``, ``CV_IN_OUT`` etc. are used.
.. code-block:: cpp

    CV_EXPORTS_W void minEnclosingCircle( InputArray points,
                                          CV_OUT Point2f& center, CV_OUT float& radius );
``CV_EXPORTS_W`` is also used for large classes. To extend class methods, ``CV_WRAP`` is used. Similarly, ``CV_PROP`` is used for class fields.
.. code-block:: cpp

    class CV_EXPORTS_W CLAHE : public Algorithm
    {
    public:
        CV_WRAP virtual void apply(InputArray src, OutputArray dst) = 0;

        CV_WRAP virtual void setClipLimit(double clipLimit) = 0;
        CV_WRAP virtual double getClipLimit() const = 0;
    };
Overloaded functions can be extended using ``CV_EXPORTS_AS``, but we need to pass a new name so that each overload is called by that name in Python. Take the case of the ``integral`` function below: three variants are available, so each one is given a suffix for Python. Similarly, ``CV_WRAP_AS`` can be used to wrap overloaded methods.
.. code-block:: cpp

    //! computes the integral image
    CV_EXPORTS_W void integral( InputArray src, OutputArray sum, int sdepth = -1 );

    //! computes the integral image and integral for the squared image
    CV_EXPORTS_AS(integral2) void integral( InputArray src, OutputArray sum,
                                            OutputArray sqsum, int sdepth = -1, int sqdepth = -1 );

    //! computes the integral image, integral for the squared image and the tilted integral image
    CV_EXPORTS_AS(integral3) void integral( InputArray src, OutputArray sum,
                                            OutputArray sqsum, OutputArray tilted,
                                            int sdepth = -1, int sqdepth = -1 );
Small classes and structs are extended using ``CV_EXPORTS_W_SIMPLE``. These structs are passed by value to C++ functions. Examples are ``KeyPoint``, ``DMatch`` etc. Their methods are extended by ``CV_WRAP`` and their fields by ``CV_PROP_RW``.
.. code-block:: cpp

    class CV_EXPORTS_W_SIMPLE DMatch
    {
    public:
        CV_WRAP DMatch();
        CV_WRAP DMatch(int _queryIdx, int _trainIdx, float _distance);
        CV_WRAP DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance);

        CV_PROP_RW int queryIdx; // query descriptor index
        CV_PROP_RW int trainIdx; // train descriptor index
        CV_PROP_RW int imgIdx;   // train image index

        CV_PROP_RW float distance;
    };
Some other small classes and structs can be exported using ``CV_EXPORTS_W_MAP``, which maps them to a native Python dictionary. ``Moments()`` is an example.
.. code-block:: cpp

    class CV_EXPORTS_W_MAP Moments
    {
    public:
        //! spatial moments
        CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
        //! central moments
        CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
        //! central normalized moments
        CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
    };
So these are the major extension macros available in OpenCV. Typically, a developer has to put the proper macros in the appropriate positions; the rest is done by the generator scripts. Sometimes there may be exceptional cases where the generator scripts cannot create the wrappers; such functions need to be handled manually. But most of the time, code written according to the OpenCV coding guidelines is wrapped automatically by the generator scripts.
.. _PY_Table-Of-Content-Bindings:
OpenCV-Python Bindings
--------------------------------
Here, you will learn how OpenCV-Python bindings are generated.
* :ref:`Bindings_Basics`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|bind1|     Learn how OpenCV-Python bindings are generated.
=========== ======================================================
.. |bind1| image:: images/nlm_icon.jpg
    :height: 90pt
    :width: 90pt
.. raw:: latex

    \pagebreak
.. We use a custom table of contents format; since the table of contents only informs Sphinx about the hierarchy of the files, there is no need to show it.
.. toctree::
    :hidden:

    ../py_bindings_basics/py_bindings_basics
.. _calibration:
Camera Calibration
********************
Goal
=======
In this section,

* We will learn about the distortions in cameras, and about the intrinsic and extrinsic parameters of a camera.
* We will learn how to find these parameters, undistort images etc.
Basics
========
Today's cheap pinhole cameras introduce a lot of distortion into images. The two major distortions are radial distortion and tangential distortion.
Due to radial distortion, straight lines appear curved. The effect grows as we move away from the center of the image. For example, one image is shown below in which two edges of a chess board are marked with red lines. You can see that the border is not a straight line and doesn't match the red line; all the expected straight lines are bulged out. Visit `Distortion (optics) <http://en.wikipedia.org/wiki/Distortion_%28optics%29>`_ for more details.
.. image:: images/calib_radial.jpg
    :alt: Radial Distortion
    :align: center
This distortion is corrected as follows:
.. math::

    x_{corrected} = x( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6) \\
    y_{corrected} = y( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6)
Similarly, the other distortion is tangential distortion, which occurs because the image-taking lens is not aligned perfectly parallel to the imaging plane, so some areas in the image may look nearer than expected. It is corrected as below:
.. math::

    x_{corrected} = x + [ 2p_1 xy + p_2(r^2+2x^2)] \\
    y_{corrected} = y + [ p_1(r^2+ 2y^2)+ 2p_2 xy]
In short, we need to find five parameters, known as the distortion coefficients, given by:
.. math::

    Distortion \; coefficients=(k_1 \hspace{10pt} k_2 \hspace{10pt} p_1 \hspace{10pt} p_2 \hspace{10pt} k_3)
In addition to this, we need some more information, namely the intrinsic and extrinsic parameters of the camera. Intrinsic parameters are specific to a camera. They include information like the focal length (:math:`f_x, f_y`) and the optical centers (:math:`c_x, c_y`), and together they form what is also called the camera matrix. It depends only on the camera, so once calculated it can be stored for future use. It is expressed as a 3x3 matrix:
.. math::

    camera \; matrix = \left [ \begin{matrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{matrix} \right ]
Extrinsic parameters correspond to the rotation and translation vectors which map the coordinates of a 3D point into the camera's coordinate system.
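To make the model concrete, the sketch below evaluates the right-hand sides of the equations above for one normalized point; all coefficient values are made-up illustration values, not calibration output::

    # made-up illustration values
    k1, k2, k3 = -0.28, 0.07, 0.0                 # radial coefficients
    p1, p2 = 0.001, -0.0005                       # tangential coefficients
    fx, fy, cx, cy = 530.0, 530.0, 320.0, 240.0   # camera matrix entries

    x, y = 0.3, -0.2                              # normalized coordinates (X/Z, Y/Z)
    r2 = x*x + y*y                                # r^2 in the formulas above
    radial = 1 + k1*r2 + k2*r2**2 + k3*r2**3
    x_d = x*radial + 2*p1*x*y + p2*(r2 + 2*x*x)
    y_d = y*radial + p1*(r2 + 2*y*y) + 2*p2*x*y

    # map through the camera matrix to pixel coordinates
    u = fx*x_d + cx
    v = fy*y_d + cy
    print(u, v)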
For stereo applications, these distortions need to be corrected first. To find all these parameters, we have to provide some sample images of a well-defined pattern (e.g. a chess board). We find some specific points in it (the square corners of the chess board). We know their coordinates in real-world space and we know their coordinates in the image. With these data, a mathematical problem is solved in the background to obtain the distortion coefficients. That is the summary of the whole story. For better results, we need at least 10 test patterns.
Code
========
As mentioned above, we need at least 10 test patterns for camera calibration. OpenCV comes with some images of a chess board (see ``samples/cpp/left01.jpg -- left14.jpg``), so we will utilize them. For the sake of understanding, consider just one image of a chess board. The important input data needed for camera calibration are a set of 3D real-world points and the corresponding 2D image points. The 2D image points are easy, since we can find them directly in the image. (These image points are the locations where two black squares touch each other on the chess board.)
What about the 3D points from real-world space? The images are taken with a static camera while chess boards are placed at different locations and orientations, so in principle we need to know the :math:`(X,Y,Z)` values. But for simplicity, we can say the chess board was kept stationary on the XY plane (so Z=0 always) and the camera was moved accordingly; this consideration leaves only the X,Y values to find. Now for the X,Y values, we can simply pass the points as (0,0), (1,0), (2,0), ..., which denotes the location of the points. In this case, the results will be in the scale of the chess-board square size. But if we know the square size (say 30 mm), we can pass the values as (0,0), (30,0), (60,0), ..., and we get the results in mm. (In this case, we don't know the square size since we didn't take those images ourselves, so we pass values in units of the square size; a small sketch of the scaled variant follows below.)
3D points are called **object points** and 2D image points are called **image points.**
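As a sketch of the scaled variant mentioned above (``square_size`` is a name introduced here purely for illustration)::

    import numpy as np

    square_size = 30.0  # mm, assumed known
    objp = np.zeros((6*7, 3), np.float32)
    objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2) * square_size
    # first rows: (0,0,0), (30,0,0), (60,0,0), ... -- results now come out in mm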
Setup
---------
To find the pattern in a chess board, we use the function **cv2.findChessboardCorners()**. We also need to pass what kind of pattern we are looking for, like an 8x8 grid, a 5x5 grid etc. In this example, we use a 7x6 grid. (Normally a chess board has 8x8 squares and 7x7 internal corners.) It returns the corner points and ``retval``, which is True if the pattern is found. The corners are placed in order (from left to right, top to bottom).
.. seealso:: This function may not be able to find the required pattern in all the images, so one good option is to write the code such that it starts the camera and checks each frame for the required pattern. Once the pattern is found, find the corners and store them in a list. Also provide some interval before reading the next frame, so that we can adjust the chess board in a different direction. Continue this process until the required number of good patterns is obtained. Even in the example provided here, we are not sure how many of the 14 given images are good, so we read all of them and keep the good ones.
.. seealso:: Instead of a chess board, we can use a circular grid; in that case we use the function **cv2.findCirclesGrid()** to find the pattern. It is said that fewer images suffice when using a circular grid. A minimal sketch follows.
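A sketch of that variant, assuming an asymmetric 4x11 circle grid and a hypothetical input image ``pattern.jpg``::

    import cv2

    img = cv2.imread('pattern.jpg')               # hypothetical file name
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, centers = cv2.findCirclesGrid(gray, (4, 11),
                                       flags=cv2.CALIB_CB_ASYMMETRIC_GRID)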
Once we find the corners, we can increase their accuracy using **cv2.cornerSubPix()**. We can also draw the pattern using **cv2.drawChessboardCorners()**. All these steps are included in the code below:
::
import numpy as np
import cv2
import glob
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (7,6), None)

    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)

        corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners2)

        # Draw and display the corners
        cv2.drawChessboardCorners(img, (7,6), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(500)

cv2.destroyAllWindows()
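If the physical square size of your board is known, the object points can be scaled so that later results (e.g., translation vectors) come out in metric units. A minimal sketch, assuming each square is 30 mm wide (an example value only; measure your own board). Do this right after creating ``objp``, before the loop above:

::

# Example value: each chess board square is assumed to be 30 mm wide.
square_size = 30.0

# Scaling the unit grid makes calibration results come out in millimetres.
objp = objp * square_size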
One image with pattern drawn on it is shown below:
.. image:: images/calib_pattern.jpg
:alt: Calibration Pattern
:align: center
Calibration
------------
So now we have our object points and image points, and we are ready to go for calibration. For that we use the function **cv2.calibrateCamera()**. It returns the camera matrix, distortion coefficients, rotation and translation vectors etc.
::
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
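As a quick sanity check (a sketch only; the exact numbers depend on your images), you can inspect what came back. ``ret`` is the RMS re-projection error, and the camera matrix holds the focal lengths and the optical center:

::

# Camera matrix layout:
#   [[fx,  0, cx],
#    [ 0, fy, cy],
#    [ 0,  0,  1]]
print "RMS re-projection error:", ret
print "camera matrix:\n", mtx
print "distortion coefficients:", dist.ravel()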
Undistortion
---------------
We have got what we wanted. Now we can take an image and undistort it. OpenCV offers two methods; we will see both. But before that, we can refine the camera matrix based on a free scaling parameter using **cv2.getOptimalNewCameraMatrix()**. If the scaling parameter ``alpha=0``, it returns an undistorted image with minimum unwanted pixels, so it may even remove some pixels at the image corners. If ``alpha=1``, all pixels are retained, along with some extra black regions. The function also returns an image ROI which can be used to crop the result.
So we take a new image (``left12.jpg`` in this case; that is the first image in this chapter).
::
img = cv2.imread('left12.jpg')
h, w = img.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
1. Using **cv2.undistort()**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This is the shortest path. Just call the function and use the ROI obtained above to crop the result.
::
# undistort
dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
# crop the image
x,y,w,h = roi
dst = dst[y:y+h, x:x+w]
cv2.imwrite('calibresult.png',dst)
2. Using **remapping**
^^^^^^^^^^^^^^^^^^^^^^^^^^^
This is a more roundabout path. First find a mapping function from the distorted image to the undistorted image, then use the remap function.
::
# undistort
mapx,mapy = cv2.initUndistortRectifyMap(mtx,dist,None,newcameramtx,(w,h),5)
dst = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR)
# crop the image
x,y,w,h = roi
dst = dst[y:y+h, x:x+w]
cv2.imwrite('calibresult.png',dst)
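A practical note on the remapping approach: the maps depend only on the calibration, so **cv2.initUndistortRectifyMap()** needs to run only once, and each new frame then costs just a cheap **cv2.remap()** call. A minimal sketch for a video stream (assuming a camera at index 0 delivering frames of the calibrated size):

::

# Reuse the maps (mapx, mapy) computed above for every frame of a live stream.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    undistorted = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)
    cv2.imshow('undistorted', undistorted)
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()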
Both the methods give the same result. See the result below:
.. image:: images/calib_result.jpg
:alt: Calibration Result
:align: center
You can see in the result that all the edges are straight.
Now you can store the camera matrix and distortion coefficients using write functions in Numpy (np.savez, np.savetxt etc.) for future use.
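For example, everything can be kept in a single ``.npz`` archive and reloaded later (the file name ``B.npz`` is just a choice; the pose estimation section below loads the same file):

::

# Save all calibration results in one compressed archive ...
np.savez('B.npz', mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs)

# ... and load them back when needed.
with np.load('B.npz') as X:
    mtx, dist = X['mtx'], X['dist']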
Re-projection Error
=======================
Re-projection error gives a good estimate of just how exact the found parameters are. It should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices, we first transform the object points to image points using **cv2.projectPoints()**. Then we calculate the absolute norm between what we got with our transformation and the corner finding algorithm. To find the average error, we calculate the arithmetical mean of the errors calculated for all the calibration images.
::
mean_error = 0
for i in xrange(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
    mean_error += error

print "total error: ", mean_error/len(objpoints)
Additional Resources
======================
Exercises
============
#. Try camera calibration with a circular grid.
.. _py_depthmap:
Depth Map from Stereo Images
******************************
Goal
=======
In this session,
* We will learn to create depth map from stereo images.
Basics
===========
In the last session, we saw basic concepts like epipolar constraints and other related terms. We also saw that if we have two images of the same scene, we can get depth information from them in an intuitive way. Below is an image and some simple mathematical formulas which prove that intuition.
.. image:: images/stereo_depth.jpg
:alt: Calculating depth
:align: center
The above diagram contains similar triangles. Writing the corresponding equations yields the following result:
.. math::
disparity = x - x' = \frac{Bf}{Z}
:math:`x` and :math:`x'` are the distances between the points in the image plane corresponding to the 3D scene point and their camera centers. :math:`B` is the distance between the two cameras (which we know) and :math:`f` is the focal length of the camera (already known). So in short, the above equation says that the depth of a point in a scene is inversely proportional to the difference in distance of the corresponding image points and their camera centers. With this information, we can derive the depth of all pixels in an image.
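As a quick worked example with made-up numbers (real values depend on your rig): suppose the baseline is 9 cm, the focal length is 700 pixels and a point is observed with a disparity of 30 pixels.

::

# Hypothetical stereo rig; all numbers below are example values.
B = 9.0            # baseline between the cameras, in cm
f = 700.0          # focal length, in pixels
disparity = 30.0   # measured disparity for one point, in pixels

Z = B * f / disparity   # depth = 210 cm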
So the stereo matching procedure finds corresponding matches between the two images. We have already seen how the epipolar constraint makes this operation faster and more accurate. Once it finds the matches, it computes the disparity. Let's see how we can do it with OpenCV.
Code
========
The code snippet below shows a simple procedure to create a disparity map.
::
import numpy as np
import cv2
from matplotlib import pyplot as plt
imgL = cv2.imread('tsukuba_l.png',0)
imgR = cv2.imread('tsukuba_r.png',0)
stereo = cv2.createStereoBM(numDisparities=16, blockSize=15)
disparity = stereo.compute(imgL,imgR)
plt.imshow(disparity,'gray')
plt.show()
The image below contains the original image (left) and its disparity map (right). As you can see, the result is contaminated with a high degree of noise. By adjusting the values of ``numDisparities`` and ``blockSize``, you can get a better result.
.. image:: images/disparity_map.jpg
:alt: Disparity Map
:align: center
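As a small post-processing sketch (not part of the original sample): StereoBM returns the disparity as 16-bit fixed-point values with 4 fractional bits, i.e., scaled by 16, so dividing by 16 gives disparities in pixels, which is also friendlier for display:

::

# StereoBM produces fixed-point disparities scaled by 16.
disp = disparity.astype(np.float32) / 16.0
plt.imshow(disp, 'gray')
plt.show()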
.. note:: More details to be added
Additional Resources
=============================
Exercises
============
1. OpenCV samples contain an example of generating a disparity map and its 3D reconstruction. Check ``stereo_match.py`` in OpenCV-Python samples.
.. _epipolar_geometry:
Epipolar Geometry
*********************
Goal
========
In this section,
* We will learn about the basics of multiview geometry
* We will see what epipoles, epipolar lines and the epipolar constraint are.
Basic Concepts
=================
When we take an image using a pin-hole camera, we lose an important piece of information: the depth of the image, i.e., how far each point in the image is from the camera, because it is a 3D-to-2D conversion. So it is an important question whether we can recover the depth information using these cameras. And the answer is to use more than one camera. Our eyes work in a similar way: we use two cameras (two eyes), which is called stereo vision. So let's see what OpenCV provides in this field.
(*Learning OpenCV* by Gary Bradsky has a lot of information in this field.)
Before going to depth images, let's first understand some basic concepts in multiview geometry. In this section we will deal with epipolar geometry. See the image below, which shows a basic setup with two cameras taking images of the same scene.
.. image:: images/epipolar.jpg
:alt: Epipolar geometry
:align: center
If we are using only the left camera, we can't find the 3D point corresponding to the point :math:`x` in the image, because every point on the line :math:`OX` projects to the same point on the image plane. But consider the right image also. Now different points on the line :math:`OX` project to different points (:math:`x'`) in the right plane. So with these two images, we can triangulate the correct 3D point. This is the whole idea.
The projections of the different points on :math:`OX` form a line on the right plane (line :math:`l'`). We call it the **epiline** corresponding to the point :math:`x`. It means that to find the point :math:`x` on the right image, we should search along this epiline: it should be somewhere on this line. (Think of it this way: to find the matching point in the other image, you need not search the whole image, just search along the epiline. This provides better performance and accuracy.) This is called the **Epipolar Constraint**. Similarly, all points have their corresponding epilines in the other image. The plane :math:`XOO'` is called the **Epipolar Plane**.
:math:`O` and :math:`O'` are the camera centers. From the setup given above, you can see that the projection of the right camera center :math:`O'` is seen on the left image at the point :math:`e`. It is called the **epipole**. The epipole is the point of intersection of the line through the camera centers with the image plane. Similarly, :math:`e'` is the epipole of the left camera. In some cases, you won't be able to locate the epipole in the image; it may be outside the image (which means one camera doesn't see the other).
All the epilines pass through the corresponding epipole. So to find the location of the epipole, we can find many epilines and compute their intersection point.
So in this session, we focus on finding epipolar lines and epipoles. But to find them, we need two more ingredients: the **Fundamental Matrix (F)** and the **Essential Matrix (E)**. The Essential Matrix contains information about translation and rotation, which describe the location of the second camera relative to the first in global coordinates. See the image below (image courtesy: *Learning OpenCV* by Gary Bradsky):
.. image:: images/essential_matrix.jpg
:alt: Essential Matrix
:align: center
But we prefer measurements to be done in pixel coordinates, right? The Fundamental Matrix contains the same information as the Essential Matrix, plus information about the intrinsics of both cameras, so that we can relate the two cameras in pixel coordinates. (If we are using rectified images and normalize the points by dividing by the focal lengths, :math:`F = E`.) In simple words, the Fundamental Matrix F maps a point in one image to a line (epiline) in the other image. It is calculated from matching points between the two images. A minimum of 8 such points is required to find the fundamental matrix (when using the 8-point algorithm). More points are preferred, with RANSAC used to get a more robust result.
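For reference, the epipolar constraint can be written compactly. If :math:`x` and :math:`x'` are corresponding points in homogeneous pixel coordinates, then

.. math::

    x'^T F x = 0

and the epiline in the second image corresponding to :math:`x` is :math:`l' = Fx`, which is exactly what **cv2.computeCorrespondEpilines()** computes below.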
Code
=========
So first we need to find as many matches as possible between the two images in order to estimate the fundamental matrix. For this, we use SIFT descriptors with a FLANN based matcher and ratio test.
::
import cv2
import numpy as np
from matplotlib import pyplot as plt
img1 = cv2.imread('myleft.jpg',0) #queryimage # left image
img2 = cv2.imread('myright.jpg',0) #trainimage # right image
sift = cv2.SIFT()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)
good = []
pts1 = []
pts2 = []
# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
    if m.distance < 0.8*n.distance:
        good.append(m)
        pts2.append(kp2[m.trainIdx].pt)
        pts1.append(kp1[m.queryIdx].pt)
Now we have the list of best matches from both the images. Let's find the Fundamental Matrix.
::
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS)
# We select only inlier points
pts1 = pts1[mask.ravel()==1]
pts2 = pts2[mask.ravel()==1]
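As a side note, the epipoles mentioned earlier can be recovered directly from ``F`` instead of intersecting many epilines: the epipole in the first image is the null vector of :math:`F` (:math:`Fe = 0`), and the one in the second image is the null vector of :math:`F^T`. A minimal sketch using SVD:

::

# The right singular vector for the smallest singular value spans
# the null space of F.
U, S, Vt = np.linalg.svd(F)
e1 = Vt[-1]
e1 = e1 / e1[2]    # epipole in the first (left) image, homogeneous

U, S, Vt = np.linalg.svd(F.T)
e2 = Vt[-1]
e2 = e2 / e2[2]    # epipole in the second (right) image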
Next we find the epilines. Epilines corresponding to the points in the first image are drawn on the second image, so passing the correct image index matters here. We get an array of lines, so we define a new function to draw these lines on the images.
::
def drawlines(img1,img2,lines,pts1,pts2):
    ''' img1 - image on which we draw the epilines for the points in img2
        lines - corresponding epilines '''
    r,c = img1.shape
    img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)
    img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)
    for r,pt1,pt2 in zip(lines,pts1,pts2):
        color = tuple(np.random.randint(0,255,3).tolist())
        x0,y0 = map(int, [0, -r[2]/r[1] ])
        x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
        img1 = cv2.line(img1, (x0,y0), (x1,y1), color,1)
        img1 = cv2.circle(img1,tuple(pt1),5,color,-1)
        img2 = cv2.circle(img2,tuple(pt2),5,color,-1)
    return img1,img2
Now we find the epilines in both the images and draw them.
::
# Find epilines corresponding to points in right image (second image) and
# drawing its lines on left image
lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
lines1 = lines1.reshape(-1,3)
img5,img6 = drawlines(img1,img2,lines1,pts1,pts2)
# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
lines2 = lines2.reshape(-1,3)
img3,img4 = drawlines(img2,img1,lines2,pts2,pts1)
plt.subplot(121),plt.imshow(img5)
plt.subplot(122),plt.imshow(img3)
plt.show()
Below is the result we get:
.. image:: images/epiresult.jpg
:alt: Epilines
:align: center
You can see in the left image that all the epilines converge at a point outside the image on the right side. That meeting point is the epipole.
For better results, images with good resolution and many non-planar points should be used.
Additional Resources
==========================
Exercises
=============
#. One important topic is forward movement of the camera. Then the epipoles will be seen at the same locations in both images, with the epilines emerging from a fixed point. `See this discussion <http://answers.opencv.org/question/17912/location-of-epipole/>`_.
#. Fundamental Matrix estimation is sensitive to the quality of matches, outliers etc. It becomes worse when all the selected matches lie on the same plane. `Check this discussion <http://answers.opencv.org/question/18125/epilines-not-correct/>`_.
.. _pose_estimation:
Pose Estimation
*********************
Goal
==========
In this section,
* We will learn to exploit the calib3d module to create some 3D effects in images.
Basics
========
This is going to be a small section. During the last session on camera calibration, you found the camera matrix, distortion coefficients etc. Given a pattern image, we can utilize that information to calculate its pose: how the object is situated in space, how it is rotated, how it is displaced and so on. For a planar object, we can assume Z=0, so the problem becomes how the camera is placed in space to see our pattern image. So, if we know how the object lies in space, we can draw some 2D diagrams on it to simulate the 3D effect. Let's see how to do it.
Our problem is this: we want to draw our 3D coordinate axes (X, Y, Z) on our chessboard's first corner, with the X axis in blue, the Y axis in green and the Z axis in red. So, in effect, the Z axis should feel like it is perpendicular to our chessboard plane.
First, let's load the camera matrix and distortion coefficients from the previous calibration result.
::
import cv2
import numpy as np
import glob
# Load previously saved data
with np.load('B.npz') as X:
    mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')]
Now let's create a function ``draw()`` which takes the corners on the chessboard (obtained using **cv2.findChessboardCorners()**) and **axis points**, and draws a 3D axis.
::
def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel())
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
    return img
Then, as in the previous case, we create the termination criteria, object points (3D points of corners on the chessboard) and axis points. Axis points are points in 3D space used for drawing the axes. We draw axes of length 3 (the units will be in terms of chess square size, since we calibrated based on that size). So our X axis is drawn from (0,0,0) to (3,0,0), and likewise the Y axis. The Z axis is drawn from (0,0,0) to (0,0,-3); the negative sign denotes that it is drawn towards the camera.
::
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
Now, as usual, we load each image and search for the 7x6 grid. If found, we refine the corners with sub-pixel accuracy. Then to calculate the rotation and translation, we use the function **cv2.solvePnPRansac()**. Once we have those transformation matrices, we use them to project our **axis points** onto the image plane. In simple words, we find the points on the image plane corresponding to each of (3,0,0), (0,3,0), (0,0,-3) in 3D space. Once we get them, we draw lines from the first corner to each of these points using our ``draw()`` function. Done!
::
for fname in glob.glob('left*.jpg'):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (7,6),None)

    if ret == True:
        corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)

        # Find the rotation and translation vectors.
        rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtx, dist)

        # project 3D points to image plane
        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)

        img = draw(img,corners2,imgpts)
        cv2.imshow('img',img)
        k = cv2.waitKey(0) & 0xff
        if k == ord('s'):
            cv2.imwrite(fname[:6]+'.png', img)

cv2.destroyAllWindows()
See some results below. Notice that each axis is 3 squares long:
.. image:: images/pose_1.jpg
:alt: Pose Estimation
:align: center
Render a Cube
---------------
If you want to draw a cube, modify the ``draw()`` function and the axis points as follows.
Modified draw() function:
::
def draw(img, corners, imgpts):
    imgpts = np.int32(imgpts).reshape(-1,2)

    # draw ground floor in green
    img = cv2.drawContours(img, [imgpts[:4]],-1,(0,255,0),-3)

    # draw pillars in blue color
    for i,j in zip(range(4),range(4,8)):
        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255),3)

    # draw top layer in red color
    img = cv2.drawContours(img, [imgpts[4:]],-1,(0,0,255),3)

    return img
Modified axis points. They are the 8 corners of a cube in 3D space:
::
axis = np.float32([[0,0,0], [0,3,0], [3,3,0], [3,0,0],
                   [0,0,-3],[0,3,-3],[3,3,-3],[3,0,-3] ])
And look at the result below:
.. image:: images/pose_2.jpg
:alt: Pose Estimation
:align: center
If you are interested in graphics, augmented reality etc., you can use OpenGL to render more complicated figures.
Additional Resources
===========================
Exercises
===========
.. _PY_Table-Of-Content-Calib:
Camera Calibration and 3D Reconstruction
----------------------------------------------
* :ref:`calibration`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|calib_1| Let's find out how good our camera is. Is there any distortion in images taken with it? If so, how do we correct it?
=========== ======================================================
.. |calib_1| image:: images/calibration_icon.jpg
:height: 90pt
:width: 90pt
* :ref:`pose_estimation`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|calib_2| This is a small section which will help you create some cool 3D effects with the calib module.
=========== ======================================================
.. |calib_2| image:: images/pose_icon.jpg
:height: 90pt
:width: 90pt
* :ref:`epipolar_geometry`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|calib_3| Let's understand epipolar geometry and epipolar constraint.
=========== ======================================================
.. |calib_3| image:: images/epipolar_icon.jpg
:height: 90pt
:width: 90pt
* :ref:`py_depthmap`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|calib_4| Extract depth information from 2D images.
=========== ======================================================
.. |calib_4| image:: images/depthmap_icon.jpg
:height: 90pt
:width: 90pt
.. raw:: latex
\pagebreak
.. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it.
.. toctree::
:hidden:
../py_calibration/py_calibration
../py_pose/py_pose
../py_epipolar_geometry/py_epipolar_geometry
../py_depthmap/py_depthmap
.. _Mathematical_Tools:
Mathematical Tools in OpenCV
********************************
.. _PY_Table-Of-Content-Core:
Core Operations
-----------------------------------------------------------
* :ref:`Basic_Ops`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|core_1| Learn to read and edit pixel values, working with image ROI and other basic operations.
=========== ======================================================
.. |core_1| image:: images/pixel_ops.jpg
:height: 90pt
:width: 90pt
* :ref:`Image_Arithmetics`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|core_2| Perform arithmetic operations on images
=========== ======================================================
.. |core_2| image:: images/image_arithmetic.jpg
:height: 90pt
:width: 90pt
* :ref:`Optimization_Techniques`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|core_4| Getting a solution is important. But getting it in the fastest way is more important. Learn to check the speed of your code, optimize the code etc.
=========== ======================================================
.. |core_4| image:: images/speed.jpg
:height: 90pt
:width: 90pt
* :ref:`Mathematical_Tools`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
=========== ======================================================
|core_5| Learn some of the mathematical tools provided by OpenCV like PCA, SVD etc.
=========== ======================================================
.. |core_5| image:: images/maths_tools.jpg
:height: 90pt
:width: 90pt
.. raw:: latex
\pagebreak
.. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it.
.. toctree::
:hidden:
../py_basic_ops/py_basic_ops
../py_image_arithmetics/py_image_arithmetics
../py_optimization/py_optimization
../py_maths_tools/py_maths_tools
.. _Table-Of-Content-Transforms:
Image Transforms in OpenCV
-----------------------------------------------------------
* :ref:`Fourier_Transform`
.. tabularcolumns:: m{100pt} m{300pt}
.. cssclass:: toctableopencv
============= ===================================================================
|transform_1| Learn to find the Fourier Transform of images
============= ===================================================================
.. |transform_1| image:: images/transform_fourier.jpg
:height: 90pt
:width: 90pt
.. raw:: latex
\pagebreak
.. We use a custom table of content format and as the table of content only informs Sphinx about the hierarchy of the files, no need to show it.
.. toctree::
:hidden:
../py_fourier_transform/py_fourier_transform