Commit 5f578425 authored by Alexander Alekhin

Merge pull request #13794 from alalek:backport_13786

......@@ -88,7 +88,7 @@ struct CvVectors
#if 0
/* A structure, representing the lattice range of statmodel parameters.
It is used for optimizing statmodel parameters by cross-validation method.
-The lattice is logarithmic, so <step> must be greater then 1. */
+The lattice is logarithmic, so <step> must be greater than 1. */
typedef struct CvParamLattice
{
double min_val;
......@@ -158,7 +158,7 @@ protected:
/* The structure, representing the grid range of statmodel parameters.
It is used for optimizing statmodel accuracy by varying model parameters,
the accuracy estimate being computed by cross-validation.
-The grid is logarithmic, so <step> must be greater then 1. */
+The grid is logarithmic, so <step> must be greater than 1. */
class CvMLData;
......
......@@ -12,7 +12,7 @@ python gen_pattern.py -o out.svg -r 11 -c 8 -T circles -s 20.0 -R 5.0 -u mm -w 2
-u, --units - mm, inches, px, m (default mm)
-w, --page_width - page width in units (default 216)
-h, --page_height - page height in units (default 279)
--a, --page_size - page size (default A4), supercedes -h -w arguments
+-a, --page_size - page size (default A4), supersedes -h -w arguments
-H, --help - show help
"""
......
......@@ -12,7 +12,7 @@ Theory
We know SIFT uses 128-dim vector for descriptors. Since it is using floating point numbers, it takes
basically 512 bytes. Similarly SURF also takes minimum of 256 bytes (for 64-dim). Creating such a
-vector for thousands of features takes a lot of memory which are not feasible for resouce-constraint
+vector for thousands of features takes a lot of memory which are not feasible for resource-constraint
applications especially for embedded systems. Larger the memory, longer the time it takes for
matching.
......
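For readers of this tutorial hunk, a minimal sketch of the arithmetic behind those figures; the 10000-feature count is a hypothetical value chosen only for illustration:

```cpp
#include <cstddef>
#include <cstdio>

int main()
{
    const std::size_t siftBytes = 128 * sizeof(float); // 128-dim float descriptor -> 512 bytes
    const std::size_t surfBytes =  64 * sizeof(float); //  64-dim float descriptor -> 256 bytes
    const std::size_t nFeatures = 10000;               // hypothetical feature count per image

    std::printf("SIFT: %zu bytes/descriptor, %.2f MiB total\n",
                siftBytes, siftBytes * nFeatures / (1024.0 * 1024.0));
    std::printf("SURF: %zu bytes/descriptor, %.2f MiB total\n",
                surfBytes, surfBytes * nFeatures / (1024.0 * 1024.0));
    return 0;
}
```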
......@@ -2164,7 +2164,7 @@ inline void RHO_HEST_REFC::refine(void){
* order to compute a candidate homography (newH).
*
* The system above is solved by Cholesky decomposition of a
-* sufficently-damped JtJ into a lower-triangular matrix (and its
+* sufficiently-damped JtJ into a lower-triangular matrix (and its
* transpose), whose inverse is then computed. This inverse (and its
* transpose) then multiply Jte in order to find dH.
*/
......
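As background for this hunk's comment, the step it describes is a standard damped normal-equation solve. Below is a hedged sketch of that idea using the public cv::solve API, not the hand-rolled Cholesky inside rho.cpp; the function name and the 8-parameter sizing are assumptions for illustration:

```cpp
#include <opencv2/core.hpp>

// Illustrative only: one damped Gauss-Newton/LM-style step in the spirit of the
// comment above. JtJ is the 8x8 normal matrix, Jte the 8x1 right-hand side,
// lambda the damping factor, dH the resulting homography update.
static bool dampedStep(const cv::Mat& JtJ, const cv::Mat& Jte,
                       double lambda, cv::Mat& dH)
{
    cv::Mat damped = JtJ + lambda * cv::Mat::eye(JtJ.rows, JtJ.cols, JtJ.type());
    // DECOMP_CHOLESKY factors the symmetric positive-definite damped matrix and
    // back-substitutes; it returns false if the factorization fails (i.e. the
    // matrix was not damped enough to become positive definite).
    return cv::solve(damped, Jte, dH, cv::DECOMP_CHOLESKY);
}
```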
......@@ -103,7 +103,7 @@ double memory deallocation.
CV_EXPORTS void fastFree(void* ptr);
/*!
-The STL-compilant memory Allocator based on cv::fastMalloc() and cv::fastFree()
+The STL-compliant memory Allocator based on cv::fastMalloc() and cv::fastFree()
*/
template<typename _Tp> class Allocator
{
......
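For reference, a minimal usage sketch of the allocator this comment documents, assuming it is consumed through a standard container:

```cpp
#include <opencv2/core.hpp>
#include <vector>

int main()
{
    // A std::vector whose storage is obtained via cv::fastMalloc()/cv::fastFree(),
    // i.e. with the same alignment guarantees OpenCV uses for its own buffers.
    std::vector<float, cv::Allocator<float> > buf(1024, 0.f);
    buf[42] = 3.14f;
    return 0;
}
```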
......@@ -2266,7 +2266,7 @@ inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
v.s[0]*m0.s[3] + v.s[1]*m1.s[3] + v.s[2]*m2.s[3] + m3.s[3]);
}
-////// FP16 suport ///////
+////// FP16 support ///////
inline v_reg<float, V_TypeTraits<float>::nlanes128>
v_load_expand(const float16_t* ptr)
......
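To make the lane arithmetic above concrete, a hedged usage sketch of v_matmuladd; the universal-intrinsics header is assumed, and the point and matrix columns are chosen purely for illustration:

```cpp
#include <opencv2/core/hal/intrin.hpp>
#include <cstdio>

int main()
{
    using namespace cv;
    // Columns of a 4x4 affine transform: identity rotation, translation (10, 20, 30).
    float c0[] = { 1.f,  0.f,  0.f, 0.f};
    float c1[] = { 0.f,  1.f,  0.f, 0.f};
    float c2[] = { 0.f,  0.f,  1.f, 0.f};
    float c3[] = {10.f, 20.f, 30.f, 1.f};
    float pt[] = { 1.f,  2.f,  3.f, 0.f}; // 4th lane of the point is not used

    // res[j] = pt[0]*c0[j] + pt[1]*c1[j] + pt[2]*c2[j] + c3[j]
    v_float32x4 res = v_matmuladd(v_load(pt),
                                  v_load(c0), v_load(c1), v_load(c2), v_load(c3));

    float out[4];
    v_store(out, res);
    std::printf("%.1f %.1f %.1f %.1f\n", out[0], out[1], out[2], out[3]); // 11 22 33 1
    return 0;
}
```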
......@@ -1635,7 +1635,7 @@ inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_flo
}
#endif
-////// FP16 suport ///////
+////// FP16 support ///////
#if CV_FP16
inline v_float32x4 v_load_expand(const float16_t* ptr)
{
......
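Similarly, a hedged sketch of the FP16 load-and-expand path this comment marks, assuming cv::float16_t and a build where the intrinsic is available (e.g. the C++ fallback implementation):

```cpp
#include <opencv2/core.hpp>
#include <opencv2/core/hal/intrin.hpp>

int main()
{
    using namespace cv;
    // Four half-precision values; float16_t is OpenCV's FP16 wrapper type.
    float16_t half[4] = { float16_t(0.5f), float16_t(1.f),
                          float16_t(2.f),  float16_t(-4.f) };

    // Widen 4 x FP16 to 4 x FP32 in one register load.
    v_float32x4 f = v_load_expand(half);

    float out[4];
    v_store(out, f); // out = {0.5, 1, 2, -4}
    return 0;
}
```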
......@@ -33,7 +33,7 @@ String dumpInputArray(InputArray argument)
}
catch (...)
{
-ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
+ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
......@@ -70,7 +70,7 @@ CV_EXPORTS_W String dumpInputArrayOfArrays(InputArrayOfArrays argument)
}
catch (...)
{
-ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
+ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
......@@ -100,7 +100,7 @@ CV_EXPORTS_W String dumpInputOutputArray(InputOutputArray argument)
}
catch (...)
{
-ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
+ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
......@@ -137,7 +137,7 @@ CV_EXPORTS_W String dumpInputOutputArrayOfArrays(InputOutputArrayOfArrays argume
}
catch (...)
{
-ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
+ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
......
......@@ -250,7 +250,7 @@ BinaryFunc getCopyMaskFunc(size_t esz);
// There is some mess in code with vectors representation.
// Both vector-column / vector-rows are used with dims=2 (as Mat2D always).
-// Reshape matrices if neccessary (in case of vectors) and returns size with scaled width.
+// Reshape matrices if necessary (in case of vectors) and returns size with scaled width.
Size getContinuousSize2D(Mat& m1, int widthScale=1);
Size getContinuousSize2D(Mat& m1, Mat& m2, int widthScale=1);
Size getContinuousSize2D(Mat& m1, Mat& m2, Mat& m3, int widthScale=1);
......
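For orientation only, a sketch of the idea behind this internal helper: when all operands are stored continuously, the 2D loop can be collapsed into one long row, with the width scaled (e.g. by the channel count) so the inner loop works on raw elements. This is illustrative, not the actual implementation in OpenCV's private headers:

```cpp
#include <opencv2/core.hpp>

// Illustrative stand-in for the internal helper described above.
static cv::Size continuousSize(const cv::Mat& m, int widthScale = 1)
{
    if (m.isContinuous())
        return cv::Size(m.cols * m.rows * widthScale, 1); // whole matrix as one row
    return cv::Size(m.cols * widthScale, m.rows);         // row-by-row processing
}
```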
......@@ -344,7 +344,7 @@ cv::String findDataFile(const cv::String& relative_path,
#if defined OPENCV_INSTALL_PREFIX && defined OPENCV_DATA_INSTALL_PATH
cv::String install_dir(OPENCV_INSTALL_PREFIX);
// use core/world module path and verify that library is running from installation directory
-// It is neccessary to avoid touching of unrelated common /usr/local path
+// It is necessary to avoid touching of unrelated common /usr/local path
if (module_path.empty()) // can't determine
module_path = install_dir;
if (isSubDirectory(install_dir, module_path) || isSubDirectory(utils::fs::canonical(install_dir), utils::fs::canonical(module_path)))
......
......@@ -119,7 +119,7 @@ message AttributeProto {
// implementations needed to use has_field hueristics to determine
// which value field was in use. For IR_VERSION 0.0.2 or later, this
// field MUST be set and match the f|i|s|t|... field in use. This
-// change was made to accomodate proto3 implementations.
+// change was made to accommodate proto3 implementations.
optional AttributeType type = 20; // discriminator that indicates which field below is in use
// Exactly ONE of the following fields must be present for this version of the IR
......
......@@ -120,7 +120,7 @@ public:
\f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f]
where \f$n\f$ is the maximal index satisfying
\f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f]
-The grid is logarithmic, so logStep must always be greater then 1. Default value is 1.
+The grid is logarithmic, so logStep must always be greater than 1. Default value is 1.
*/
CV_PROP_RW double logStep;
......
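A short sketch of how the documented formula expands; the bounds and step are hypothetical values chosen for illustration:

```cpp
#include <opencv2/ml.hpp>
#include <cstdio>

int main()
{
    // Hypothetical grid, e.g. over an SVM's C parameter: minVal, maxVal, logStep.
    cv::ml::ParamGrid grid(0.1, 1000., 10.);

    // Enumerate the values the documentation describes:
    // minVal, minVal*logStep, minVal*logStep^2, ... while strictly below maxVal.
    for (double v = grid.minVal; v < grid.maxVal; v *= grid.logStep)
        std::printf("%g ", v); // prints: 0.1 1 10 100
    std::printf("\n");
    return 0;
}
```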
......@@ -99,7 +99,7 @@ static void checkParamGrid(const ParamGrid& pg)
if( pg.minVal < DBL_EPSILON )
CV_Error( CV_StsBadArg, "Lower bound of the grid must be positive" );
if( pg.logStep < 1. + FLT_EPSILON )
-CV_Error( CV_StsBadArg, "Grid step must greater then 1" );
+CV_Error( CV_StsBadArg, "Grid step must greater than 1" );
}
// SVM training parameters
......
......@@ -2171,7 +2171,7 @@ void videoInput::setPhyCon(int id, int conn){
// ----------------------------------------------------------------------
-// Check that we are not trying to setup a non-existant device
+// Check that we are not trying to setup a non-existent device
// Then start the graph building!
// ----------------------------------------------------------------------
......