提交 0c9235eb 编写于 作者: Stefan Weil

Fix typos in new LSTM code

All of them were found and fixed by codespell.
Signed-off-by: Stefan Weil <sw@weilnetz.de>
上级 a7d27588
......@@ -209,7 +209,7 @@ class LSTMRecognizer {
// If label_threshold is positive, uses it for making the labels, otherwise
// uses standard ctc. Returned in scale_factor is the reduction factor
// between the image and the output coords, for computing bounding boxes.
// If re_invert is true, the input is inverted back to its orginal
// If re_invert is true, the input is inverted back to its original
// photometric interpretation if inversion is attempted but fails to
// improve the results. This ensures that outputs contains the correct
// forward outputs for the best photometric interpretation.
......@@ -351,7 +351,7 @@ class LSTMRecognizer {
// The unicharset. Only the unicharset element is serialized.
// Has to be a CCUtil, so Dict can point to it.
CCUtil ccutil_;
// For backward compatability, recoder_ is serialized iff
// For backward compatibility, recoder_ is serialized iff
// training_flags_ & TF_COMPRESS_UNICHARSET.
// Further encode/decode ccutil_.unicharset's ids to simplify the unicharset.
UnicharCompress recoder_;
......
......@@ -91,7 +91,7 @@ struct RecodeNode {
bool start_of_word;
// True if this represents a valid candidate end of word position. Does not
// necessarily mark the end of a word, since a word can be extended beyond a
// candidiate end by a continuation, eg 'the' continues to 'these'.
// candidate end by a continuation, eg 'the' continues to 'these'.
bool end_of_word;
// True if this is a duplicate of prev in all respects. Some training modes
// allow the network to output duplicate characters and crush them with CTC,
......
......@@ -142,7 +142,7 @@ bool Series::Backward(bool debug, const NetworkIO& fwd_deltas,
}
// Splits the series after the given index, returning the two parts and
// deletes itself. The first part, upto network with index last_start, goes
// deletes itself. The first part, up to network with index last_start, goes
// into start, and the rest goes into end.
void Series::SplitAt(int last_start, Series** start, Series** end) {
*start = NULL;
......
......@@ -77,7 +77,7 @@ class Series : public Plumbing {
NetworkIO* back_deltas);
// Splits the series after the given index, returning the two parts and
// deletes itself. The first part, upto network with index last_start, goes
// deletes itself. The first part, up to network with index last_start, goes
// into start, and the rest goes into end.
void SplitAt(int last_start, Series** start, Series** end);
......
......@@ -32,7 +32,7 @@ enum LossType {
};
// Simple class to hold the tensor shape that is known at network build time
// and the LossType of the loss funtion.
// and the LossType of the loss function.
class StaticShape {
public:
StaticShape()
......
......@@ -38,7 +38,7 @@ bool StrideMap::Index::IsLast(FlexDimensions dimension) const {
return MaxIndexOfDim(dimension) == indices_[dimension];
}
// Given that the dimensions upto and including dim-1 are valid, returns the
// Given that the dimensions up to and including dim-1 are valid, returns the
// maximum index for dimension dim.
int StrideMap::Index::MaxIndexOfDim(FlexDimensions dim) const {
int max_index = stride_map_->shape_[dim] - 1;
......
......@@ -69,7 +69,7 @@ class StrideMap {
bool IsValid() const;
// Returns true if the index of the given dimension is the last.
bool IsLast(FlexDimensions dimension) const;
// Given that the dimensions upto and including dim-1 are valid, returns the
// Given that the dimensions up to and including dim-1 are valid, returns the
// maximum index for dimension dim.
int MaxIndexOfDim(FlexDimensions dim) const;
// Adds the given offset to the given dimension. Returns true if the result
......
......@@ -163,7 +163,7 @@ const int kDoubleFlag = 128;
// Writes to the given file. Returns false in case of error.
bool WeightMatrix::Serialize(bool training, TFile* fp) const {
// For backward compatability, add kDoubleFlag to mode to indicate the doubles
// For backward compatibility, add kDoubleFlag to mode to indicate the doubles
// format, without errs, so we can detect and read old format weight matrices.
uinT8 mode = (int_mode_ ? kInt8Flag : 0) |
(use_ada_grad_ ? kAdaGradFlag : 0) | kDoubleFlag;
......@@ -202,7 +202,7 @@ bool WeightMatrix::DeSerialize(bool training, bool swap, TFile* fp) {
}
// As DeSerialize, but reads an old (float) format WeightMatrix for
// backward compatability.
// backward compatibility.
bool WeightMatrix::DeSerializeOld(bool training, bool swap, TFile* fp) {
GENERIC_2D_ARRAY<float> float_array;
if (int_mode_) {
......
......@@ -100,7 +100,7 @@ class WeightMatrix {
// If swap is true, assumes a big/little-endian swap is needed.
bool DeSerialize(bool training, bool swap, TFile* fp);
// As DeSerialize, but reads an old (float) format WeightMatrix for
// backward compatability.
// backward compatibility.
bool DeSerializeOld(bool training, bool swap, TFile* fp);
// Computes matrix.vector v = Wu.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册