Unverified Commit 7545c9d7 authored by Nat Jeffries, committed by GitHub

Fix misc formatting and spelling errors. (#435)

* Fix misc formatting and spelling errors.

* Re-enable layering check.
Co-authored-by: Advait Jain <advaitjain@users.noreply.github.com>
Parent 6e0c5161
def micro_copts():
    return [
        "-Wall",
        "-Werror",
        "-DFLATBUFFERS_LOCALE_INDEPENDENT=0",
    ]
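micro_copts() is loaded by the BUILD files touched below and its return value is passed as the copts of TFLM C++ targets. A minimal usage sketch; the target and source file names are hypothetical and not part of this commit:

load("//tensorflow/lite/micro:build_def.bzl", "micro_copts")

cc_library(
    name = "example_kernel",        # hypothetical target
    srcs = ["example_kernel.cc"],   # hypothetical source
    copts = micro_copts(),          # -Wall, -Werror, and the flatbuffers locale define
)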
def generate_cc_arrays(name, src, out, visibility = None):
    native.genrule(
        name = name,
        srcs = [
            src,
        ],
        outs = [
            out,
        ],
        cmd = "$(location //tensorflow/lite/micro/tools:generate_cc_arrays) $@ $<",
        tools = ["//tensorflow/lite/micro/tools:generate_cc_arrays"],
        visibility = visibility,
    )
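generate_cc_arrays is a thin wrapper around a single genrule: the generate_cc_arrays tool reads the .tflite flatbuffer given as the input ($<) and writes a C/C++ array source to the output ($@). A usage sketch follows; the model name, output file, and wrapping cc_library are illustrative assumptions rather than lines from this diff (the real call sites appear in the BUILD hunks further down):

load("//tensorflow/lite/micro:build_def.bzl", "generate_cc_arrays", "micro_copts")

generate_cc_arrays(
    name = "generated_example_model_cc",  # hypothetical rule name
    src = "example_model.tflite",         # hypothetical input model
    out = "example_model_data.cc",        # hypothetical generated source
)

cc_library(
    name = "example_model_data",
    srcs = [":generated_example_model_cc"],  # consumes the genrule output
    copts = micro_copts(),
)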
@@ -2,13 +2,12 @@
# TensorFlow Lite for Microcontrollers "hello world" example.
load(
    "//tensorflow/lite/micro:build_def.bzl",
    "generate_cc_arrays",
    "micro_copts",
)
-load("//tensorflow/lite/micro:build_def.bzl","generate_cc_arrays")
package(
    default_visibility = ["//visibility:public"],
    features = ["-layering_check"],
    licenses = ["notice"],
)
......
# Description:
# TensorFlow Lite for Microcontrollers "gesture recognition" example.
load("//tensorflow/lite/micro:build_def.bzl","generate_cc_arrays")
package(
default_visibility = ["//visibility:public"],
features = ["-layering_check"],
licenses = ["notice"],
)
load("//tensorflow/lite/micro:build_def.bzl","generate_cc_arrays")
generate_cc_arrays(
name="generated_magic_wand_model_cc",
src="magic_wand.tflite",
......
# Description:
# TensorFlow Lite for Microcontrollers Vision Example.
load("//tensorflow/lite/micro:build_def.bzl","generate_cc_arrays")
package(
default_visibility = ["//visibility:public"],
features = ["-layering_check"],
licenses = ["notice"],
)
load("//tensorflow/lite/micro:build_def.bzl","generate_cc_arrays")
cc_library(
name = "model_settings",
srcs = [
......
@@ -119,7 +119,7 @@ fed into the model. It reduces each image to the size specified by `--train_imag
- `--train_dir` will contain the trained checkpoints and summaries.
- The `--learning_rate`, `--label_smoothing`, `--learning_rate_decay_factor`,
`--num_epochs_per_decay`, `--moving_average_decay` and `--batch_size` are all
-parameters that control how weights are updated during the the training
+parameters that control how weights are updated during the training
process. Training deep networks is still a bit of a dark art, so these exact
values we found through experimentation for this particular model. You can try
tweaking them to speed up training or gain a small boost in accuracy, but we
......
@@ -692,7 +692,7 @@ void UpdateLstmCellInteger(int n_batch, int n_cell, int16_t* cell_state,
// constant inputs, describing projection matrix and bias.
// - output_state_zp: zero point of output_state. (Input, calibrated value.)
// - quantized_proj_clip: if > 0, clip the output of the projection.
-// - output_state: output vector, size n_batch*n_output. Must be contigous.
+// - output_state: output vector, size n_batch*n_output. Must be contiguous.
// - context: data for optimized MatrixBatchVectorMultiplyAccumulate.
// - scratch0: scratch area of size n_batch*n_cell
// - scratch1: scratch area of size n_batch*n_cell
@@ -843,7 +843,7 @@ void CalculateLstmGateInteger8x8_8(
// constant inputs, describing projection matrix and bias.
// - output_state_zp: zero point of the output state.
// - quantized_proj_clip: if > 0, clip the output of the projection.
-// - output_state: output vector, size n_batch*n_output. Must be contigous.
+// - output_state: output vector, size n_batch*n_output. Must be contiguous.
// - scratch: scratch area of size n_batch*n_cell
void CalculateLstmOutputInteger8x8_8(
int n_batch, int n_cell, int n_output, const int16_t* cell_state,
@@ -1324,7 +1324,7 @@ inline void LstmStepInteger8x8_8(
projection_bias_ptr, output_state_zp, quantized_proj_clip,
output_state_ptr, scratch2);
// Copy output state to the output. Note that unlike float or hybrid, output
-// is always contigous.
+// is always contiguous.
std::copy_n(output_state_ptr, n_batch * n_output, output_ptr);
}
......
@@ -587,7 +587,6 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
// 1) If projection weight is not present, then projection bias should not be
// present.
// 2) If projection weight is present, then projection bias is optional.
-// TODO(ghodrat): make sure this is correct.
const bool projecton_tensors_consistent =
((projection_weights != nullptr) || (projection_bias == nullptr));
TF_LITE_ENSURE(context, projecton_tensors_consistent == true);
......
# Description:
# TensorFlow Lite for Microcontrollers Vision Example.
load("//tensorflow/lite/micro:build_def.bzl","generate_cc_arrays")
package(
default_visibility = ["//visibility:public"],
features = ["-layering_check"],
licenses = ["notice"],
)
load("//tensorflow/lite/micro:build_def.bzl","generate_cc_arrays")
generate_cc_arrays(
name="generated_person_detect_model_cc",
src="person_detect.tflite",
......
@@ -36,7 +36,7 @@ readable_run make -f tensorflow/lite/micro/tools/make/Makefile \
build -j$(nproc)
-# Since we currently do not have optimized kernel implemetations for vision_p6,
+# Since we currently do not have optimized kernel implementations for vision_p6,
# running the tests (in particular person_detection_int8) takes a very long
# time. So, we have changed the default for this script to only perform a build
# and added an option to run all the tests when that is feasible.
......