diff --git a/HISTORY.rst b/HISTORY.rst
index f5f20e666fbffcca3b9df6b761540f9d94ac9ba7..a3bf0ffe30762e39ea547ab7dbe65d3b72d0b90a 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -6,6 +6,7 @@ History
 
 * Added --show-distance to cli
 * Fixed a bug where --tolerance was ignored in cli if testing a single image
+* Added benchmark.py to examples
 
 
 0.2.1 (2017-06-03)
diff --git a/examples/benchmark.py b/examples/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..4904437428ab6aadbd49f430f32985a0b644a056
--- /dev/null
+++ b/examples/benchmark.py
@@ -0,0 +1,77 @@
+import timeit
+
+# Note: This example is only tested with Python 3 (not Python 2)
+
+# This is a very simple benchmark to give you an idea of how fast each step of face recognition will run on your system.
+# Notice that face detection gets very slow at large image sizes. So you might consider running face detection on a
+# scaled down version of your image and then running face encodings on the full size image.
+
+TEST_IMAGES = [
+    "obama-240p.jpg",
+    "obama-480p.jpg",
+    "obama-720p.jpg",
+    "obama-1080p.jpg"
+]
+
+
+def run_test(setup, test, iterations_per_test=5, tests_to_run=10):
+    fastest_execution = min(timeit.Timer(test, setup=setup).repeat(tests_to_run, iterations_per_test))
+    execution_time = fastest_execution / iterations_per_test
+    fps = 1.0 / execution_time
+    return execution_time, fps
+
+
+setup_locate_faces = """
+import face_recognition
+
+image = face_recognition.load_image_file("{}")
+"""
+
+test_locate_faces = """
+face_locations = face_recognition.face_locations(image)
+"""
+
+setup_face_landmarks = """
+import face_recognition
+
+image = face_recognition.load_image_file("{}")
+face_locations = face_recognition.face_locations(image)
+"""
+
+test_face_landmarks = """
+landmarks = face_recognition.face_landmarks(image, face_locations=face_locations)[0]
+"""
+
+setup_encode_face = """
+import face_recognition
+
+image = face_recognition.load_image_file("{}")
+face_locations = face_recognition.face_locations(image)
+"""
+
+test_encode_face = """
+encoding = face_recognition.face_encodings(image, known_face_locations=face_locations)[0]
+"""
+
+setup_end_to_end = """
+import face_recognition
+
+image = face_recognition.load_image_file("{}")
+"""
+
+test_end_to_end = """
+encoding = face_recognition.face_encodings(image)[0]
+"""
+
+print("Benchmarks (Note: All benchmarks are only using a single CPU core)")
+print()
+
+for image in TEST_IMAGES:
+    size = image.split("-")[1].split(".")[0]
+    print("Timings at {}:".format(size))
+
+    print(" - Face locations: {:.4f}s ({:.2f} fps)".format(*run_test(setup_locate_faces.format(image), test_locate_faces)))
+    print(" - Face landmarks: {:.4f}s ({:.2f} fps)".format(*run_test(setup_face_landmarks.format(image), test_face_landmarks)))
+    print(" - Encode face (inc. landmarks): {:.4f}s ({:.2f} fps)".format(*run_test(setup_encode_face.format(image), test_encode_face)))
+    print(" - End-to-end: {:.4f}s ({:.2f} fps)".format(*run_test(setup_end_to_end.format(image), test_end_to_end)))
+    print()
diff --git a/examples/obama-1080p.jpg b/examples/obama-1080p.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a244779067c801d8e744b0fc6a83a82a34574b1f
Binary files /dev/null and b/examples/obama-1080p.jpg differ
diff --git a/examples/obama-240p.jpg b/examples/obama-240p.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c4a947a52a1d81e41c25b84c214604a233e85fb6
Binary files /dev/null and b/examples/obama-240p.jpg differ
diff --git a/examples/obama-480p.jpg b/examples/obama-480p.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..78837efc451cea599ea009138d69a57292640059
Binary files /dev/null and b/examples/obama-480p.jpg differ
diff --git a/examples/obama-720p.jpg b/examples/obama-720p.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d06cec3e257778cd802e355359fcf653daeb7208
Binary files /dev/null and b/examples/obama-720p.jpg differ
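Note on the header comment in benchmark.py: it suggests running the slow face detection step on a scaled-down copy of the image and then computing encodings from the full-size image. The patch itself does not include code for that, so below is a minimal sketch of the idea. The helper name, the 0.25 scale factor, and the use of Pillow for resizing are illustrative assumptions, not part of this patch; only the face_recognition calls shown (load_image_file, face_locations, face_encodings) come from the library itself.

import face_recognition
import numpy as np
from PIL import Image


def encodings_with_downscaled_detection(path, scale=0.25):
    # Sketch only: detect faces on a scaled-down copy (detection cost grows
    # quickly with image size), then compute encodings from the original
    # full-size image. `scale` and this helper are illustrative assumptions.
    full_image = face_recognition.load_image_file(path)

    # Build the scaled-down copy used only for detection.
    small_size = (int(full_image.shape[1] * scale), int(full_image.shape[0] * scale))
    small_image = np.array(Image.fromarray(full_image).resize(small_size))

    # face_locations() returns (top, right, bottom, left) boxes.
    small_boxes = face_recognition.face_locations(small_image)

    # Scale the boxes back up to full-image coordinates.
    full_boxes = [
        (int(top / scale), int(right / scale), int(bottom / scale), int(left / scale))
        for (top, right, bottom, left) in small_boxes
    ]

    # Run the encoding step against the full-resolution pixels.
    return face_recognition.face_encodings(full_image, known_face_locations=full_boxes)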