diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4bcead2998e1d5a3f11a1e2d6ea9f2ea7e6da240 --- /dev/null +++ b/LICENSE @@ -0,0 +1,708 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + ====================================================================================== + Apache MXNET (incubating) Subcomponents: + + The Apache MXNET (incubating) project contains subcomponents with separate copyright + notices and license terms. Your use of the source code for the these + subcomponents is subject to the terms and conditions of the following + licenses. + + ======================================================================================= + Apache-2.0 licenses + ======================================================================================= + + The following components are provided under an Apache 2.0 license. + + 1. MXNet Cpp-package - For details, /cpp-package/LICENSE + Copyright (c) 2015-2016 by Contributors + 2. MXNet rcnn - For details, see, example/rcnn/LICENSE + Copyright (c) 2014, 2015, The Regents of the University of California (Regents) + 3. MXNet scala-package - For details, see, scala-package/LICENSE + Copyright (c) 2014, 2015, the respective contributors + 4. Warp-CTC - For details, see, 3rdparty/ctc_include/LICENSE + Copyright 2015-2016, Baidu USA LLC. + 5. 3rdparty/dlpack - For details, see, 3rdparty/dlpack/LICENSE + Copyright 2017 by Contributors + 6. 3rdparty/dmlc-core - For details, see, 3rdparty/dmlc-core/LICENSE + Copyright (c) 2015 by Contributors + Copyright 2015 by dmlc-core developers + Copyright by Contributors + 7. 3rdparty/mshadow - For details, see, 3rdparty/mshadow/LICENSE + Copyright (c) 2014-2016 by Contributors + Copyright by Contributors + 8. 3rdparty/tvm - For details, see, 3rdparty/tvm/LICENSE + Copyright (c) 2016-2018 by Contributors + Copyright 2018 by Contributors + Copyright (c) 2018 by Xilinx, Contributors + 9. 3rdparty/tvm/dmlc-core - For details, see, 3rdparty/tvm/3rdparty/dmlc-core/LICENSE + Copyright (c) 2015 by Contributors + 10. 3rdparty/tvm/dlpack - For details, see, 3rdparty/tvm/3rdparty/dlpack/LICENSE + Copyright (c) 2015-2017 by Contributors + Copyright by Contributors + 11. 3rdparty/ps-lite - For details, see, 3rdparty/ps-lite/LICENSE + Copyright 2015 Carnegie Mellon University + Copyright 2016, ps-lite developers + Copyright (c) 2015-2016 by Contributors + Copyright by Contributors + 12. 3rdparty/mkldnn - For details, see, 3rdparty/mkldnn/LICENSE + Copyright (c) 2017-2018 Intel Corporation + Copyright 2016-2018 Intel Corporation + Copyright 2018 YANDEX LLC + 13. googlemock scripts/generator - For details, see, 3rdparty/googletest/googlemock/scripts/generator/LICENSE + Copyright [2007-2009] Neal Norwitz + Portions Copyright [2007-2009] Google Inc. + 14. MXNet clojure-package - For details, see, contrib/clojure-package/LICENSE + Copyright 2018 by Contributors + 15. MXNet R-package - For details, see, R-package/LICENSE + Copyright (c) 2015 by Contributors + 16. ONNX-TensorRT benchmark package - For details, see, 3rdparty/onnx-tensorrt/third_party/onnx/third_party/benchmark/LICENSE + Copyright 2015 Google Inc. All rights reserved. + Copyright 2016 Ismael Jimenez Martinez. All rights reserved. + Copyright 2017 Roman Lebedev. All rights reserved. + 17. 
Dockerfiles - For details, see docker/Dockerfiles/License.md + 18. MXNet Julia Package - For details, see julia/LICENSE.md + Copyright (c) 2015-2018 by Chiyuan Zhang + 19. Benchdnn - For details, see 3rdparty/mkldnn/tests/benchdnn/README.md + Copyright 2017-2018 Intel Corporation + 20. MXNet perl-package - For details, see perl-package/README + 21. MXNet perl-package AI-MXNET - For details, see perl-package/AI-MXNet/README + 22. MXNet perl-package AI-MXNET Gluon Contrib - For details, see perl-package/AI-MXNet-Gluon-Contrib/README + 23. MXNet perl-package AI-MXNET Gluon ModelZoo - For details, see perl-package/AI-MXNet-Gluon-ModelZoo/README + 24. MXNet perl-package AI-MXNETCAPI - For details, see perl-package/AI-MXNetCAPI/README + 25. MXNet perl-package AI-NNVMCAPI - For details, see perl-package/AI-NNVMCAPI/README + 26. Cephes Library Functions - For details, see src/operator/special_functions-inl.h + Copyright (c) 2015 by Contributors + Copyright 1984, 1987, 1992 by Stephen L. Moshier + + + ======================================================================================= + MIT licenses + ======================================================================================= + + 1. Fast R-CNN - For details, see example/rcnn/LICENSE + Copyright (c) Microsoft Corporation + 2. Faster R-CNN - For details, see example/rcnn/LICENSE + Copyright (c) 2015 Microsoft Corporation + 3. tree_lstm - For details, see example/gluon/tree_lstm/LICENSE + Copyright (c) 2017 Riddhiman Dasgupta, Sheng Zha + 4. OpenMP - For details, see 3rdparty/openmp/LICENSE.txt + Copyright (c) 1997-2016 Intel Corporation + 6. HalideIR - For details, see 3rdparty/tvm/3rdparty/HalideIR/LICENSE + Copyright (c) 2016 HalideIR contributors + Copyright (c) 2012-2014 MIT CSAIL, Google Inc., and other contributors + Copyright (c) 2016-2018 by Contributors + 7. ONNX-TensorRT - For details, see 3rdparty/onnx-tensorrt/LICENSE + Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + Copyright (c) 2018 Open Neural Network Exchange + 8. ONNX-TensorRT - For details, see 3rdparty/onnx-tensorrt/third_party/onnx/LICENSE + Copyright (c) Facebook, Inc. and Microsoft Corporation. + 9. clipboard.js - Refer to https://zenorocha.github.io/clipboard.js + Licensed MIT © Zeno Rocha + 10. clipboard.min.js - Refer to https://zenorocha.github.io/clipboard.js + Licensed MIT © Zeno Rocha + + + ======================================================================================= + 3-clause BSD licenses + ======================================================================================= + + 1. Xbyak - For details, see 3rdparty/mkldnn/src/cpu/xbyak/COPYRIGHT + Copyright (c) 2007 MITSUNARI Shigeo + Copyright 2016-2018 Intel Corporation + 2. gtest - For details, see, 3rdparty/mkldnn/tests/gtests/gtest/LICENSE + Copyright 2005-2008, Google Inc. + 3. Moderngpu - For details, see, 3rdparty/ctc_include/contrib/moderngpu/LICENSE + Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. + 4. CUB Library - For details, see, 3rdparty/cub/LICENSE.TXT + Copyright (c) 2010-2011, Duane Merrill. All rights reserved. + Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. + 5. CUB mersenne.h - For details, see 3rdparty/cub/test/mersenne.h + Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, + 6. Googlemock - For details, see, 3rdparty/googletest/googlemock/LICENSE + Copyright 2006-2015, Google Inc. + 7. Googletest - For details, see, 3rdparty/googletest/googletest/LICENSE + Copyright 2005-2015, Google Inc. + 8. 
OpenMP Testsuite - For details, see, 3rdparty/openmp/testsuite/LICENSE + Copyright (c) 2011, 2012 University of Houston System + + + ======================================================================================= + 2-clause BSD licenses + ======================================================================================= + + 1. Sphinx JavaScript utilties for the full-text search - For details, see, docs/_static/searchtools_custom.js + Copyright (c) 2007-2017 by the Sphinx team + 2. blockingconcurrentqueue.h - For details, see, 3rdparty/dmlc-core/include/dmlc/blockingconcurrentqueue.h + ©2015-2016 Cameron Desrochers + 3. concurrentqueue.h - For details, see, 3rdparty/dmlc-core/include/dmlc/concurrentqueue.h + Copyright (c) 2013-2016, Cameron Desrochers. + 4. MSCOCO Toolbox - For details, see, example/ssd/dataset/pycocotools/coco.py + Code written by Piotr Dollar and Tsung-Yi Lin, 2014. + 5. PyBind11 FindEigen3.cmake - For details, see 3rdparty/onnx-tensorrt/third_party/onnx/third_party/pybind11/tools/FindEigen3.cmake + Copyright (c) 2006, 2007 Montel Laurent, + Copyright (c) 2008, 2009 Gael Guennebaud, + Copyright (c) 2009 Benoit Jacob + 6. PyBind11 FindPythonLibsNew.cmake - For details, see 3rdparty/onnx-tensorrt/third_party/onnx/third_party/pybind11/tools/FindPythonLibsNew.cmake + Copyright 2001-2009 Kitware, Inc. + Copyright 2012 Continuum Analytics, Inc. + + + + ======================================================================================= + Other Licenses + ======================================================================================= + + 1. Caffe - For details, see, example/rcnn/LICENSE + Copyright (c) 2014, 2015, The Regents of the University of California (Regents) + Copyright (c) 2014, 2015, the respective contributors + 2. pool.h - For details, see, src/operator/nn/pool.h + Copyright (c) 2014-2017 The Regents of the University of California (Regents) + Copyright (c) 2014-2017, the respective contributors + 3. pool.cuh - For details, see, src/operator/nn/pool.cuh + Copyright (c) 2014-2017 The Regents of the University of California (Regents) + Copyright (c) 2014-2017, the respective contributors + 4. im2col.h - For details, see, src/operator/nn/im2col.h + Copyright (c) 2014-2017 The Regents of the University of California (Regents) + Copyright (c) 2014-2017, the respective contributors + 5. im2col.cuh - For details, see, src/operator/nn/im2col.cuh + Copyright (c) 2014-2017 The Regents of the University of California (Regents) + Copyright (c) 2014-2017, the respective contributors + + 6. deformable_im2col.h - For details, see, src/operator/contrib/nn/deformable_im2col.h + Copyright (c) 2014-2017 The Regents of the University of California (Regents) + Copyright (c) 2014-2017, the respective contributors + + 7. deformable_im2col.cuh - For details, see, src/operator/contrib/nn/deformable_im2col.cuh + Copyright (c) 2014-2017 The Regents of the University of California (Regents) + Copyright (c) 2014-2017, the respective contributors + + + COPYRIGHT + + Caffe uses a shared copyright model: each contributor holds copyright over + their contributions to Caffe. The project versioning records all such + contribution and copyright details. If a contributor wants to further mark + their specific copyright on a particular contribution, they should indicate + their copyright solely in the commit message of the change when it is + committed. 
+ + LICENSE + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + CONTRIBUTION AGREEMENT + + By contributing to the BVLC/caffe repository through pull-request, comment, + or otherwise, the contributor releases their content to the + license and copyright terms herein. + + ======================================================================================= + + 8. MS COCO API + For details, see, example/rcnn/LICENSE + Copyright (c) 2014, Piotr Dollar and Tsung-Yi Lin + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + The views and conclusions contained in the software and documentation are those + of the authors and should not be interpreted as representing official policies, + either expressed or implied, of the FreeBSD Project. + + ======================================================================================= + + 9. Semaphore implementation in blockingconcurrentqueue.h + This file uses a semaphore implementation under the terms of its separate zlib license. 
+ For details, see, 3rdparty/dmlc-core/include/dmlc/blockingconcurrentqueue.h + Copyright Jeff Preshing + + ======================================================================================= + + 10. ONNX Export module + For details, see, python/mxnet/contrib/onnx/mx2onnx/LICENSE + + # Licensed to the Apache Software Foundation (ASF) under one + # or more contributor license agreements. See the NOTICE file + # distributed with this work for additional information + # regarding copyright ownership. The ASF licenses this file + # to you under the Apache License, Version 2.0 (the + # "License"); you may not use this file except in compliance + # with the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, + # software distributed under the License is distributed on an + # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + # KIND, either express or implied. See the License for the + # specific language governing permissions and limitations + # under the License. + # + # Based on + # https://github.com/NVIDIA/mxnet_to_onnx/blob/master/mx2onnx_converter/# + # Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions + # are met: + # * Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. + # * Redistributions in binary form must reproduce the above copyright + # notice, this list of conditions and the following disclaimer in the + # documentation and/or other materials provided with the distribution. + # * Neither the name of NVIDIA CORPORATION nor the names of its + # contributors may be used to endorse or promote products derived + # from this software without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY + # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ======================================================================================= + + 11. ONNX python bindings + For details, see, 3rdparty/onnx-tensorrt/third_party/onnx/third_party/pybind11/LICENSE + Copyright (c) 2015-2017 Wenzel Jakob , All rights reserved. + Copyright (c) 2016 Trent Houliston and Wenzel Jakob + Copyright (c) 2016-2017 Jason Rhinelander + Copyright (c) 2016 Klemens Morgenstern and Wenzel Jakob + Copyright (c) 2017 Henry F. Schreiner + Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob + Copyright (c) 2016 Ben North + Copyright (c) 2016 Klemens D. 
Morgenstern + Copyright (c) 2016 Pim Schellart + Copyright (c) 2016 Ivan Smirnov + Copyright (c) 2016 Sergey Lyskov + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You are under no obligation whatsoever to provide any bug fixes, patches, or + upgrades to the features, functionality or performance of the source code + ("Enhancements") to anyone; however, if you choose to make your Enhancements + available either publicly, or directly to the author of this software, without + imposing a separate written license agreement for such Enhancements, then you + hereby grant the following license: a non-exclusive, royalty-free perpetual + license to install, use, modify, prepare derivative works, incorporate into + other computer software, distribute, and sublicense such enhancements or + derivative works thereof, in binary and source code form. + + ======================================================================================= + + 12. Clang + For details, see, 3rdparty/onnx-tensorrt/third_party/onnx/third_party/pybind11/tools/clang/LICENSE.TXT + + LLVM Release License + University of Illinois/NCSA + Open Source License + + Copyright (c) 2007-2012 University of Illinois at Urbana-Champaign. + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy of + this software and associated documentation files (the "Software"), to deal with + the Software without restriction, including without limitation the rights to + use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is furnished to do + so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + The LLVM software contains code written by third parties. Such software will + have its own individual LICENSE.TXT file in the directory in which it appears. + This file will describe the copyrights, license, and restrictions which apply + to that code. + + The disclaimer of warranty in the University of Illinois Open Source License + applies to all code in the LLVM Distribution, and nothing in any of the + other licenses gives permission to use the names of the LLVM Team or the + University of Illinois to endorse or promote products derived from this + Software. + + The following pieces of software have additional or alternate copyrights, + licenses, and/or restrictions: + + Program Directory + ------- --------- + + + ======================================================================================= + + 13. MKL BLAS + For details, see, [Intel® Simplified license](https://software.intel.com/en-us/license/intel-simplified-software-license) and MKLDNN_README.md + + Copyright (c) 2018 Intel Corporation. + + Use and Redistribution. You may use and redistribute the software (the “Software”), without modification, provided the following conditions are met: + + * Redistributions must reproduce the above copyright notice and the following terms of use in the Software and in the documentation and/or other materials provided with the distribution. + + * Neither the name of Intel nor the names of its suppliers may be used to endorse or promote products derived from this Software without specific prior written permission. + + * No reverse engineering, decompilation, or disassembly of this Software is permitted. + + Limited patent license. Intel grants you a world-wide, royalty-free, non-exclusive license under patents it now or hereafter owns or controls to make, have made, use, import, offer to sell and sell (“Utilize”) this Software, but solely to the extent that any such patent is necessary to Utilize the Software alone. The patent license shall not apply to any combinations which include this software. No hardware per se is licensed hereunder. + + Third party and other Intel programs. “Third Party Programs” are the files listed in the “third-party-programs.txt” text file that is included with the Software and may include Intel programs under separate license terms. Third Party Programs, even if included with the distribution of the Materials, are governed by separate license terms and those license terms solely govern your use of those programs. + + DISCLAIMER. 
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT ARE DISCLAIMED. THIS SOFTWARE IS NOT INTENDED FOR USE IN SYSTEMS OR APPLICATIONS WHERE FAILURE OF THE SOFTWARE MAY CAUSE PERSONAL INJURY OR DEATH AND YOU AGREE THAT YOU ARE FULLY RESPONSIBLE FOR ANY CLAIMS, COSTS, DAMAGES, EXPENSES, AND ATTORNEYS’ FEES ARISING OUT OF ANY SUCH USE, EVEN IF ANY CLAIM ALLEGES THAT INTEL WAS NEGLIGENT REGARDING THE DESIGN OR MANUFACTURE OF THE MATERIALS. + + LIMITATION OF LIABILITY. IN NO EVENT WILL INTEL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. YOU AGREE TO INDEMNIFY AND HOLD INTEL HARMLESS AGAINST ANY CLAIMS AND EXPENSES RESULTING FROM YOUR USE OR UNAUTHORIZED USE OF THE SOFTWARE. + + No support. Intel may make changes to the Software, at any time without notice, and is not obligated to support, update or provide training for the Software. + + Termination. Intel may terminate your right to use the Software in the event of your breach of this Agreement and you fail to cure the breach within a reasonable period of time. + + Feedback. Should you provide Intel with comments, modifications, corrections, enhancements or other input (“Feedback”) related to the Software Intel will be free to use, disclose, reproduce, license or otherwise distribute or exploit the Feedback in its sole discretion without any obligations or restrictions of any kind, including without limitation, intellectual property rights or licensing obligations. + + Compliance with laws. You agree to comply with all relevant laws and regulations governing your use, transfer, import or export (or prohibition thereof) of the Software. + + Governing law. All disputes will be governed by the laws of the United States of America and the State of Delaware without reference to conflict of law principles and subject to the exclusive jurisdiction of the state or federal courts sitting in the State of Delaware, and each party agrees that it submits to the personal jurisdiction and venue of those courts and waives any objections. The United Nations Convention on Contracts for the International Sale of Goods (1980) is specifically excluded and will not apply to the Software. + + *Other names and brands may be claimed as the property of others. + + ======================================================================================= + + 14. FindJeMalloc.cmake + For details, see, cmake/Modules/FindJeMalloc.cmake + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + + Copyright (c) 2014 Thomas Heller + Copyright (c) 2007-2012 Hartmut Kaiser + Copyright (c) 2010-2011 Matt Anderson + Copyright (c) 2011 Bryce Lelbach + + Distributed under the Boost Software License, Version 1.0. + Boost Software License - Version 1.0 - August 17th, 2003 + + Permission is hereby granted, free of charge, to any person or organization + obtaining a copy of the software and accompanying documentation covered by + this license (the "Software") to use, reproduce, display, distribute, + execute, and transmit the Software, and to prepare derivative works of the + Software, and to permit third-parties to whom the Software is furnished to + do so, all subject to the following: + + The copyright notices in the Software and this entire statement, including + the above license grant, this restriction and the following disclaimer, + must be included in all copies of the Software, in whole or in part, and + all derivative works of the Software, unless such copies or derivative + works are solely in the form of machine-executable object code generated by + a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT + SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE + FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. diff --git a/cnocr/__init__.py b/cnocr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/cnocr/data_utils/__init__.py b/cnocr/data_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/cnocr/data_utils/captcha_generator.py b/cnocr/data_utils/captcha_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..5d495280b7c2166357a5db4a3aa24202e7530dae --- /dev/null +++ b/cnocr/data_utils/captcha_generator.py @@ -0,0 +1,214 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" Helper classes for multiprocess captcha image generation + +This module also provides script for saving captcha images to file using CLI. +""" + +from __future__ import print_function +import random + +from captcha.image import ImageCaptcha +import cv2 +from .multiproc_data import MPData +import numpy as np + + +class CaptchaGen(object): + """ + Generates a captcha image + """ + def __init__(self, h, w, font_paths): + """ + Parameters + ---------- + h: int + Height of the generated images + w: int + Width of the generated images + font_paths: list of str + List of all fonts in ttf format + """ + self.captcha = ImageCaptcha(fonts=font_paths) + self.h = h + self.w = w + + def image(self, captcha_str): + """ + Generate a greyscale captcha image representing number string + + Parameters + ---------- + captcha_str: str + string a characters for captcha image + + Returns + ------- + numpy.ndarray + Generated greyscale image in np.ndarray float type with values normalized to [0, 1] + """ + img = self.captcha.generate(captcha_str) + img = np.fromstring(img.getvalue(), dtype='uint8') + img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE) + img = cv2.resize(img, (self.h, self.w)) + img = img.transpose(1, 0) + img = np.multiply(img, 1 / 255.0) + return img + + +class DigitCaptcha(object): + """ + Provides shape() and get() interface for digit-captcha image generation + """ + def __init__(self, font_paths, h, w, num_digit_min, num_digit_max): + """ + Parameters + ---------- + font_paths: list of str + List of path to ttf font files + h: int + height of the generated image + w: int + width of the generated image + num_digit_min: int + minimum number of digits in generated captcha image + num_digit_max: int + maximum number of digits in generated captcha image + """ + self.num_digit_min = num_digit_min + self.num_digit_max = num_digit_max + self.captcha = CaptchaGen(h=h, w=w, font_paths=font_paths) + + @property + def shape(self): + """ + Returns shape of the image data generated + + Returns + ------- + tuple(int, int) + """ + return self.captcha.h, self.captcha.w + + def get(self): + """ + Get an image from the queue + + Returns + ------- + np.ndarray + A captcha image, normalized to [0, 1] + """ + return self._gen_sample() + + @staticmethod + def get_rand(num_digit_min, num_digit_max): + """ + Generates a character string of digits. 
+    @staticmethod
+    def get_rand(num_digit_min, num_digit_max):
+        """
+        Generates a character string of digits. The number of digits is
+        sampled uniformly between num_digit_min and num_digit_max.
+
+        Returns
+        -------
+        str
+        """
+        buf = ""
+        max_len = random.randint(num_digit_min, num_digit_max)
+        for i in range(max_len):
+            buf += str(random.randint(0, 9))
+        return buf
+
+    def _gen_sample(self):
+        """
+        Generate a random captcha image sample
+
+        Returns
+        -------
+        (numpy.ndarray, str)
+            Tuple of image (numpy ndarray) and character string of digits used to generate the image
+        """
+        num_str = self.get_rand(self.num_digit_min, self.num_digit_max)
+        return self.captcha.image(num_str), num_str
+
+
+class MPDigitCaptcha(DigitCaptcha):
+    """
+    Handles multi-process captcha image generation
+    """
+    def __init__(self, font_paths, h, w, num_digit_min, num_digit_max, num_processes, max_queue_size):
+        """
+        Parameters
+        ----------
+        font_paths: list of str
+            List of paths to ttf font files
+        h: int
+            height of the generated image
+        w: int
+            width of the generated image
+        num_digit_min: int
+            minimum number of digits in generated captcha image
+        num_digit_max: int
+            maximum number of digits in generated captcha image
+        num_processes: int
+            Number of processes to spawn
+        max_queue_size: int
+            Maximum images in queue before processes wait
+        """
+        super(MPDigitCaptcha, self).__init__(font_paths, h, w, num_digit_min, num_digit_max)
+        self.mp_data = MPData(num_processes, max_queue_size, self._gen_sample)
+
+    def start(self):
+        """
+        Starts the processes
+        """
+        self.mp_data.start()
+
+    def get(self):
+        """
+        Get a sample from the queue
+
+        Returns
+        -------
+        tuple(numpy.ndarray, str)
+            A captcha image, normalized to [0, 1], and its digit string
+        """
+        return self.mp_data.get()
+
+    def reset(self):
+        """
+        Resets the generator by stopping all processes
+        """
+        self.mp_data.reset()
+
+
+if __name__ == '__main__':
+    import argparse
+
+    def main():
+        parser = argparse.ArgumentParser()
+        parser.add_argument("font_path", help="Path to ttf font file")
+        parser.add_argument("output", help="Output filename including extension (e.g. 'sample.jpg')")
+        parser.add_argument("--num", help="Up to 4 digit number [Default: random]")
+        args = parser.parse_args()
+
+        captcha = ImageCaptcha(fonts=[args.font_path])
+        captcha_str = args.num if args.num else DigitCaptcha.get_rand(3, 4)
+        img = captcha.generate(captcha_str)
+        img = np.frombuffer(img.getvalue(), dtype='uint8')
+        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
+        cv2.imwrite(args.output, img)
+        print("Captcha image with digits {} written to {}".format([int(c) for c in captcha_str], args.output))
+
+    main()
diff --git a/cnocr/data_utils/data_iter.py b/cnocr/data_utils/data_iter.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cb9646f4ea51a37f3e7e9c1b51161dd1f325d55
--- /dev/null
+++ b/cnocr/data_utils/data_iter.py
@@ -0,0 +1,294 @@
+from __future__ import print_function
+
+import os
+import random
+
+from PIL import Image
+import numpy as np
+import mxnet as mx
+
+from .multiproc_data import MPData
+
+
+class SimpleBatch(object):
+    def __init__(self, data_names, data, label_names=list(), label=list()):
+        self._data = data
+        self._label = label
+        self._data_names = data_names
+        self._label_names = label_names
+
+        self.pad = 0
+        self.index = None
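+        # `pad` and `index` mirror mx.io.DataBatch's interface: modules read
+        # `pad` to know how many trailing samples of a batch are padding, and
+        # `index` may carry an example id. Neither is needed by this
+        # generator, so they stay 0 and None.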
+
+    @property
+    def data(self):
+        return self._data
+
+    @property
+    def label(self):
+        return self._label
+
+    @property
+    def data_names(self):
+        return self._data_names
+
+    @property
+    def label_names(self):
+        return self._label_names
+
+    @property
+    def provide_data(self):
+        return [(n, x.shape) for n, x in zip(self._data_names, self._data)]
+
+    @property
+    def provide_label(self):
+        return [(n, x.shape) for n, x in zip(self._label_names, self._label)]
+
+
+# class ImageIter(mx.io.DataIter):
+#
+#     """
+#     Iterator class for generating captcha image data
+#     """
+#     def __init__(self, data_root, data_list, batch_size, data_shape, num_label, name=None):
+#         """
+#         Parameters
+#         ----------
+#         data_root: str
+#             root directory of images
+#         data_list: str
+#             a .txt file stores the image name and corresponding labels for each line
+#         batch_size: int
+#         name: str
+#         """
+#         super(ImageIter, self).__init__()
+#         self.batch_size = batch_size
+#         self.data_shape = data_shape
+#         self.num_label = num_label
+#
+#         self.data_root = data_root
+#         self.dataset_lst_file = open(data_list)
+#
+#         self.provide_data = [('data', (batch_size, 1, data_shape[1], data_shape[0]))]
+#         self.provide_label = [('label', (self.batch_size, self.num_label))]
+#         self.name = name
+#
+#     def __iter__(self):
+#         data = []
+#         label = []
+#         cnt = 0
+#         for m_line in self.dataset_lst_file:
+#             img_lst = m_line.strip().split(' ')
+#             img_path = os.path.join(self.data_root, img_lst[0])
+#
+#             cnt += 1
+#             img = Image.open(img_path).resize(self.data_shape, Image.BILINEAR).convert('L')
+#             img = np.array(img).reshape((1, self.data_shape[1], self.data_shape[0]))
+#             data.append(img)
+#
+#             ret = np.zeros(self.num_label, int)
+#             for idx in range(1, len(img_lst)):
+#                 ret[idx-1] = int(img_lst[idx])
+#
+#             label.append(ret)
+#             if cnt % self.batch_size == 0:
+#                 data_all = [mx.nd.array(data)]
+#                 label_all = [mx.nd.array(label)]
+#                 data_names = ['data']
+#                 label_names = ['label']
+#                 data.clear()
+#                 label.clear()
+#                 yield SimpleBatch(data_names, data_all, label_names, label_all)
+#                 continue
+#
+#     def reset(self):
+#         if self.dataset_lst_file.seekable():
+#             self.dataset_lst_file.seek(0)
+
+
+class ImageIterLstm(mx.io.DataIter):
+    """
+    Iterator that reads OCR images and their labels from disk and yields
+    batches that include the LSTM init states
+    """
+
+    def __init__(self, data_root, data_list, batch_size, data_shape, num_label, lstm_init_states, name=None):
+        """
+        Parameters
+        ----------
+        data_root: str
+            root directory of images
+        data_list: str
+            a .txt file that stores the image name and corresponding labels on each line
+        batch_size: int
+        data_shape: tuple(int, int)
+            (width, height) the images are resized to
+        num_label: int
+            maximum number of labels per image; label vectors are zero-padded to this length
+        lstm_init_states: list of tuple(str, tuple)
+            name and shape of each LSTM init state
+        name: str
+        """
+        super(ImageIterLstm, self).__init__()
+        self.batch_size = batch_size
+        self.data_shape = data_shape
+        self.num_label = num_label
+
+        self.init_states = lstm_init_states
+        self.init_state_arrays = [mx.nd.zeros(x[1]) for x in lstm_init_states]
+
+        self.data_root = data_root
+        self.dataset_lines = open(data_list).readlines()
+
+        self.provide_data = [('data', (batch_size, 1, data_shape[1], data_shape[0]))] + lstm_init_states
+        self.provide_label = [('label', (self.batch_size, self.num_label))]
+        self.name = name
+
+    def __iter__(self):
+        init_state_names = [x[0] for x in self.init_states]
+        data = []
+        label = []
+        cnt = 0
+        for m_line in self.dataset_lines:
+            img_lst = m_line.strip().split(' ')
+            img_path = os.path.join(self.data_root, img_lst[0])
+
+            cnt += 1
+            img = Image.open(img_path).resize(self.data_shape, Image.BILINEAR).convert('L')
+            img = np.array(img).reshape((1, self.data_shape[1], self.data_shape[0]))  # res: [1, height, width]
+            data.append(img)
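+
+            # each line of the list file is "<image name> <id> <id> ...";
+            # the ids fill the first slots of a length-num_label vector and
+            # the unused tail stays 0 (zero padding)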
+            ret = np.zeros(self.num_label, int)
+            for idx in range(1, len(img_lst)):
+                ret[idx - 1] = int(img_lst[idx])
+
+            label.append(ret)
+            if cnt % self.batch_size == 0:
+                data_all = [mx.nd.array(data)] + self.init_state_arrays
+                label_all = [mx.nd.array(label)]
+                data_names = ['data'] + init_state_names
+                label_names = ['label']
+                data = []
+                label = []
+                yield SimpleBatch(data_names, data_all, label_names, label_all)
+
+    def reset(self):
+        # the list file was read into memory once, so reset() can reshuffle
+        # instead of seeking back to the start of the file
+        random.shuffle(self.dataset_lines)
+
+
+class MPOcrImages(object):
+    """
+    Handles multi-process Chinese OCR image generation
+    """
+    def __init__(self, data_root, data_list, data_shape, num_label, num_processes, max_queue_size):
+        """
+        Parameters
+        ----------
+        data_root: str
+            root directory of images
+        data_list: str
+            a .txt file that stores the image name and corresponding labels on each line
+        data_shape: tuple(int, int)
+            (width, height) the images are resized to
+        num_label: int
+            maximum number of labels per image
+        num_processes: int
+            Number of processes to spawn
+        max_queue_size: int
+            Maximum images in queue before processes wait
+        """
+        self.data_shape = data_shape
+        self.num_label = num_label
+
+        self.data_root = data_root
+        self.dataset_lines = open(data_list).readlines()
+
+        self.mp_data = MPData(num_processes, max_queue_size, self._gen_sample)
+
+    def _gen_sample(self):
+        m_line = random.choice(self.dataset_lines)
+        img_lst = m_line.strip().split(' ')
+        img_path = os.path.join(self.data_root, img_lst[0])
+
+        img = Image.open(img_path).resize(self.data_shape, Image.BILINEAR).convert('L')
+        img = np.array(img)
+        img = np.transpose(img, (1, 0))  # shape: [width, height]
+
+        labels = np.zeros(self.num_label, int)
+        for idx in range(1, len(img_lst)):
+            labels[idx - 1] = int(img_lst[idx])
+
+        return img, labels
+
+    @property
+    def size(self):
+        return len(self.dataset_lines)
+
+    @property
+    def shape(self):
+        return self.data_shape
+
+    def start(self):
+        """
+        Starts the processes
+        """
+        self.mp_data.start()
+
+    def get(self):
+        """
+        Get a sample from the queue
+
+        Returns
+        -------
+        tuple(np.ndarray, np.ndarray)
+            A greyscale image of shape [width, height] and its zero-padded label vector
+        """
+        return self.mp_data.get()
+
+    def reset(self):
+        """
+        Resets the generator by stopping all processes
+        """
+        self.mp_data.reset()
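+
+
+# A minimal sketch of how the pieces above are meant to compose; the paths,
+# shapes and counts here are illustrative assumptions, and init_states comes
+# from cnocr.fit.lstm:
+#
+#   mp_data = MPOcrImages('data/images', 'data/train.txt',
+#                         data_shape=(280, 32), num_label=10,
+#                         num_processes=4, max_queue_size=128)
+#   mp_data.start()
+#   states = init_states(batch_size=128, num_lstm_layer=2, num_hidden=100)
+#   train_iter = OCRIter(0, 128, states, mp_data, num_label=10, name='train')
+#   for batch in train_iter:
+#       ...                # feed batches to a Module
+#   mp_data.reset()        # stop the worker processes when done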
+
+
+class OCRIter(mx.io.DataIter):
+    """
+    Iterator that pulls (image, labels) samples from a generator such as
+    MPOcrImages or MPDigitCaptcha and packs them into SimpleBatch objects
+    """
+    def __init__(self, count, batch_size, lstm_init_states, captcha, num_label, name):
+        """
+        Parameters
+        ----------
+        count: int
+            Number of batches to produce for one epoch; if <= 0, one pass over
+            the dataset (captcha.size // batch_size) is used
+        batch_size: int
+        lstm_init_states: list of tuple(str, tuple)
+            A list of tuples with [0] name and [1] shape of each LSTM init state
+        captcha: MPOcrImages or MPDigitCaptcha
+            Image generator; any other class providing the .shape and .get() interface works
+        name: str
+        """
+        super(OCRIter, self).__init__()
+        self.batch_size = batch_size
+        self.count = count if count > 0 else captcha.size // batch_size
+        self.init_states = lstm_init_states
+        self.init_state_arrays = [mx.nd.zeros(x[1]) for x in lstm_init_states]
+        data_shape = captcha.shape
+        self.provide_data = [('data', (batch_size, 1, data_shape[1], data_shape[0]))] + lstm_init_states
+        self.provide_label = [('label', (self.batch_size, num_label))]
+        self.mp_captcha = captcha
+        self.name = name
+
+    def __iter__(self):
+        init_state_names = [x[0] for x in self.init_states]
+        for k in range(self.count):
+            data = []
+            label = []
+            for i in range(self.batch_size):
+                img, labels = self.mp_captcha.get()
+                img = np.expand_dims(np.transpose(img, (1, 0)), axis=0)  # size: [1, height, width]
+                data.append(img)
+                label.append(labels)
+            data_all = [mx.nd.array(data)] + self.init_state_arrays
+            label_all = [mx.nd.array(label)]
+            data_names = ['data'] + init_state_names
+            label_names = ['label']
+
+            data_batch = SimpleBatch(data_names, data_all, label_names, label_all)
+            yield data_batch
diff --git a/cnocr/data_utils/multiproc_data.py b/cnocr/data_utils/multiproc_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..62d81608ddffb9cd10164f5527e5ef5c3fa9d1c4
--- /dev/null
+++ b/cnocr/data_utils/multiproc_data.py
@@ -0,0 +1,143 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from __future__ import print_function
+from ctypes import c_bool
+import multiprocessing as mp
+from queue import Full as QFullExcept
+from queue import Empty as QEmptyExcept
+
+
+class MPData(object):
+    """
+    Handles multi-process data generation.
+
+    Operation:
+        - call start() to start the data generation
+        - call get() (blocking) to read one sample
+        - call reset() to stop data generation
+    """
+    def __init__(self, num_processes, max_queue_size, fn):
+        """
+        Parameters
+        ----------
+        num_processes: int
+            Number of processes to spawn
+        max_queue_size: int
+            Maximum samples in the queue before processes wait
+        fn: function
+            Function that generates samples; executed on the worker processes
+        """
+        self.queue = mp.Queue(maxsize=int(max_queue_size))
+        self.alive = mp.Value(c_bool, False, lock=False)
+        self.num_proc = num_processes
+        self.proc = list()
+        self.fn = fn
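+
+    # Minimal usage sketch; make_sample is a stand-in generator, and on
+    # spawn-based platforms `fn` must be picklable (a module-level function
+    # rather than a lambda):
+    #
+    #   def make_sample():
+    #       return np.random.rand(32, 280)
+    #
+    #   mp_data = MPData(num_processes=2, max_queue_size=16, fn=make_sample)
+    #   mp_data.start()
+    #   sample = mp_data.get()   # blocks until a worker has produced one
+    #   mp_data.reset()          # signal workers to stop and drain the queue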
+        """
+        self.queue = mp.Queue(maxsize=int(max_queue_size))
+        self.alive = mp.Value(c_bool, False, lock=False)
+        self.num_proc = num_processes
+        self.proc = list()
+        self.fn = fn
+
+    def start(self):
+        """
+        Starts the processes
+        """
+        self._init_proc()
+
+    @staticmethod
+    def _proc_loop(proc_id, alive, queue, fn):
+        """
+        Process loop for generating data
+
+        Parameters
+        ----------
+        proc_id: int
+            Process id
+        alive: multiprocessing.Value
+            variable for signaling whether process should continue or not
+        queue: multiprocessing.Queue
+            queue for passing data back
+        fn: function
+            function object that returns a sample to be pushed into the queue
+        """
+        print("proc {} started".format(proc_id))
+        try:
+            while alive.value:
+                data = fn()
+                put_success = False
+                while alive.value and not put_success:
+                    try:
+                        queue.put(data, timeout=0.5)
+                        put_success = True
+                    except QFullExcept:
+                        # print("Queue Full")
+                        pass
+        except KeyboardInterrupt:
+            print("W: interrupt received, stopping process {} ...".format(proc_id))
+        print("Closing process {}".format(proc_id))
+        queue.close()
+
+    def _init_proc(self):
+        """
+        Start processes if not already started
+        """
+        if not self.proc:
+            self.proc = [
+                mp.Process(target=self._proc_loop, args=(i, self.alive, self.queue, self.fn))
+                for i in range(self.num_proc)
+            ]
+            self.alive.value = True
+            for p in self.proc:
+                p.start()
+
+    def get(self):
+        """
+        Get a datum from the queue
+
+        Returns
+        -------
+        object
+            One sample as produced by fn (for image pipelines, typically an
+            image normalized to [0, 1])
+        """
+        self._init_proc()
+        return self.queue.get()
+
+    def reset(self):
+        """
+        Resets the generator by stopping all processes
+        """
+        self.alive.value = False
+        qsize = 0
+        try:
+            while True:
+                self.queue.get(timeout=0.1)
+                qsize += 1
+        except QEmptyExcept:
+            pass
+        print("Queue size on reset: {}".format(qsize))
+        for i, p in enumerate(self.proc):
+            p.join()
+        self.proc.clear()
diff --git a/cnocr/fit/__init__.py b/cnocr/fit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/cnocr/fit/ctc_loss.py b/cnocr/fit/ctc_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff5161dfa44c74f33334514aa367ca0ca2be44e8
--- /dev/null
+++ b/cnocr/fit/ctc_loss.py
@@ -0,0 +1,33 @@
+import mxnet as mx
+
+def _add_warp_ctc_loss(pred, seq_len, num_label, label):
+    """ Adds mx.sym.WarpCTC on top of pred symbol and returns the resulting symbol """
+    label = mx.sym.Reshape(data=label, shape=(-1,))
+    label = mx.sym.Cast(data=label, dtype='int32')
+    return mx.sym.WarpCTC(data=pred, label=label, label_length=num_label, input_length=seq_len)
+
+
+def _add_mxnet_ctc_loss(pred, seq_len, label):
+    """ Adds mx.sym.contrib.ctc_loss on top of pred symbol and returns the resulting symbol """
+    pred_ctc = mx.sym.Reshape(data=pred, shape=(-4, seq_len, -1, 0))
+
+    loss = mx.sym.contrib.ctc_loss(data=pred_ctc, label=label)
+    ctc_loss = mx.sym.MakeLoss(loss)
+
+    softmax_class = mx.symbol.SoftmaxActivation(data=pred)
+    softmax_loss = mx.sym.MakeLoss(softmax_class)
+    softmax_loss = mx.sym.BlockGrad(softmax_loss)
+    return mx.sym.Group([softmax_loss, ctc_loss])
+
+
+def add_ctc_loss(pred, seq_len, num_label, loss_type):
+    """ Adds CTC loss on top of pred symbol and returns the resulting symbol """
+    label = mx.sym.Variable('label')
+    if loss_type == 'warpctc':
+        print("Using WarpCTC Loss")
+        sm = _add_warp_ctc_loss(pred, seq_len, num_label, label)
+    else:
+        print("Using MXNet CTC Loss")
+        assert loss_type == 'ctc'
+        sm = _add_mxnet_ctc_loss(pred, seq_len, label)
+    return sm
\ No newline at end of file
diff --git a/cnocr/fit/ctc_metrics.py b/cnocr/fit/ctc_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..0db680af18d708133400ee2443992c5c5d4447b7
--- /dev/null
+++ b/cnocr/fit/ctc_metrics.py
@@ -0,0 +1,114 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Contains a class for calculating CTC eval metrics"""
+
+from __future__ import print_function
+
+import numpy as np
+
+
+class CtcMetrics(object):
+    def __init__(self, seq_len):
+        self.seq_len = seq_len
+
+    @staticmethod
+    def ctc_label(p):
+        """
+        Iterates through p, identifying non-zero and non-repeating values, and returns them in a list
+        Parameters
+        ----------
+        p: list of int
+
+        Returns
+        -------
+        list of int
+        """
+        ret = []
+        p1 = [0] + p
+        for i, _ in enumerate(p):
+            c1 = p1[i]
+            c2 = p1[i+1]
+            if c2 == 0 or c2 == c1:
+                continue
+            ret.append(c2)
+        return ret
+
+    @staticmethod
+    def _remove_blank(l):
+        """ Removes trailing zeros in the list of integers and returns a new list of integers"""
+        ret = []
+        for i, _ in enumerate(l):
+            if l[i] == 0:
+                break
+            ret.append(l[i])
+        return ret
+
+    @staticmethod
+    def _lcs(p, l):
+        """ Calculates the Longest Common Subsequence between p and l (both list of int) and returns its length"""
+        # Dynamic Programming Finding LCS
+        if len(p) == 0:
+            return 0
+        P = np.array(list(p)).reshape((1, len(p)))
+        L = np.array(list(l)).reshape((len(l), 1))
+        M = np.int32(P == L)
+        for i in range(M.shape[0]):
+            for j in range(M.shape[1]):
+                up = 0 if i == 0 else M[i-1, j]
+                left = 0 if j == 0 else M[i, j-1]
+                M[i, j] = max(up, left, M[i, j] if (i == 0 or j == 0) else M[i, j] + M[i-1, j-1])
+        return M.max()
+
+    def accuracy(self, label, pred):
+        """ Simple accuracy measure: number of 100% accurate predictions divided by total number """
+        hit = 0.
+        total = 0.
+        batch_size = label.shape[0]
+        for i in range(batch_size):
+            l = self._remove_blank(label[i])
+            p = []
+            for k in range(self.seq_len):
+                p.append(np.argmax(pred[k * batch_size + i]))
+            p = self.ctc_label(p)
+            if len(p) == len(l):
+                match = True
+                for k, _ in enumerate(p):
+                    if p[k] != int(l[k]):
+                        match = False
+                        break
+                if match:
+                    hit += 1.0
+            total += 1.0
+        assert total == batch_size
+        return hit / total
+
+    def accuracy_lcs(self, label, pred):
+        """ Longest Common Subsequence accuracy measure: calculate accuracy of each prediction as LCS/length"""
+        hit = 0.
+        total = 0.
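+        # Each sample scores LCS(prediction, label) / len(label), so partially
+        # correct predictions still earn partial credit.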
+        batch_size = label.shape[0]
+        for i in range(batch_size):
+            l = self._remove_blank(label[i])
+            p = []
+            for k in range(self.seq_len):
+                p.append(np.argmax(pred[k * batch_size + i]))
+            p = self.ctc_label(p)
+            hit += self._lcs(p, l) * 1.0 / len(l)
+            total += 1.0
+        assert total == batch_size
+        return hit / total
+
diff --git a/cnocr/fit/fit.py b/cnocr/fit/fit.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd0cab3b886ad799e3dccab3b6feda5481970a63
--- /dev/null
+++ b/cnocr/fit/fit.py
@@ -0,0 +1,53 @@
+import logging
+import os
+import mxnet as mx
+
+
+def _load_model(args, rank=0):
+    if 'load_epoch' not in args or args.load_epoch is None:
+        return (None, None, None)
+    assert args.prefix is not None
+    model_prefix = args.prefix
+    if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
+        model_prefix += "-%d" % (rank)
+    sym, arg_params, aux_params = mx.model.load_checkpoint(
+        model_prefix, args.load_epoch)
+    logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
+    return (sym, arg_params, aux_params)
+
+
+def fit(network, data_train, data_val, metrics, args, hp, data_names=None):
+    if args.gpu:
+        contexts = [mx.context.gpu(i) for i in range(args.gpu)]
+    else:
+        contexts = [mx.context.cpu(i) for i in range(args.cpu)]
+
+    sym, arg_params, aux_params = _load_model(args)
+    if sym is not None:
+        assert sym.tojson() == network.tojson()
+    if not os.path.exists(os.path.dirname(args.prefix)):
+        os.makedirs(os.path.dirname(args.prefix))
+
+    module = mx.mod.Module(
+        symbol=network,
+        data_names=["data"] if data_names is None else data_names,
+        label_names=['label'],
+        context=contexts)
+
+    module.fit(train_data=data_train,
+               eval_data=data_val,
+               begin_epoch=args.load_epoch if args.load_epoch else 0,
+               num_epoch=hp.num_epoch,
+               # use metrics.accuracy or metrics.accuracy_lcs
+               eval_metric=mx.metric.np(metrics.accuracy, allow_extra_outputs=True),
+               optimizer='AdaDelta',
+               optimizer_params={'learning_rate': hp.learning_rate,
+                                 # 'momentum': hp.momentum,
+                                 'wd': 0.00001,
+                                 },
+               initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
+               arg_params=arg_params,
+               aux_params=aux_params,
+               batch_end_callback=mx.callback.Speedometer(hp.batch_size, 50),
+               epoch_end_callback=mx.callback.do_checkpoint(args.prefix),
+               )
\ No newline at end of file
diff --git a/cnocr/fit/lstm.py b/cnocr/fit/lstm.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a89f69799dc11508be52adfa6230852eae83928
--- /dev/null
+++ b/cnocr/fit/lstm.py
@@ -0,0 +1,106 @@
+from __future__ import print_function
+
+from collections import namedtuple
+import mxnet as mx
+
+LSTMState = namedtuple("LSTMState", ["c", "h"])
+LSTMParam = namedtuple("LSTMParam", ["i2h_weight", "i2h_bias",
+                                     "h2h_weight", "h2h_bias"])
+LSTMModel = namedtuple("LSTMModel", ["rnn_exec", "symbol",
+                                     "init_states", "last_states", "forward_state", "backward_state",
+                                     "seq_data", "seq_labels", "seq_outputs",
+                                     "param_blocks"])
+
+
+def init_states(batch_size, num_lstm_layer, num_hidden):
+    """
+    Returns name and shape of init states of LSTM network
+
+    Parameters
+    ----------
+    batch_size: int
+    num_lstm_layer: int
+    num_hidden: int
+
+    Returns
+    -------
+    list of tuple of str and tuple of int and int
+    """
+    init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer * 2)]
+    init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer * 2)]
+    return init_c + init_h
+
+
+def _lstm(num_hidden, indata, prev_state, param, seqidx, layeridx):
+    """LSTM Cell symbol"""
+    i2h = mx.sym.FullyConnected(data=indata,
+                                weight=param.i2h_weight,
+                                bias=param.i2h_bias,
+                                num_hidden=num_hidden * 4,
+                                name="t%d_l%d_i2h" % (seqidx, layeridx))
+    h2h = mx.sym.FullyConnected(data=prev_state.h,
+                                weight=param.h2h_weight,
+                                bias=param.h2h_bias,
+                                num_hidden=num_hidden * 4,
+                                name="t%d_l%d_h2h" % (seqidx, layeridx))
+    gates = i2h + h2h
+    slice_gates = mx.sym.split(gates, num_outputs=4,
+                               name="t%d_l%d_slice" % (seqidx, layeridx))
+    in_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid")
+    in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
+    forget_gate = mx.sym.Activation(slice_gates[2], act_type="sigmoid")
+    out_gate = mx.sym.Activation(slice_gates[3], act_type="sigmoid")
+    next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
+    next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
+    return LSTMState(c=next_c, h=next_h)
+
+
+def lstm(net, num_lstm_layer, num_hidden, seq_length):
+    last_states = []
+    forward_param = []
+    backward_param = []
+
+    # seq_length = mx.sym.Variable("seq_length")
+    for i in range(num_lstm_layer * 2):
+        last_states.append(LSTMState(c=mx.sym.Variable("l%d_init_c" % i), h=mx.sym.Variable("l%d_init_h" % i)))
+        if i % 2 == 0:
+            forward_param.append(LSTMParam(i2h_weight=mx.sym.Variable("l%d_i2h_weight" % i),
+                                           i2h_bias=mx.sym.Variable("l%d_i2h_bias" % i),
+                                           h2h_weight=mx.sym.Variable("l%d_h2h_weight" % i),
+                                           h2h_bias=mx.sym.Variable("l%d_h2h_bias" % i)))
+        else:
+            backward_param.append(LSTMParam(i2h_weight=mx.sym.Variable("l%d_i2h_weight" % i),
+                                            i2h_bias=mx.sym.Variable("l%d_i2h_bias" % i),
+                                            h2h_weight=mx.sym.Variable("l%d_h2h_weight" % i),
+                                            h2h_bias=mx.sym.Variable("l%d_h2h_bias" % i)))
+
+    slices_net = mx.sym.split(data=net, axis=3, num_outputs=seq_length, squeeze_axis=1)  # bz x features x 1 x time_step
+    # slices_net = mx.sym.slice_axis(data=net, axis=3, begin=0, end=None)  # bz x features x 1 x time_step
+    # seq_length = len(slices_net)
+
+    forward_hidden = []
+    for seqidx in range(seq_length):
+        hidden = mx.sym.flatten(data=slices_net[seqidx])
+        for i in range(num_lstm_layer):
+            next_state = _lstm(num_hidden, indata=hidden, prev_state=last_states[2 * i],
+                               param=forward_param[i], seqidx=seqidx, layeridx=i)
+            hidden = next_state.h
+            last_states[2 * i] = next_state
+        forward_hidden.append(hidden)
+
+    backward_hidden = []
+    for seqidx in range(seq_length):
+        k = seq_length - seqidx - 1
+        hidden = mx.sym.flatten(data=slices_net[k])
+        for i in range(num_lstm_layer):
+            next_state = _lstm(num_hidden, indata=hidden, prev_state=last_states[2 * i + 1],
+                               param=backward_param[i], seqidx=k, layeridx=i)
+            hidden = next_state.h
+            last_states[2 * i + 1] = next_state
+        backward_hidden.insert(0, hidden)
+
+    hidden_all = []
+    for i in range(seq_length):
+        hidden_all.append(mx.sym.concat(*[forward_hidden[i], backward_hidden[i]], dim=1))
+
+    hidden_concat = mx.sym.concat(*hidden_all, dim=0)
+    return hidden_concat
diff --git a/cnocr/hyperparams/__init__.py b/cnocr/hyperparams/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/cnocr/hyperparams/cn_hyperparams.py b/cnocr/hyperparams/cn_hyperparams.py
new file mode 100644
index 0000000000000000000000000000000000000000..87885afbf4196cc3fea17aba00d79120b363e256
--- /dev/null
+++ b/cnocr/hyperparams/cn_hyperparams.py
@@ -0,0 +1,115 @@
+from __future__ import print_function
+
+
+class CnHyperparams(object):
+    """
+    Hyperparameters for LSTM network
+    """
+    def __init__(self):
+        # Training hyper parameters
+        self._train_epoch_size = 2560000
+        self._eval_epoch_size = 3000
+        self._num_epoch = 20
+        self._learning_rate = 0.001
+        self._momentum = 0.9
+        self._bn_mom = 0.9
+        self._workspace = 512
+        self._loss_type = "ctc"  # ["warpctc", "ctc"]
+
+        self._batch_size = 128
+        self._num_classes = 6425  # should be 6426... (was 5990)
+        self._img_width = 280
+        self._img_height = 32
+
+        # DenseNet hyper parameters
+        self._depth = 161
+        self._growrate = 32
+        self._reduction = 0.5
+
+        # LSTM hyper parameters
+        self._num_hidden = 100
+        self._num_lstm_layer = 2
+        # self._seq_length = 35
+        self._seq_length = self._img_width // 8
+        self._num_label = 10
+        self._drop_out = 0.5
+
+    @property
+    def train_epoch_size(self):
+        return self._train_epoch_size
+
+    @property
+    def eval_epoch_size(self):
+        return self._eval_epoch_size
+
+    @property
+    def num_epoch(self):
+        return self._num_epoch
+
+    @property
+    def learning_rate(self):
+        return self._learning_rate
+
+    @property
+    def momentum(self):
+        return self._momentum
+
+    @property
+    def bn_mom(self):
+        return self._bn_mom
+
+    @property
+    def workspace(self):
+        return self._workspace
+
+    @property
+    def loss_type(self):
+        return self._loss_type
+
+    @property
+    def batch_size(self):
+        return self._batch_size
+
+    @property
+    def num_classes(self):
+        return self._num_classes
+
+    @property
+    def img_width(self):
+        return self._img_width
+
+    @property
+    def img_height(self):
+        return self._img_height
+
+    @property
+    def depth(self):
+        return self._depth
+
+    @property
+    def growrate(self):
+        return self._growrate
+
+    @property
+    def reduction(self):
+        return self._reduction
+
+    @property
+    def num_hidden(self):
+        return self._num_hidden
+
+    @property
+    def num_lstm_layer(self):
+        return self._num_lstm_layer
+
+    @property
+    def seq_length(self):
+        return self._seq_length
+
+    @property
+    def num_label(self):
+        return self._num_label
+
+    @property
+    def dropout(self):
+        return self._drop_out
diff --git a/cnocr/hyperparams/hyperparams2.py b/cnocr/hyperparams/hyperparams2.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2f528c01fe0f2249c8ddae32dbd8d2cb28cbb9e
--- /dev/null
+++ b/cnocr/hyperparams/hyperparams2.py
@@ -0,0 +1,114 @@
+from __future__ import print_function
+
+
+class Hyperparams(object):
+    """
+    Hyperparameters for LSTM network
+    """
+    def __init__(self):
+        # Training hyper parameters
+        self._train_epoch_size = 30000
+        self._eval_epoch_size = 3000
+        self._num_epoch = 20
+        self._learning_rate = 0.001
+        self._momentum = 0.9
+        self._bn_mom = 0.9
+        self._workspace = 512
+        self._loss_type = "ctc"  # ["warpctc", "ctc"]
+
+        self._batch_size = 128
+        self._num_classes = 11
+        self._img_width = 100
+        self._img_height = 32
+
+        # DenseNet hyper parameters
+        self._depth = 161
+        self._growrate = 32
+        self._reduction = 0.5
+
+        # LSTM hyper parameters
+        self._num_hidden = 100
+        self._num_lstm_layer = 2
+        self._seq_length = self._img_width // 8
+        self._num_label = 4
+        self._drop_out = 0.5
+
+    @property
+    def train_epoch_size(self):
+        return self._train_epoch_size
+
+    @property
+    def eval_epoch_size(self):
+        return self._eval_epoch_size
+
+    @property
+    def num_epoch(self):
+        return self._num_epoch
+
+    @property
+    def learning_rate(self):
+        return self._learning_rate
+
+    @property
+    def momentum(self):
+        return self._momentum
+
+    @property
+    def bn_mom(self):
+        return self._bn_mom
+
+    @property
+    def workspace(self):
+        return self._workspace
+
+    @property
+    def loss_type(self):
+        return self._loss_type
+
+    @property
+    def batch_size(self):
+        return self._batch_size
+
+    @property
+    def num_classes(self):
+        return self._num_classes
+
+    @property
+    def img_width(self):
+        return self._img_width
+
+    @property
+    def img_height(self):
+        return self._img_height
+
+    @property
+    def depth(self):
+        return self._depth
+
+    @property
+    def growrate(self):
+        return self._growrate
+
+    @property
+    def reduction(self):
+        return self._reduction
+
+    @property
+    def num_hidden(self):
+        return self._num_hidden
+
+    @property
+    def num_lstm_layer(self):
+        return self._num_lstm_layer
+
+    @property
+    def seq_length(self):
+        return self._seq_length
+
+    @property
+    def num_label(self):
+        return self._num_label
+
+    @property
+    def dropout(self):
+        return self._drop_out
diff --git a/cnocr/symbols/__init__.py b/cnocr/symbols/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/cnocr/symbols/__pycache__/crnn_no_lstm.cpython-36.pyc b/cnocr/symbols/__pycache__/crnn_no_lstm.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98400aa6d2fc25b2c6a167f8a62017742877a9cf
Binary files /dev/null and b/cnocr/symbols/__pycache__/crnn_no_lstm.cpython-36.pyc differ
diff --git a/cnocr/symbols/__pycache__/ctc_loss.cpython-36.pyc b/cnocr/symbols/__pycache__/ctc_loss.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1af88ee9592cd5f920cf6bbaec9ddfc3c405faab
Binary files /dev/null and b/cnocr/symbols/__pycache__/ctc_loss.cpython-36.pyc differ
diff --git a/cnocr/symbols/__pycache__/ctc_metrics.cpython-36.pyc b/cnocr/symbols/__pycache__/ctc_metrics.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2b088fadb1b813d993d049ffc5c31a7241bf42a
Binary files /dev/null and b/cnocr/symbols/__pycache__/ctc_metrics.cpython-36.pyc differ
diff --git a/cnocr/symbols/crnn.py b/cnocr/symbols/crnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae46620a5eb8c0d3babdc96f7d212fb10b25bb9f
--- /dev/null
+++ b/cnocr/symbols/crnn.py
@@ -0,0 +1,161 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick Haffner.
+Gradient-based learning applied to document recognition.
+Proceedings of the IEEE (1998)
+"""
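+# This file defines two symbol builders: crnn_no_lstm (conv backbone feeding
+# the classifier directly) and crnn_lstm (conv backbone followed by the
+# bidirectional LSTM from cnocr.fit.lstm).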
+import mxnet as mx
+from ..fit.ctc_loss import add_ctc_loss
+from ..fit.lstm import lstm
+
+def crnn_no_lstm(hp):
+
+    # input
+    data = mx.sym.Variable('data')
+    label = mx.sym.Variable('label')
+
+    kernel_size = [(3, 3), (3, 3), (3, 3), (3, 3), (3, 3), (3, 3)]
+    padding_size = [(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]
+    layer_size = [min(32*2**(i+1), 512) for i in range(len(kernel_size))]
+
+    def convRelu(i, input_data, bn=True):
+        layer = mx.symbol.Convolution(name='conv-%d' % i, data=input_data, kernel=kernel_size[i], pad=padding_size[i],
+                                      num_filter=layer_size[i])
+        if bn:
+            layer = mx.sym.BatchNorm(data=layer, name='batchnorm-%d' % i)
+        layer = mx.sym.LeakyReLU(data=layer, name='leakyrelu-%d' % i)
+        return layer
+
+    net = convRelu(0, data)  # bz x f x 32 x 200
+    max = mx.sym.Pooling(data=net, name='pool-0_m', pool_type='max', kernel=(2, 2), stride=(2, 2))
+    avg = mx.sym.Pooling(data=net, name='pool-0_a', pool_type='avg', kernel=(2, 2), stride=(2, 2))
+    net = max - avg  # 16 x 100
+    net = convRelu(1, net)
+    net = mx.sym.Pooling(data=net, name='pool-1', pool_type='max', kernel=(2, 2), stride=(2, 2))  # bz x f x 8 x 50
+    net = convRelu(2, net, True)
+    net = convRelu(3, net)
+    net = mx.sym.Pooling(data=net, name='pool-2', pool_type='max', kernel=(2, 2), stride=(2, 2))  # bz x f x 4 x 25
+    net = convRelu(4, net, True)
+    net = convRelu(5, net)
+    net = mx.symbol.Pooling(data=net, kernel=(4, 1), pool_type='avg', name='pool1')  # bz x f x 1 x 25
+
+    if hp.dropout > 0:
+        net = mx.symbol.Dropout(data=net, p=hp.dropout)
+
+    net = mx.sym.transpose(data=net, axes=[1, 0, 2, 3])  # f x bz x 1 x 25
+    net = mx.sym.flatten(data=net)  # f x (bz x 25)
+    hidden_concat = mx.sym.transpose(data=net, axes=[1, 0])  # (bz x 25) x f
+
+    # mx.sym.transpose(net, [])
+    pred = mx.sym.FullyConnected(data=hidden_concat, num_hidden=hp.num_classes)  # (bz x 25) x num_classes
+
+    if hp.loss_type:
+        # Training mode, add loss
+        return add_ctc_loss(pred, hp.seq_length, hp.num_label, hp.loss_type)
+    else:
+        # Inference mode, add softmax
+        return mx.sym.softmax(data=pred, name='softmax')
+
+
+def crnn_lstm(hp):
+
+    # input
+    data = mx.sym.Variable('data')
+    label = mx.sym.Variable('label')
+    # data = mx.sym.Variable('data', shape=(128, 1, 32, 100))
+    # label = mx.sym.Variable('label', shape=(128, 4))
+
+    kernel_size = [(3, 3), (3, 3), (3, 3), (3, 3), (3, 3), (3, 3)]
+    padding_size = [(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]
+    layer_size = [min(32*2**(i+1), 512) for i in range(len(kernel_size))]
+
+    def convRelu(i, input_data, bn=True):
+        layer = mx.symbol.Convolution(name='conv-%d' % i, data=input_data, kernel=kernel_size[i], pad=padding_size[i],
+                                      num_filter=layer_size[i])
+        if bn:
+            layer = mx.sym.BatchNorm(data=layer, name='batchnorm-%d' % i)
+        layer = mx.sym.LeakyReLU(data=layer, name='leakyrelu-%d' % i)
+        layer = mx.symbol.Convolution(name='conv-%d-1x1' % i, data=layer, kernel=(1, 1), pad=(0, 0),
+                                      num_filter=layer_size[i])
+        if bn:
+            layer = mx.sym.BatchNorm(data=layer, name='batchnorm-%d-1x1' % i)
+        layer = mx.sym.LeakyReLU(data=layer, name='leakyrelu-%d-1x1' % i)
+        return layer
+
+    net = convRelu(0, data)  # bz x f x 32 x 280
+    # print('0', net.infer_shape()[1])
+    max = mx.sym.Pooling(data=net, name='pool-0_m', pool_type='max', kernel=(2, 2), stride=(2, 2))
+    avg = mx.sym.Pooling(data=net, name='pool-0_a', pool_type='avg', kernel=(2, 2), stride=(2, 2))
+    net = max - avg  # bz x f x 16 x 140
+    net = convRelu(1, net)
+    # print('2', net.infer_shape()[1])
+    net = mx.sym.Pooling(data=net, name='pool-1', pool_type='max', kernel=(2, 2), stride=(2, 2))  # res: bz x f x 8 x 70
+    # print('3', net.infer_shape()[1])
+    net = convRelu(2, net, True)
+    net = convRelu(3, net)
+    net = mx.sym.Pooling(data=net, name='pool-2', pool_type='max', kernel=(2, 2), stride=(2, 2))  # res: bz x f x 4 x 35
+    # print('4', net.infer_shape()[1])
+    net = convRelu(4, net, True)
+    net = convRelu(5, net)
+    net = mx.symbol.Pooling(data=net, kernel=(4, 1), pool_type='avg', name='pool1')  # res: bz x f x 1 x 35
+    # print('5', net.infer_shape()[1])
+
+    if hp.dropout > 0:
+        net = mx.symbol.Dropout(data=net, p=hp.dropout)
+
+    hidden_concat = lstm(net, num_lstm_layer=hp.num_lstm_layer, num_hidden=hp.num_hidden, seq_length=hp.seq_length)
+    # import pdb; pdb.set_trace()
+
+    # mx.sym.transpose(net, [])
+    pred = mx.sym.FullyConnected(data=hidden_concat, num_hidden=hp.num_classes, name='pred_fc')  # (bz x 25) x num_classes
+
+    if hp.loss_type:
+        # Training mode, add loss
+        return add_ctc_loss(pred, hp.seq_length, hp.num_label, hp.loss_type)
+    else:
+        # Inference mode, add softmax
+        return mx.sym.softmax(data=pred, name='softmax')
+
+
+from ..hyperparams.cn_hyperparams import CnHyperparams as Hyperparams
+
+if __name__ == '__main__':
+    hp = Hyperparams()
+
+    init_states = {}
+    init_states['data'] = (hp.batch_size, 1, hp.img_height, hp.img_width)
+    init_states['label'] = (hp.batch_size, hp.num_label)
+
+    # init_c = {('l%d_init_c' % l): (hp.batch_size, hp.num_hidden) for l in range(hp.num_lstm_layer*2)}
+    # init_h = {('l%d_init_h' % l): (hp.batch_size, hp.num_hidden) for l in range(hp.num_lstm_layer*2)}
+    #
+    # for item in init_c:
+    #     init_states[item] = init_c[item]
+    # for item in init_h:
+    #     init_states[item] = init_h[item]
+
+    symbol = crnn_no_lstm(hp)
+    internals = symbol.get_internals()
+    _, out_shapes, _ = internals.infer_shape(**init_states)
+    shape_dict = dict(zip(internals.list_outputs(), out_shapes))
+
+    for item in shape_dict:
+        print(item, shape_dict[item])
diff --git a/examples/00010965.jpg b/examples/00010965.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..39553dd667e0ec44428075d7acaf5f678cbd9dca
Binary files /dev/null and b/examples/00010965.jpg differ
diff --git a/examples/00010994.jpg b/examples/00010994.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b03089740ae0606a74e2bb4a682a419534dcfc62
Binary files /dev/null and b/examples/00010994.jpg differ
diff --git a/examples/20457890_2399557098.jpg b/examples/20457890_2399557098.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..585cd509456c539c1ea155458f1c054f0550b231
Binary files /dev/null and b/examples/20457890_2399557098.jpg differ
diff --git a/examples/rand_cn1.png b/examples/rand_cn1.png
new file mode 100644
index 0000000000000000000000000000000000000000..69e61e3d6ef2458c3d291280cfa6e5cffd06d00d
Binary files /dev/null and b/examples/rand_cn1.png differ
diff --git a/examples/rand_cn2.png b/examples/rand_cn2.png
new file mode 100644
index 0000000000000000000000000000000000000000..35e62062f423d6587392bafe295d6a69a3473c8c
Binary files /dev/null and b/examples/rand_cn2.png differ
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7d6de99e3b437290dd4e52553d9dfa270ea0d0a2
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+#click==6.7
+numpy==1.14.0
+pillow==5.3.0
+mxnet==1.3.1
+gluoncv==0.3.0
+#opencv-python==3.4.4.19
diff --git a/scripts/infer_ocr.py b/scripts/infer_ocr.py
new file mode 100644
index 0000000000000000000000000000000000000000..93046fb924ecb46efc65f661273bc598e5f317b8
--- /dev/null
+++ b/scripts/infer_ocr.py
@@ -0,0 +1,149 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+""" An example of predicting image text with an LSTM network pre-trained with a CTC loss"""
+
+from __future__ import print_function
+
+import argparse
+
+from cnocr.fit.ctc_metrics import CtcMetrics
+# from PIL import Image
+from cnocr.hyperparams.cn_hyperparams import CnHyperparams as Hyperparams
+from cnocr.hyperparams.hyperparams2 import Hyperparams as Hyperparams2
+from cnocr.fit.lstm import init_states
+import mxnet as mx
+import numpy as np
+from cnocr.data_utils.data_iter import SimpleBatch
+from cnocr.symbols.crnn import crnn_lstm
+
+
+def read_captcha_img(path, hp):
+    """ Reads image specified by path into numpy.ndarray"""
+    import cv2
+    tgt_h, tgt_w = hp.img_height, hp.img_width
+    img = cv2.resize(cv2.imread(path, 0), (tgt_h, tgt_w)).astype(np.float32) / 255
+    img = np.expand_dims(img.transpose(1, 0), 0)  # res: [1, height, width]
+    return img
+
+
+def read_ocr_img(path, hp):
+    # img = Image.open(path).resize((hp.img_width, hp.img_height), Image.BILINEAR)
+    # img = img.convert('L')
+    # img = np.expand_dims(np.array(img), 0)
+    # return img
+    img = mx.image.imread(path, 0)
+    scale = hp.img_height / img.shape[0]
+    new_width = int(scale * img.shape[1])
+    hp._seq_length = new_width // 8
+    img = mx.image.imresize(img, new_width, hp.img_height).asnumpy()
+    img = np.squeeze(img, axis=2)
+    # import pdb; pdb.set_trace()
+    return np.expand_dims(img, 0)
+
+    # img2 = mx.image.imread(path)
+    # img2 = mx.image.imresize(img2, hp.img_width, hp.img_height)
+    # img2 = cv2.cvtColor(img2.asnumpy(), cv2.COLOR_RGB2GRAY)
+    # img2 = np.expand_dims(np.array(img2), 0)
+    # return img2
+
+
+def lstm_init_states(batch_size, hp):
+    """ Returns a tuple of names and zero arrays for LSTM init states"""
+    init_shapes = init_states(batch_size=batch_size, num_lstm_layer=hp.num_lstm_layer, num_hidden=hp.num_hidden)
+    init_names = [s[0] for s in init_shapes]
+    init_arrays = [mx.nd.zeros(x[1]) for x in init_shapes]
+    # init_names.append('seq_length')
+    # init_arrays.append(hp.seq_length)
+    return init_names, init_arrays
+
+
+def load_module(prefix, epoch, data_names, data_shapes, network=None):
+    """
+    Loads the model from the checkpoint specified by prefix and epoch, binds it
+    to an executor, sets its parameters, and returns an mx.mod.Module
+    """
+    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
+    if network is not None:
+        sym = network
+
+    # We don't need CTC loss for prediction, just a simple softmax will suffice.
+    # We get the output of the layer just before the loss layer ('pred_fc') and add softmax on top
+    pred_fc = sym.get_internals()['pred_fc_output']
+    sym = mx.sym.softmax(data=pred_fc)
+
+    mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=data_names, label_names=None)
+    mod.bind(for_training=False, data_shapes=data_shapes)
+    mod.set_params(arg_params, aux_params, allow_missing=False)
+    return mod
+
+
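+# The charset file is expected to hold one character per line; the line number
+# (0-based) is the character's class id, and id 0 is reserved for the CTC blank.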
+def read_charset(charset_fp):
+    alphabet = []
+    # element 0 is a reserved id, used by CTC to separate characters;
+    # it does not correspond to a meaningful character
+    with open(charset_fp) as fp:
+        for line in fp:
+            alphabet.append(line.rstrip('\n'))
+    print('Alphabet size: %d' % len(alphabet))
+    inv_alph_dict = {_char: idx for idx, _char in enumerate(alphabet)}
+    inv_alph_dict[' '] = inv_alph_dict['']  # map the space character to the blank id
+    return alphabet, inv_alph_dict
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dataset", help="use which kind of dataset, captcha or cn_ocr",
+                        choices=['captcha', 'cn_ocr'], type=str, default='cn_ocr')
+    parser.add_argument("--file", help="Path to the image file to recognize")
+    parser.add_argument("--prefix", help="Checkpoint prefix [Default './models/model']", default='./models/model')
+    parser.add_argument("--epoch", help="Checkpoint epoch [Default 100]", type=int, default=100)
+    parser.add_argument('--charset_file', type=str, help='File that stores the mapping from each character to its id.')
+    args = parser.parse_args()
+    if args.dataset == 'cn_ocr':
+        hp = Hyperparams()
+        img = read_ocr_img(args.file, hp)
+    else:
+        hp = Hyperparams2()
+        img = read_captcha_img(args.file, hp)
+
+    init_state_names, init_state_arrays = lstm_init_states(batch_size=1, hp=hp)
+    # import pdb; pdb.set_trace()
+
+    sample = SimpleBatch(
+        data_names=['data'] + init_state_names,
+        data=[mx.nd.array([img])] + init_state_arrays)
+
+    network = crnn_lstm(hp)
+    mod = load_module(args.prefix, args.epoch, sample.data_names, sample.provide_data, network=network)
+
+    mod.forward(sample)
+    prob = mod.get_outputs()[0].asnumpy()
+
+    prediction = CtcMetrics.ctc_label(np.argmax(prob, axis=-1).tolist())
+
+    if args.charset_file:
+        alphabet, _ = read_charset(args.charset_file)
+        res = [alphabet[p] for p in prediction]
+        print("Predicted Chars:", res)
+    else:
+        # Predictions are 1 to 10 for digits 0 to 9 respectively (prediction 0 means no-digit)
+        prediction = [p - 1 for p in prediction]
+        print("Digits:", prediction)
+    return
+
+
+if __name__ == '__main__':
+    main()
diff --git a/scripts/run_crnn.sh b/scripts/run_crnn.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b8b63f822e6aed2d0d1efb073e8cf5276ede8bf5
--- /dev/null
+++ b/scripts/run_crnn.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# -*- coding: utf-8 -*-
+
+cd `dirname $0`
+
+# Train the Chinese OCR model crnn
+python train_ocr.py --cpu 2 --num_proc 4 --loss ctc --dataset cn_ocr
diff --git a/scripts/train_ocr.py b/scripts/train_ocr.py
new file mode 100644
index 0000000000000000000000000000000000000000..0566c57d034c79a17d77f27101dae4e3e25d3711
--- /dev/null
+++ b/scripts/train_ocr.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+from __future__ import print_function
+
+import argparse
+import logging
+import os
+import mxnet as mx
+from cnocr.data_utils.captcha_generator import MPDigitCaptcha
+
+from cnocr.hyperparams.cn_hyperparams import CnHyperparams as Hyperparams
+from cnocr.hyperparams.hyperparams2 import Hyperparams as Hyperparams2
+from cnocr.data_utils.data_iter import ImageIterLstm, MPOcrImages, OCRIter
+from cnocr.symbols.crnn import crnn_no_lstm, crnn_lstm
+from cnocr.fit.ctc_metrics import CtcMetrics
+from cnocr.fit.fit import fit
+
+def parse_args():
+    # Parse command line arguments
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--dataset",
+                        help="use which kind of dataset, captcha or cn_ocr",
+                        choices=['captcha', 'cn_ocr'],
+                        type=str, default='captcha')
+    parser.add_argument("--data_root", help="Path to image files", type=str,
+                        default='/Users/king/Documents/WhatIHaveDone/Test/text_renderer/output/wechat_simulator')
+    parser.add_argument("--train_file", help="Path to train txt file", type=str,
+                        default='/Users/king/Documents/WhatIHaveDone/Test/text_renderer/output/wechat_simulator/train.txt')
+    parser.add_argument("--test_file", help="Path to test txt file", type=str,
+                        default='/Users/king/Documents/WhatIHaveDone/Test/text_renderer/output/wechat_simulator/test.txt')
+    parser.add_argument("--cpu",
+                        help="Number of CPUs for training [Default 2]. Ignored if --gpu is specified.",
+                        type=int, default=2)
+    parser.add_argument("--gpu", help="Number of GPUs for training [Default 0]", type=int)
+    parser.add_argument('--load_epoch', type=int,
+                        help='load the model on an epoch using the model-load-prefix')
+    parser.add_argument("--prefix", help="Checkpoint prefix [Default './models/model']", default='./models/model')
+    parser.add_argument("--loss", help="'ctc' or 'warpctc' loss [Default 'ctc']", default='ctc')
+    parser.add_argument("--num_proc", help="Number of CAPTCHA-generating processes [Default 4]", type=int, default=4)
+    parser.add_argument("--font_path", help="Path to ttf font file or directory containing ttf files")
+    return parser.parse_args()
+
+
+def get_fonts(path):
+    fonts = list()
+    if os.path.isdir(path):
+        for filename in os.listdir(path):
+            if filename.endswith('.ttf') or filename.endswith('.ttc'):
+                fonts.append(os.path.join(path, filename))
+    else:
+        fonts.append(path)
+    return fonts
+
+
+def run_captcha(args):
+    hp = Hyperparams2()
+
+    network = crnn_lstm(hp)
+    # arg_shape, out_shape, aux_shape = network.infer_shape(data=(128, 1, 32, 100), label=(128, 10),
+    #     l0_init_h=(128, 100), l1_init_h=(128, 100), l2_init_h=(128, 100), l3_init_h=(128, 100))
+    # print(dict(zip(network.list_arguments(), arg_shape)))
+    # import pdb; pdb.set_trace()
+
+    # Start a multiprocessor captcha image generator
+    mp_captcha = MPDigitCaptcha(
+        font_paths=get_fonts(args.font_path), h=hp.img_width, w=hp.img_height,
+        num_digit_min=3, num_digit_max=4, num_processes=args.num_proc, max_queue_size=hp.batch_size * 2)
+    mp_captcha.start()
+    # img, num = mp_captcha.get()
+    # print(img.shape)
+    # import numpy as np
+    # import cv2
+    # img = np.transpose(img, (1, 0))
+    # cv2.imwrite('captcha1.png', img * 255)
+    # import pdb; pdb.set_trace()
+
+    init_c = [('l%d_init_c' % l, (hp.batch_size, hp.num_hidden)) for l in range(hp.num_lstm_layer * 2)]
+    init_h = [('l%d_init_h' % l, (hp.batch_size, hp.num_hidden)) for l in range(hp.num_lstm_layer * 2)]
+    init_states = init_c + init_h
+    data_names = ['data'] + [x[0] for x in init_states]
+
+    data_train = OCRIter(
+        hp.train_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, num_label=hp.num_label,
+        name='train')
+    data_val = OCRIter(
+        hp.eval_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, num_label=hp.num_label,
+        name='val')
+
+    head = '%(asctime)-15s %(message)s'
+    logging.basicConfig(level=logging.DEBUG, format=head)
+
+    metrics = CtcMetrics(hp.seq_length)
+
+    fit(network=network, data_train=data_train, data_val=data_val, metrics=metrics, args=args, hp=hp, data_names=data_names)
+
+    mp_captcha.reset()
+
+
+def run_cn_ocr(args):
+    hp = Hyperparams()
+
+    network = crnn_lstm(hp)
+
+    mp_data_train = MPOcrImages(args.data_root, args.train_file, (hp.img_width, hp.img_height), hp.num_label,
+                                num_processes=args.num_proc, max_queue_size=hp.batch_size * 2)
+    # img, num = mp_data_train.get()
+    # print(img.shape)
+    # print(mp_data_train.shape)
+    # import pdb; pdb.set_trace()
+    # import numpy as np
+    # import cv2
+    # img = np.transpose(img, (1, 0))
+    # cv2.imwrite('captcha1.png', img * 255)
+    # import pdb; pdb.set_trace()
+    mp_data_test = MPOcrImages(args.data_root, args.test_file, (hp.img_width, hp.img_height), hp.num_label,
+                               num_processes=max(args.num_proc // 2, 1), max_queue_size=hp.batch_size * 2)
+    mp_data_train.start()
+    mp_data_test.start()
+
+    init_c = [('l%d_init_c' % l, (hp.batch_size, hp.num_hidden)) for l in range(hp.num_lstm_layer * 2)]
+    init_h = [('l%d_init_h' % l, (hp.batch_size, hp.num_hidden)) for l in range(hp.num_lstm_layer * 2)]
+    init_states = init_c + init_h
+    data_names = ['data'] + [x[0] for x in init_states]
+
+    data_train = OCRIter(
+        hp.train_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_data_train, num_label=hp.num_label,
+        name='train')
+    data_val = OCRIter(
+        hp.eval_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_data_test, num_label=hp.num_label,
+        name='val')
+    # data_train = ImageIterLstm(
+    #     args.data_root, args.train_file, hp.batch_size, (hp.img_width, hp.img_height), hp.num_label, init_states, name="train")
+    # data_val = ImageIterLstm(
+    #     args.data_root, args.test_file, hp.batch_size, (hp.img_width, hp.img_height), hp.num_label, init_states, name="val")
+
+    head = '%(asctime)-15s %(message)s'
+    logging.basicConfig(level=logging.DEBUG, format=head)
+
+    metrics = CtcMetrics(hp.seq_length)
+
+    fit(network=network, data_train=data_train, data_val=data_val, metrics=metrics, args=args, hp=hp, data_names=data_names)
+
+    mp_data_train.reset()
+    mp_data_test.reset()
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    if args.dataset == 'captcha':
+        run_captcha(args)
+    else:
+        run_cn_ocr(args)
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..115c3f2c1f4a20f4663e7ae43e05a417236084f8
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+import os
+from setuptools import find_packages, setup
+from setuptools.command.build_py import build_py
+from subprocess import check_call
+dir_path = os.path.dirname(os.path.realpath(__file__))
+
+required = [
+    'numpy>=1.14.0,<1.15.0',
+    'pillow>=5.3.0',
+    'mxnet>=1.3.1,<1.4.0',
+    'gluoncv>=0.3.0,<0.4.0',
+]
+
+setup(
+    name='cnocr',
+    version='0.1',
+    description="Package for Chinese OCR, which can be used directly after installation without training your own OCR model",
+    author='breezedeus',
+    author_email='breezedeus@163.com',
+    license='Apache 2.0',
+    url='https://github.com/breezedeus/cnocr',
+    platforms=["all"],
+    packages=find_packages(),
+    # entry_points={'console_scripts': ['chitchatbot=chitchatbot.cli:main'],
+    #               'plus.ein.botlet': ['chitchatbot=chitchatbot:ChitchatBot'],
+    #               'plus.ein.botlet.parser': ['chitchatbot=chitchatbot:Spec']},
+    include_package_data=True,
+    install_requires=required,
+    zip_safe=False,
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'Operating System :: OS Independent',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: Apache Software License',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: Implementation',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Topic :: Software Development :: Libraries'
+    ],
+)
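
Usage note: the inference path in scripts/infer_ocr.py boils down to "argmax per time step, then CtcMetrics.ctc_label to collapse repeats and drop the blank id 0, then a charset lookup". A minimal sketch of just that decoding step follows; here `prob` is assumed to be the (seq_len x num_classes) softmax output taken from mod.get_outputs(), `alphabet` the list returned by read_charset(), and decode_prediction is an illustrative helper name, not a function shipped in this diff.

    import numpy as np
    from cnocr.fit.ctc_metrics import CtcMetrics

    def decode_prediction(prob, alphabet):
        # prob: (seq_len, num_classes) softmax scores for a single image
        raw = np.argmax(prob, axis=-1).tolist()  # best class id per time step
        ids = CtcMetrics.ctc_label(raw)          # collapse repeats, drop blank (id 0)
        return ''.join(alphabet[i] for i in ids)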