diff --git a/Makefile b/Makefile index 3123a7c..edd2cb7 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif @@ -11,6 +11,7 @@ help: @echo " make help - shows this message" @echo " make install - Installs the ncsdk." @echo " make examples - makes the ncsdk examples." + @echo " make api - installs only the api. Ideal for RPi setup." @echo " make uninstall - uninstalls the ncsdk." @echo " make clean - removes targets and intermediate files." @@ -21,6 +22,10 @@ all: install examples opencv: ./install-opencv.sh +.PHONY: uninstallopencv +uninstallopencv: + ./uninstall-opencv.sh + .PHONY: prereqs prereqs: @sed -i 's/\r//' ncsdk.conf @@ -32,12 +37,9 @@ prereqs: @sed -i 's/\r//' install.sh @sed -i 's/\r//' uninstall.sh @sed -i 's/\r//' README.md - @chmod +x install.sh - @chmod +x uninstall.sh - @chmod +x install-opencv.sh .PHONY: install -install: prereqs +install: prereqs uninstallopencv @echo "\nmake install starting." ./install.sh @@ -56,6 +58,10 @@ runexamples: prereqs opencv @echo "\nmake examples starting." (cd examples; make run) +.PHONY: api +api: + @echo "\nmake api starting." + (cd api/src; make; make install) + .PHONY: clean clean: @echo "\nmake clean starting." diff --git a/README.md b/README.md index fb9d913..e7225d8 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,30 @@ -# Movidius™ Neural Compute Software Development Kit -This SDK is provided for users of the [Movidius™ Neural Compute Stick (NCS)](https://developer.movidius.com/). It provides software tools, an API, and examples which enable developers to create software that takes advantage of the hardware accelerated neural network capability provided by the NCS. +# Intel® Movidius™ Neural Compute SDK +This Intel® Movidius™ Neural Compute software developer kit (NCSDK) is provided for users of the [Intel® Movidius™ Neural Compute Stick](https://developer.movidius.com/) (Intel® Movidius™ NCS). It includes software tools, an API, and examples, so developers can create software that takes advantage of the accelerated neural network capability provided by the Intel Movidius NCS hardware. # Installation -The provided Makefile helps with installation. Clone this repository and then run the following command to install the SDK. +The provided Makefile helps with installation. Clone this repository and then run the following command to install the NCSDK: ``` make install ``` # Examples -Also included in the SDK are examples. After cloning and running 'make install' run the following command to install examples. +The Neural Compute SDK also includes examples. After cloning and running 'make install,' run the following command to install the examples: ``` make examples ``` +## NCAPPZOO Examples +For additional examples, please see the Neural Compute App Zoo available at [http://www.github.com/movidius/ncappzoo](http://www.github.com/movidius/ncappzoo). The ncappzoo is a valuable resource for NCS users and includes community-developed applications and neural networks for the NCS.
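+## Python API Sketch
+As a quick orientation to the API these examples build on, the sketch below shows the basic open → allocate → infer → clean up flow of the NCSDK Python binding (installed by 'make install', or 'make api' for an API-only setup). It is illustrative only: the file name 'graph' and the 224x224x3 half-float input are placeholders for a real compiled network and its expected input shape.

```python
# Minimal inference sketch using the mvnc Python binding from this SDK.
# Assumptions (not part of the repo's instructions): an NCS is plugged in,
# and 'graph' is a placeholder path to a network compiled with the SDK tools.
import numpy
from mvnc import mvncapi as mvnc

devices = mvnc.EnumerateDevices()        # names of attached NCS devices
if not devices:
    raise SystemExit('No NCS device found')

device = mvnc.Device(devices[0])
device.OpenDevice()

with open('graph', 'rb') as f:           # compiled network blob
    blob = f.read()
graph = device.AllocateGraph(blob)

tensor = numpy.zeros((224, 224, 3), dtype=numpy.float16)  # stand-in input
graph.LoadTensor(tensor, 'user object')  # queue one inference
output, userobj = graph.GetResult()      # blocks until the result is ready

graph.DeallocateGraph()
device.CloseDevice()
```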
+ # Documentation -The complete Neural Compute SDK documentation can be viewed at [https://movidius.github.io/ncsdk/](https://movidius.github.io/ncsdk/) +The complete Intel Movidius Neural Compute SDK documentation can be viewed at [https://movidius.github.io/ncsdk/](https://movidius.github.io/ncsdk/). + +# Getting Started Video +For installation and general instructions to get started with the NCSDK, take a look at this [video](https://www.youtube.com/watch?v=fESFVNcQVVA). + +# Troubleshooting and Tech Support +Be sure to check the [NCS Troubleshooting Guide](https://ncsforum.movidius.com/discussion/370/intel-ncs-troubleshooting-help-and-guidelines#latest) if you run into any issues with the NCS or NCSDK. +For general tech support issues, the [NCS User Forum](https://developer.movidius.com/forums) is also recommended; it contains community discussions on many issues and their resolutions. diff --git a/api/LICENSE b/api/LICENSE new file mode 100644 index 0000000..a444ba1 --- /dev/null +++ b/api/LICENSE @@ -0,0 +1,193 @@ +SOFTWARE TOOLS LICENSE AGREEMENT + +DO NOT DOWNLOAD, INSTALL, ACCESS, COPY, OR USE ANY PORTION OF THE MATERIALS (DEFINED BELOW) UNTIL YOU HAVE READ AND ACCEPTED +THE TERMS AND CONDITIONS OF THIS AGREEMENT. BY INSTALLING, COPYING, ACCESSING, OR USING THE MATERIALS, YOU AGREE TO BE LEGALLY +BOUND BY THE TERMS AND CONDITIONS OF THIS AGREEMENT. If You do not agree to be bound by, or the entity for whose benefit You act has not +authorized You to accept, these terms and conditions, do not install, access, copy, or use the Software and destroy all copies of the Software in Your +possession. + +This DEVELOPMENT TOOLS LICENSE AGREEMENT (this "Agreement") is entered into between Intel Corporation, a Delaware corporation ("Intel") and You. +"You" refers to you or your employer or other entity for whose benefit you act, as applicable. If you are agreeing to the terms and conditions of this +Agreement on behalf of a company or other legal entity, you represent and warrant that you have the legal authority to bind that legal entity to the +Agreement, in which case, "You" or "Your" shall be in reference to such entity. Intel and You are referred to herein individually as a "Party" or, together, as +the "Parties". + +The Parties, in consideration of the mutual covenants contained in this Agreement, and for other good and valuable consideration, the receipt and +sufficiency of which they acknowledge, and intending to be legally bound, agree as follows: + +1. DEFINITIONS. The following definitions are used throughout this Agreement: +"Affiliate" means any entity controlling, controlled by or under common control with a Party hereto, where "control" means the direct or indirect ownership +of more than fifty percent (50%) of such entity's capital or equivalent voting rights. An entity will be deemed an "Affiliate" only as long as such control exists +during the term of this Agreement. + +"Contractor" means a third party consultant or subcontractor who requires access to or use of the Materials to perform work on Your behalf or at Your +behest. + +"Development Tools" means the development, evaluation, production, or test tool software, and associated documentation or other collateral, identified +in the "development_tools.txt" text files, if any, included in the Materials. + +"Derivatives" means derivative works as defined in 17 U.S.C. § 101 et seq.
+ +"Intel-based Device" means a device designed, manufactured, or configured by You or Your Affiliates to include or operate Intel hardware, software, or +services. + +"Materials" means the software, documentation, the software product serial number and license key codes (if applicable), Development Tools, +Redistributables, and other materials or collateral, including any updates and upgrades thereto, in source code or object code form where applicable, that +are provided or otherwise made available by Intel to You under this Agreement. "Materials" do not include Open Source Software or any computer +programming code that is subject to an agreement, obligation or license (whether or not accompanying the Materials) intended to supersede this +Agreement. + +"Redistributables" means the software, documentation, or other collateral identified in the "redist.txt" text files, if any, included in the Materials. + +2. LIMITED LICENSE. +(A) Subject to the terms and conditions of this Agreement, Intel grants You and Your Affiliates, a limited, nonexclusive, nontransferable, revocable, +worldwide, fully paid-up license during the term of this Agreement, without the right to sublicense, unless expressly stated otherwise, to: +(1) internally reproduce and install a reasonable number of copies of the Materials for Your internal use solely for the purposes of designing, +developing, manufacturing and testing Intel-based Devices; +(2) internally reproduce the source code of the Development Tools, if provided to You by Intel, and to internally create and reproduce Derivatives of +the Development Tools, and to internally reproduce the binary code of the Development Tools, or any Derivatives created by You, in each case solely +for the purpose of designing, developing, manufacturing and testing the Intel-based Device, solely as necessary for the integration of any Intel software +and the output generated by the Development Tools, with and into Intel-based Devices; +(3) create Derivatives of the Redistributables, or any portions thereof, provided to You by Intel in source code form solely for the purposes of designing, +developing, debugging, modifying, distributing and testing software containing significantly more functionality and features than the Redistributables +in the form provided to You by Intel; +(4) distribute (or otherwise make available) on a royalty-free basis, subject to any other terms and conditions which may appear in the Redistributables +text files, the Redistributables, including any Derivatives of the Redistributables pursuant to Section 2(A)(3), or any portions thereof, only as integrated +or embedded in software (and not on a stand-alone basis) solely for use on an Intel-based Device; and +(5) have the tasks set forth in Section 2(A)(1) and (2) above performed by a Contractor on the conditions that You enter into a written confidentiality +agreement with any such Contractor, subject to Section 7 (Confidentiality), and You remain fully liable to Intel for the actions and inactions of Your +Contractors. +(B) You will be liable for Your Affiliate's breach of these terms. In addition, You acknowledge that Your Affiliates are beneficiaries of the licenses granted by +Intel under Section 2.
+(C) Intel hereby grants You the right to sub-license (without rights to further sublicense) the Development Tools, including any accompanying +documentation, to Your manufacturing partners, in the code format provided to You by Intel, solely for designing, developing, manufacturing and testing +the Intel-based Devices solely as necessary for the integration of any Intel software and the output generated by the Development Tools, with and into +Intel-based Devices. The sublicense is subject to a written sublicensing agreement that contains confidentiality obligations and license restrictions that are +no less protective of Intel than those provided in this Agreement. You will be fully responsible and liable towards Intel for Your sub-licensees' compliance +with all such confidentiality obligations and license restrictions. You may grant Your manufacturing partners the right to further distribute Redistributables +solely as integrated or embedded in software for Your Intel-based Devices. + +3. LICENSE RESTRICTIONS. All right, title and interest in and to the Materials and associated documentation are and will remain the exclusive property of +Intel and its suppliers. Unless expressly permitted under the Agreement, You will not, and will not allow any third party to (i) use, copy, distribute, sell or +offer to sell the Materials or associated documentation; (ii) modify, adapt, enhance, disassemble, decompile, reverse engineer, change or create derivative +works from the Materials except and only to the extent as specifically required by mandatory applicable laws or any applicable third party license terms +accompanying the Materials; (iii) use or make the Materials available for the use or benefit of third parties; or (iv) use the Materials on Your products other +than those that include the Intel product(s), platform(s), or software identified in the Materials; or (v) publish or provide any Materials benchmark or +comparison test results. +If You received the Materials solely for evaluation purposes, You have no distribution rights to the Materials or any portion thereof. + +Distribution of the Redistributables is also subject to the following conditions: You shall: (i) be solely responsible to Your customers and end users for any +update or support obligation or other liability which may arise from the distribution, (ii) not make any statement that Your software is "certified", or that its +performance is guaranteed, by Intel, (iii) not use Intel's name or trademarks to promote Your software without prior written permission, (iv) use a license +agreement that contains provisions that are at least as restrictive as this Agreement and which prohibits disassembly and reverse engineering of the +Materials provided in object code form, and (v) indemnify, hold harmless, and defend Intel, Intel's Affiliates, and its licensors from and against any claims +or lawsuits, including attorney's fees, that arise or result from Your Derivatives or Your distribution of Your software. + +The consideration under this Agreement is only for the licenses Intel expressly grants above. Any other rights including, but not limited to, additional patent +rights, will require an additional license and additional consideration. Nothing in this Agreement requires or will be treated to require Intel to grant any +additional license.
You acknowledge that an essential basis of the bargain in this Agreement is that Intel grants You no licenses or other rights including, +but not limited to, patent, copyright, trade secret, trademark, trade name, service mark or other intellectual property licenses or rights with respect to the +Materials and associated documentation, by implication, estoppel or otherwise, except for the licenses expressly granted above. You acknowledge there +are significant uses of the Materials in their original, unmodified and uncombined form. The consideration for the licenses in this Agreement reflects Intel's +continuing right to assert patent claims against any modifications or derivative works (including, without limitation, error corrections and bug fixes) of, or +combinations with, the Materials that You, Your Affiliates or third parties make that infringe any Intel patent claim. + +4. LICENSE TO FEEDBACK. This Agreement does not obligate You to provide Intel with materials, information, comments, suggestions, Your Derivatives or +other communication regarding the features, functions, performance or use of the Materials ("Feedback"). If any software included in the Materials is +provided or otherwise made available by Intel in source code form, to the extent You provide Intel with Feedback in a tangible form, You grant to Intel and +its affiliates a non-exclusive, perpetual, sublicenseable, irrevocable, worldwide, royalty-free, fully paid-up and transferable license, to and under all of Your +intellectual property rights, whether perfected or not, to publicly perform, publicly display, reproduce, use, make, have made, sell, offer for sale, distribute, +import, create derivative works of and otherwise exploit any comments, suggestions, descriptions, ideas, Your Derivatives or other feedback regarding the +Materials provided by You or on Your behalf. + +5. OPEN SOURCE STATEMENT. The Materials may include Open Source Software (OSS) licensed pursuant to OSS license agreement(s) identified in the +OSS comments in the applicable source code file(s) and/or file header(s) provided with or otherwise associated with the Materials. Neither You nor any +Original Equipment Manufacturer (OEM), Original Device Manufacturer (ODM), customer, or distributor may subject any proprietary portion of the Materials +to any OSS license obligations including, without limitation, combining or distributing the Materials with OSS in a manner that subjects Intel, the Materials +or any portion thereof to any OSS license obligation. Nothing in this Agreement limits any rights under, or grants rights that supersede, the terms of any +applicable OSS license. + +6. THIRD PARTY SOFTWARE. Certain third party software provided with or within the Materials may only be used (a) upon securing a license directly from +the owner of the software or (b) in combination with hardware components purchased from such third party and (c) subject to further license limitations +by the software owner. A listing of any such third party limitations is in one or more text files accompanying the Materials. You acknowledge Intel is not +providing You with a license to such third party software and further that it is Your responsibility to obtain appropriate licenses from such third parties +directly. + +7. CONFIDENTIALITY.
The terms and conditions of this Agreement, exchanged confidential information, as well as the Materials are subject to the terms +and conditions of the Non-Disclosure Agreement(s) or Intel Pre-Release Loan Agreement(s) (referred to herein collectively or individually as "NDA") entered +into by and in force between Intel and You, and in any case no less confidentiality protection than You apply to Your information of similar sensitivity. If +You would like to have a Contractor perform work on Your behalf that requires any access to or use of Materials You must obtain a written confidentiality +agreement from the Contractor which contains terms and conditions with respect to access to or use of Materials no less restrictive than those set forth in +this Agreement, excluding any distribution rights and use for any other purpose, and You will remain fully liable to Intel for the actions and inactions of +those Contractors. You may not use Intel's name in any publications, advertisements, or other announcements without Intel's prior written consent. + +8. NO OBLIGATION; NO AGENCY. Intel may make changes to the Software, or items referenced therein, at any time without notice. Intel is not obligated to +support, update, provide training for, or develop any further version of the Software or to grant any license thereto. No agency, franchise, partnership, +joint-venture, or employee-employer relationship is intended or created by this Agreement. + +9. EXCLUSION OF WARRANTIES. THE MATERIALS ARE PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING +WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. Intel does not warrant or assume responsibility +for the accuracy or completeness of any information, text, graphics, links or other items within the Materials. + +10. LIMITATION OF LIABILITY. IN NO EVENT WILL INTEL OR ITS AFFILIATES, LICENSORS OR SUPPLIERS (INCLUDING THEIR RESPECTIVE DIRECTORS, +OFFICERS, EMPLOYEES, AND AGENTS) BE LIABLE FOR ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, LOST PROFITS, BUSINESS +INTERRUPTION, OR LOST DATA) ARISING OUT OF OR IN RELATION TO THIS AGREEMENT, INCLUDING THE USE OF OR INABILITY TO USE THE MATERIALS, +EVEN IF INTEL HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. SOME JURISDICTIONS PROHIBIT EXCLUSION OR LIMITATION OF LIABILITY +FOR IMPLIED WARRANTIES OR CONSEQUENTIAL OR INCIDENTAL DAMAGES, SO THE ABOVE LIMITATION MAY IN PART NOT APPLY TO YOU. YOU MAY +ALSO HAVE OTHER LEGAL RIGHTS THAT VARY FROM JURISDICTION TO JURISDICTION. THE MATERIALS LICENSED HEREUNDER ARE NOT DESIGNED +OR INTENDED FOR USE IN ANY MEDICAL, LIFE SAVING OR LIFE SUSTAINING SYSTEMS, TRANSPORTATION SYSTEMS, NUCLEAR SYSTEMS, OR FOR ANY +OTHER MISSION CRITICAL APPLICATION IN WHICH THE FAILURE OF THE DEVELOPMENT TOOLS COULD LEAD TO PERSONAL INJURY OR DEATH. YOU +WILL INDEMNIFY AND HOLD INTEL AND ITS AFFILIATES, LICENSORS AND SUPPLIERS (INCLUDING THEIR RESPECTIVE DIRECTORS, OFFICERS, +EMPLOYEES, AND AGENTS) HARMLESS AGAINST ALL CLAIMS, LIABILITIES, LOSSES, COSTS, DAMAGES, AND EXPENSES (INCLUDING REASONABLE +ATTORNEY FEES), ARISING OUT OF, DIRECTLY OR INDIRECTLY, THE DISTRIBUTION OF THE MATERIALS AND ANY CLAIM OF PRODUCT LIABILITY, +PERSONAL INJURY OR DEATH ASSOCIATED WITH ANY UNINTENDED USE, EVEN IF SUCH CLAIM ALLEGES THAT INTEL OR AN INTEL AFFILIATE, LICENSOR +OR SUPPLIER WAS NEGLIGENT REGARDING THE DESIGN OR MANUFACTURE OF THE MATERIALS.
THE LIMITED REMEDIES, WARRANTY DISCLAIMER AND +LIMITED LIABILITY ARE FUNDAMENTAL ELEMENTS OF THE BASIS OF THE BARGAIN BETWEEN INTEL AND YOU AND INTEL WOULD NOT BE ABLE TO +PROVIDE THE MATERIALS WITHOUT SUCH LIMITATIONS. + +11. TERMINATION AND SURVIVAL. Intel may terminate this Agreement for any reason with thirty (30) days' notice and immediately if You or someone +acting on Your behalf or at Your behest violates any of its terms or conditions. Upon termination You will immediately destroy and ensure the destruction +of the Materials (including providing certification of such destruction or return back to Intel). Upon termination of this Agreement, all licenses granted to +You hereunder terminate immediately. All Sections of this Agreement, except Section 2, will survive termination. In the event of termination of this +Agreement, the license grant to any Redistributables, including Your Derivatives of the Redistributables, distributed by You prior to the effective date of +such termination and in accordance with the terms and conditions of this Agreement shall survive any such termination of this Agreement. + +12. GOVERNING LAW AND JURISDICTION. This Agreement and any dispute arising out of or relating to it will be governed by the laws of the U.S.A. and +Delaware, without regard to conflict of laws principles. The Parties exclude the application of the United Nations Convention on Contracts for the +International Sale of Goods (1980). The state and federal courts sitting in Delaware, U.S.A. will have exclusive jurisdiction over any dispute arising out of or +relating to this Agreement. The Parties consent to personal jurisdiction and venue in those courts. A Party that obtains a judgment against the other Party +in the courts identified in this section may enforce that judgment in any court that has jurisdiction over the Parties. + +13. EXPORT REGULATIONS/EXPORT CONTROL. You agree that neither You nor Your subsidiaries or Affiliates will export/re-export the Materials, directly +or indirectly, to any country for which the U.S. Department of Commerce or any other agency or department of the U.S. Government or the foreign +government from where it is shipping requires an export license, or other governmental approval, without first obtaining any such required license or +approval. In the event the Materials are exported from the U.S.A. or re-exported from a foreign destination by You, Your subsidiaries, or Your Affiliates, You +will ensure that the distribution and export/re-export or import of the Materials complies with all laws, regulations, orders, or other restrictions of the U.S. +Export Administration Regulations and the appropriate foreign government. + +14. GOVERNMENT RESTRICTED RIGHTS. The Materials are a commercial item (as defined in 48 C.F.R. 2.101) consisting of commercial computer software +and commercial computer software documentation (as those terms are used in 48 C.F.R. 12.212). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 +through 227.7202-4, You will not provide the Materials to the U.S. Government. Contractor or Manufacturer is Intel Corporation, 2200 Mission College +Blvd., Santa Clara, CA 95054. + +15. TRADEMARKS. Third party trademarks, trade names, product names and logos (the "Trademarks") contained in or used by the Materials are the +trademarks or registered trademarks of their respective owners, and the use of such Trademarks shall inure to the benefit of the trademark owner.
The +reference to such Trademarks (if any) by Intel in any of the Materials does not constitute: (i) an affiliation by Intel and its licensors with such company, or (ii) +an endorsement or approval by such company of Intel and its licensors and its products or services. + +16. ASSIGNMENT. You may not delegate, assign or transfer this Agreement, the license(s) granted or any of Your rights or duties hereunder, expressly, by +implication, by operation of law, or otherwise and any attempt to do so, without Intel's express prior written consent, will be null and void. Intel may assign, +delegate and transfer this Agreement, and its rights and obligations hereunder, in its sole discretion. + +17. ENTIRE AGREEMENT; SEVERABILITY. The terms and conditions of this Agreement and any NDA with Intel constitute the entire agreement between the +Parties with respect to the subject matter hereof, and merge and supersede all prior or contemporaneous agreements, understandings, negotiations and +discussions. Neither Party will be bound by any terms, conditions, definitions, warranties, understandings, or representations with respect to the subject +matter hereof other than as expressly provided herein. In the event any provision of this Agreement is unenforceable or invalid under any applicable law +or applicable court decision, such unenforceability or invalidity will not render this Agreement unenforceable or invalid as a whole, instead such provision +will be changed and interpreted so as to best accomplish the objectives of such provision within legal limits. + +18. WAIVER. The failure of a Party to require performance by the other Party of any provision hereof will not affect the full right to require such performance +at any time thereafter; nor will waiver by a Party of a breach of any provision hereof constitute a waiver of the provision itself. + +19. PRIVACY. YOUR PRIVACY RIGHTS ARE SET FORTH IN INTEL'S PRIVACY NOTICE, WHICH FORMS A PART OF THIS AGREEMENT. PLEASE REVIEW THE +PRIVACY NOTICE AT HTTP://WWW.INTEL.COM/PRIVACY TO LEARN HOW INTEL COLLECTS, USES AND SHARES INFORMATION ABOUT YOU.
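The header added next, api/include/mvnc.h, declares the C entry points, status codes, and option enums; the Python binding added later in this diff (api/python/mvnc/mvncapi.py) wraps them one-for-one. As a hedged sketch of that option plumbing (whether a given option value is meaningful depends on the attached device and firmware):

```python
# Sketch: global and device options through the Python binding in this diff.
# Assumes the NCSDK is installed and an NCS is attached.
from mvnc import mvncapi as mvnc

mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)  # 0=quiet, 1=errors, 2=verbose

devices = mvnc.EnumerateDevices()            # wraps mvncGetDeviceName()
if not devices:
    raise SystemExit('No NCS device found')  # MVNC_DEVICE_NOT_FOUND in C

device = mvnc.Device(devices[0])
device.OpenDevice()                          # wraps mvncOpenDevice()
try:
    # Maps to MVNC_THERMAL_THROTTLING_LEVEL in mvncDeviceOptions below:
    # 1 = TEMP_LIM_LOWER reached, 2 = TEMP_LIM_HIGHER reached
    level = device.GetDeviceOption(mvnc.DeviceOption.THERMAL_THROTTLING_LEVEL)
    print('thermal throttling level:', level)
finally:
    device.CloseDevice()                     # wraps mvncCloseDevice()
```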
diff --git a/api/include/mvnc.h b/api/include/mvnc.h new file mode 100644 index 0000000..082bd19 --- /dev/null +++ b/api/include/mvnc.h @@ -0,0 +1,69 @@ +#ifndef __MVNC_H_INCLUDED__ +#define __MVNC_H_INCLUDED__ + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define MVNC_MAX_NAME_SIZE 28 + +typedef enum { + MVNC_OK = 0, + MVNC_BUSY = -1, // Device is busy, retry later + MVNC_ERROR = -2, // Error communicating with the device + MVNC_OUT_OF_MEMORY = -3, // Out of memory + MVNC_DEVICE_NOT_FOUND = -4, // No device at the given index or name + MVNC_INVALID_PARAMETERS = -5, // At least one of the given parameters is wrong + MVNC_TIMEOUT = -6, // Timeout in the communication with the device + MVNC_MVCMD_NOT_FOUND = -7, // The file to boot Myriad was not found + MVNC_NO_DATA = -8, // No data to return, call LoadTensor first + MVNC_GONE = -9, // The graph or device has been closed during the operation + MVNC_UNSUPPORTED_GRAPH_FILE = -10, // The graph file version is not supported + MVNC_MYRIAD_ERROR = -11, // An error has been reported by the device, use MVNC_DEBUG_INFO +} mvncStatus; + +typedef enum { + MVNC_LOG_LEVEL = 0, // Log level, int, 0 = nothing, 1 = errors, 2 = verbose +} mvncGlobalOptions; + +typedef enum { + MVNC_ITERATIONS = 0, // Number of iterations per inference, int, normally 1, not for general use + MVNC_NETWORK_THROTTLE = 1, // Measure temperature once per inference instead of once per layer, int, not for general use + MVNC_DONT_BLOCK = 2, // LoadTensor will return BUSY instead of blocking, GetResult will return NO_DATA, int + MVNC_TIME_TAKEN = 1000, // Return time taken for inference (float *) + MVNC_DEBUG_INFO = 1001, // Return debug info, string +} mvncGraphOptions; + +typedef enum { + MVNC_TEMP_LIM_LOWER = 1, // Temperature for short sleep, float, not for general use + MVNC_TEMP_LIM_HIGHER = 2, // Temperature for long sleep, float, not for general use + MVNC_BACKOFF_TIME_NORMAL = 3, // Normal sleep in ms, int, not for general use + MVNC_BACKOFF_TIME_HIGH = 4, // Short sleep in ms, int, not for general use + MVNC_BACKOFF_TIME_CRITICAL = 5, // Long sleep in ms, int, not for general use + MVNC_TEMPERATURE_DEBUG = 6, // Stop on critical temperature, int, not for general use + MVNC_THERMAL_STATS = 1000, // Return temperatures, float *, not for general use + MVNC_OPTIMISATION_LIST = 1001, // Return optimisations list, char *, not for general use + MVNC_THERMAL_THROTTLING_LEVEL = 1002, // 1=TEMP_LIM_LOWER reached, 2=TEMP_LIM_HIGHER reached +} mvncDeviceOptions; + +mvncStatus mvncGetDeviceName(int index, char *name, unsigned int nameSize); +mvncStatus mvncOpenDevice(const char *name, void **deviceHandle); +mvncStatus mvncCloseDevice(void *deviceHandle); +mvncStatus mvncAllocateGraph(void *deviceHandle, void **graphHandle, const void *graphFile, unsigned int graphFileLength); +mvncStatus mvncDeallocateGraph(void *graphHandle); +mvncStatus mvncSetGlobalOption(int option, const void *data, unsigned int dataLength); +mvncStatus mvncGetGlobalOption(int option, void *data, unsigned int *dataLength); +mvncStatus mvncSetGraphOption(void *graphHandle, int option, const void *data, unsigned int dataLength); +mvncStatus mvncGetGraphOption(void *graphHandle, int option, void *data, unsigned int *dataLength); +mvncStatus mvncSetDeviceOption(void *deviceHandle, int option, const void *data, unsigned int dataLength); +mvncStatus mvncGetDeviceOption(void *deviceHandle, int option, void *data, unsigned int *dataLength); +mvncStatus mvncLoadTensor(void *graphHandle, const void *inputTensor, unsigned 
int inputTensorLength, void *userParam); +mvncStatus mvncGetResult(void *graphHandle, void **outputData, unsigned int *outputDataLength, void **userParam); + +#include "mvnc_deprecated.h" +#ifdef __cplusplus +} +#endif + +#endif diff --git a/api/include/mvnc_deprecated.h b/api/include/mvnc_deprecated.h new file mode 100644 index 0000000..c7fabeb --- /dev/null +++ b/api/include/mvnc_deprecated.h @@ -0,0 +1,39 @@ +#ifndef __MVNC_DEPRECATED_H_INCLUDED__ +#define __MVNC_DEPRECATED_H_INCLUDED__ + +#ifdef __cplusplus +extern "C" +{ +#endif + +typedef mvncGraphOptions GraphOptions __attribute__ \ + ((deprecated("GraphOptions is deprecated. Please use mvncGraphOptions"))); +typedef mvncDeviceOptions DeviceOptions __attribute__ \ + ((deprecated("DeviceOptions is deprecated. Please use mvncDeviceOptions"))); + +// Deprecated Define +#define MVNC_MAXNAMESIZE _Pragma("GCC warning \"'MVNC_MAXNAMESIZE' is deprecated. Please use 'MVNC_MAX_NAME_SIZE'\"") MVNC_MAX_NAME_SIZE + +// Deprecated Global Options +#define MVNC_LOGLEVEL _Pragma("GCC warning \"'MVNC_LOGLEVEL' is deprecated. Please use 'MVNC_LOG_LEVEL'\"") MVNC_LOG_LEVEL + +// Deprecated status values +#define MVNC_MVCMDNOTFOUND _Pragma("GCC warning \"'MVNC_MVCMDNOTFOUND' is deprecated. Please use 'MVNC_MVCMD_NOT_FOUND'\"") MVNC_MVCMD_NOT_FOUND +#define MVNC_NODATA _Pragma("GCC warning \"'MVNC_NODATA' is deprecated. Please use 'MVNC_NO_DATA'\"") MVNC_NO_DATA +#define MVNC_UNSUPPORTEDGRAPHFILE _Pragma("GCC warning \"'MVNC_UNSUPPORTEDGRAPHFILE' is deprecated. Please use 'MVNC_UNSUPPORTED_GRAPH_FILE'\"") MVNC_UNSUPPORTED_GRAPH_FILE +#define MVNC_MYRIADERROR _Pragma("GCC warning \"'MVNC_MYRIADERROR' is deprecated. Please use 'MVNC_MYRIAD_ERROR'\"") MVNC_MYRIAD_ERROR + +// Deprecated Graph Options values +#define MVNC_DONTBLOCK _Pragma("GCC warning \"'MVNC_DONTBLOCK' is deprecated. Please use 'MVNC_DONT_BLOCK'\"") MVNC_DONT_BLOCK +#define MVNC_TIMETAKEN _Pragma("GCC warning \"'MVNC_TIMETAKEN' is deprecated. Please use 'MVNC_TIME_TAKEN'\"") MVNC_TIME_TAKEN +#define MVNC_DEBUGINFO _Pragma("GCC warning \"'MVNC_DEBUGINFO' is deprecated. Please use 'MVNC_DEBUG_INFO'\"") MVNC_DEBUG_INFO + +// Deprecated Device Options Values +#define MVNC_THERMALSTATS _Pragma("GCC warning \"'MVNC_THERMALSTATS' is deprecated. Please use 'MVNC_THERMAL_STATS'\"") MVNC_THERMAL_STATS +#define MVNC_OPTIMISATIONLIST _Pragma("GCC warning \"'MVNC_OPTIMISATIONLIST' is deprecated. Please use 'MVNC_OPTIMISATION_LIST'\"") MVNC_OPTIMISATION_LIST + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/api/python/mvnc/__init__.py b/api/python/mvnc/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/api/python/mvnc/mvncapi.py b/api/python/mvnc/mvncapi.py new file mode 100644 index 0000000..cfdcace --- /dev/null +++ b/api/python/mvnc/mvncapi.py @@ -0,0 +1,270 @@ +# Copyright 2017 Intel Corporation. +# The source code, information and material ("Material") contained herein is +# owned by Intel Corporation or its suppliers or licensors, and title to such +# Material remains with Intel Corporation or its suppliers or licensors. +# The Material contains proprietary information of Intel or its suppliers and +# licensors. The Material is protected by worldwide copyright laws and treaty +# provisions. +# No part of the Material may be used, copied, reproduced, modified, published, +# uploaded, posted, transmitted, distributed or disclosed in any way without +# Intel's prior express written permission.
No license under any patent, +# copyright or other intellectual property rights in the Material is granted to +# or conferred upon you, either expressly, by implication, inducement, estoppel +# or otherwise. +# Any license under such intellectual property rights must be express and +# approved by Intel in writing. + +import sys +import numpy +import warnings +from enum import Enum +from ctypes import * + +# The toolkit wants its local version +try: + f = CDLL("./libmvnc.so") +except: + f = CDLL("libmvnc.so") + +warnings.simplefilter('default', DeprecationWarning) + + +class EnumDeprecationHelper(object): + def __init__(self, new_target, deprecated_values): + self.new_target = new_target + self.deprecated_values = deprecated_values + + def __call__(self, *args, **kwargs): + return self.new_target(*args, **kwargs) + + def __getattr__(self, attr): + if (attr in self.deprecated_values): + warnings.warn('\033[93m' + "\"" + attr + "\" is deprecated. Please use \"" + + self.deprecated_values[attr] + "\"!" + '\033[0m', + DeprecationWarning, stacklevel=2) + return getattr(self.new_target, self.deprecated_values[attr]) + return getattr(self.new_target, attr) + + +class mvncStatus(Enum): + OK = 0 + BUSY = -1 + ERROR = -2 + OUT_OF_MEMORY = -3 + DEVICE_NOT_FOUND = -4 + INVALID_PARAMETERS = -5 + TIMEOUT = -6 + MVCMD_NOT_FOUND = -7 + NO_DATA = -8 + GONE = -9 + UNSUPPORTED_GRAPH_FILE = -10 + MYRIAD_ERROR = -11 + +Status = EnumDeprecationHelper(mvncStatus, {"MVCMDNOTFOUND": "MVCMD_NOT_FOUND", + "NODATA": "NO_DATA", + "UNSUPPORTEDGRAPHFILE": "UNSUPPORTED_GRAPH_FILE", + "MYRIADERROR": "MYRIAD_ERROR"}) + + +class mvncGlobalOption(Enum): + LOG_LEVEL = 0 + +GlobalOption = EnumDeprecationHelper(mvncGlobalOption, {"LOGLEVEL": "LOG_LEVEL"}) + + +class mvncDeviceOption(Enum): + TEMP_LIM_LOWER = 1 + TEMP_LIM_HIGHER = 2 + BACKOFF_TIME_NORMAL = 3 + BACKOFF_TIME_HIGH = 4 + BACKOFF_TIME_CRITICAL = 5 + TEMPERATURE_DEBUG = 6 + THERMAL_STATS = 1000 + OPTIMISATION_LIST = 1001 + THERMAL_THROTTLING_LEVEL = 1002 + +DeviceOption = EnumDeprecationHelper(mvncDeviceOption, {"THERMALSTATS": "THERMAL_STATS", + "OPTIMISATIONLIST": "OPTIMISATION_LIST"}) + + +class mvncGraphOption(Enum): + ITERATIONS = 0 + NETWORK_THROTTLE = 1 + DONT_BLOCK = 2 + TIME_TAKEN = 1000 + DEBUG_INFO = 1001 + +GraphOption = EnumDeprecationHelper(mvncGraphOption, {"DONTBLOCK": "DONT_BLOCK", + "TIMETAKEN": "TIME_TAKEN", + "DEBUGINFO": "DEBUG_INFO"}) + + +def EnumerateDevices(): + name = create_string_buffer(28) + i = 0 + devices = [] + while True: + if f.mvncGetDeviceName(i, name, 28) != 0: + break + devices.append(name.value.decode("utf-8")) + i = i + 1 + return devices + + +def SetGlobalOption(opt, data): + data = c_int(data) + status = f.mvncSetGlobalOption(opt.value, pointer(data), sizeof(data)) + if status != Status.OK.value: + raise Exception(Status(status)) + + +def GetGlobalOption(opt): + if opt == GlobalOption.LOG_LEVEL: + optsize = c_uint() + optvalue = c_uint() + status = f.mvncGetGlobalOption(opt.value, byref(optvalue), byref(optsize)) + if status != Status.OK.value: + raise Exception(Status(status)) + return optvalue.value + optsize = c_uint() + optdata = POINTER(c_byte)() + status = f.mvncGetDeviceOption(0, opt.value, byref(optdata), byref(optsize)) + if status != Status.OK.value: + raise Exception(Status(status)) + v = create_string_buffer(optsize.value) + memmove(v, optdata, optsize.value) + return v.raw + + +class Device: + def __init__(self, name): + self.handle = c_void_p() + self.name = name + + def OpenDevice(self): + status = 
f.mvncOpenDevice(bytes(bytearray(self.name, "utf-8")), byref(self.handle)) + if status != Status.OK.value: + raise Exception(Status(status)) + + def CloseDevice(self): + status = f.mvncCloseDevice(self.handle) + self.handle = c_void_p() + if status != Status.OK.value: + raise Exception(Status(status)) + + def SetDeviceOption(self, opt, data): + if opt == DeviceOption.TEMP_LIM_HIGHER or opt == DeviceOption.TEMP_LIM_LOWER: + data = c_float(data) + else: + data = c_int(data) + status = f.mvncSetDeviceOption(self.handle, opt.value, pointer(data), sizeof(data)) + if status != Status.OK.value: + raise Exception(Status(status)) + + def GetDeviceOption(self, opt): + if opt == DeviceOption.TEMP_LIM_HIGHER or opt == DeviceOption.TEMP_LIM_LOWER: + optdata = c_float() + elif (opt == DeviceOption.BACKOFF_TIME_NORMAL or opt == DeviceOption.BACKOFF_TIME_HIGH or + opt == DeviceOption.BACKOFF_TIME_CRITICAL or opt == DeviceOption.TEMPERATURE_DEBUG or + opt == DeviceOption.THERMAL_THROTTLING_LEVEL): + optdata = c_int() + else: + optdata = POINTER(c_byte)() + optsize = c_uint() + status = f.mvncGetDeviceOption(self.handle, opt.value, byref(optdata), byref(optsize)) + if status != Status.OK.value: + raise Exception(Status(status)) + if opt == DeviceOption.TEMP_LIM_HIGHER or opt == DeviceOption.TEMP_LIM_LOWER: + return optdata.value + elif (opt == DeviceOption.BACKOFF_TIME_NORMAL or opt == DeviceOption.BACKOFF_TIME_HIGH or + opt == DeviceOption.BACKOFF_TIME_CRITICAL or opt == DeviceOption.TEMPERATURE_DEBUG or + opt == DeviceOption.THERMAL_THROTTLING_LEVEL): + return optdata.value + v = create_string_buffer(optsize.value) + memmove(v, optdata, optsize.value) + if opt == DeviceOption.OPTIMISATION_LIST: + l = [] + for i in range(40): + if v.raw[i * 50] != 0: + ss = v.raw[i * 50:] + end = ss.find(b'\x00') + val = ss[0:end].decode() + if val: + l.append(val) + return l + if opt == DeviceOption.THERMAL_STATS: + return numpy.frombuffer(v.raw, dtype=numpy.float32) + return int.from_bytes(v.raw, byteorder='little') + + def AllocateGraph(self, graphfile): + hgraph = c_void_p() + status = f.mvncAllocateGraph(self.handle, byref(hgraph), graphfile, len(graphfile)) + if status != Status.OK.value: + raise Exception(Status(status)) + return Graph(hgraph) + + +class Graph: + def __init__(self, handle): + self.handle = handle + self.userobjs = {} + + def SetGraphOption(self, opt, data): + data = c_int(data) + status = f.mvncSetGraphOption(self.handle, opt.value, pointer(data), sizeof(data)) + if status != Status.OK.value: + raise Exception(Status(status)) + + def GetGraphOption(self, opt): + if opt == GraphOption.ITERATIONS or opt == GraphOption.NETWORK_THROTTLE or opt == GraphOption.DONT_BLOCK: + optdata = c_int() + else: + optdata = POINTER(c_byte)() + optsize = c_uint() + status = f.mvncGetGraphOption(self.handle, opt.value, byref(optdata), byref(optsize)) + if status != Status.OK.value: + raise Exception(Status(status)) + if opt == GraphOption.ITERATIONS or opt == GraphOption.NETWORK_THROTTLE or opt == GraphOption.DONT_BLOCK: + return optdata.value + v = create_string_buffer(optsize.value) + memmove(v, optdata, optsize.value) + if opt == GraphOption.TIME_TAKEN: + return numpy.frombuffer(v.raw, dtype=numpy.float32) + if opt == GraphOption.DEBUG_INFO: + return v.raw[0:v.raw.find(0)].decode() + return int.from_bytes(v.raw, byteorder='little') + + def DeallocateGraph(self): + status = f.mvncDeallocateGraph(self.handle) + self.handle = 0 + if status != Status.OK.value: + raise Exception(Status(status)) + + def LoadTensor(self, 
tensor, userobj): + tensor = tensor.tostring() + userobj = py_object(userobj) + key = c_long(addressof(userobj)) + self.userobjs[key.value] = userobj + status = f.mvncLoadTensor(self.handle, tensor, len(tensor), key) + if status == Status.BUSY.value: + return False + if status != Status.OK.value: + del self.userobjs[key.value] + raise Exception(Status(status)) + return True + + def GetResult(self): + tensor = c_void_p() + tensorlen = c_uint() + userobj = c_long() + status = f.mvncGetResult(self.handle, byref(tensor), byref(tensorlen), byref(userobj)) + if status == Status.NO_DATA.value: + return None, None + if status != Status.OK.value: + raise Exception(Status(status)) + v = create_string_buffer(tensorlen.value) + memmove(v, tensor, tensorlen.value) + tensor = numpy.frombuffer(v.raw, dtype=numpy.float16) + retuserobj = self.userobjs[userobj.value] + del self.userobjs[userobj.value] + return tensor, retuserobj.value diff --git a/api/src/97-usbboot.rules b/api/src/97-usbboot.rules new file mode 100644 index 0000000..eb61fa4 --- /dev/null +++ b/api/src/97-usbboot.rules @@ -0,0 +1,3 @@ +SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" +SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" +SUBSYSTEM=="tty", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" diff --git a/api/src/Makefile b/api/src/Makefile new file mode 100644 index 0000000..5d6b45d --- /dev/null +++ b/api/src/Makefile @@ -0,0 +1,81 @@ +ARCH := $(shell uname -m) + +LIBS += -lpthread -lusb-1.0 -ldl + +OUT := libmvnc.so.0 +OBJDIR := obj-$(ARCH) +INSTALLDIR := ${DESTDIR}/usr/local +PYTHON3DIST := $(shell python3 -c "import site; print(site.getsitepackages()[0])") +PYTHON2DIST := $(shell python -c "import site; print(site.getsitepackages()[0])") + +SRCS := \ + usb_boot.c \ + usb_link_vsc.c \ + mvnc_api.c + +INCLUDES := \ + -I. 
\ + -I../include \ + -I$(SYSROOT)/usr/include/libusb-1.0 \ + +CFLAGS += -O2 -Wall -pthread -fPIC -MMD -MP +LDFLAGS += -shared + +OBJS := $(SRCS:%.c=$(OBJDIR)/%.o) +DEPS := $(OBJS:.o=.d) + +all: obj-$(ARCH)/libmvnc.so.0 + +$(OBJDIR)/$(OUT): $(OBJS) + $(CC) $(LDFLAGS) $(OBJS) -o $@ $(LIBS) + ln -fs $(OBJDIR)/$(OUT) libmvnc.so + ln -fs $(OBJDIR)/$(OUT) $(OUT) + +$(OBJDIR)/%.o: %.c | $(OBJDIR) + $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ + +$(OBJDIR): + @mkdir $@ + +-include $(DEPS) + +basicinstall: $(OBJDIR)/$(OUT) + mkdir -p $(INSTALLDIR)/include/ + mkdir -p $(INSTALLDIR)/lib/ + cp $(OBJDIR)/$(OUT) $(INSTALLDIR)/lib/ + ln -fs libmvnc.so.0 $(INSTALLDIR)/lib/libmvnc.so + cp ../include/*.h $(INSTALLDIR)/include/ + mkdir -p $(INSTALLDIR)/lib/mvnc + cp mvnc/MvNCAPI.mvcmd $(INSTALLDIR)/lib/mvnc/ + mkdir -p ${DESTDIR}/etc/udev/rules.d/ + cp 97-usbboot.rules ${DESTDIR}/etc/udev/rules.d/ + +pythoninstall: + mkdir -p ${DESTDIR}$(PYTHON3DIST) + mkdir -p ${DESTDIR}$(PYTHON2DIST) + cp -r ../python/mvnc ${DESTDIR}$(PYTHON3DIST)/ + cp -r ../python/mvnc ${DESTDIR}$(PYTHON2DIST)/ + +postinstall: + udevadm control --reload-rules + udevadm trigger + ldconfig + +install: basicinstall pythoninstall postinstall + +uninstall: + rm -f $(INSTALLDIR)/lib/libmvnc.so.0 + rm -f $(INSTALLDIR)/lib/libmvnc.so + rm -f $(INSTALLDIR)/include/mvnc.h + rm -f $(INSTALLDIR)/include/mvnc_deprecated.h + rm -f $(INSTALLDIR)/lib/mvnc/MvNCAPI.mvcmd + rm -rf $(INSTALLDIR)/lib/mvnc + rm -rf ${DESTDIR}$(PYTHON3DIST)/mvnc + rm -rf ${DESTDIR}$(PYTHON2DIST)/mvnc + rm -f ${DESTDIR}/etc/udev/rules.d/97-usbboot.rules + +clean: + rm -f $(OUT) + rm -f $(OBJS) + rm -rf $(OBJDIR) + rm -f libmvnc.so diff --git a/api/src/Makefile.rpi b/api/src/Makefile.rpi new file mode 100644 index 0000000..6cf9012 --- /dev/null +++ b/api/src/Makefile.rpi @@ -0,0 +1,43 @@ +ARCH := armv7l + +PIROOT := $(shell echo $(HOME))/piroot +CC := arm-linux-gnueabihf-gcc --sysroot=$(PIROOT) +LIBS += -ludev -lpthread -lusb-1.0 -ldl + +OUT := libmvnc.so.0 +OBJDIR := obj-$(ARCH) +INSTALLDIR := ${DESTDIR}/usr/local +PYTHON3DIST := $(shell python3 -c "import site; print(site.getsitepackages()[0])") +PYTHON2DIST := $(shell python -c "import site; print(site.getsitepackages()[0])") + +SRCS := \ + usb_boot.c \ + usb_link_vsc.c \ + mvnc_api.c + +INCLUDES := \ + -I. \ + -I../include \ + -I$(SYSROOT)/usr/include/libusb-1.0 \ + +CFLAGS += -O2 -Wall -pthread -fPIC -MMD -MP +LDFLAGS += -shared + +OBJS := $(SRCS:%.c=$(OBJDIR)/%.o) +DEPS := $(OBJS:.o=.d) + +$(OBJDIR)/$(OUT): $(OBJS) + $(CC) $(LDFLAGS) $(OBJS) -o $@ $(LIBS) + +$(OBJDIR)/%.o: %.c | $(OBJDIR) + $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ + +$(OBJDIR): + @mkdir $@ + +-include $(DEPS) + +clean: + rm -f $(OUT) + rm -f $(OBJS) + rm -rf $(OBJDIR) diff --git a/api/src/USBLinkDefines.h b/api/src/USBLinkDefines.h new file mode 100644 index 0000000..4ec8cad --- /dev/null +++ b/api/src/USBLinkDefines.h @@ -0,0 +1,65 @@ +/* +* Copyright 2017 Intel Corporation. +* The source code, information and material ("Material") contained herein is +* owned by Intel Corporation or its suppliers or licensors, and title to such +* Material remains with Intel Corporation or its suppliers or licensors. +* The Material contains proprietary information of Intel or its suppliers and +* licensors. The Material is protected by worldwide copyright laws and treaty +* provisions. 
+* No part of the Material may be used, copied, reproduced, modified, published, +* uploaded, posted, transmitted, distributed or disclosed in any way without +* Intel's prior express written permission. No license under any patent, +* copyright or other intellectual property rights in the Material is granted to +* or conferred upon you, either expressly, by implication, inducement, estoppel +* or otherwise. +* Any license under such intellectual property rights must be express and +* approved by Intel in writing. +*/ + +#ifndef _USBLINKCOMMONDEFINES_H +#define _USBLINKCOMMONDEFINES_H +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif +#define MAX_NAME_LENGTH 52 +// Packet length defines the maximum message length between the PC and the Myriad; messages bigger than this are split into multiple messages +#define PACKET_LENGTH (64*1024) + +typedef struct bufferEntryDesc_t { + char name[MAX_NAME_LENGTH]; + uint8_t *data; + uint32_t length; +} bufferEntryDesc_t; + +typedef enum { + USB_LINK_GET_MYRIAD_STATUS = 0, + USB_LINK_RESET_REQUEST, + USB_LINK_HOST_SET_DATA, + USB_LINK_HOST_GET_DATA +} hostcommands_t; + +typedef enum { + MYRIAD_NOT_INIT = 0, + MYRIAD_INITIALIZED = 0x11, + MYRIAD_WAITING = 0x22, + MYRIAD_RUNNING = 0x33, + MYRIAD_FINISHED = 0x44, + MYRIAD_PENDING = 0x55, +} myriadStatus_t; + +typedef struct usbHeader_t { + uint8_t cmd; + uint8_t hostready; + uint16_t reserved; + uint32_t dataLength; + uint32_t offset; + char name[MAX_NAME_LENGTH]; +} usbHeader_t; + +#ifdef __cplusplus +} +#endif +#endif +/* end of include file */ diff --git a/api/src/common.h b/api/src/common.h new file mode 100644 index 0000000..7385f0c --- /dev/null +++ b/api/src/common.h @@ -0,0 +1,34 @@ +/* +* Copyright 2017 Intel Corporation. +* The source code, information and material ("Material") contained herein is +* owned by Intel Corporation or its suppliers or licensors, and title to such +* Material remains with Intel Corporation or its suppliers or licensors. +* The Material contains proprietary information of Intel or its suppliers and +* licensors. The Material is protected by worldwide copyright laws and treaty +* provisions. +* No part of the Material may be used, copied, reproduced, modified, published, +* uploaded, posted, transmitted, distributed or disclosed in any way without +* Intel's prior express written permission. No license under any patent, +* copyright or other intellectual property rights in the Material is granted to +* or conferred upon you, either expressly, by implication, inducement, estoppel +* or otherwise. +* Any license under such intellectual property rights must be express and +* approved by Intel in writing. +*/ + +// Common logging macros +#define PRINT_DEBUG(...) {if (mvnc_loglevel > 1) fprintf(__VA_ARGS__);} +#define PRINT_DEBUG_F(...) {if (mvnc_loglevel > 1) \ + { fprintf(__VA_ARGS__); fflush(stderr); } }\ + +#define PRINT_INFO(...) {if (mvnc_loglevel > 0) fprintf(__VA_ARGS__);} +#define PRINT_INFO_F(...) {if (mvnc_loglevel > 0) \ + { fprintf(__VA_ARGS__); fflush(stderr); } }\ + +#define PRINT(...)
fprintf(stderr,__VA_ARGS__) + +// Common defines +#define DEFAULT_VID 0x03E7 +#define DEFAULT_PID 0x2150 // Myriad2v2 ROM +#define DEFAULT_OPEN_VID DEFAULT_VID +#define DEFAULT_OPEN_PID 0xf63b // Once opened in VSC mode, VID/PID change diff --git a/api/src/mvnc/MvNCAPI.mvcmd b/api/src/mvnc/MvNCAPI.mvcmd new file mode 100644 index 0000000..87db9cb Binary files /dev/null and b/api/src/mvnc/MvNCAPI.mvcmd differ diff --git a/api/src/mvnc_api.c b/api/src/mvnc_api.c new file mode 100644 index 0000000..2975836 --- /dev/null +++ b/api/src/mvnc_api.c @@ -0,0 +1,1001 @@ +/* +* Copyright 2017 Intel Corporation. +* The source code, information and material ("Material") contained herein is +* owned by Intel Corporation or its suppliers or licensors, and title to such +* Material remains with Intel Corporation or its suppliers or licensors. +* The Material contains proprietary information of Intel or its suppliers and +* licensors. The Material is protected by worldwide copyright laws and treaty +* provisions. +* No part of the Material may be used, copied, reproduced, modified, published, +* uploaded, posted, transmitted, distributed or disclosed in any way without +* Intel's prior express written permission. No license under any patent, +* copyright or other intellectual property rights in the Material is granted to +* or conferred upon you, either expressly, by implication, inducement, estoppel +* or otherwise. +* Any license under such intellectual property rights must be express and +* approved by Intel in writing. +*/ + +#define _GNU_SOURCE +#include <dlfcn.h> // For dladdr +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <sys/types.h> +#include <dirent.h> +#include <pthread.h> +#include <time.h> +#include "mvnc.h" +#include "usb_link.h" +#include "usb_boot.h" +#include "common.h" + +// Graph file structure +#define HEADER_LENGTH 264 +#define STAGE_LENGTH 227 +#define VERSION_OFFSET 36 +#define GRAPH_VERSION 2 +#define N_STAGES_OFFSET 240 +#define FIRST_SHAVE_OFFSET 248 +#define N_OUTPUTS_OFFSET (HEADER_LENGTH + 136) +#define X_OUT_STRIDE_OFFSET (HEADER_LENGTH + 172) + +#define THERMAL_BUFFER_SIZE 100 +#define DEBUG_BUFFER_SIZE 120 + +#define MAX_OPTIMISATIONS 40 +#define OPTIMISATION_NAME_LEN 50 +#define OPTIMISATION_LIST_BUFFER_SIZE (MAX_OPTIMISATIONS * OPTIMISATION_NAME_LEN) + +#define MAX_PATH_LENGTH 255 +#define STATUS_WAIT_TIMEOUT 15 + +static int initialized = 0; +static pthread_mutex_t mm = PTHREAD_MUTEX_INITIALIZER; + +int mvnc_loglevel = 0; + +/////////////////////////// Structs ///////////////////////////// +struct Graph; + +struct Device { + int backoff_time_normal, backoff_time_high, backoff_time_critical; + int temperature_debug, throttle_happened; + float temp_lim_upper, temp_lim_lower; + float *thermal_stats; + char *dev_addr; // Device USB address as returned by usb_ + char *dev_file; // Device filename in /dev directory + char *optimisation_list; + void *usb_link; + struct Device *next; // Next device in chain + struct Graph *graphs; // List of associated graphs + pthread_mutex_t mm; +} *devices; + +struct Graph { + int started; + int have_data; + int dont_block; + int input_idx; + int output_idx; + int failed; + int iterations; + int network_throttle; + unsigned noutputs; + unsigned nstages; + struct Device *dev; + struct Graph *next; + char *aux_buffer; + char *debug_buffer; + float *time_taken; + void *user_param[2]; + void *output_data; +}; + +static double time_in_seconds() +{ + static double s; + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + if (!s) + s = ts.tv_sec + ts.tv_nsec * 1e-9; + return
ts.tv_sec + ts.tv_nsec * 1e-9 - s; +} + +static void initialize() +{ + // We sanitize the situation by trying to reset the devices that have been left open + initialized = 1; + usblink_resetall(); +} + +mvncStatus mvncGetDeviceName(int index, char *name, unsigned int nameSize) +{ + if (index < 0 || !name || nameSize < MVNC_MAX_NAME_SIZE) + return MVNC_INVALID_PARAMETERS; + + pthread_mutex_lock(&mm); + if (!initialized) + initialize(); + int rc = usb_find_device(index, name, nameSize, 0, 0, 0); + pthread_mutex_unlock(&mm); + + return rc; +} + +static int is_device_opened(const char *name) +{ + struct Device *d = devices; + while (d) { + if (strcmp(d->dev_addr, name) == 0) + return 0; + d = d->next; + } + return -1; +} + +static mvncStatus load_fw_file(const char *name) +{ + int rc; + FILE *fp; + char *tx_buf; + unsigned file_size; + char mv_cmd_file[MAX_PATH_LENGTH], *p; + + // Search for the mvnc executable in the same directory as this library, under mvnc + Dl_info info; + dladdr(mvncOpenDevice, &info); + strncpy(mv_cmd_file, info.dli_fname, sizeof(mv_cmd_file) - 40); + p = strrchr(mv_cmd_file, '/'); + if (p) + strcpy(p + 1, "mvnc/MvNCAPI.mvcmd"); + else + strcpy(mv_cmd_file, "mvnc/MvNCAPI.mvcmd"); + + // Load the mvnc executable + fp = fopen(mv_cmd_file, "rb"); + if (fp == NULL) { + if (mvnc_loglevel) + perror(mv_cmd_file); + pthread_mutex_unlock(&mm); + return MVNC_MVCMD_NOT_FOUND; + } + + fseek(fp, 0, SEEK_END); + file_size = ftell(fp); + rewind(fp); + if (!(tx_buf = malloc(file_size))) { + if (mvnc_loglevel) + perror("buffer"); + fclose(fp); + pthread_mutex_unlock(&mm); + return MVNC_OUT_OF_MEMORY; + } + + if (fread(tx_buf, 1, file_size, fp) != file_size) { + if (mvnc_loglevel) + perror(mv_cmd_file); + fclose(fp); + free(tx_buf); + pthread_mutex_unlock(&mm); + return MVNC_MVCMD_NOT_FOUND; + } + fclose(fp); + + // Boot it + rc = usb_boot(name, tx_buf, file_size); + free(tx_buf); + if (rc) { + pthread_mutex_unlock(&mm); + return rc; + } + + PRINT_DEBUG(stderr, "Boot successful, device address %s\n", name); + return MVNC_OK; +} + +static void allocate_device(const char* name, void **deviceHandle, void* f) +{ + struct Device *d = calloc(1, sizeof(*d)); + d->dev_addr = strdup(name); + d->usb_link = f; + d->next = devices; + d->temp_lim_upper = 95; + d->temp_lim_lower = 85; + d->backoff_time_normal = 0; + d->backoff_time_high = 100; + d->backoff_time_critical = 10000; + d->temperature_debug = 0; + pthread_mutex_init(&d->mm, 0); + devices = d; + *deviceHandle = d; + + PRINT_DEBUG(stderr, "done\n"); + PRINT_INFO(stderr, "Booted %s -> %s\n", + d->dev_addr, + d->dev_file ?
d->dev_file : "VSC"); +} + +mvncStatus mvncOpenDevice(const char *name, void **deviceHandle) +{ + int rc; + char name2[MVNC_MAX_NAME_SIZE] = ""; + char* device_name; + char* saved_name = NULL; + char* temp = NULL; //save to be able to free memory + int second_name_available = 0; + + if (!name || !deviceHandle) + return MVNC_INVALID_PARAMETERS; + + temp = saved_name = strdup(name); + + device_name = strtok_r(saved_name, ":", &saved_name); + if (device_name == NULL) { + free(temp); + return MVNC_INVALID_PARAMETERS; + } + + pthread_mutex_lock(&mm); + if (!initialized) + initialize(); + + + rc = load_fw_file(device_name); + if (rc != MVNC_OK) { + free(temp); + return rc; + } + if (saved_name && strlen(saved_name) > 0) { + device_name = strtok_r(NULL, ":", &saved_name); + second_name_available = 1; + } + + // Now we should have a new /dev/ttyACM, try to open it + double waittm = time_in_seconds() + STATUS_WAIT_TIMEOUT; + while (time_in_seconds() < waittm) { + void *f = usblink_open(device_name); + + //we might fail in case name changed after boot and we don't have it + if (f == NULL && !second_name_available) { + int count = 0; + while (1) { + name2[0] = '\0'; + rc = usb_find_device(count, name2, + sizeof(name2), NULL, + DEFAULT_OPEN_VID, + DEFAULT_OPEN_PID); + if (rc < 0) //Error or no more devices found + break; + + //check if we already have name2 open + // if not, check if it's not already busy + if (is_device_opened(name2) < 0 && + (f = usblink_open(name2))) + break; + count++; + } + } + + if (f) { + myriadStatus_t status; + + if (!usblink_getmyriadstatus(f, &status) && status == MYRIAD_WAITING) { + allocate_device(strlen(name2) > 0 ? name2 : device_name, deviceHandle, f); + free(temp); + pthread_mutex_unlock(&mm); + return MVNC_OK; + } else { + PRINT_DEBUG(stderr, + "found, but cannot get status\n"); + usblink_close(f); + } + } + // Error opening it, continue searching + usleep(10000); + } + free(temp); + pthread_mutex_unlock(&mm); + return MVNC_ERROR; +} + +static int find_device(void *deviceHandle) +{ + struct Device *d = devices; + + while (d) { + if (d == deviceHandle) + return 0; + d = d->next; + } + + return -1; +} + +static int find_graph(void *graphHandle) +{ + struct Device *d = devices; + + while (d) { + struct Graph *g = d->graphs; + while (g) { + if (g == graphHandle) + return 0; + g = g->next; + } + d = d->next; + } + + return -1; +} + +// Defined here as it will be used twice +static int deallocate_graph(struct Graph *g) +{ + int found = 0; + + // Remove it from the list of the associated device + if (g->dev->graphs == g) { + g->dev->graphs = g->next; + found = 1; + } else { + struct Graph *gp = g->dev->graphs; + while (gp->next) { + if (gp->next == g) { + found = 1; + gp->next = gp->next->next; + break; + } + gp = gp->next; + } + } + + // Free it with all its data + if (found) { + free(g->aux_buffer); + free(g->output_data); + g->dev->thermal_stats = 0; + free(g); + } + + return -!found; +} + +mvncStatus mvncCloseDevice(void *deviceHandle) +{ + int found = 0; + + if (!deviceHandle) + return MVNC_INVALID_PARAMETERS; + + pthread_mutex_lock(&mm); + if (find_device(deviceHandle)) { + pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + struct Device *d = (struct Device *) deviceHandle; + // Remove it from our list + if (devices == d) { + devices = d->next; + found = 1; + } else { + struct Device *dp = devices; + while (dp->next) { + if (dp->next == d) { + found = 1; + dp->next = dp->next->next; + break; + } + dp = dp->next; + } + } + + if (!found) { + 
pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + // Deallocate all associated graphs + pthread_mutex_lock(&d->mm); + while (d->graphs) + deallocate_graph(d->graphs); + + // Reset + usblink_resetmyriad(d->usb_link); + usblink_close(d->usb_link); + if (d->optimisation_list) + free(d->optimisation_list); + + free(d->dev_addr); + free(d->dev_file); + pthread_mutex_unlock(&d->mm); + pthread_mutex_destroy(&d->mm); + free(d); + pthread_mutex_unlock(&mm); + + usleep(500000); + return MVNC_OK; +} + +static unsigned read_32bits(const unsigned char *ptr) +{ + return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24); +} + + +mvncStatus mvncAllocateGraph(void *deviceHandle, void **graphHandle, + const void *graphFile, unsigned int graphFileLength) +{ + if (!deviceHandle || !graphHandle || !graphFile) + return MVNC_INVALID_PARAMETERS; + + if (graphFileLength < HEADER_LENGTH + STAGE_LENGTH || + graphFileLength > 512 * 1024 * 1024) + return MVNC_UNSUPPORTED_GRAPH_FILE; + + unsigned char *graph = (unsigned char *) graphFile; + if (graph[VERSION_OFFSET] != GRAPH_VERSION) + return MVNC_UNSUPPORTED_GRAPH_FILE; + + unsigned nstages = graph[N_STAGES_OFFSET] + (graph[N_STAGES_OFFSET + 1] << 8); + unsigned noutputs = read_32bits(graph + N_OUTPUTS_OFFSET + + (nstages - 1) * STAGE_LENGTH) * + read_32bits(graph + N_OUTPUTS_OFFSET + + (nstages - 1) * STAGE_LENGTH + 4) * + read_32bits(graph + X_OUT_STRIDE_OFFSET + + (nstages - 1) * STAGE_LENGTH) / 2; + + // A reasonable check on graph correctness + if (noutputs > 64 * 1024 * 1024) + return MVNC_UNSUPPORTED_GRAPH_FILE; + + pthread_mutex_lock(&mm); + struct Device *d = devices; + while (d) { + if (d == deviceHandle) + break; + d = d->next; + } + + if (!d) { + pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + if (d->graphs) { + pthread_mutex_unlock(&mm); + return MVNC_BUSY; + } + + myriadStatus_t status; + double timeout = time_in_seconds() + 10; + do { + if (usblink_getmyriadstatus(d->usb_link, &status)) { + pthread_mutex_unlock(&mm); + return MVNC_ERROR; + } + usleep(10000); + } while (status != MYRIAD_WAITING && time_in_seconds() < timeout); + + if (status != MYRIAD_WAITING) { + pthread_mutex_unlock(&mm); + return MVNC_ERROR; + } + + if (usblink_setdata(d->usb_link, "blobFile", graphFile, graphFileLength, 0)) { + pthread_mutex_unlock(&mm); + return MVNC_ERROR; + } + + struct Graph *g = calloc(1, sizeof(*g)); + g->dev = d; + g->nstages = nstages; + g->noutputs = noutputs; + + // aux_buffer + g->aux_buffer = calloc(1, 224 + nstages * sizeof(*g->time_taken)); + if (!g->aux_buffer) { + free(g); + pthread_mutex_unlock(&mm); + return MVNC_OUT_OF_MEMORY; + } + + if (usblink_setdata(g->dev->usb_link, "auxBuffer", g->aux_buffer, + 224 + nstages * sizeof(*g->time_taken), 0)) { + free(g->aux_buffer); + free(g); + pthread_mutex_unlock(&mm); + return MVNC_ERROR; + } + + g->debug_buffer = g->aux_buffer; + g->time_taken = (float *) (g->aux_buffer + 224); + + // output_data + g->output_data = calloc(noutputs, 2); + if (!g->output_data) { + free(g->aux_buffer); + free(g); + pthread_mutex_unlock(&mm); + return MVNC_OUT_OF_MEMORY; + } + + g->dev->thermal_stats = (float *) (g->aux_buffer + DEBUG_BUFFER_SIZE); + + g->iterations = 1; + g->network_throttle = 1; + if (d->graphs) + g->next = d->graphs; + d->graphs = g; + *graphHandle = g; + pthread_mutex_unlock(&mm); + return MVNC_OK; +} + +mvncStatus mvncDeallocateGraph(void *graphHandle) +{ + if (!graphHandle) + return MVNC_INVALID_PARAMETERS; + + pthread_mutex_lock(&mm); + if 
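/* nonzero means the graph is not registered on any open device */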
(find_graph(graphHandle)) { + pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + struct Device *d = ((struct Graph *) graphHandle)->dev; + + pthread_mutex_lock(&d->mm); + if (deallocate_graph((struct Graph *) graphHandle)) { + pthread_mutex_unlock(&d->mm); + pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + pthread_mutex_unlock(&d->mm); + pthread_mutex_unlock(&mm); + return MVNC_OK; +} + +mvncStatus mvncSetGraphOption(void *graphHandle, int option, const void *data, + unsigned int dataLength) +{ + if (!graphHandle || !data || dataLength != 4) + return MVNC_INVALID_PARAMETERS; + + struct Graph *g = (struct Graph *) graphHandle; + pthread_mutex_lock(&mm); + if (find_graph(graphHandle)) { + pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + pthread_mutex_lock(&g->dev->mm); + pthread_mutex_unlock(&mm); + switch (option) { + case MVNC_ITERATIONS: + g->iterations = *(int *) data; + break; + case MVNC_NETWORK_THROTTLE: + g->network_throttle = *(int *) data; + break; + case MVNC_DONT_BLOCK: + g->dont_block = *(int *) data; + break; + default: + pthread_mutex_unlock(&g->dev->mm); + return MVNC_INVALID_PARAMETERS; + } + + pthread_mutex_unlock(&g->dev->mm); + return MVNC_OK; +} + +mvncStatus mvncGetGraphOption(void *graphHandle, int option, void *data, + unsigned int *dataLength) +{ + if (!graphHandle || !data || !dataLength) + return MVNC_INVALID_PARAMETERS; + + struct Graph *g = (struct Graph *) graphHandle; + pthread_mutex_lock(&mm); + if (find_graph(graphHandle)) { + pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + pthread_mutex_lock(&g->dev->mm); + pthread_mutex_unlock(&mm); + switch (option) { + case MVNC_ITERATIONS: + *(int *) data = g->iterations; + *dataLength = sizeof(int); + break; + case MVNC_NETWORK_THROTTLE: + *(int *) data = g->network_throttle; + *dataLength = sizeof(int); + break; + case MVNC_DONT_BLOCK: + *(int *) data = g->dont_block; + + *dataLength = sizeof(int); + break; + case MVNC_TIME_TAKEN: + *(float **) data = g->time_taken; + *dataLength = sizeof(*g->time_taken) * g->nstages; + break; + case MVNC_DEBUG_INFO: + *(char **) data = g->debug_buffer; + *dataLength = DEBUG_BUFFER_SIZE; + break; + default: + pthread_mutex_unlock(&g->dev->mm); + return MVNC_INVALID_PARAMETERS; + } + + pthread_mutex_unlock(&g->dev->mm); + return MVNC_OK; +} + +mvncStatus mvncSetGlobalOption(int option, const void *data, + unsigned int dataLength) +{ + if (!data || dataLength != 4) + return MVNC_INVALID_PARAMETERS; + + switch (option) { + case MVNC_LOG_LEVEL: + mvnc_loglevel = *(int *) data; + break; + default: + return MVNC_INVALID_PARAMETERS; + } + + return MVNC_OK; +} + +mvncStatus mvncGetGlobalOption(int option, void *data, unsigned int *dataLength) +{ + if (!data || !dataLength) + return MVNC_INVALID_PARAMETERS; + + switch (option) { + case MVNC_LOG_LEVEL: + *(int *) data = mvnc_loglevel; + *dataLength = sizeof(mvnc_loglevel); + break; + default: + return MVNC_INVALID_PARAMETERS; + } + return MVNC_OK; +} + +mvncStatus mvncSetDeviceOption(void *deviceHandle, int option, const void *data, + unsigned int dataLength) +{ + if (deviceHandle == 0 && option == MVNC_LOG_LEVEL) { + PRINT("Warning: MVNC_LOG_LEVEL is not a Device Option, \ + please use mvncSetGlobalOption()!\n"); + return mvncSetGlobalOption(option, data, dataLength); + } + + if (!deviceHandle || !data || dataLength != 4) + return MVNC_INVALID_PARAMETERS; + + struct Device *d = (struct Device *) deviceHandle; + pthread_mutex_lock(&mm); + if (find_device(d)) { + 
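// Unknown device handle: it was never opened or has already been closed. +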
pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + pthread_mutex_lock(&d->mm); + pthread_mutex_unlock(&mm); + switch (option) { + case MVNC_TEMP_LIM_LOWER: + d->temp_lim_lower = *(float *) data; + break; + case MVNC_TEMP_LIM_HIGHER: + d->temp_lim_upper = *(float *) data; + break; + case MVNC_BACKOFF_TIME_NORMAL: + d->backoff_time_normal = *(int *) data; + break; + case MVNC_BACKOFF_TIME_HIGH: + d->backoff_time_high = *(int *) data; + break; + case MVNC_BACKOFF_TIME_CRITICAL: + d->backoff_time_critical = *(int *) data; + break; + case MVNC_TEMPERATURE_DEBUG: + d->temperature_debug = *(int *) data; + break; + default: + pthread_mutex_unlock(&d->mm); + return MVNC_INVALID_PARAMETERS; + } + pthread_mutex_unlock(&d->mm); + + return MVNC_OK; +} + +static mvncStatus get_optimisation_list(struct Device *d) +{ + int i, config[10]; + double timeout; + myriadStatus_t status; + char *p; + + if (d->optimisation_list) + return MVNC_OK; + + d->optimisation_list = calloc(OPTIMISATION_LIST_BUFFER_SIZE, 1); + if (!d->optimisation_list) + return MVNC_OUT_OF_MEMORY; + + memset(config, 0, sizeof(config)); + config[0] = 1; + config[1] = 1; + if (usblink_setdata(d->usb_link, "config", config, sizeof(config), 1)) + return MVNC_ERROR; + + timeout = time_in_seconds() + STATUS_WAIT_TIMEOUT; + do { + if (usblink_getmyriadstatus(d->usb_link, &status)) + return MVNC_ERROR; + usleep(10000); + } while (status != MYRIAD_WAITING && + status != MYRIAD_FINISHED && time_in_seconds() < timeout); + + if (status != MYRIAD_WAITING && status != MYRIAD_FINISHED) + return MVNC_TIMEOUT; + + if (usblink_getdata(d->usb_link, "optimizationList", + d->optimisation_list, OPTIMISATION_LIST_BUFFER_SIZE, 0, 0)) + return MVNC_ERROR; + + for (i = 0; i < MAX_OPTIMISATIONS; i++) { + p = strchr(d->optimisation_list + i * OPTIMISATION_NAME_LEN, '~'); + if (p) + *p = 0; + } + + config[1] = 0; + if (usblink_setdata(d->usb_link, "config", config, sizeof(config), 0)) + return MVNC_ERROR; + return MVNC_OK; +} + +mvncStatus mvncGetDeviceOption(void *deviceHandle, int option, void *data, + unsigned int *dataLength) +{ + mvncStatus rc; + + if (deviceHandle == 0 && option == MVNC_LOG_LEVEL) { + PRINT("Warning: MVNC_LOG_LEVEL is not a Device Option, \ + please use mvncGetGlobalOption()!\n"); + return mvncGetGlobalOption(option, data, dataLength); + } + + if (!deviceHandle || !data || !dataLength) + return MVNC_INVALID_PARAMETERS; + + struct Device *d = (struct Device *) deviceHandle; + pthread_mutex_lock(&mm); + if (find_device(d)) { + pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + pthread_mutex_lock(&d->mm); + pthread_mutex_unlock(&mm); + switch (option) { + case MVNC_TEMP_LIM_LOWER: + *(float *) data = d->temp_lim_lower; + *dataLength = sizeof(int); + break; + case MVNC_TEMP_LIM_HIGHER: + *(float *) data = d->temp_lim_upper; + *dataLength = sizeof(int); + break; + case MVNC_BACKOFF_TIME_NORMAL: + *(int *) data = d->backoff_time_normal; + *dataLength = sizeof(int); + break; + case MVNC_BACKOFF_TIME_HIGH: + *(int *) data = d->backoff_time_high; + *dataLength = sizeof(int); + break; + case MVNC_BACKOFF_TIME_CRITICAL: + *(int *) data = d->backoff_time_critical; + *dataLength = sizeof(int); + break; + case MVNC_TEMPERATURE_DEBUG: + *(int *) data = d->temperature_debug; + *dataLength = sizeof(int); + break; + case MVNC_THERMAL_STATS: + if (!d->thermal_stats) { + pthread_mutex_unlock(&d->mm); + return MVNC_NO_DATA; + } + *(float **) data = d->thermal_stats; + *dataLength = THERMAL_BUFFER_SIZE; + break; + case 
MVNC_OPTIMISATION_LIST: + rc = get_optimisation_list(d); + if (rc) { + pthread_mutex_unlock(&d->mm); + return rc; + } + *(char **) data = d->optimisation_list; + *dataLength = OPTIMISATION_LIST_BUFFER_SIZE; + break; + case MVNC_THERMAL_THROTTLING_LEVEL: + *(int *) data = d->throttle_happened; + *dataLength = sizeof(int); + break; + default: + pthread_mutex_unlock(&d->mm); + return MVNC_INVALID_PARAMETERS; + } + pthread_mutex_unlock(&d->mm); + + return MVNC_OK; +} + +static int send_opt_data(struct Graph *g) +{ + int config[10]; + + config[0] = 1; // Version + config[1] = 0; // Query disable + config[2] = g->iterations; + config[3] = g->dev->temp_lim_upper; + config[4] = g->dev->temp_lim_lower; + config[5] = g->dev->backoff_time_normal; + config[6] = g->dev->backoff_time_high; + config[7] = g->dev->backoff_time_critical; + config[8] = g->dev->temperature_debug; + config[9] = g->network_throttle; + + if (usblink_setdata(g->dev->usb_link, "config", config, sizeof(config), 0)) + return MVNC_ERROR; + + return MVNC_OK; +} + +mvncStatus mvncLoadTensor(void *graphHandle, const void *inputTensor, + unsigned int inputTensorLength, void *userParam) +{ + if (!graphHandle || !inputTensor || inputTensorLength < 2) + return MVNC_INVALID_PARAMETERS; + + struct Graph *g = (struct Graph *) graphHandle; + pthread_mutex_lock(&mm); + if (find_graph(graphHandle)) { + pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + if (!g->started) { + if (send_opt_data(g)) { + pthread_mutex_unlock(&mm); + return MVNC_ERROR; + } + g->started = 1; + } + + while (g->have_data == 2) { + if (g->dont_block) { + pthread_mutex_unlock(&mm); + return MVNC_BUSY; + } + if (g->failed) { + pthread_mutex_unlock(&mm); + return MVNC_ERROR; + } + pthread_mutex_unlock(&mm); + usleep(1000); + pthread_mutex_lock(&mm); + if (find_graph(g)) { + pthread_mutex_unlock(&mm); + return MVNC_GONE; + } + } + pthread_mutex_lock(&g->dev->mm); + pthread_mutex_unlock(&mm); + + if (usblink_setdata(g->dev->usb_link, g->input_idx ? 
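/* ping-pong between the two staged input buffers */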
"input2" : "input1", + inputTensor, inputTensorLength, g->have_data == 0)) { + pthread_mutex_unlock(&mm); + return MVNC_ERROR; + } + + g->user_param[g->input_idx] = userParam; + g->input_idx = !g->input_idx; + g->have_data++; + pthread_mutex_unlock(&g->dev->mm); + return MVNC_OK; +} + +mvncStatus mvncGetResult(void *graphHandle, void **outputData, + unsigned int *outputDataLength, void **userParam) +{ + int rc, unlock_own = 0; + + if (!graphHandle || !outputData || !outputDataLength) + return MVNC_INVALID_PARAMETERS; + + struct Graph *g = (struct Graph *) graphHandle; + pthread_mutex_lock(&mm); + if (find_graph(graphHandle)) { + pthread_mutex_unlock(&mm); + return MVNC_INVALID_PARAMETERS; + } + + while (!g->have_data) { + if (g->dont_block) { + pthread_mutex_unlock(&mm); + return MVNC_NO_DATA; + } + pthread_mutex_unlock(&mm); + usleep(1000); + pthread_mutex_lock(&mm); + if (find_graph(g)) { + pthread_mutex_unlock(&mm); + return MVNC_GONE; + } + } + + double timeout = time_in_seconds() + STATUS_WAIT_TIMEOUT; + do { + pthread_mutex_lock(&g->dev->mm); + pthread_mutex_unlock(&mm); + if (!usblink_getdata(g->dev->usb_link, "output", g->output_data, + 2 * g->noutputs, 0, 0)) { + unsigned int length = DEBUG_BUFFER_SIZE + THERMAL_BUFFER_SIZE + + sizeof(int) + sizeof(*g->time_taken) * g->nstages; + + if (usblink_getdata(g->dev->usb_link, "auxBuffer", g->aux_buffer, + length, 0, g->have_data == 2)) { + g->failed = 1; + pthread_mutex_unlock(&g->dev->mm); + return MVNC_ERROR; + } + unlock_own = 1; + break; + } + pthread_mutex_unlock(&g->dev->mm); + usleep(1000); + pthread_mutex_lock(&mm); + if (find_graph(g)) { + pthread_mutex_unlock(&mm); + return MVNC_GONE; + } + } while (time_in_seconds() < timeout); + + g->dev->throttle_happened = *(int *) (g->aux_buffer + DEBUG_BUFFER_SIZE + + THERMAL_BUFFER_SIZE); + *outputData = g->output_data; + *outputDataLength = 2 * g->noutputs; + *userParam = g->user_param[g->output_idx]; + g->output_idx = !g->output_idx; + g->have_data--; + + if (unlock_own) { + rc = *g->debug_buffer ? MVNC_MYRIAD_ERROR : MVNC_OK; + if (rc) + g->failed = 1; + pthread_mutex_unlock(&g->dev->mm); + } else { + rc = MVNC_TIMEOUT; + g->failed = 1; + pthread_mutex_unlock(&mm); + } + + return rc; +} diff --git a/api/src/usb_boot.c b/api/src/usb_boot.c new file mode 100644 index 0000000..2f7cf27 --- /dev/null +++ b/api/src/usb_boot.c @@ -0,0 +1,335 @@ +/* +* Copyright 2017 Intel Corporation. +* The source code, information and material ("Material") contained herein is +* owned by Intel Corporation or its suppliers or licensors, and title to such +* Material remains with Intel Corporation or its suppliers or licensors. +* The Material contains proprietary information of Intel or its suppliers and +* licensors. The Material is protected by worldwide copyright laws and treaty +* provisions. +* No part of the Material may be used, copied, reproduced, modified, published, +* uploaded, posted, transmitted, distributed or disclosed in any way without +* Intel's prior express written permission. No license under any patent, +* copyright or other intellectual property rights in the Material is granted to +* or conferred upon you, either expressly, by implication, inducement, estoppel +* or otherwise. +* Any license under such intellectual property rights must be express and +* approved by Intel in writing. 
+*/ + +// USB utility for use with Myriad2v2 ROM +// Very heavily modified from Sabre version of usb_boot +// Author: David Steinberg + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "usb_boot.h" +#include "mvnc.h" +#include "common.h" + +#define DEFAULT_WRITE_TIMEOUT 2000 +#define DEFAULT_CONNECT_TIMEOUT 20 // in 100ms units +#define DEFAULT_CHUNK_SZ 1024 * 1024 + +static unsigned int bulk_chunk_len = DEFAULT_CHUNK_SZ; +static int write_timeout = DEFAULT_WRITE_TIMEOUT; +static int connect_timeout = DEFAULT_CONNECT_TIMEOUT; +static int initialized; + +void __attribute__ ((constructor)) usb_library_load() +{ + initialized = !libusb_init(NULL); +} + +void __attribute__ ((destructor)) usb_library_unload() +{ + if (initialized) + libusb_exit(NULL); +} + +typedef struct timespec highres_time_t; + +static inline void highres_gettime(highres_time_t *ptr) +{ + clock_gettime(CLOCK_REALTIME, ptr); +} + +static inline double highres_elapsed_ms(highres_time_t *start, highres_time_t *end) +{ + struct timespec temp; + if ((end->tv_nsec - start->tv_nsec) < 0) { + temp.tv_sec = end->tv_sec - start->tv_sec - 1; + temp.tv_nsec = 1000000000 + end->tv_nsec - start->tv_nsec; + } else { + temp.tv_sec = end->tv_sec - start->tv_sec; + temp.tv_nsec = end->tv_nsec - start->tv_nsec; + } + return (double)(temp.tv_sec * 1000) + (((double)temp.tv_nsec) * 0.000001); +} + +static const char *gen_addr(libusb_device *dev) +{ + static char buff[4 * 7] = ""; // '255-' x 7 (also gives us nul-terminator for last entry) + uint8_t pnums[7]; + int pnum_cnt, i; + char *p; + + pnum_cnt = libusb_get_port_numbers(dev, pnums, 7); + if (pnum_cnt == LIBUSB_ERROR_OVERFLOW) { + // shouldn't happen! + strcpy(buff, ""); + return buff; + } + p = buff; + for (i = 0; i < pnum_cnt - 1; i++) + p += snprintf(p, sizeof(buff) - strlen(buff), "%u.", pnums[i]); + snprintf(p, sizeof(buff) - strlen(buff), "%u", pnums[i]); + return buff; +} + +// if device is NULL, return device address for device at index idx +// if device is not NULL, search by name and return device struct +int usb_find_device(unsigned idx, char *addr, unsigned addr_size, void **device, + int vid, int pid) +{ + static libusb_device **devs; + libusb_device *dev; + struct libusb_device_descriptor desc; + int count = 0; + size_t i; + int res; + + if (!initialized) { + PRINT_INFO(stderr, + "Library has not been initialized when loaded\n"); + return MVNC_ERROR; + } + if (!devs || idx == 0) { + if (devs) { + libusb_free_device_list(devs, 1); + devs = 0; + } + if ((res = libusb_get_device_list(NULL, &devs)) < 0) { + PRINT_INFO(stderr, + "Unable to get USB device list: %s\n", + libusb_strerror(res)); + return MVNC_ERROR; + } + } + + i = 0; + while ((dev = devs[i++]) != NULL) { + if ((res = libusb_get_device_descriptor(dev, &desc)) < 0) { + PRINT_INFO(stderr, + "Unable to get USB device descriptor: %s\n", + libusb_strerror(res)); + continue; + } + if ((desc.idVendor == vid && desc.idProduct == pid) || + (pid == 0 && vid == 0 && ((desc.idVendor == DEFAULT_VID + && desc.idProduct == DEFAULT_PID) + || (desc.idVendor == + DEFAULT_OPEN_VID && + desc.idProduct == + DEFAULT_OPEN_PID)))) { + if (device) { + const char *caddr = gen_addr(dev); + if (!strcmp(caddr, addr)) { + PRINT_DEBUG(stderr, + "Found Address: %s - VID/PID %04x:%04x\n", + addr, desc.idVendor, desc.idProduct); + libusb_ref_device(dev); + libusb_free_device_list(devs, 1); + *device = dev; + devs = 0; + return 0; + } + } else if (idx == count) { + const char *caddr 
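/* dotted USB port path such as "2.3" */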
= gen_addr(dev); + PRINT_DEBUG(stderr, + "Device %d Address: %s - VID/PID %04x:%04x\n", + idx, caddr, desc.idVendor, desc.idProduct); + strncpy(addr, caddr, addr_size); + return 0; + } + count++; + } + } + libusb_free_device_list(devs, 1); + devs = 0; + return MVNC_DEVICE_NOT_FOUND; +} + +static libusb_device_handle *usb_open_device(libusb_device *dev, uint8_t *endpoint, + char *err_string_buff, unsigned buff_size) +{ + struct libusb_config_descriptor *cdesc; + const struct libusb_interface_descriptor *ifdesc; + libusb_device_handle *h = NULL; + int res, i; + + if ((res = libusb_open(dev, &h)) < 0) { + snprintf(err_string_buff, buff_size, "cannot open device: %s\n", + libusb_strerror(res)); + return 0; + } + if ((res = libusb_set_configuration(h, 1)) < 0) { + snprintf(err_string_buff, buff_size, + "setting config 1 failed: %s\n", libusb_strerror(res)); + libusb_close(h); + return 0; + } + if ((res = libusb_claim_interface(h, 0)) < 0) { + snprintf(err_string_buff, buff_size, + "claiming interface 0 failed: %s\n", + libusb_strerror(res)); + libusb_close(h); + return 0; + } + if ((res = libusb_get_config_descriptor(dev, 0, &cdesc)) < 0) { + snprintf(err_string_buff, buff_size, + "Unable to get USB config descriptor: %s\n", + libusb_strerror(res)); + libusb_close(h); + return 0; + } + + ifdesc = cdesc->interface->altsetting; + for (i = 0; i < ifdesc->bNumEndpoints; i++) { + PRINT_DEBUG(stderr, + "Found EP 0x%02x : max packet size is %u bytes\n", + ifdesc->endpoint[i].bEndpointAddress, + ifdesc->endpoint[i].wMaxPacketSize); + if ((ifdesc->endpoint[i].bmAttributes & LIBUSB_TRANSFER_TYPE_MASK) != + LIBUSB_TRANSFER_TYPE_BULK) + continue; + if (! + (ifdesc->endpoint[i].bEndpointAddress & LIBUSB_ENDPOINT_DIR_MASK)) { + *endpoint = ifdesc->endpoint[i].bEndpointAddress; + bulk_chunk_len = ifdesc->endpoint[i].wMaxPacketSize; + libusb_free_config_descriptor(cdesc); + return h; + } + } + libusb_free_config_descriptor(cdesc); + strcpy(err_string_buff, "Unable to find BULK OUT endpoint\n"); + libusb_close(h); + return 0; +} + +// timeout: -1 = no (infinite) timeout, 0 = must happen immediately +static int wait_findopen(const char *device_address, int timeout, + libusb_device ** dev, libusb_device_handle ** devh, + uint8_t * endpoint) +{ + int i, rc; + char last_open_dev_err[128]; + + usleep(100000); + if (mvnc_loglevel > 1) { + // I know about switch(), but for some reason -1 is picked up correctly + if (timeout == -1) + PRINT("Starting wait for connect, no timeout\n"); + else if (timeout == 0) + PRINT("Trying to connect\n"); + else + PRINT("Starting wait for connect with %ums timeout\n", timeout * 100); + } + + last_open_dev_err[0] = 0; + i = 0; + for (;;) { + rc = usb_find_device(0, (char *) device_address, 0, + (void **) dev, DEFAULT_VID, DEFAULT_PID); + if (rc < 0) + return MVNC_ERROR; + if (!rc) { + if ((*devh = usb_open_device(*dev, endpoint, last_open_dev_err, 128))) { + PRINT_DEBUG(stderr, "Found and opened device\n"); + return 0; + } + libusb_unref_device(*dev); + } + + if (timeout != -1 && i == timeout) { + PRINT_INFO(stderr, "%serror: device not found!\n", + last_open_dev_err[0] ? last_open_dev_err : ""); + return rc ? 
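/* nonzero rc: the last scan never found the device at all */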
MVNC_DEVICE_NOT_FOUND : MVNC_TIMEOUT; + } + i++; + usleep(100000); + } +} + +static int send_file(libusb_device_handle * h, uint8_t endpoint, + const uint8_t * tx_buf, unsigned file_size) +{ + const uint8_t *p; + int rc; + int wb, twb, wbr; + double elapsed_time; + highres_time_t t1, t2; + + elapsed_time = 0; + twb = 0; + p = tx_buf; + PRINT_DEBUG(stderr, "Performing bulk write of %u bytes...\n", + file_size); + + while (twb < file_size) { + highres_gettime(&t1); + wb = file_size - twb; + if (wb > bulk_chunk_len) + wb = bulk_chunk_len; + wbr = 0; + rc = libusb_bulk_transfer(h, endpoint, (void *) p, wb, &wbr, + write_timeout); + + if (rc || (wb != wbr)) { + if (rc == LIBUSB_ERROR_NO_DEVICE) + break; + + PRINT_INFO(stderr, + "bulk write: %s (%d bytes written, %d bytes to write)\n", + libusb_strerror(rc), wbr, wb); + if (rc == LIBUSB_ERROR_TIMEOUT) + return MVNC_TIMEOUT; + else + return MVNC_ERROR; + } + highres_gettime(&t2); + elapsed_time += highres_elapsed_ms(&t1, &t2); + twb += wbr; + p += wbr; + } + PRINT_DEBUG(stderr, + "Successfully sent %u bytes of data in %lf ms (%lf MB/s)\n", + file_size, elapsed_time, + ((double) file_size / 1048576.) / (elapsed_time * 0.001)); + return 0; +} + +int usb_boot(const char *addr, const void *mvcmd, unsigned size) +{ + int rc; + libusb_device *dev; + libusb_device_handle *h; + uint8_t endpoint; + + rc = wait_findopen(addr, connect_timeout, &dev, &h, &endpoint); + if (rc) + return rc; + rc = send_file(h, endpoint, mvcmd, size); + libusb_release_interface(h, 0); + libusb_close(h); + libusb_unref_device(dev); + return rc; +} diff --git a/api/src/usb_boot.h b/api/src/usb_boot.h new file mode 100644 index 0000000..7659b5f --- /dev/null +++ b/api/src/usb_boot.h @@ -0,0 +1,21 @@ +/* +* Copyright 2017 Intel Corporation. +* The source code, information and material ("Material") contained herein is +* owned by Intel Corporation or its suppliers or licensors, and title to such +* Material remains with Intel Corporation or its suppliers or licensors. +* The Material contains proprietary information of Intel or its suppliers and +* licensors. The Material is protected by worldwide copyright laws and treaty +* provisions. +* No part of the Material may be used, copied, reproduced, modified, published, +* uploaded, posted, transmitted, distributed or disclosed in any way without +* Intel's prior express written permission. No license under any patent, +* copyright or other intellectual property rights in the Material is granted to +* or conferred upon you, either expressly, by implication, inducement, estoppel +* or otherwise. +* Any license under such intellectual property rights must be express and +* approved by Intel in writing. +*/ + +extern int mvnc_loglevel; +int usb_find_device(unsigned idx, char *addr, unsigned addrsize, void **device, int vid, int pid); +int usb_boot(const char *addr, const void *mvcmd, unsigned size); diff --git a/api/src/usb_link.h b/api/src/usb_link.h new file mode 100644 index 0000000..4c99f35 --- /dev/null +++ b/api/src/usb_link.h @@ -0,0 +1,28 @@ +/* +* Copyright 2017 Intel Corporation. +* The source code, information and material ("Material") contained herein is +* owned by Intel Corporation or its suppliers or licensors, and title to such +* Material remains with Intel Corporation or its suppliers or licensors. +* The Material contains proprietary information of Intel or its suppliers and +* licensors. The Material is protected by worldwide copyright laws and treaty +* provisions. 
+* No part of the Material may be used, copied, reproduced, modified, published, +* uploaded, posted, transmitted, distributed or disclosed in any way without +* Intel's prior express written permission. No license under any patent, +* copyright or other intellectual property rights in the Material is granted to +* or conferred upon you, either expressly, by implication, inducement, estoppel +* or otherwise. +* Any license under such intellectual property rights must be express and +* approved by Intel in writing. +*/ + +#include "USBLinkDefines.h" + +int usblink_sendcommand(void *f, hostcommands_t command); +int usblink_resetmyriad(void *f); +int usblink_getmyriadstatus(void *f, myriadStatus_t *myriadState); +void *usblink_open(const char *path); +void usblink_close(void *f); +int usblink_setdata(void *f, const char *name, const void *data, unsigned int length, int hostready); +int usblink_getdata(void *f, const char *name, void *data, unsigned int length, unsigned int offset, int hostready); +void usblink_resetall(); diff --git a/api/src/usb_link_vsc.c b/api/src/usb_link_vsc.c new file mode 100644 index 0000000..d9c8f26 --- /dev/null +++ b/api/src/usb_link_vsc.c @@ -0,0 +1,229 @@ +/* +* Copyright 2017 Intel Corporation. +* The source code, information and material ("Material") contained herein is +* owned by Intel Corporation or its suppliers or licensors, and title to such +* Material remains with Intel Corporation or its suppliers or licensors. +* The Material contains proprietary information of Intel or its suppliers and +* licensors. The Material is protected by worldwide copyright laws and treaty +* provisions. +* No part of the Material may be used, copied, reproduced, modified, published, +* uploaded, posted, transmitted, distributed or disclosed in any way without +* Intel's prior express written permission. No license under any patent, +* copyright or other intellectual property rights in the Material is granted to +* or conferred upon you, either expressly, by implication, inducement, estoppel +* or otherwise. +* Any license under such intellectual property rights must be express and +* approved by Intel in writing. 
+*/ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "usb_link.h" +#include "usb_boot.h" +#include "common.h" + +#define USB_ENDPOINT_IN 0x81 +#define USB_ENDPOINT_OUT 0x01 +#define USB_TIMEOUT 10000 +#define USB_MAX_PACKET_SIZE 1024 * 1024 * 10 + +#define SLEEP_MS 100 +#define ITERATIONS 50 + +static int usb_write(void *f, const void *data, size_t size) +{ + while (size > 0) { + int bt, ss = size; + if (ss > USB_MAX_PACKET_SIZE) + ss = USB_MAX_PACKET_SIZE; + if (libusb_bulk_transfer(f, USB_ENDPOINT_OUT, (unsigned char *) data, ss, &bt, + USB_TIMEOUT)) + return -1; + data = (char *) data + bt; + size -= bt; + } + return 0; +} + +static int usb_read(void *f, void *data, size_t size) +{ + while (size > 0) { + int bt, ss = size; + if (ss > USB_MAX_PACKET_SIZE) + ss = USB_MAX_PACKET_SIZE; + if (libusb_bulk_transfer(f, USB_ENDPOINT_IN, data, ss, &bt, USB_TIMEOUT)) + return -1; + data = (char *) data + bt; + size -= bt; + } + return 0; +} + +void *usblink_open(const char *path) +{ + int rc; + libusb_device_handle *h = NULL; + libusb_device *dev; + + rc = usb_find_device(0, (char *) path, 0, (void **) &dev, + DEFAULT_OPEN_VID, DEFAULT_OPEN_PID); + if (rc < 0) + return 0; + + rc = libusb_open(dev, &h); + if (rc < 0) { + libusb_unref_device(dev); + return 0; + } + + libusb_unref_device(dev); + rc = libusb_claim_interface(h, 0); + if (rc < 0) { + libusb_close(h); + return 0; + } + return h; +} + +void usblink_close(void *f) +{ + libusb_release_interface(f, 0); + libusb_close(f); +} + +void usblink_resetall() +{ + libusb_device **devs; + libusb_device *dev; + struct libusb_device_descriptor desc; + libusb_device_handle *h; + size_t i; + int rc, iters = 0, cnt_bootrom = 0, cnt_runtime = 0, cnt_after = 0; + + if ((rc = libusb_get_device_list(NULL, &devs)) < 0) + return; + i = 0; + while ((dev = devs[i++]) != NULL) { + if (libusb_get_device_descriptor(dev, &desc) < 0) + continue; + if (desc.idVendor == DEFAULT_VID && + desc.idProduct == DEFAULT_PID) + cnt_bootrom++; + // If Runtime device found, reset it + if (desc.idVendor == DEFAULT_OPEN_VID && + desc.idProduct == DEFAULT_OPEN_PID) { + cnt_runtime++; + rc = libusb_open(dev, &h); + if (rc < -1) + continue; + rc = libusb_claim_interface(h, 0); + if (rc < 0) { + libusb_close(h); + continue; + } + PRINT_DEBUG(stderr, "Found stale device, resetting\n"); + usblink_resetmyriad(h); + usblink_close(h); + } + } + // If some devices needed reset + if(cnt_runtime > 0){ + iters = 0; + // Wait until all devices re-enumerate, or timeout occurs + while((cnt_after < cnt_bootrom + cnt_runtime) && (iters < ITERATIONS)){ + usleep(SLEEP_MS*1000); + cnt_after = 0; + if ((rc = libusb_get_device_list(NULL, &devs)) < 0) + return; + i = 0; + while ((dev = devs[i++]) != NULL) { + if ((rc = libusb_get_device_descriptor(dev, &desc)) < 0) + continue; + if (desc.idVendor == DEFAULT_VID && + desc.idProduct == DEFAULT_PID) + cnt_after++; + } + iters++; + } + } + libusb_free_device_list(devs, 1); +} + +int usblink_setdata(void *f, const char *name, const void *data, + unsigned int length, int host_ready) +{ + usbHeader_t header; + memset(&header, 0, sizeof(header)); + header.cmd = USB_LINK_HOST_SET_DATA; + header.hostready = host_ready; + strcpy(header.name, name); + header.dataLength = length; + if (usb_write(f, &header, sizeof(header))) + return -1; + + unsigned int operation_permit = 0xFFFF; + if (usb_read(f, &operation_permit, sizeof(operation_permit))) + return -1; 
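+ /* the device grants the transfer by replying with the magic word 0xABCD */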
+ + if (operation_permit != 0xABCD) + return -1; + int rc = usb_write(f, data, length); + return rc; +} + +int usblink_getdata(void *f, const char *name, void *data, unsigned int length, + unsigned int offset, int host_ready) +{ + usbHeader_t header; + memset(&header, 0, sizeof(header)); + header.cmd = USB_LINK_HOST_GET_DATA; + header.hostready = host_ready; + strcpy(header.name, name); + header.dataLength = length; + header.offset = offset; + if (usb_write(f, &header, sizeof(header))) + return -1; + + unsigned int operation_permit = 0xFFFF; + if (usb_read(f, &operation_permit, sizeof(operation_permit))) + return -1; + + if (operation_permit != 0xABCD) + return -1; + return usb_read(f, data, length); +} + +int usblink_resetmyriad(void *f) +{ + usbHeader_t header; + memset(&header, 0, sizeof(header)); + header.cmd = USB_LINK_RESET_REQUEST; + if (usb_write(f, &header, sizeof(header))) + return -1; + return 0; +} + +int usblink_getmyriadstatus(void *f, myriadStatus_t* myriad_state) +{ + usbHeader_t header; + memset(&header, 0, sizeof(header)); + header.cmd = USB_LINK_GET_MYRIAD_STATUS; + if (usb_write(f, &header, sizeof(header))) + return -1; + return usb_read(f, myriad_state, sizeof(*myriad_state)); +} diff --git a/docs/Caffe.md b/docs/Caffe.md index ce3c915..fea53db 100644 --- a/docs/Caffe.md +++ b/docs/Caffe.md @@ -1,22 +1,22 @@ # Caffe Support ## Introduction -[Caffe](http://caffe.berkeleyvision.org/) is a deep learning framework developed by Berkeley AI Research ([BAIR](http://bair.berkley.edu)) and by community contributors. The setup script currently downloads BVLC Caffe and installs it in a system location. Other versions of Caffe are not supported at this time. For more information please visit http://caffe.berkeleyvision.org/ +[Caffe](http://caffe.berkeleyvision.org/) is a deep learning framework developed by Berkeley AI Research ([BAIR](http://bair.berkley.edu)) and by community contributors. The setup script currently downloads Berkeley Vision and Learning Center (BVLC) Caffe and installs it in a system location. Other versions of Caffe are not supported at this time. For more information, please visit http://caffe.berkeleyvision.org/. -Default Caffe Installation Location: /opt/movidius/caffe
-Checkout Berkley Vision's Web Image Classification [demo](http://demo.caffe.berkeleyvision.org/) +* Default Caffe installation location: /opt/movidius/caffe
+* Check out Berkeley Vision's Web Image Classification [demo](http://demo.caffe.berkeleyvision.org/) ## Caffe Zoo -Berkley Vision hosts a Caffe Model Zoo for researchers and engineers to contribute Caffe models for various tasks. Please visit the [Berkley Caffe Zoo](http://caffe.berkeleyvision.org/model_zoo.html) page to learn more about the caffe zoo and how to create your own Caffe Zoo model and contribute. +Berkeley Vision hosts a Caffe Model Zoo for researchers and engineers to contribute Caffe models for various tasks. Please visit the [Berkeley Caffe Zoo](http://caffe.berkeleyvision.org/model_zoo.html) page to learn more about the Caffe Zoo, how to create your own Caffe Zoo model, and how to contribute. -Caffe Zoo has several models contributed including a model network that can classify images for Age and Gender. This network trained by [Gil Levi](https://gist.github.com/GilLevi) and Tal Hassner is at this [Gender Net Caffe Zoo Model on GitHub](https://gist.github.com/GilLevi/c9e99062283c719c03de) +Caffe Zoo has several models contributed, including a model network that can classify images for age and gender. This network, trained by [Gil Levi](https://gist.github.com/GilLevi) and Tal Hassner, is available at [Gender Net Caffe Zoo Model on GitHub](https://gist.github.com/GilLevi/c9e99062283c719c03de). -Caffe models consists of two files that are used for compiling the caffe model using the [Neural Compute Compiler](tools/compile.md) -* Caffe Network Description (.prototxt): Text file that describes the topology and layers of the network. -* Caffe Weights (.caffemodel): Contains the weights for each layer that are obtained after training a model. +Caffe models consist of two files that are used for compiling the Caffe model using the [Neural Compute Compiler](tools/compile.md). +* Caffe Network Description (.prototxt): Text file that describes the topology and layers of the network +* Caffe Weights (.caffemodel): Contains the weights for each layer that are obtained after training a model ## Neural Compute Caffe Layer Support -The following layers are supported in Caffe by the Neural Compute SDK. The Neural Compute Stick does not support training, so some layers that are only required for training are not supported. +The following layers are supported in Caffe by the Intel® Movidius™ Neural Compute SDK. The Intel® Movidius™ Neural Compute Stick does not support training, so some layers that are only required for training are not supported. ### Activation/Neuron * bias @@ -45,7 +45,7 @@ The following layers are supported in Caffe by the Neur ### Vision * conv - * Regular Convolution - 1x1s1, 3x3s1, 5x5s1, 7x7s1, 7x7s2, 7x7s4 + * Regular Convolution - 1x1s1, 3x3s1, 5x5s1, 7x7s1, 7x7s2, 7x7s4 * Group Convolution - <1024 groups total * deconv * pooling @@ -53,7 +53,7 @@ The following layers are supported in Caffe by the Neural Compute SDK.
The Neur # Known Issues ### Caffe Input Layer -Limitation: Batch Size which is the first dimension must always be 1 +Limitation: Batch Size, which is the first dimension, must always be 1 Limitation: The number of inputs must be 1 @@ -80,14 +80,14 @@ input: "data" ### Input Name Input name should be always called "data" -This works +This works: ``` name: "GoogleNet" input: "data" input_shape { dim:1 dim:3 dim:224 dim:224 } ``` -This does not +This does not: ``` name: "GoogleNet" input: "data_x" @@ -96,7 +96,7 @@ input: "data_x" ``` ### Non-Square Convolutions -Limitation: We don't support non-square convolutions such as 1x20 +Limitation: We don't support non-square convolutions such as 1x20. ``` input: "data" input_shape @@ -115,7 +115,7 @@ layer { ``` ### Crop Layer -Limitation: Crop layer cannot take reference size layer from input:"data" +Limitation: Crop layer cannot take reference size layer from input:"data". ``` layer { @@ -132,24 +132,24 @@ layer { ``` ### Size Limitations -Compiled Movidius™ "graph" file < 320MB -Intermediate layer buffer size < 100MB +Compiled Movidius "graph" file < 320 MB; +Intermediate layer buffer size < 100 MB ``` [Error 35] Setup Error: Not enough resources on Myriad to process this network ``` -Scratch Memory size < 112KB +Scratch Memory size < 112 KB ``` [Error 25] Myriad Error: "Matmul scratch memory [112640] lower than required [165392]" ``` ## Caffe Networks -The following networks are validated and known to work on the Movidius™ Neural Compute SDK. +The following networks are validated and known to work on the Intel Movidius Neural Compute SDK: - GoogleNet V1 - SqueezeNet V1.1 - LeNet - CaffeNet - VGG (Sousmith VGG_A) - AlexNet - +- TinyYolo v1 diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 5aad8c5..0000000 --- a/docs/README.md +++ /dev/null @@ -1,91 +0,0 @@ - - -# Introduction -The Movidius™ Neural Compute SDK and Movidius™ Neural Compute Stick (NCS) enables rapid prototyping, validation and deployment of Deep Neural Networks (DNNs.) - -The NCS is used in two primary scenarios: -- Profiling, tuning, and compiling a DNN on a development computer (host system) with the tools provided in the Movidius™ Neural Compute SDK. In this scenario the host system is typically a desktop or laptop machine running Ubuntu 16.04 Desktop (x86, 64 bit), but you can use any supported platform for these steps. - -- Prototyping a user application on a development computer (host system) which accesses the hardware of the NCS to accelerate DNN inferences via the API provided with the Movidius™ Neural Compute SDK. In this scenario the host system can be a developer workstation or any developer system that runs an operating system compatible with the API. - -The following diagram shows the typical workflow for development with the NCS: -![](images/ncs_workflow.jpg) - -The training phase does not utilize the NCS hardware or SDK, while the subsequent phases of “profiling, tuning and compiling” and “prototyping” do require the NCS hardware and the accompanying Movidius™ Neural Compute SDK - -The SDK contains a set of software tools to compile, profile, and check validity of your DNN as well as an API for both the C and Python programming languages. The API is provided to allow users to create software which offloads the neural network computation onto the Movidius™ Neural Compute Stick. - -Here is more information on the [architecture](ncs1arch.md) of the Neural Compute Stick. 
- - -# Frameworks -Neural Compute SDK currently supports two Deep Learning frameworks. -1. [Caffe](Caffe.md) : Caffe is a deep learning framework from Berkeley Vision Labs. -2. [TensorFlow™](TensorFlow.md): TensorFlow™ is a deep learning framework from Google. - -[See how to use networks from these supported framework with NCS.](configure_network.md) - - - -# Installation and Examples -The following commands install NCSDK and run examples. Detailed instructions for [installation and Configuration](install.md) - -``` -git clone http://github.com/Movidius/ncsdk && cd ncsdk && make install && make examples - -``` - -# Movidius™ Neural Compute SDK Tools -The SDK comes with a set of tools to assist in development and deployment of applications that utilize hardware accelerated Deep Neural Networks via the Movidius™ Neural Compute Stick (NCS). Each tool and its usage is described on this page below - -* [mvNCCompile](tools/compile.md): Converts Caffe/TF network and weights to Movidius™ Internal compiled format - -* [mvNCProfile](tools/profile.md): Provides layer by layer statistics to evaluate the performance of Caffe/TF networks on the NCS - -* [mvNCCheck](tools/check.md): Compares the results from an inference by running the network on the NCS and Caffe/TF - - -# Neural Compute API -Applications for inferencing with Neural Compute SDK can be developed either in C/C++ or Python. The API provides a software interface to Open/Close Neural Compute Sticks, load graphs into the NCS and run inferences on the stick. - -* [C API](c_api/readme.md) -* [Python API](py_api/readme.md) - - -# Movidius™ Neural Compute User Forum - -There is an active user forum in which users of the Neural Compute Stick discuss ideas and issues they have with regard to the NCS. The link to the NCS User Forum is: - -[https://ncsforum.movidius.com](https://ncsforum.movidius.com) - -The forum is a good place to go if you need help troubleshooting an issue. You may find other people that have figured out the issue or get ideas for how to fix it. The forum is also monitored by Movidius™ engineers which provide solutions as well. - - -# Examples - -There are several examples including the following at the github -* Caffe - * GoogLeNet - * AlexNet - * SqueezeNet -* TensorFlow™ - * Inception V1 - * Inception V3 -* Apps - * hello_ncs_py - * hello_ncs_cpp - * multistick_cpp - -The examples demonstrate compiling, profiling and running inferences using the network on the Movidius™ Neural Compute Stick. -Each example contains a Makefile. Running 'make help' in the example's base directory will give possible make targets. 
- -``` - -git clone http://github.com/Movidius/ncsdk # Already done during installation -(cd ncsdk/examples && make) # run all examples -(cd ncsdk/examples/caffe/GoogLeNet && make) # Run just one example - -``` - - -[Release Notes](release_notes.md) diff --git a/docs/TOC.md b/docs/TOC.md index 56597e3..2887a46 100644 --- a/docs/TOC.md +++ b/docs/TOC.md @@ -1,17 +1,18 @@ # NC SDK Documentation Table of Contents - Introduction - - [NCS Architecture](ncs1arch.md) + - [Intel® Movidius™ NCS Architecture](ncs1arch.md) - Frameworks - [Caffe Support](Caffe.md) - - [TensorFlow Support](TensorFlow.md) + - [TensorFlow™ Support](TensorFlow.md) + - [Guidance for Compiling TensorFlow™ Networks](tf_compile_guidance.md) - [Configure Networks for NCS](configure_network.md) - Installation and examples - [Detailed Installation Instructions](install.md) - [Virtual Machine Configuration](VirtualMachineConfig.md) - - [Installation manifiest](manifest.md) + - [Installation Manifest](manifest.md) - NC SDK Tools - [mvNCCompile](tools/compile.md) @@ -25,3 +26,9 @@ - Neural Compute User Forum - Examples + +- Neural Compute App Zoo + +- Troubleshooting + +- Release Notes diff --git a/docs/TensorFlow.md b/docs/TensorFlow.md index 4b01aec..1488546 100644 --- a/docs/TensorFlow.md +++ b/docs/TensorFlow.md @@ -1,15 +1,15 @@ # TensorFlow™ Support # Introduction -[TensorFlow™](https://www.tensorflow.org/) is a deep learning framework pionered by Google. The NCSDK introduced TensorFlow™ support with the 1.09.xx NCSDK release. Validation has been done with TensorFlow™ r1.3. The TensorFlow™ website describes it as "TensorFlow™ is an open source software library for numerical computation using data flow graphs. Nodes in the graph represent mathematical operations, while the graph edges represent the multidimensional data arrays (tensors) communicated between them." +[TensorFlow™](https://www.tensorflow.org/) is a deep learning framework pioneered by Google. The NCSDK introduced TensorFlow™ support with the 1.09.xx NCSDK release and TensorFlow™ 1.3. Validation for each release happens on the TensorFlow™ version noted in the release notes. As described on the TensorFlow™ website, "TensorFlow™ is an open source software library for numerical computation using data flow graphs. Nodes in the graph represent mathematical operations, while the graph edges represent the multidimensional data arrays (tensors) communicated between them." -Default Installation Location: /opt/movidius/tensorflow +* Default installation location: /opt/movidius/tensorflow -# TensorFlow™ Model Zoo -TensorFlow™ has a model GitHub repo at https://github.com/tensorflow/models similar to the Caffe Zoo for Caffe. The TensorFlow™ models GitHub repository contains several models which are maintained by the respective autors unlike Caffe which is not a single GitHub repo. - -# Save Session with graph and checkpoint information +# TensorFlow Model Zoo +TensorFlow has a model GitHub repo at https://github.com/tensorflow/models similar to the Caffe Zoo for Caffe. The TensorFlow models GitHub repository contains several models that are maintained by the respective authors, unlike the Caffe Zoo, which is not hosted in a single GitHub repo. +# Save Session with Graph and Checkpoint Information +The code below shows one way to save a TensorFlow session that can be compiled for the NCS.
```python import numpy as np import tensorflow as tf @@ -33,20 +33,40 @@ def run(name, image_size, num_classes): run('inception-v1', 224, 1001) ``` -# Compile for TensorFlow™ - +# Compile for TensorFlow +The command here shows how to compile the saved session from the above code sample: ``` mvNCCompile output/inception-v1.meta -in=input -on=InceptionV1/Logits/Predictions/Reshape_1 -s12 ``` -# Neural Compute TensorFlow™ Layer Support +# Guidance for Compiling TensorFlow Networks +If you are training a TensorFlow network, you will want to [follow the guidance for creating an inference-only version of the network](tf_compile_guidance.md) that is suitable for compiling via the NCSDK compiler. -# TensorFlow™ Networks Supported +# TensorFlow Networks Supported * Inception V1 +* Inception V2 * Inception V3 * Inception V4 * Inception ResNet V2 -* MobileNet +* MobileNet_v1_1.0 variants: + * MobileNet_v1_1.0_224 + * MobileNet_v1_1.0_192 + * MobileNet_v1_1.0_160 + * MobileNet_v1_1.0_128 + * MobileNet_v1_0.75_224 + * MobileNet_v1_0.75_192 + * MobileNet_v1_0.75_160 + * MobileNet_v1_0.75_128 + * MobileNet_v1_0.5_224 + * MobileNet_v1_0.5_192 + * MobileNet_v1_0.5_160 + * MobileNet_v1_0.5_128 + * MobileNet_v1_0.25_224 + * MobileNet_v1_0.25_192 + * MobileNet_v1_0.25_160 + * MobileNet_v1_0.25_128 + +_*See the release notes for supported networks for a particular release._ diff --git a/docs/VirtualMachineConfig.md b/docs/VirtualMachineConfig.md index 95056a2..b7e20cd 100644 --- a/docs/VirtualMachineConfig.md +++ b/docs/VirtualMachineConfig.md @@ -1,15 +1,15 @@ # Virtual Machine Configurations -The following configuration has been tested with the 1.09 SDK release +The following configuration has been tested with the 1.09 SDK release. ## General Requirements -- Virtualbox 5.1.28 (later releases should be fine but not tested) +- Virtualbox 5.1.28 (later releases should be fine, but have not been tested) - Guest Extensions installed -- You will need to select usb3.0 and create two filters: - - USB2 filter with vendor ID 03e7 and product ID 2150 - - USB3 filter with vendor ID 040e and product ID f63b +- You will need to select USB 3.0 and create two filters: + - USB2 filter with vendor ID 03e7 + - USB3 filter with vendor ID 040e - Host OS (these have been tested, other may work): - - OSX Yosemite 10.10.5 + - OS X Yosemite 10.10.5 - Windows 10 Enterprise - Ubuntu 16.04 Desktop - Guest OS: @@ -21,9 +21,9 @@ The following configuration has been tested with the 1.09 SDK release - Install guest extensions (virtualbox menu devices / Insert guest additions CD image) - Setup USB filters - Install NCSDK with 'make install' ([Installation Instructions](install.md)) -- Insert NCS device to USB port -- Install examples with 'make examples' if doesn’t work re-insert key and try again +- Insert the Intel® Movidius™ NCS device into a USB port +- Install examples with 'make examples'; if it doesn’t work, re-insert the key and try again ## Notes -- During operation applications will need 2s delay between close and re-openign NCS device -- VM RAM needs to be 2GB or caffe compile will likely fail +- During operation, applications will need a 2s delay between closing and re-opening the NCS device +- VM RAM needs to be 2 GB, or the Caffe compile will likely fail diff --git a/docs/c_api/mvncAllocateGraph.md b/docs/c_api/mvncAllocateGraph.md index a563e5b..ae90548 100644 --- a/docs/c_api/mvncAllocateGraph.md +++ b/docs/c_api/mvncAllocateGraph.md @@ -9,7 +9,7 @@ Version|1.0 See also|[mvncOpenDevice](mvncOpenDevice.md), [mvncDeallocateGraph](mvncDeallocateGraph.md) ##
Overview -This function allocates a graph on the specified device and creates a handle to the graph which can be passed to other API functions such as mvncLoadTensor() and mvncGetResult(). When the caller is done with the graph the mvncDeallocateGraph() function should be called to free the resources associated with the graph. +This function allocates a graph on the specified device and creates a handle to the graph that can be passed to other API functions such as mvncLoadTensor() and mvncGetResult(). When the caller is done with the graph, the mvncDeallocateGraph() function should be called to free the resources associated with the graph. ## Prototype @@ -20,9 +20,9 @@ mvncStatus mvncAllocateGraph(void *deviceHandle, void **graphHandle, const void Name|Type|Description ----|----|----------- -deviceHandle|void \*|The deviceHandle pointer to the opaque device datatype (which was created via mvncOpenDevice()) on which the graph should be allocated. -graphHandle|void\*\*|Address of a pointer that will be set to point to an opaque graph datatype. Upon successful return this graphHandle can be passed to other API funtions. -graphFile|const void\* | Pointer to a buffer that contains the contents of a graph file. The graph file is a compiled neural network file that gets created by the MvNCCompile SDK tool. +deviceHandle|void\*|The deviceHandle pointer to the opaque device datatype (which was created via mvncOpenDevice()) on which the graph should be allocated. +graphHandle|void\*\*|Address of a pointer that will be set to point to an opaque graph datatype. Upon successful return, this graphHandle can be passed to other API functions. +graphFile|const void\*|Pointer to a buffer that contains the contents of a graph file. The graph file is a compiled neural network file that gets created by the mvNCCompile SDK tool. graphFileLength|unsigned int|The number of bytes allocated for the buffer that graphFile points to. ## Return @@ -34,7 +34,7 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md) ## Example ```C -// graph file name +// Graph file name #define GRAPH_FILE_NAME "graphfile" int main(int argc, char** argv) @@ -42,7 +42,7 @@ int main(int argc, char** argv) void* deviceHandle; // - // Assume NCS device opened here and deviceHandle is valid now + // Assume NCS device opened here and deviceHandle is valid now. // // Now read in a graph file so graphFileBuf will point to the @@ -51,16 +51,16 @@ int main(int argc, char** argv) unsigned int graphFileLen; void* graphFileBuf = LoadFile(GRAPH_FILE_NAME, &graphFileLen); - // allocate the graph + // Allocate the graph void* graphHandle; retCode = mvncAllocateGraph(deviceHandle, &graphHandle, graphFileBuf, graphFileLen); if (retCode != MVNC_OK) - { // error allocating graph + { // Error allocating graph printf("Could not allocate graph for file: %s\n", GRAPH_FILE_NAME); } else - { // successfully allocated graph. Now graphHandle is ready to go. - // use graphHandle for other API calls and call mvncDeallocateGraph + { // Successfully allocated graph. Now graphHandle is ready to go. + // Use graphHandle for other API calls and call mvncDeallocateGraph + // when done with it.
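+ // For example: mvncDeallocateGraph(graphHandle); graphHandle = NULL;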
printf("Successfully allocated graph for %s\n", GRAPH_FILE_NAME); diff --git a/docs/c_api/mvncCloseDevice.md b/docs/c_api/mvncCloseDevice.md index e8e56ac..eb70d8f 100644 --- a/docs/c_api/mvncCloseDevice.md +++ b/docs/c_api/mvncCloseDevice.md @@ -20,7 +20,7 @@ mvncStatus mvncCloseDevice(void *deviceHandle); Name|Type|Description ----|----|----------- -deviceHandle|void*|Pointer to the opaque NCS Device structure that was allocated and returned from the mvncOpenDevice function. +deviceHandle|void*|Pointer to the opaque NCS device structure that was allocated and returned from the mvncOpenDevice function. ## Return This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration. @@ -36,7 +36,7 @@ extern "C" { #include } -// somewhat arbitrary buffer size for the device name +// Somewhat arbitrary buffer size for the device name. #define NAME_SIZE 100 int main(int argc, char** argv) { @@ -50,10 +50,10 @@ int main(int argc, char** argv) exit(-1); } - // Try to open the NCS device via the device name + // Try to open the NCS device via the device name. retCode = mvncOpenDevice(devName, &deviceHandle); if (retCode != MVNC_OK) - { // failed to open the device. + { // Failed to open the device. printf("Could not open NCS device\n"); exit(-1); } @@ -62,7 +62,7 @@ int main(int argc, char** argv) // Pass it to other NC API calls as needed and close it when finished. printf("Successfully opened NCS device!\n"); - // Close the device previously opened by mvncOpenDevice() + // Close the device previously opened by mvncOpenDevice(). retCode = mvncCloseDevice(deviceHandle); } ``` diff --git a/docs/c_api/mvncDeallocateGraph.md b/docs/c_api/mvncDeallocateGraph.md index ea5ec94..6d96e6c 100644 --- a/docs/c_api/mvncDeallocateGraph.md +++ b/docs/c_api/mvncDeallocateGraph.md @@ -9,7 +9,7 @@ Version|1.0 See also|[mvncOpenDevice](mvncOpenDevice.md), [mvncAllocateGraph](mvncAllocateGraph.md) ## Overview -This function deallocates a graph that was previously allocated with mvncAllocateGraph(). After successful return from this function the passed graphHandle will be invalid and should not be used. +This function deallocates a graph that was previously allocated with mvncAllocateGraph(). After successful return from this function, the passed graphHandle will be invalid and should not be used. ## Prototype @@ -26,7 +26,7 @@ graphHandle|void\*\*|Pointer to opaque graph data type that was initialized with This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration. ## Known Issues -Using a deallocated graph handle can lead to hard to find bugs. To prevent this it's good practice to set the handle to NULL (or nullptr for C++ 11) after deallocating as shown in this code snippet. +Using a deallocated graph handle can lead to hard-to-find bugs. To prevent this, it's good practice to set the handle to NULL (or nullptr for C++ 11) after deallocating, as shown in this code snippet: ```C++ mvncDeallocateGraph(graphHandle); graphHandle = NULL; @@ -34,7 +34,7 @@ graphHandle = NULL; ## Example ```C -// graph file name +// Graph file name #define GRAPH_FILE_NAME "graphfile" int main(int argc, char** argv) @@ -48,19 +48,19 @@ int main(int argc, char** argv) // // Assume NCS device opened here and deviceHandle is valid now // and the graph file is in graphFileBuf and length in bytes - // is in graphFileLen + // is in graphFileLen. 
    //
-    // allocate the graph
+    // Allocate the graph
    void* graphHandle;
    retCode = mvncAllocateGraph(deviceHandle, &graphHandle, graphFileBuf, graphFileLen);
    if (retCode != MVNC_OK)
-    {   // error allocating graph
+    {   // Error allocating graph
        printf("Could not allocate graph for file: %s\n", GRAPH_FILE_NAME);
    }
    else
-    {   // successfully allocated graph. Now graphHandle is ready to go.
-        // use graphHandle for other API calls and call mvncDeallocateGraph
+    {   // Successfully allocated graph. Now graphHandle is ready to go.
+        // Use graphHandle for other API calls, and call mvncDeallocateGraph
        // when done with it.
        printf("Successfully allocated graph for %s\n", GRAPH_FILE_NAME);
diff --git a/docs/c_api/mvncDeviceOptions.md b/docs/c_api/mvncDeviceOptions.md
index ead35f1..c706abe 100644
--- a/docs/c_api/mvncDeviceOptions.md
+++ b/docs/c_api/mvncDeviceOptions.md
@@ -8,8 +8,8 @@ See also|[mvncGetDeviceOption](mvncGetDeviceOption.md), [mvncSetDeviceOption](mv
## Overview
-This enumeration is used to specify an option on the NCS device that can be written or read via mvncGetDeviceOption() and mvncSetDeviceOption(). The table below provides details on the meaning of each of the values in the enumeration.
+This enumeration is used to specify an option on the Intel® Movidius™ NCS device that can be written or read via mvncGetDeviceOption() and mvncSetDeviceOption(). The table below provides details on the meaning of each of the values in the enumeration.
-constant | Option Type | Possible Values | get/set | Description
+Constant | Option Type | Possible Values | Get/Set | Description
-------- | ------------| --------------- | ------- | -----------
-MVNC_THERMAL_THROTTLING_LEVEL|int|1, or 2|get|Returns 1 if lower guard temperature threshold of chip sensor is reached. This indicates short throttling time is in action between inferences to protect the device. Returns 2 if upper guard temperature of chip sensor is reached. This indicates long throttling time is in action between inferences to protect the device.
+MVNC_THERMAL_THROTTLING_LEVEL|int|1, 2|get|1: if lower guard temperature threshold of chip sensor is reached. This indicates short throttling time is in action between inferences to protect the device.<br>2: if upper guard temperature of chip sensor is reached. This indicates long throttling time is in action between inferences to protect the device.
diff --git a/docs/c_api/mvncGetDeviceName.md b/docs/c_api/mvncGetDeviceName.md
index 81bf1e5..77f0099 100644
--- a/docs/c_api/mvncGetDeviceName.md
+++ b/docs/c_api/mvncGetDeviceName.md
@@ -9,7 +9,7 @@ Version|1.0
See also|[mvncOpenDevice](mvncOpenDevice.md)
## Overview
-This function is used to get the name of a particular NCS device. Typical usage is to call the function repeatedly starting with index = 0 and incrementing the index each time until an error is returned. These successive calls will give you the names of all the devices in the system.
+This function is used to get the name of a particular Intel® Movidius™ NCS device. Typical usage is to call the function repeatedly, starting with index = 0, and incrementing the index each time until an error is returned. These successive calls will give you the names of all the devices in the system.
## Prototype
@@ -20,9 +20,9 @@ mvncStatus mvncGetDeviceName(int index, char* name, unsigned int nameSize);
Name|Type|Description
----|----|-----------
-index|int|index of the device for which the name should be retrieved.
-name|char\*|pointer to a character buffer into which the name will be copied. This buffer should be allocated by the caller.
-nameSize|unsigned int| the number of characters allocated to the buffer pointed to by the name parameter.
+index|int|Index of the device for which the name should be retrieved.
+name|char\*|Pointer to a character buffer into which the name will be copied. This buffer should be allocated by the caller.
+nameSize|unsigned int|The number of characters allocated to the buffer pointed to by the name parameter.
## Return
This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration.
@@ -30,7 +30,7 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md)
## Known Issues
## Example
-The following example shows how to get the name of all NCS devices attached to the system. mvncGetDeviceName is called repeatedly until it returns MVNC_DEVICE_NOT_FOUND.
+The following example shows how to get the name of all Intel Movidius NCS devices attached to the system. mvncGetDeviceName is called repeatedly until it returns MVNC_DEVICE_NOT_FOUND.
```C++
#include <stdio.h>
@@ -53,7 +53,7 @@ int main(int argc, char** argv)
    printf("Total number of NCS devices found: %d\n", deviceCount);
}
```
-Output from the example code above with two NCS devices in the system.
+Output from the example code above with two Intel Movidius NCS devices in the system.
```
Found NCS device named: "2.3"
diff --git a/docs/c_api/mvncGetDeviceOption.md b/docs/c_api/mvncGetDeviceOption.md
index ce96992..d9b2693 100644
--- a/docs/c_api/mvncGetDeviceOption.md
+++ b/docs/c_api/mvncGetDeviceOption.md
@@ -9,7 +9,7 @@ Version|1.0
See also|[mvncOpenDevice](mvncOpenDevice.md), [mvncDeviceOptions](mvncDeviceOptions.md), [mvncSetDeviceOption](mvncSetDeviceOption.md)
## Overview
-This function gets the current value of an option for an NCS device. The available options and their data types can be found in the [DeviceOptions](mvncDeviceOptions.md) enumeration documentation.
+This function gets the current value of an option for an Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS) device. The available options and their data types can be found in the [DeviceOptions](mvncDeviceOptions.md) enumeration documentation.
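Since the Example section of this page is still listed as TBD below, here is a minimal, unofficial sketch of typical usage. It assumes deviceHandle is a valid handle obtained from mvncOpenDevice(), and it reads the MVNC_THERMAL_THROTTLING_LEVEL option documented in mvncDeviceOptions:
```C
// Sketch only; assumes deviceHandle came from a successful mvncOpenDevice().
int throttlingLevel = 0;
unsigned int dataLength = sizeof(throttlingLevel);
mvncStatus retCode = mvncGetDeviceOption(deviceHandle,
                                         MVNC_THERMAL_THROTTLING_LEVEL,
                                         &throttlingLevel, &dataLength);
if (retCode == MVNC_OK && dataLength == sizeof(int))
{   // dataLength now holds the number of bytes copied into throttlingLevel.
    printf("Thermal throttling level: %d\n", throttlingLevel);
}
```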
## Prototype
@@ -22,8 +22,8 @@
Name|Type|Description
----|----|-----------
deviceHandle|void\*|Pointer to opaque device data type that was initialized with the mvncOpenDevice() function. This specifies which NCS device's option will be retrieved.
option|int|A value from the DeviceOptions enumeration that specifies which option will be retrieved.
-data|void\*|Pointer to a buffer where the value of the option will be copied. The type of data this points to will depend on the option that is specified. Check mvncDeviceOptions for the data types that each option requires.
-dataLength|unsigned int \*| Pointer to an unsigned int which must point to the size, in bytes, of the buffer allocated to the data parameter when called. Upon successfull return it will be set to the number of bytes copied to the data buffer.
+data|void\*|Pointer to a buffer where the value of the option will be copied. The type of data this points to will depend on the option that is specified. Check mvncDeviceOptions for the data types that each option requires.
+dataLength|unsigned int\*|Pointer to an unsigned int which must point to the size, in bytes, of the buffer allocated to the data parameter when called. Upon successful return, it will be set to the number of bytes copied to the data buffer.
## Return
This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration.
@@ -32,5 +32,5 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md)
## Example
```C
-TODO
+TBD
```
diff --git a/docs/c_api/mvncGetGlobalOption.md b/docs/c_api/mvncGetGlobalOption.md
index 529fcaf..b2e94d7 100644
--- a/docs/c_api/mvncGetGlobalOption.md
+++ b/docs/c_api/mvncGetGlobalOption.md
@@ -10,7 +10,7 @@ Version|1.09
See also|[mvncGlobalOptions](mvncGlobalOptions.md), [mvncSetGlobalOption](mvncSetGlobalOption.md)
## Overview
-This function gets the current value of an option that is global to the SDK. The available options and their data types can be found in the [mvncGlobalOptions](mvncGlobalOptions.md) enumeration documentation.
+This function gets the current value of an option that is global to the SDK. The available options and their data types can be found in the [mvncGlobalOptions](mvncGlobalOptions.md) enumeration documentation.
## Prototype
@@ -22,8 +22,8 @@ mvncStatus mvncGetGlobalOption(int option, void *data, unsigned int *datalength)
Name|Type|Description
----|----|-----------
option|int|A value from the GlobalOptions enumeration that specifies which option will be retrieved.
-data|void\*|Pointer to a buffer where the value of the option will be copied. The type of data this points to will depend on the option that is specified. Check mvncGlobalOptions for the data types that each option requires.
-dataLength|unsigned int \*| Pointer to an unsigned int which must point to the size, in bytes, of the buffer allocated to the data parameter when called. Upon successfull return it will be set to the number of bytes copied to the data buffer.
+data|void\*|Pointer to a buffer where the value of the option will be copied. The type of data this points to will depend on the option that is specified. Check mvncGlobalOptions for the data types that each option requires.
+dataLength|unsigned int\*|Pointer to an unsigned int which must point to the size, in bytes, of the buffer allocated to the data parameter when called. Upon successful return, it will be set to the number of bytes copied to the data buffer.
## Return
This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration.
diff --git a/docs/c_api/mvncGetGraphOption.md b/docs/c_api/mvncGetGraphOption.md
index 7d499f2..7ac7a26 100644
--- a/docs/c_api/mvncGetGraphOption.md
+++ b/docs/c_api/mvncGetGraphOption.md
@@ -9,7 +9,7 @@ Version|1.0
See also|[mvncAllocateGraph](mvncAllocateGraph.md), [mvncGraphOptions](mvncGraphOptions.md), [mvncSetGraphOption](mvncSetGraphOption.md)
## Overview
-This function gets the current value of an option for a graph. The available options can be found in the GraphOptions enumeration.
+This function gets the current value of an option for a graph. The available options can be found in the GraphOptions enumeration.
## Prototype
@@ -20,10 +20,10 @@ mvncStatus mvncGetGraphOption(void *graphHandle, int option, void **data, unsign
Name|Type|Description
----|----|-----------
-graphHandle|void\*|Pointer to opaque graph data type that was initialized with the mvncAllocateGraph() function which represents the neural network. This specifies which graph's option value will be retrieved.
+graphHandle|void\*|Pointer to opaque graph data type that was initialized with the mvncAllocateGraph() function, which represents the neural network. This specifies which graph's option value will be retrieved.
option|int|A value from the GraphOptions enumeration that specifies which option will be retrieved.
-data|void\*|Pointer to a buffer where the value of the option will be copied. The type of data this points to will depend on the option that is specified. Check mvncGraphOptions for the data types that each option requires.
-dataLength|unsigned int \*| Pointer to an unsigned int which must point to the size, in bytes, of the buffer allocated to the data parameter when called. Upon successfull return it will be set to the number of bytes copied to the data buffer.
+data|void\*|Pointer to a buffer where the value of the option will be copied. The type of data this points to will depend on the option that is specified. Check mvncGraphOptions for the data types that each option requires.
+dataLength|unsigned int\*|Pointer to an unsigned int, which must point to the size, in bytes, of the buffer allocated to the data parameter when called. Upon successful return, it will be set to the number of bytes copied to the data buffer.
## Return
This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration.
@@ -35,10 +35,10 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md)
.
.
.
-    // open device to get device handle,
-    // allocate the graph to get graph handle
+    // Open device to get device handle,
+    // allocate the graph to get graph handle.
-    // set the graph option for blocking calls
+    // Get the graph option for blocking calls.
    int dontBlockValue;
    unsigned int sizeOfValue;
    retCode = mvncGetGraphOption(graphHandle, MVNC_DONTBLOCK, (void**)(&dontBlockValue), &sizeOfValue);
@@ -52,7 +52,7 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md)
        printf("Error returned from mvncGetGraphOption: %d\n", retCode);
    }
-    // use graph, deallocate graph, close device etc.
+    // Use graph, deallocate graph, close device, etc.
.
.
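The example above elides the device and graph setup. As a hedged, unofficial sketch (not from the original docs), a set-then-get round trip of the same option might look like the following, assuming graphHandle came from a successful mvncAllocateGraph():
```C
// Sketch only; the MVNC_DONTBLOCK constant is used as in the example above.
int dontBlockValue = 1;
mvncStatus retCode = mvncSetGraphOption(graphHandle, MVNC_DONTBLOCK,
                                        &dontBlockValue, sizeof(int));
if (retCode == MVNC_OK)
{
    int readBack = 0;
    unsigned int sizeOfValue = sizeof(readBack);
    retCode = mvncGetGraphOption(graphHandle, MVNC_DONTBLOCK,
                                 (void**)(&readBack), &sizeOfValue);
    if (retCode == MVNC_OK && sizeOfValue == sizeof(int))
    {   // sizeOfValue reports the number of bytes copied back.
        printf("MVNC_DONTBLOCK is now %d\n", readBack);
    }
}
```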
diff --git a/docs/c_api/mvncGetResult.md b/docs/c_api/mvncGetResult.md
index 30e07ee..84520ef 100644
--- a/docs/c_api/mvncGetResult.md
+++ b/docs/c_api/mvncGetResult.md
@@ -3,7 +3,7 @@
Type|Function
------------ | -------------
Header|mvnc.h
-Library| libmvnc.so
+Library|libmvnc.so
Return|[mvncStatus](mvncStatus.md)
Version|1.0
See also|[mvncOpenDevice](mvncOpenDevice.md), [mvncAllocateGraph](mvncAllocateGraph.md), [mvncLoadTensor](mvncLoadTensor.md)
@@ -21,9 +21,9 @@ mvncStatus mvncGetResult(void *graphHandle, void **outputData, unsigned int *out
Name|Type|Description
----|----|-----------
graphHandle|void\*|Pointer to opaque graph data type that was initialized with the mvncAllocateGraph() function that represents the neural network for which an inference was initiated.
-outputData|void\*\*|Address of the pointer that will be set to a buffer of 16 bit floats which contain the result of the inference. The buffer will contain one 16 bit float for each network category, the values of which are the results of the output node. Typically these values are the probabilities that an image belongs to the category.
+outputData|void\*\*|Address of the pointer that will be set to a buffer of 16-bit floats, which contain the result of the inference. The buffer will contain one 16-bit float for each network category, the values of which are the results of the output node. Typically these values are the probabilities that an image belongs to the category.
outputDataLength|unsigned int\*|Pointer to an unsigned int that will be set to the number of bytes in the outputData buffer.
-userParam|void \*\*| Address of a pointer that will be set to the user parameter for this inference. This corresponds to the userParam that was passed to the LoadTensor() function which initiated the inference.
+userParam|void\*\*|Address of a pointer that will be set to the user parameter for this inference. This corresponds to the userParam that was passed to the LoadTensor() function, which initiated the inference.
## Return
This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration.
@@ -36,8 +36,8 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md)
.
.
-// use a 16 bit unsigned type to represent half precision floats since C++ has no
-// built in support for 16 but floats.
+// Use a 16-bit unsigned type to represent half precision floats since C++ has no
+// built-in support for 16-bit floats.
typedef unsigned short half;
int main(int argc, char** argv)
{
@@ -46,23 +46,23 @@ int main(int argc, char** argv)
.
.
    //
-    // Open NCS device and set deviceHandle to the valid handle
+    // Open NCS device and set deviceHandle to the valid handle.
    //
    //
-    // Read graph from disk and call mvncAllocateGraph to set graphHandle appropriately
+    // Read graph from disk and call mvncAllocateGraph to set graphHandle appropriately.
    //
    //
    // Load an image.png from disk and preprocess it to match network
-    // requirements so that imageBufFp16 list to 16 bit floats
+    // requirements so that imageBufFp16 points to a list of 16-bit floats.
    //
-    // start the inference with call to mvncLoadTensor()
+    // Start the inference with a call to mvncLoadTensor().
    retCode = mvncLoadTensor(graphHandle, imageBufFp16, lenBufFp16, NULL);
    if (retCode == MVNC_OK)
-    {   // the inference has been started, now call mvncGetResult() for the
-        // inference result
+    {   // The inference has been started; now call mvncGetResult() for the
+        // inference result.
printf("Successfully loaded the tensor for image %s\n", "image.png"); void* resultData16; @@ -70,7 +70,7 @@ int main(int argc, char** argv) unsigned int lenResultData; retCode = mvncGetResult(graphHandle, &resultData16, &lenResultData, &userParam); if (retCode == MVNC_OK) - { // Successfully got the result. The inference result is in the buffer pointed to by resultData + { // Successfully got the result. The inference result is in the buffer pointed to by resultData. printf("Successfully got the inference result for image %s\n", IMAGE_FILE_NAME); printf("resultData is %d bytes which is %d 16-bit floats.\n", lenResultData, lenResultData/(int)sizeof(half)); @@ -97,8 +97,8 @@ int main(int argc, char** argv) } // - // call mvncDeallocateGraph to free the resources tied to graphHandle - // close the device with mvncCloseDevice() + // Call mvncDeallocateGraph to free the resources tied to graphHandle. + // Close the device with mvncCloseDevice(). // } diff --git a/docs/c_api/mvncGlobalOptions.md b/docs/c_api/mvncGlobalOptions.md index a3255ab..f918d9a 100644 --- a/docs/c_api/mvncGlobalOptions.md +++ b/docs/c_api/mvncGlobalOptions.md @@ -10,6 +10,6 @@ See also|[mvncGetGlobalOption](mvncGetGlobalOption.md), [mvncSetGlobalOption](mv This enumeration is used to specify a global option that can be written or read via mvncGetDeviceOption() and mvncSetDeviceOption(). The table below provides details on the meaning of each of the values in the enumeration. -constant | Option Type | Possible Values | get/set | Description +Constant | Option Type | Possible Values | Get/Set | Description -------- | ------------| --------------- | ------- | ----------- -MVNC_LOGLEVEL | int | 0, 1, 2 |get, set|The logging level for application Value meanings are: 0 = log nothing (default), 1 = log errors only, 2 = log all, verbose logging. +MVNC_LOGLEVEL | int | 0, 1, 2 |get, set|The logging level for application Value meanings are:
diff --git a/docs/c_api/mvncGraphOptions.md b/docs/c_api/mvncGraphOptions.md
index d2bdfc8..889dab5 100644
--- a/docs/c_api/mvncGraphOptions.md
+++ b/docs/c_api/mvncGraphOptions.md
@@ -9,10 +9,10 @@ See also|[mvncGetGraphOption](mvncGetGraphOption.md), [mvncSetGraphOption](mvncS
## Overview
-This enumeration is used to specify an option on the a graph that can be written or read via mvncGetGraphOption() and mvncSetGraphOption(). The table below provides details on the meaning of each of the values in the enumeration.
+This enumeration is used to specify an option on the graph that can be written or read via mvncGetGraphOption() and mvncSetGraphOption(). The table below provides details on the meaning of each of the values in the enumeration.
-constant | Option Type | Possible Values | get/set | Description
+Constant | Option Type | Possible Values | Get/Set | Description
-------- | ------------| --------------- | ------- | -----------
-MVNC_DONT_BLOCK| int |0, 1|get, set|0: Calls to mvncLoadTensor and mvncGetResult block, 1: calls to those functions don't block.
+MVNC_DONT_BLOCK| int |0, 1|get, set|0: Calls to mvncLoadTensor and mvncGetResult will block (won't return until the action is completed) (Default).<br>1: Calls to those functions don't block (they will return immediately). If the action couldn't be completed, the return value will indicate why: mvncLoadTensor() will return MVNC_BUSY when the NCS can't perform the action because it is busy (try again later), and mvncGetResult() will return MVNC_NO_DATA unless an inference result is ready to be returned (try again later; once an inference has completed, its results will be returned).
MVNC_TIME_TAKEN| float\* | any | get |Time taken for the last inference returned by mvncGetResult.
MVNC_DEBUG_INFO| char\* | any | get | A string that provides more details when the result of a function call was MVNC_MYRIAD_ERROR.
diff --git a/docs/c_api/mvncLoadTensor.md b/docs/c_api/mvncLoadTensor.md
index ecb1543..a735dfc 100644
--- a/docs/c_api/mvncLoadTensor.md
+++ b/docs/c_api/mvncLoadTensor.md
@@ -9,7 +9,7 @@ Version|1.0
See also|[mvncOpenDevice](mvncOpenDevice.md), [mvncAllocateGraph](mvncAllocateGraph.md), [mvncGetResult](mvncGetResult.md)
## Overview
-This function initiates an inference on the specified graph via the associated NCS device. After calling this function use the mvncGetResult() function to retrieve the inference result.
+This function initiates an inference on the specified graph via the associated Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS) device. After calling this function, use the mvncGetResult() function to retrieve the inference result.
## Prototype
@@ -21,9 +21,9 @@ mvncStatus mvncLoadTensor(void *graphHandle, const void *inputTensor, unsigned i
Name|Type|Description
----|----|-----------
graphHandle|void\*|Pointer to opaque graph data type that was initialized with the mvncAllocateGraph() function that represents the neural network for which an inference will be initiated.
-inputTensor|const void\*|Pointer to tensor data buffer which contains 16 bit half precision floats (per IEEE 754 half precision binary floating-point format: binary16). The values in the buffer are dependent on the neural network (graph) but are typically representations of each color channel of each pixel of an image.
+inputTensor|const void\*|Pointer to tensor data buffer, which contains 16-bit half precision floats (per IEEE 754 half precision binary floating-point format: binary16). The values in the buffer are dependent on the neural network (graph), but are typically representations of each color channel of each pixel of an image.
inputTensorLength|unsigned int|The length, in bytes, of the buffer pointed to by the inputTensor parameter.
-userParam|void \*| Pointer to user data that will be returned along with the inference result from the GetResult() function.
+userParam|void\*|Pointer to user data that will be returned along with the inference result from the GetResult() function.
## Return
This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration.
@@ -36,11 +36,11 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md)
.
.
-// use a 16 bit unsigned type to represent half precision floats since C++ has no
-// built in support for 16 but floats.
+// Use a 16-bit unsigned type to represent half precision floats, since C++ has no
+// built-in support for 16-bit floats.
typedef unsigned short half;
-// GoogleNet image dimensions and network mean values for each channel. This information is specific
+// GoogleNet image dimensions and network mean values for each channel. This information is specific
// for each network, and usually available from network creators.
const int networkDim = 224;
float networkMean[] = {0.40787054*255.0, 0.45752458*255.0, 0.48109378*255.0};
@@ -51,39 +51,39 @@ int main(int argc, char** argv)
.
.
    //
-    // Open NCS device and set deviceHandle to the valid handle
+    // Open NCS device and set deviceHandle to the valid handle.
    //
    //
-    // Read graph from disk and call mvncAllocateGraph to set graphHandle appropriately
+    // Read graph from disk and call mvncAllocateGraph to set graphHandle appropriately.
    //
    //
-    // Load an image from disk
-    // LoadImage will read image from disk, convert channels to floats
-    // subtract network mean for each value in each channel. Then convert
+    // Load an image from disk.
+    // LoadImage will read image from disk, convert channels to floats.
+    // Subtract network mean for each value in each channel. Then convert
    // floats to half precision floats.
-    // return pointer to the buffer of half precision floats
+    // Return pointer to the buffer of half precision floats.
    half* imageBufFp16 = LoadImage("image.png", networkDim, networkMean);
-    // calculate the length of the buffer that contains the half precision floats.
-    // 3 channels * width * height * sizeof a 16bit float
+    // Calculate the length of the buffer that contains the half precision floats.
+    // 3 channels * width * height * sizeof a 16-bit float
    unsigned int lenBufFp16 = 3*networkDim*networkDim*sizeof(*imageBufFp16);
-    // start the inference with mvncLoadTensor()
+    // Start the inference with mvncLoadTensor().
    retCode = mvncLoadTensor(graphHandle, imageBufFp16, lenBufFp16, NULL);
    if (retCode == MVNC_OK)
-    {   // the inference has been started, now call mvncGetResult() for the
-        // inference result
+    {   // The inference has been started; now call mvncGetResult() for the
+        // inference result.
        printf("Successfully loaded the tensor for image %s\n", "image.png");
-        // here mvncGetResult() can be called to get the result of the inference
-        // that was started with mvncLoadTensor() above
+        // Here mvncGetResult() can be called to get the result of the inference
+        // that was started with mvncLoadTensor() above.
    }
    //
-    // call mvncDeallocateGraph to free the resources tied to graphHandle
-    // close the device with mvncCloseDevice()
+    // Call mvncDeallocateGraph to free the resources tied to graphHandle.
+    // Close the device with mvncCloseDevice().
    //
}
diff --git a/docs/c_api/mvncOpenDevice.md b/docs/c_api/mvncOpenDevice.md
index 048253a..eadb29f 100644
--- a/docs/c_api/mvncOpenDevice.md
+++ b/docs/c_api/mvncOpenDevice.md
@@ -9,7 +9,7 @@ Version|1.0
See also|[mvncCloseDevice](mvncCloseDevice.md), [mvncGetDeviceName](mvncGetDeviceName.md) [mvncGetDeviceOption](mvncGetDeviceOption.md), [mvncSetDeviceOption](mvncSetDeviceOption.md)
## Overview
-This function is used to initialize the NCS device and return a device handle that can be passed to other API functions.
+This function is used to initialize the Intel® Movidius™ NCS device and return a device handle that can be passed to other API functions.
## Prototype
@@ -22,7 +22,7 @@ mvncStatus mvncOpenDevice(const char *name, void **deviceHandle);
Name|Type|Description
----|----|------------
name|const char\*|Pointer to a constant array of chars that contains the name of the device to open. This value is obtained from mvncGetDeviceName.
-deviceHandle|void \*\*|Address of a pointer that will be set to point to an opaque structure representing an NCS device.
+deviceHandle|void\*\*|Address of a pointer that will be set to point to an opaque structure representing an NCS device.
## Return
This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration.
@@ -30,7 +30,7 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md)
## Known Issues
## Example
-In the example below the code gets the name of the first device and then calls mvncOpenDevice to open the device and set the deviceHandle variable for use to other API calls that expect a device handle for an open device.
+In the example below, the code gets the name of the first device and then calls mvncOpenDevice to open the device and set the deviceHandle variable for use with other API calls that expect a device handle for an open device.
```C++
#include <stdio.h>
#include <stdlib.h>
extern "C" {
#include <mvnc.h>
}
-// somewhat arbitrary buffer size for the device name
+// Somewhat arbitrary buffer size for the device name.
#define NAME_SIZE 100
int main(int argc, char** argv)
{
@@ -48,21 +48,21 @@ int main(int argc, char** argv)
    char devName[NAME_SIZE];
    retCode = mvncGetDeviceName(0, devName, NAME_SIZE);
    if (retCode != MVNC_OK)
-    {   // failed to get device name, maybe none plugged in.
+    {   // Failed to get the device name; maybe none are plugged in.
        printf("No NCS devices found\n");
        exit(-1);
    }
-    // Try to open the NCS device via the device name
+    // Try to open the NCS device via the device name.
    retCode = mvncOpenDevice(devName, &deviceHandle);
    if (retCode != MVNC_OK)
-    {   // failed to open the device.
+    {   // Failed to open the device.
        printf("Could not open NCS device\n");
        exit(-1);
    }
    // deviceHandle is ready to use now.
-    // Pass it to other NC API calls as needed and close it when finished.
+    // Pass it to other NC API calls as needed, and close it when finished.
    printf("Successfully opened NCS device!\n");
    retCode = mvncCloseDevice(deviceHandle);
diff --git a/docs/c_api/mvncSetDeviceOption.md b/docs/c_api/mvncSetDeviceOption.md
index 56801d7..ff2eed8 100644
--- a/docs/c_api/mvncSetDeviceOption.md
+++ b/docs/c_api/mvncSetDeviceOption.md
@@ -9,7 +9,7 @@ Version|1.0
See also|[mvncOpenDevice](mvncOpenDevice.md), [mvncDeviceOptions](mvncDeviceOptions.md), [mvncGetDeviceOption](mvncGetDeviceOption.md)
## Overview
-This function sets an option for a specific NCS device. The available options can be found in the [DeviceOptions](mvncDeviceOptions.md) enumeration.
+This function sets an option for a specific Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS) device. The available options can be found in the [DeviceOptions](mvncDeviceOptions.md) enumeration.
## Prototype
@@ -20,10 +20,10 @@ mvncStatus mvncSetDeviceOption(void *deviceHandle, int option, const void *data,
Name|Type|Description
----|----|-----------
-deviceHandle|void\*|Pointer to opaque device data type that was initialized with the mvncOpenDevice() function. This specifies which device's option will be set.
+deviceHandle|void\*|Pointer to opaque device data type that was initialized with the mvncOpenDevice() function. This specifies which device's option will be set.
option|int|A value from the DeviceOptions enumeration that specifies which option will be set.
-data|const void\*|Pointer to the data for the new value for the option. The type of data this points to depends on the option that is being set. Check mvncDeviceOptions for the data types that each option requires.
-dataLength|unsigned int| An unsigned int that contains the length, in bytes, of the buffer that the data parameter points to. +data|const void\*|Pointer to the data for the new value for the option. The type of data this points to depends on the option that is being set. Check mvncDeviceOptions for the data types that each option requires. +dataLength|unsigned int|An unsigned int that contains the length, in bytes, of the buffer that the data parameter points to. ## Return This function returns an appropriate value from the [mvncStatus](mvncStatus.md) enumeration. diff --git a/docs/c_api/mvncSetGraphOption.md b/docs/c_api/mvncSetGraphOption.md index e03b75b..caf782c 100644 --- a/docs/c_api/mvncSetGraphOption.md +++ b/docs/c_api/mvncSetGraphOption.md @@ -35,23 +35,23 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md) . . . - // open NCS device to initialize deviceHandle. - // read compiled graph file into graphFileBuf and put length of it in graphFileLen + // Open NCS device to initialize deviceHandle. + // Read compiled graph file into graphFileBuf and put length of it in graphFileLen - // allocate the graph + // Allocate the graph void* graphHandle; retCode = mvncAllocateGraph(deviceHandle, &graphHandle, graphFileBuf, graphFileLen); if (retCode != MVNC_OK) - { // error allocating graph + { // Error allocating graph printf("Could not allocate graph for file: %s\n", GRAPH_FILE_NAME); } else - { // successfully allocated graph. Now graphHandle is ready to go. - // use graphHandle for other API calls and call mvncDeallocateGraph + { // Successfully allocated graph. Now graphHandle is ready to go. + // Use graphHandle for other API calls and call mvncDeallocateGraph // when done with it. printf("Successfully allocated graph for %s\n", GRAPH_FILE_NAME); - // set the graph option for blocking calls + // Set the graph option for blocking calls int dontBlockValue = 0; retCode = mvncSetGraphOption(graphHandle, MVNC_DONTBLOCK, &dontBlockValue, sizeof(int)); if (retCode == MVNC_OK) @@ -64,8 +64,8 @@ This function returns an appropriate value from the [mvncStatus](mvncStatus.md) printf("Error returned from mvncSetGraphOption: %d\n", retCode); } - // use graphHandle here with the option set above. - // then deallocate the graph and close the device + // Use graphHandle here with the option set above. + // Then deallocate the graph and close the device. } . . diff --git a/docs/c_api/mvncStatus.md b/docs/c_api/mvncStatus.md index 3b528c4..1102c79 100644 --- a/docs/c_api/mvncStatus.md +++ b/docs/c_api/mvncStatus.md @@ -22,4 +22,4 @@ MVNC_MVCMD_NOT_FOUND | The file named MvNCAPI.mvcmd should be installed in the m MVNC_NO_DATA | No data to return. MVNC_GONE | The graph or device has been closed during the operation. MVNC_UNSUPPORTED_GRAPH_FILE | The graph file may have been created with an incompatible prior version of the Toolkit. Try to recompile the graph file with the version of the Toolkit that corresponds to the API version. -MVNC_MYRIAD_ERROR | An error has been reported by Movidius™ VPU. Use MVNC_DEBUGINFO to get more information. +MVNC_MYRIAD_ERROR | An error has been reported by Intel® Movidius™ VPU. Use MVNC_DEBUGINFO to get more information. 
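Every API function reports its outcome through this enumeration, so applications typically branch on the returned value. The following is an unofficial sketch of one such dispatch; the constant names come from the table above and from the option documentation, while the surrounding logic is illustrative only:
```C
// Sketch only: reacting to common mvncStatus values after an API call.
switch (retCode)
{
    case MVNC_OK:
        break;  // Success; continue normally.
    case MVNC_BUSY:
        // Non-blocking mode: the device can't take the request yet; retry later.
        break;
    case MVNC_NO_DATA:
        // Non-blocking mode: no inference result is ready yet; retry later.
        break;
    case MVNC_UNSUPPORTED_GRAPH_FILE:
        // Recompile the graph with the Toolkit version matching this API.
        break;
    case MVNC_MYRIAD_ERROR:
        // Query the MVNC_DEBUG_INFO graph option for details.
        break;
    default:
        printf("Unhandled mvncStatus: %d\n", retCode);
        break;
}
```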
diff --git a/docs/c_api/readme.md b/docs/c_api/readme.md
index 342c927..0d8d5b3 100644
--- a/docs/c_api/readme.md
+++ b/docs/c_api/readme.md
@@ -1,25 +1,25 @@
-# Movidius™ Neural Compute SDK C API
+# Intel® Movidius™ Neural Compute SDK C API
-The SDK comes with a C Language API that enables developers to create applications in C or C++ which utilize hardware-accelerated Deep Neural Networks via the Movidius™ Neural Compute Stick (NCS.) The C API is provided as a header file (mvnc.h) and an associated library file (libmvnc.so) both of which are placed on the development computer when the SDK is installed. Details of the C API are provided below and within the documentation linked from here.
+The SDK comes with a C language API that enables developers to create applications in C or C++ that utilize hardware-accelerated Deep Neural Networks via the Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS). The C API is provided as a header file (mvnc.h) and an associated library file (libmvnc.so), both of which are placed on the development computer when the SDK is installed. Details of the C API are provided below and within the documentation available via the link in each line.
## Enumerations
- [mvncStatus](mvncStatus.md): Contains possible return values for API functions.
-- [mvncDeviceOptions](mvncDeviceOptions.md): Contains all possible options to get/set for an NCS device, and their data types.
+- [mvncDeviceOptions](mvncDeviceOptions.md): Contains all possible options to get/set for an Intel Movidius NCS device, and their data types.
- [mvncGraphOptions](mvncGraphOptions.md): Contains all possible options to get/set for a graph, and their data types.
-- [mvncGlobalOptions](mvncGlobalOptions.md): Contains all possible global options to get/set and their data types.
+- [mvncGlobalOptions](mvncGlobalOptions.md): Contains all possible global options to get/set, and their data types.
## Functions
-- [mvncGetDeviceName](mvncGetDeviceName.md): Retrieves the name of an NCS device that can be opened.
-- [mvncOpenDevice](mvncOpenDevice.md): Opens an NCS device for use by the application.
-- [mvncAllocateGraph](mvncAllocateGraph.md): Allocates a graph for a specific NCS device in preparation for computing inferences.
+- [mvncGetDeviceName](mvncGetDeviceName.md): Retrieves the name of an Intel Movidius NCS device that can be opened.
+- [mvncOpenDevice](mvncOpenDevice.md): Opens an Intel Movidius NCS device for use by the application.
+- [mvncAllocateGraph](mvncAllocateGraph.md): Allocates a graph for a specific Intel Movidius NCS device in preparation for computing inferences.
- [mvncDeallocateGraph](mvncDeallocateGraph.md): Deallocates and frees resources associated with a graph.
- [mvncLoadTensor](mvncLoadTensor.md): Initiates an inference by providing input to the neural network.
- [mvncGetResult](mvncGetResult.md): Retrieves the result of an inference that was previously initiated.
- [mvncSetGraphOption](mvncSetGraphOption.md): Sets an option for a graph.
-- [mvncGetGraphOption](mvncGetGraphOption.md): Retrieves an the current value of an option for a graph.
-- [mvncSetDeviceOption](mvncSetDeviceOption.md): Sets an option for an NCS device.
-- [mvncGetDeviceOption](mvncGetDeviceOption.md): Retrieves the current value of an option for an NCS device.
-- [mvncSetGlobalOption](mvncSetGlobalOption.md): Sets a global option for an application
- [mvncGetGlobalOption](mvncGetGlobalOption.md): Retrieves the current value of a global option for an application.
-- [mvncCloseDevice](mvncCloseDevice.md): Closes a previously opened NCS device.
+- [mvncGetGraphOption](mvncGetGraphOption.md): Retrieves the current value of an option for a graph.
+- [mvncSetDeviceOption](mvncSetDeviceOption.md): Sets an option for an Intel Movidius NCS device.
+- [mvncGetDeviceOption](mvncGetDeviceOption.md): Retrieves the current value of an option for an Intel Movidius NCS device.
+- [mvncSetGlobalOption](mvncSetGlobalOption.md): Sets a global option for an application.
- [mvncGetGlobalOption](mvncGetGlobalOption.md): Retrieves the current value of a global option for an application.
+- [mvncCloseDevice](mvncCloseDevice.md): Closes a previously opened Intel Movidius NCS device.
diff --git a/docs/configure_network.md b/docs/configure_network.md
index c57f455..f0069c6 100644
--- a/docs/configure_network.md
+++ b/docs/configure_network.md
@@ -1,27 +1,24 @@
-# Configuring your network for NCS
-This guide will help you get all of the configuration information correct when creating your network for the Movidius Neural Compute Stick. All of these parameters are critical, if you don't get them right, your network won't give you the accuracy that was achieved by the team that trained the model. The configuration parameters include:
-* mean subtraction
-* scale
-* color channel configuration
-* class prediction
-* input image size
+# Configuring Your Network for Intel® Movidius™ NCS
+This guide will help you get all of the configuration information correct when creating your network for the Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS). All of these parameters are critical. If you don't get them right, your network won't give you the accuracy that was achieved by the team that trained the model. The configuration parameters are as follows:
+* Mean subtraction
+* Scale
+* Color channel configuration
+* Class prediction
+* Input image size
-**let's go through these one at a time**
+**Let's go through these one at a time.**
## Mean Subtraction
-mean substraction on the input data to a CNN is a common technique. The mean is calculated on the data set. For example on Imagenet the mean is calculated on a per channel basis to be:
+Mean subtraction on the input data to a convolutional neural network (CNN) is a common technique. The mean is calculated on the data set. For example, the mean on ImageNet is calculated on a per channel basis to be:
```
104, 117, 123
-these numbers are in BGR orientation
+These numbers are in BGR orientation.
```
### Caffe Specific Examples
-this mean calculation can be calculated with a tool that comes with caffe:
-[compute_image_mean.cpp](https://github.com/BVLC/caffe/blob/master/tools/compute_image_mean.cpp),
-and Caffe provides a script to do it as well:
-[make_imagenet_mean.sh](https://github.com/BVLC/caffe/blob/master/examples/imagenet/make_imagenet_mean.sh)
+This mean can be calculated with a tool that comes with Caffe ([compute_image_mean.cpp](https://github.com/BVLC/caffe/blob/master/tools/compute_image_mean.cpp)). Caffe provides a script to do it, as well ([make_imagenet_mean.sh](https://github.com/BVLC/caffe/blob/master/examples/imagenet/make_imagenet_mean.sh)).
---
-this will create an output file often called mean_binary.proto. You can see an example of this in the training prototxt file for AlexNet
+This will create an output file often called mean_binary.proto.
You can see an example of this in the training prototxt file for AlexNet:
[train_val.prototxt](https://github.com/BVLC/caffe/blob/master/models/bvlc_alexnet/train_val.prototxt)
```
@@ -32,7 +29,7 @@
}
```
---
-in the GoogLeNet prototxt file they have just put the values directly:
+In the GoogLeNet prototxt file, they have put in the values directly:
[train_val.prototxt](https://github.com/BVLC/caffe/blob/master/models/bvlc_googlenet/train_val.prototxt)
```
@@ -46,7 +43,7 @@
}
```
---
-some models don't use mean subtraction, see below for LeNet as an example. There is no mean in the transform_param, but there is a scale which we'll get to later
+Some models don't use mean subtraction. See the LeNet example below. There is no mean in the transform_param, but there is a scale that we'll get to later:
[lenet_train_test.prototxt](https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet_train_test.prototxt)
```
@@ -54,27 +51,28 @@
    scale: 0.00390625
}
```
-### TensorFlow specific examples
-TensorFlow documentation of mean is not as straight forward as Caffe. The TensorFlow Slim models for image classification are a great place to get high quality pre-trained models:
+### TensorFlow™ Specific Examples
+TensorFlow™ documentation of mean is not as straightforward as Caffe's. The TensorFlow Slim models for image classification are a great place to get high quality pre-trained models:
[slim models](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models)
-I could find this the following file the mean (and scale) for both Inception V3 and MobileNet V1
+The following file gives the mean (and scale) for both Inception V3 and MobileNet V1:
[retrain script](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py#L872)
```
input_mean = 128
```
-in the case of the InceptionV3 model there is not a per color channel mean, the same mean is used for all channels. This mean should apply to all of the Inception and MobileNet models, but other models there might be different.
-for example, the VGG16 model just had the weights converted from Caffe. If we look at the link to the VGG16 for Caffe page, we see the means are done like the other Caffe models:
+In the case of the Inception V3 model, there is not a per color channel mean. The same mean is used for all channels. This mean should apply to all of the Inception and MobileNet models, but other models might be different.
+
+For example, the VGG16 model had the weights converted from Caffe. If we look at the link to the VGG16 for Caffe page, we see the means are done like the other Caffe models:
```
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#description
-the following BGR values should be subtracted: [103.939, 116.779, 123.68]
+The following BGR values should be subtracted: [103.939, 116.779, 123.68]
```
## Scale
-typical 8 bit per pixel per channel images will have a scale of 0-255. Many CNN networks use the native scale, but some don't. As was seen in a snippet of the Caffe prototxt file, the **transform_param** would show whether there was a scale. In the example of LeNet for Caffe, you can see it has a scale pameter of **0.00390625**
+Typical 8-bit per pixel per channel images will have a scale of 0-255.
Many CNN networks use the native scale, but some don't. As was seen in a snippet of the Caffe prototxt file, the **transform_param** would show whether there was a scale. In the example of LeNet for Caffe, you can see it has a scale parameter of **0.00390625**.
[lenet_train_test.prototxt](https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet_train_test.prototxt)
```
@@ -82,36 +80,37 @@ typical 8 bit per pixel per channel images will have a scale of 0-255. Many CNN
    scale: 0.00390625
}
```
- this may seem like a strange number, but it is actually just 1/256. So the input 8 bit image is being scaled down to an image from 0-1 instead of 0-255
+This may seem like a strange number, but it is actually just 1/256. The input 8-bit image is being scaled down to an image from 0-1 instead of 0-255.
---
-Back to the example of TensorFlow for Inception V3. Below the **input_mean** the **input_std** is also listed. All this is is a scaling factor. You divide 255/128 and it's about 2. So in this case, the scale is two, but the mean subtraction is 128. So in the end the scale is actually -1 to 1
+Regarding the example of TensorFlow for Inception V3, the **input_mean** and the **input_std** are listed below. This is a scaling factor. You divide 255/128, and it's about 2. In this case, the scale is two, but the mean subtraction is 128. In the end, the scale is actually -1 to 1.
[retrain script](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py#L872)
```
input_mean = 128
input_std = 128
```
-## Color Channel configuration
-different models may be trained with different color channel orientations (either RGB or BGR). Typically Caffe models seem to be trained with BGR whereas the Slim TensorFlow models (at least Inception and MobileNet) are trained in RGB.
+## Color Channel Configuration
+Different models may be trained with different color channel orientations (either RGB or BGR). Typically, Caffe models seem to be trained with BGR, whereas the Slim TensorFlow models (at least Inception and MobileNet) are trained in RGB.
-Once you figure out the color channel orientation for your model, you will need to know the way the image is loaded. For example opencv will open images in BGR but skimiage will open the image in RGB.
+Once you figure out the color channel orientation for your model, you will need to know the way the image is loaded. For example, OpenCV will open images in BGR, but skimage will open the image in RGB.
```
-skimage.io.imread will open the image in RGB
-cv2.imread will open the image in BGR
-Caffe trained models will probably be BGR
-TensorFlow trained models will probably be in RGB
+* skimage.io.imread will open the image in RGB
+* cv2.imread will open the image in BGR
+* Caffe trained models will probably be BGR
+* TensorFlow trained models will probably be in RGB
```
## Categories
-for models that are trained on the Imagenet database, some have 1000 output classes, and some have 1001 output classes. The extra output class is a background class. The list below has the list of the 1000 classes not including the background.
+For models that are trained on the ImageNet database, some have 1000 output classes and some have 1001 output classes. The extra output class is a background class.
The following file lists the 1000 classes, not including the background:
[synset_words.txt](https://github.com/HoldenCaulfieldRye/caffe/blob/master/data/ilsvrc12/synset_words.txt)
-Most Caffe trained models seem to follow the 1000 class convention, and TensorFlow trained models follow the 1001 class convention. So for the TensorFlow models, an offset needs to be added. You can see this as documented in the TensorFlow github [here](https://github.com/tensorflow/models/tree/master/research/slim#the-resnet-and-vgg-models-have-1000-classes-but-the-imagenet-dataset-has-1001)
-# Putting it all together
-now with all of these factors, let's go through two examples
+Most Caffe trained models seem to follow the 1000 class convention, and TensorFlow trained models follow the 1001 class convention. For the TensorFlow models, an offset needs to be added. You can see this documented in the [TensorFlow GitHub](https://github.com/tensorflow/models/tree/master/research/slim#the-resnet-and-vgg-models-have-1000-classes-but-the-imagenet-dataset-has-1001).
+
+# Putting It All Together
+Now with all of these factors, let's go through two examples.
## Caffe Example
-let's use the Berkeley Caffe GoogLeNet model as an example. the basic model parameters are:
+Let's use the Berkeley Caffe GoogLeNet model as an example. The basic model parameters are:
```
Scale: 0-255 (before mean subtraction)
Mean: based on mean_binary.proto file
@@ -120,9 +119,9 @@
output categories: 1000
input size: 224x224
labels_offset=0
```
-code snippet:
+Code snippet:
```
-#load the label files
+#Load the label files
labels_offset=0 # no background class offset
labels_file='./synset_words.txt'
labels=numpy.loadtxt(labels_file,str,delimiter='\t')
@@ -135,7 +134,7 @@ iterations = graph.GetGraphOption(mvnc.GraphOption.ITERATIONS)
img = cv2.imread('./dog.jpg') # using OpenCV for reading the image, it will be in BGR
img=cv2.resize(img,(224,224)) # resize to 224x224
img-=[104,117,124] # subtract mean
-#run, get the result and print results per the synset_words.txt
+#Run, get the result and print results per the synset_words.txt
graph.LoadTensor(img.astype(numpy.float16), 'user object')
output, userobj = graph.GetResult()
order = output.argsort()[::-1][:6]
@@ -144,8 +143,8 @@ for i in range(0,5):
    print ('prediction ' + str(i) + ' is ' + labels[order[i]-labels_offset])
-## TensorFlow example
-let's use the TensorFlow Slim Inception V3
+## TensorFlow Example
+Let's use the TensorFlow Slim Inception V3:
```
Scale: -1 to 1 (after mean subtraction)
Mean: 128
@@ -154,9 +153,9 @@
output categories: 1001
input size: 299x299
labels_offset=1
```
-code snippet:
+Code snippet:
```
-#load the label files
+#Load the label files
labels_offset=1 # background class offset of 1
labels_file='./synset_words.txt'
labels=numpy.loadtxt(labels_file,str,delimiter='\t')
@@ -166,13 +165,13 @@ with open('./inceptionv3.blob', mode='rb') as f:
graph = device.AllocateGraph(blob)
graph.SetGraphOption(mvnc.GraphOption.ITERATIONS, 1)
iterations = graph.GetGraphOption(mvnc.GraphOption.ITERATIONS)
-#import the image and do the proper scaling
+#Import the image and do the proper scaling
img = cv2.imread('./dog.jpg').astype(numpy.float32) # using OpenCV for reading the image, it will be in BGR
img=cv2.resize(img,(299,299)) # resize to 299x299
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB) # need to convert to RGB
img-=[128,128,128] # subtract mean
img /=128. # scale the image
-#run, get the result and print results per the synset_words.txt
+#Run, get the result and print results per the synset_words.txt
graph.LoadTensor(img.astype(numpy.float16), 'user object')
output, userobj = graph.GetResult()
order = output.argsort()[::-1][:6]
@@ -181,4 +180,4 @@ for i in range(0,5):
    print ('prediction ' + str(i) + ' is ' + labels[order[i]-labels_offset])
```
-feedback or comments? let me know darren.s.crews@intel.com
+Feedback or comments? Let me know at darren.s.crews@intel.com.
diff --git a/docs/install.md b/docs/install.md
index 8b1bdc7..e5f105d 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -1,56 +1,58 @@
# Installation and Configuration
-This page provides installation and configuration information needed to use the NCS and the examples provided in this repository. To use the NCS you will need to have the Movidius™ Neural Compute SDK installed on your development computer. The SDK installation provides an option to install the examples in this repostitory. If you've already installed the SDK on your development computer you may have selected the option to also install these examples. If you have not already installed the SDK you should follow the instructions in the Example Installation with SDK section in this page, and when prompted select the option to install the examples.
+This page provides installation and configuration information needed to use the Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS) and the examples provided in this repository. To use the Intel Movidius NCS, you will need to have the Intel Movidius Neural Compute SDK installed on your development computer. The SDK installation provides an option to install the examples in this repository. If you've already installed the SDK on your development computer, you may have selected the option to also install these examples. If you have not already installed the SDK, you should follow the instructions in the Installation of SDK and Examples section on this page. When prompted, select the option to install the examples.
## Prerequisites
-To build and run the examples in this repository you will need to have the following.
-- Movidius™ Neural Compute Stick (NCS)
-- Movidius™ Neural Compute SDK
-- Development Computer with Supported OS
+To build and run the examples in this repository, you will need to have the following:
+- Intel Movidius Neural Compute Stick
+- Intel Movidius Neural Compute SDK
+- Development computer with supported OS
  - x86-64 with Ubuntu (64 bit) 16.04 Desktop
  - Raspberry Pi 3 with Raspbian Stretch (starting with SDK 1.09.xx)
    - See [Upgrade Raspbian Jessie to Stretch](https://linuxconfig.org/how-to-upgrade-debian-8-jessie-to-debian-9-stretch)
  - Virtual Machine per the [supported VM configuration](VirtualMachineConfig.md)
-- Internet Connection.
-- USB Camera (optional)
+- Internet connection
+- USB camera (optional)
-## Connecting the NCS to a development computer
-The NCS connects to the development computer over a USB 2.0 High Speed interface. Plug the NCS directly to a USB port on your development computer or into a powered USB hub that is plugged into your development computer.
+## Connecting the Intel Movidius NCS to a Development Computer
+The Intel Movidius NCS connects to the development computer over a USB 2.0 High Speed interface. Plug the Intel Movidius NCS directly to a USB port on your development computer or into a powered USB hub that is plugged into your development computer.
![](images/ncs_plugged.jpg)
-## Installation SDK and examples
-To install the SDK along with the examples in this repository use the following command on your development computer. This is the typical installation. If you haven't already installed the SDK on your development computer you should use this command to install.
+## Installation of SDK and Examples
+To install the SDK along with the examples in this repository, use the following command on your development computer. This is the typical installation. If your development machine already has Caffe installed, see the note below. If you haven't already installed the SDK on your development computer, you should use this command to install:
```
git clone http://github.com/Movidius/ncsdk && cd ncsdk && make install && make examples
```
+Note: If you are installing on a machine that already has Caffe installed and its directory is already in the PYTHONPATH environment variable, you will need to manually remove the existing directory from PYTHONPATH prior to installing the NCSDK. Also, you will need to manually adjust PYTHONPATH to match your development needs, so that it points to the Caffe version installed with the NCSDK when using the NCSDK, and to other Caffe versions when using those.
+Note: The installation will only set the PYTHONPATH environment variable for the current user. It will do so by modifying the .bashrc file for that user. To use the SDK as other users on the machine, you will need to manually set the PYTHONPATH for those other users.
+
-## Installation of examples without SDK
-To install only the examples and not the SDK on your development computer use the following command to clone the repository and then make appropriate examples for your development computer. If you already have the SDK installed and only need the examples on your machine you should use this command to install.
+## Installation of Examples without SDK
+To install only the examples and not the SDK on your development computer, use the following command to clone the repository and then make appropriate examples for your development computer. If you already have the SDK installed and only need the examples on your machine, you should use this command to install the examples:
```
git clone http://github.com/Movidius/ncsdk && cd ncsdk && make examples
```
## Building Individual Examples
-Whether installing with the SDK or without it, both methods above will install and build the examples that are appropriate for your development system including prerequisite software. Each example comes with its own Makefile that will install only that specific example and any prerequisites that it requires. To install and build any individual example run the 'make' command from within that example's base directory. For example to build the GoogLeNet examples type the following command.
+Whether installing with the SDK or without it, both methods above will install and build the examples that are appropriate for your development system, including prerequisite software. Each example comes with its own Makefile that will install only that specific example and any prerequisites that it requires. To install and build any individual example, run the 'make' command from within that example's base directory. For example, to build the GoogLeNet examples, type the following command:
```
cd examples/Caffe/GoogLeNet && make
```
-The Makefile for each example also has a 'help' target which will display all possible targets.
To see all possible targets for any example use the following command from within the examples top directory. +The Makefile for each example also has a 'help' target that will display all possible targets. To see all possible targets for any example, use the following command from within the examples top directory: ``` make help ``` ## Uninstallation -To uninstall the SDK type the following command. +To uninstall the SDK, type the following command: ``` make uninstall ``` - ## Installation Manifest -For the list of files that 'make install' will modify on your system (outside of the repository) see the [installation manifest](manifest.md). +For the list of files that 'make install' will modify on your system (outside of the repository), see the [installation manifest](manifest.md). diff --git a/docs/manifest.md b/docs/manifest.md index fbf251a..85c33d2 100644 --- a/docs/manifest.md +++ b/docs/manifest.md @@ -1,5 +1,5 @@ -# NC SDK installation manifest +# NC SDK Installation Manifest These are the files and directories installed outside of repository directory as part of the NC SDK. - /usr/local/include/mvnc.h diff --git a/docs/ncs1arch.md b/docs/ncs1arch.md index 60e4339..c91b3eb 100644 --- a/docs/ncs1arch.md +++ b/docs/ncs1arch.md @@ -1,18 +1,18 @@ # Introduction -The following explains how the Neural Compute SDK works on compiling and executing a given Caffe or TensorFlow™ Neural Network on the Neural Compute Stick. +The Neural Compute SDK works on compiling and executing a given Caffe or TensorFlow™ Neural Network on the Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS). Read on for a detailed explanation. # Architecture Details -The following diagram shows the inner workings of the Neural Compute Stick. The Neural Compute Stick primarily contains Movidius™ Myriad 2 VPU (Vision Processing Unit), and some power delivery voltage regulators. The Myriad 2 VPU includes 4 Gbit of LPDDR3 DRAM and its architecture includes specific imaging and vision accelerators and an array of 12 VLIW vector processors called SHAVE processors, used to accelerate neural networks by running parts of the neural networks in parallel for achieving the highest performance. The Neural Compute Stick is connected to an Application Processor (AP) such as a Raspberry Pi or Up Squared board using the USB interface on the Myriad 2 VPU. The USB3 interface can be used both in Super Speed (5Gbps) or High Speed (480Mbps) modes. +The following diagram shows the inner workings of the Intel Movidius NCS. The Intel Movidius NCS primarily contains the Intel® Movidius™ Myriad™ 2 vision processing unit (VPU) and some power delivery voltage regulators. The Intel Movidius Myriad 2 VPU includes 4 Gbit of LPDDR3 DRAM, and its architecture includes specific imaging and vision accelerators and an array of 12 VLIW vector processors called SHAVE processors. These processors are used to accelerate neural networks by running parts of the neural networks in parallel for achieving the highest performance. The Intel Movidius NCS is connected to an application processor (AP), such as a Raspberry Pi or UP Squared board, using the USB interface on the Intel Movidius Myriad 2 VPU. The USB3 interface can be used both in Super Speed (5 Gbps) or High Speed (480 Mbps) modes. -The CPU in the Myriad 2 VPU is a SPARC microprocessor core that runs custom firmware. When the Neural Compute Stick is first plugged in there is no firmware loaded onto it. 
The Myriad 2 VPU boots from the internal ROM and connects to the host computer(application processor) as a USB2 device. +The CPU in the Intel Movidius Myriad 2 VPU is a SPARC microprocessor core that runs custom firmware. When the Intel Movidius Neural Compute Stick is first plugged in, there is no firmware loaded onto it. The Intel Movidius Myriad 2 VPU boots from the internal ROM and connects to the host computer (application processor) as a USB 2.0 device. -Applications executing on the host computer (AP) communicate to the Myriad SOC using the Neural Compute API. When the API initializes and opens a device, the firmware from the Neural Compute SDK is loaded onto the Neural Compute Stick. At this time, the Neural Compute Stick resets and now shows up to the host computer as a USB2 or USB3 device depending on the host type. It is now ready to accept the neural network graph files and commands to execute inferences on the graph files. +Applications executing on the host computer (AP) communicate with the Intel Movidius Myriad VPU SOC using the Neural Compute API. When the API initializes and opens a device, the firmware from the Neural Compute SDK is loaded onto the Intel Movidius Neural Compute Stick. At this time, the Intel Movidius NCS resets and now shows up to the host computer as a USB 2.0 or USB 3.0 device depending on the host type. It is now ready to accept the neural network graph files and commands to execute inferences on the graph files. ![](images/NCS1_ArchDiagram.jpg) -A graph file is loaded into the DRAM attached to the Myriad-2 VPU via the API. The Leon processor coordinates receiving the graph file and images for inference via the USB connection. It also parses the graph file and schedules kernels to the SHAVE neural compute accelerator engines. In addition, the Leon processor also takes care of monitoring die temperature and throttling processing on high temperature alerts. Statistics and the output of the neural network are sent back to the host computer via the USB connection and they are received by a host application via the API. +A graph file is loaded into the DRAM attached to the Intel Movidius Myriad 2 VPU via the API. The LEON processor coordinates receiving the graph file and images for inference via the USB connection. It also parses the graph file and schedules kernels to the SHAVE neural compute accelerator engines. In addition, the LEON processor takes care of monitoring die temperature and throttling processing on high temperature alerts. Statistics and the output of the neural network are sent back to the host computer via the USB connection, and they are received by a host application via the API. -In addition to the API, the SDK provides the tools mvNCCompile, mvNCCheck, and mvNCProfile that run on the host computer during application and neural network development. The checker and profiler tools run an inference on the Neural Compute Stick to validate against Caffe/TensorFlow™ and generate per layer statistics respectively. +In addition to the API, the NCSDK provides the tools mvNCCompile, mvNCCheck, and mvNCProfile that run on the host computer during application and neural network development. The checker and profiler tools run an inference on the Intel Movidius Neural Compute Stick to validate against Caffe/TensorFlow and to generate per-layer statistics, respectively. 
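To make the host-side flow described above concrete, here is a minimal Python sketch that follows the Neural Compute API pages included later in this change. The graph file name ('mnist_inference.graph'), the 28x28 input size, and the random input data are illustrative assumptions only, and GetResult() is assumed to return the inference output together with the user object that was passed to LoadTensor():

```python
import numpy
import mvnc.mvncapi as ncs

# Find an attached NCS; opening the device loads the NCSDK firmware onto it.
deviceNames = ncs.EnumerateDevices()
if len(deviceNames) == 0:
    print("Error - No devices detected.")
    quit()
device = ncs.Device(deviceNames[0])
device.OpenDevice()

# Allocate a graph file that was previously created with mvNCCompile.
graph = device.AllocateGraph('mnist_inference.graph')

# Input tensors are NumPy ndarrays of half-precision floats (float16).
inputTensor = numpy.random.rand(28, 28).astype(numpy.float16)
graph.LoadTensor(inputTensor, 'user object')
output, userObject = graph.GetResult()
print('Top-1 class: %d' % output.argmax())

# Free the graph resources and reset the device when finished.
graph.DeallocateGraph()
device.CloseDevice()
```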
diff --git a/docs/py_api/Device.AllocateGraph.md b/docs/py_api/Device.AllocateGraph.md index 45f59a6..779eca8 100644 --- a/docs/py_api/Device.AllocateGraph.md +++ b/docs/py_api/Device.AllocateGraph.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also|Graph
Graph.DeallocateGraph
| +|See also|[Graph](Graph.md)
[Graph.DeallocateGraph()](Graph.DeallocateGraph.md)
| ## Overview -This function is used to create an instance of a Graph that represents a neural network which can be used to infer results via methods Graph.LoadTensor() and Graph.GetResult(). +This function is used to create an instance of a Graph that represents a neural network, which can be used to infer results via methods Graph.LoadTensor() and Graph.GetResult(). ## Syntax @@ -20,13 +20,13 @@ dev.AllocateGraph(graphPath) |Parameter | Description | |---------------|---------------| -|graphPath | A string that is the path to the graph file. The graph file must have been created with the NC SDK graph compiler.| +|graphPath | A string that is the path to the graph file. The graph file must have been created with the NC SDK graph compiler.| ## Return Returns an instance of a Graph object that is ready to use. ## Known Issues -After the Graph that is created is no longer needed Graph.DeallocateGraph() must be called to free the graph resources. +When the created Graph is no longer needed, Graph.DeallocateGraph() must be called to free the graph resources. ## Example ```python diff --git a/docs/py_api/Device.CloseDevice.md index f4cf8bf..6c5dc47 100644 --- a/docs/py_api/Device.CloseDevice.md +++ b/docs/py_api/Device.CloseDevice.md @@ -5,7 +5,7 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also|Device.\_\_init\_\_()
Device.OpenDevice()| +|See also|[Device.\_\_init\_\_()](Device.__init__.md)
[Device.OpenDevice()](Device.OpenDevice.md)| ## Overview This function is used to cease communication and reset the device. diff --git a/docs/py_api/Device.GetDeviceOption.md b/docs/py_api/Device.GetDeviceOption.md index a671552..f3850c2 100644 --- a/docs/py_api/Device.GetDeviceOption.md +++ b/docs/py_api/Device.GetDeviceOption.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also |Device.SetDeviceOption
DeviceOption| +|See also |[Device.SetDeviceOption()](Device.SetDeviceOption.md)
[DeviceOption](DeviceOption.md)| ## Overview -This function is used to get an option for the device. The options can be found in the DeviceOption enumeration table. +This function is used to get an option for the device. The options can be found in the [DeviceOption](DeviceOption.md) enumeration table. ## Syntax ```python @@ -22,7 +22,7 @@ GetDeviceOption(option) | option | Member of the DeviceOption enumeration that specifies which option to get.| ## Return -The value for the specified device option. The type of the returned value depends on the option specified. See the DeviceOption enumeration for the type that will be returned for each option. +The value for the specified device option. The type of the returned value depends on the option specified. See the [DeviceOption](DeviceOption.md) enumeration for the type that will be returned for each option. ## Known Issues diff --git a/docs/py_api/Device.OpenDevice.md b/docs/py_api/Device.OpenDevice.md index fe17132..d36befc 100644 --- a/docs/py_api/Device.OpenDevice.md +++ b/docs/py_api/Device.OpenDevice.md @@ -5,7 +5,7 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also|Device.\_\_init\_\_()
Device.CloseDevice()| +|See also|[Device.\_\_init\_\_()](Device.__init__.md)
[Device.CloseDevice()](Device.CloseDevice.md)| ## Overview This function is used to initialize the device. @@ -31,7 +31,7 @@ import mvnc.mvncapi as ncs deviceNames = ncs.EnumerateDevices() firstDevice = ncs.Device(deviceNames[0]) firstDevice.OpenDevice() -# use device +# Use device firstDevice.CloseDevice() ############################################# diff --git a/docs/py_api/Device.SetDeviceOption.md b/docs/py_api/Device.SetDeviceOption.md index 22d7aa6..6200dd9 100644 --- a/docs/py_api/Device.SetDeviceOption.md +++ b/docs/py_api/Device.SetDeviceOption.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also|Device.GetDeviceOption()
DeviceOption| +|See also |[Device.GetDeviceOption()](Device.GetDeviceOption.md)
[DeviceOption](DeviceOption.md)| ## Overview -This function is used to set an option for the device. The options can be found in the DeviceOption enumeration class. +This function is used to set an option for the device. The options can be found in the [DeviceOption](DeviceOption.md) enumeration class. ## Syntax ```python @@ -19,8 +19,8 @@ SetDeviceOption(option, value) |Parameter | Description | |-----------|---------------| -|option | Member of the DeviceOption enumeration class that specifies which device option to set | -|value | The new value to which the device option will be set. The type to pass for this parameter depends on which option is being set. See the DeviceOption enumeration class for the types that correspond to each option.| +|option | Member of the DeviceOption enumeration class that specifies which device option to set. | +|value | The new value to which the device option will be set. The type to pass for this parameter depends on which option is being set. See the [DeviceOption](DeviceOption.md) enumeration class for the types that correspond to each option.| ## Return diff --git a/docs/py_api/Device.__init__.md b/docs/py_api/Device.__init__.md index 4a057b6..e1260ba 100644 --- a/docs/py_api/Device.__init__.md +++ b/docs/py_api/Device.__init__.md @@ -20,7 +20,7 @@ mvnc.Device("device name here") |Parameter | Description | |---------------|---------------| -|deviceName | The name of the device to initialize. This must come from calling mvncapi module function EnumerateDevices()| +|deviceName | The name of the device to initialize. This must come from calling mvncapi module function EnumerateDevices().| ## Return None. @@ -34,7 +34,7 @@ import mvnc.mvncapi as ncs # Enumerate devices deviceNames = ncs.EnumerateDevices() -# create and init a Device instance. +# Create and init a device instance ncsDevice = ncs.Device(deviceNames[0]) # Open device, use device, close device diff --git a/docs/py_api/Device.md b/docs/py_api/Device.md index b84d630..c7f3bca 100644 --- a/docs/py_api/Device.md +++ b/docs/py_api/Device.md @@ -1,14 +1,14 @@ -# Device class +# Device Class -The Device class represents the NCS device. Typically one instance of this class is created for each physical NCS device that is plugged into the system so multiple instances may exist if you have multiple devices attached ot your system. +The Device class represents the Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS) device. Typically one instance of this class is created for each physical NCS device that is plugged into the system, so multiple instances may exist if you have multiple devices attached to your system. -# Usage -To use the Device class you must create and initialize it by name. The valid names to use can be determined by calling the mvncapi module function EnumerateDevices(). Once you have successfully created an instance of this class the typical usage is to call OpenDevice(), AllocateGraph(), use the graph, CloseDevice(). +## Usage +To use the Device class, you must create and initialize it by name. The valid names to use can be determined by calling the mvncapi module function EnumerateDevices(). Once you have successfully created an instance of this class, the typical usage is to call OpenDevice(), AllocateGraph(), use the graph, and CloseDevice(). 
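A minimal sketch of that sequence, assuming at least one stick is attached (error handling and the actual graph work are omitted):

```python
import mvnc.mvncapi as ncs

# One Device instance is created per attached stick; valid names come from
# EnumerateDevices().
for deviceName in ncs.EnumerateDevices():
    device = ncs.Device(deviceName)
    device.OpenDevice()
    # ... AllocateGraph() and use the graph here, then free it ...
    device.CloseDevice()
```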
-# Device methods -## [\_\_init\_\_](Device.__init__.md) -## [OpenDevice](Device.OpenDevice.md) -## [CloseDevice](Device.CloseDevice.md) -## [SetDeviceOption](Device.SetDeviceOption.md) -## [GetDeviceOption](Device.GetDeviceOption.md) -## [AllocateGraph](Device.AllocateGraph.md) +## Device methods +- [\_\_init\_\_](Device.__init__.md) +- [OpenDevice](Device.OpenDevice.md) +- [CloseDevice](Device.CloseDevice.md) +- [SetDeviceOption](Device.SetDeviceOption.md) +- [GetDeviceOption](Device.GetDeviceOption.md) +- [AllocateGraph](Device.AllocateGraph.md) diff --git a/docs/py_api/DeviceOption.md b/docs/py_api/DeviceOption.md index b1dc59b..2d6ec16 100644 --- a/docs/py_api/DeviceOption.md +++ b/docs/py_api/DeviceOption.md @@ -1,11 +1,11 @@ -# DeviceOption enumeration class +# DeviceOption Enumeration Class |Info | Value | |----------|---------------| |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also |Device.SetDeviceOption()
Device.GetDeviceOption()| +|See also |[Device.SetDeviceOption()](Device.SetDeviceOption.md)
[Device.GetDeviceOption()](Device.GetDeviceOption.md)| @@ -13,6 +13,7 @@ The DeviceOption class is an enumeration class that defines the options that are passed to and received from the SetDeviceOption and the GetDeviceOption functions. -Enumerator Values|Description ------------- | ------------- -THERMAL_THROTTLING_LEVEL |Returns 1 if lower guard temperature threshold of chip sensor is reached. This indicates short throttling time is in action between inferences to protect the device. Returns 2 if upper guard temperature of chip sensor is reached. This indicates long throttling time is in action between inferences to protect the device. +enum | option type | possible values|get/set|Description +------------------------ | ----------- | -------------- |-------|----------- +THERMAL_THROTTLING_LEVEL | int | 1, 2 | get |1: The lower guard temperature threshold of the chip sensor has been reached, indicating that a short throttling time is in action between inferences to protect the device.
2: The upper guard temperature threshold of the chip sensor has been reached, indicating that a long throttling time is in action between inferences to protect the device. + diff --git a/docs/py_api/EnumerateDevices.md index ad9d4f5..2f88186 100644 --- a/docs/py_api/EnumerateDevices.md +++ b/docs/py_api/EnumerateDevices.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also | Device | +|See also | [Device](Device.md) | ## Overview -This function is used to get a list of the names of the devices present in the system. Each of the returned names can be used to create an instance of the Device class. +This function is used to get a list of the names of the devices present in the system. Each of the names returned can be used to create an instance of the Device class. ## Syntax @@ -20,7 +20,7 @@ deviceNames = EnumerateDevices() None. ## Return -An array of device names each of which can be used to create a new instance of the Device class. +An array of device names, each of which can be used to create a new instance of the Device class. ## Known Issues @@ -32,7 +32,7 @@ if len(deviceNames) == 0: print("Error - No devices detected.") quit() -# open first NCS device found +# Open first NCS device found device = ncs.Device(deviceNames[0]) # Allocate graph / otherwise use device as needed diff --git a/docs/py_api/GetGlobalOption.md index bd6c852..950cac7 100644 --- a/docs/py_api/GetGlobalOption.md +++ b/docs/py_api/GetGlobalOption.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also | GlobalOption
SetGlobalOption()| +|See also | [GlobalOption](GlobalOption.md)
[SetGlobalOption()](SetGlobalOption.md)| ## Overview -This function is used to get a global option. The available options can be found in the GlobalOption enumeration section. +This function is used to get a Global option. The available options can be found in the [GlobalOption](GlobalOption.md) enumeration section. ## Syntax @@ -23,7 +23,7 @@ value = GetGlobalOption(option) |option |Member of the GlobalOption enumeration that specifies which option to get.| ## Return -The value for the specified option. The type of the returned value depends on the option specified. See the GlobalOption enumeration for the type that will be returned for each option. +The value for the specified option. The type of the returned value depends on the option specified. See the [GlobalOption](GlobalOption.md) enumeration for the type that will be returned for each option. ## Known Issues diff --git a/docs/py_api/GlobalOption.md b/docs/py_api/GlobalOption.md index 3df0d99..54ea6b9 100644 --- a/docs/py_api/GlobalOption.md +++ b/docs/py_api/GlobalOption.md @@ -1,11 +1,11 @@ -# GlobalOption enumeration class +# GlobalOption Enumeration Class |Info | Value | |----------|---------------| |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also | SetGlobalOption()
GetGlobalOption()| +|See also | [SetGlobalOption()](SetGlobalOption.md)
[GetGlobalOption()](GetGlobalOption.md)| @@ -13,8 +13,10 @@ The GlobalOption class is an enumeration class that defines the options that are passed to and received from the SetGlobalOption and the GetGlobalOption functions. -Enumerator Values|Description ------------- | ------------- -LOGLEVEL |0=Nothing is printed.
1=Print errors only.
2=Verbose. +enum | option type | possible values|get/set |Description +-------- | ----------- | -------------- |----------|----------- +LOGLEVEL | int | 0, 1, 2 | get, set | 0 = Nothing is printed (default)
1 = Print errors only
2 = Verbose + + diff --git a/docs/py_api/Graph.DeallocateGraph.md b/docs/py_api/Graph.DeallocateGraph.md index adf6d8f..202e8f2 100644 --- a/docs/py_api/Graph.DeallocateGraph.md +++ b/docs/py_api/Graph.DeallocateGraph.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also|Device.AllocateGraph()| +|See also|[Device.AllocateGraph()](Device.AllocateGraph.md)| ## Overview -This function is used to deallocate a graph that was allocated for an NCS device with the Device.AllocateGraph() method. This should be called for every graph that is created to free resources associated with the graph. +This function is used to deallocate a graph that was allocated for an Intel® Movidius™ NCS device with the Device.AllocateGraph() method. This should be called for every graph that is created to free resources associated with the graph. ## Syntax @@ -33,7 +33,7 @@ if len(deviceNames) == 0: print("Error - No devices detected.") quit() -# open first NCS device found +# Open first NCS device found device = mvnc.Device(devices[0]) # Allocate the graph diff --git a/docs/py_api/Graph.GetGraphOption.md b/docs/py_api/Graph.GetGraphOption.md index c0649d8..1514b63 100644 --- a/docs/py_api/Graph.GetGraphOption.md +++ b/docs/py_api/Graph.GetGraphOption.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also|Graph.SetGraphOption()
GraphOption| +|See also|[Graph.SetGraphOption()](Graph.SetGraphOption.md)
[GraphOption](GraphOption.md)| ## Overview -This function is used to get a graph option. The available options can be found in GraphOption enumeration class. +This function is used to get a graph option. The available options can be found in the [GraphOption](GraphOption.md) enumeration class. ## Syntax ```python @@ -22,7 +22,7 @@ value = graph.GetGraphOption(option) |option | A value from the GraphOption enumeration to specify which option's value should be returned. | ## Return -The value for the specified GraphOption is returned. The type of the returned value depends on the option specified. See the GraphOption class for the value types for each option. +The value for the specified GraphOption is returned. The type of the returned value depends on the option specified. See the [GraphOption](GraphOption.md) class for the value types for each option. ## Known Issues diff --git a/docs/py_api/Graph.GetResult.md index 5ca1755..04eab4a 100644 --- a/docs/py_api/Graph.GetResult.md +++ b/docs/py_api/Graph.GetResult.md @@ -5,7 +5,7 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also |Graph.LoadTensor()| +|See also |[Graph.LoadTensor()](Graph.LoadTensor.md)| ## Overview This function retrieves the result of an inference that was initiated via Graph.LoadTensor() on the specified graph. diff --git a/docs/py_api/Graph.LoadTensor.md index 64e2111..602d9dd 100644 --- a/docs/py_api/Graph.LoadTensor.md +++ b/docs/py_api/Graph.LoadTensor.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also |Graph.GetResult()| +|See also |[Graph.GetResult()](Graph.GetResult.md)| ## Overview -This function initiates an inference on the specified graph via the associated NCS device. After calling this function use the Graph.GetResult() function to retrieve the inference result. +This function initiates an inference on the specified graph via the associated Intel® Movidius™ NCS device. After calling this function, use the Graph.GetResult() function to retrieve the inference result. ## Syntax @@ -19,11 +19,11 @@ graph.LoadTensor(inputTensor, userObject) |Parameter | Description | |---------------|---------------| -|inputTensor | Input data on which an inderence will be run. The data must be passed in a NumPy ndarray of half precision floats (float 16) | | +|inputTensor | Input data on which an inference will be run. The data must be passed in a NumPy ndarray of half precision floats (float 16). | +|userObject | A user-defined parameter that is returned by the GetResult function along with the inference result for this tensor.| ## Return -Returns True if the function works, False if not. When the graph is in non blocking mode (GraphOption.DONTBLOCK) this function will return False if the device is busy. +Returns True if the function works, False if not. When the graph is in non-blocking mode (GraphOption.DONTBLOCK), this function will return False if the device is busy. ## Known Issues diff --git a/docs/py_api/Graph.SetGraphOption.md index 907aeed..91e5a86 100644 --- a/docs/py_api/Graph.SetGraphOption.md +++ b/docs/py_api/Graph.SetGraphOption.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also|Graph.GetGraphOption()
GraphOption| +|See also|[Graph.GetGraphOption()](Graph.GetGraphOption.md)
[GraphOption](GraphOption.md)| ## Overview -This function is used to set a graph option. The available options can be found in the GraphOption enumeration class. +This function is used to set a graph option. The available options can be found in the [GraphOption](GraphOption.md) enumeration class. ## Syntax ```python @@ -20,7 +20,7 @@ graph.SetGraphOption(option, value) Parameter | Description ---------------|--------------- option | Member of the GraphOption enumeration specifying which option's value will be set. -value | The new value to which the specified graph option will be set. See the GraphOption enumeration class for the type of value for each option. +value | The new value to which the specified graph option will be set. See the [GraphOption](GraphOption.md) enumeration class for the type of value for each option. ## Return diff --git a/docs/py_api/Graph.md b/docs/py_api/Graph.md index 8d62029..b9c3b30 100644 --- a/docs/py_api/Graph.md +++ b/docs/py_api/Graph.md @@ -1,13 +1,13 @@ -# Graph class +# Graph Class -The Graph class is a container for a neural network graph file that is associated with a particular NCS device. +The Graph class is a container for a neural network graph file that is associated with a particular Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS) device. -# Usage -To use the Graph class you must create a graph handle by calling AllocateGraph() from the Device class. The location of the graph file will be passed to AllocateGraph() and it will return an instance of the Graph class. Once you have successfully created an instance of this class, the typical usage is to optionally get/set graph options, then call LoadTensor() and GetResult(), to perform inferencing with the graph that was allocated, and finally call DeallocateGraph() when the neural network is no longer needed. +## Usage +To use the Graph class, you must create a graph handle by calling AllocateGraph() from the Device class. The location of the graph file will be passed to AllocateGraph(), and it will return an instance of the Graph class. Once you have successfully created an instance of this class, the typical usage is to optionally get/set graph options, then call LoadTensor() and GetResult() to perform inferencing with the graph that was allocated. Finally, call DeallocateGraph() when the neural network is no longer needed. -# Graph methods -## [DeallocateGraph](Graph.DeallocateGraph.md) -## [SetGraphOption](Graph.SetGraphOption.md) -## [GetGraphOption](Graph.GetGraphOption.md) -## [LoadTensor](Graph.LoadTensor.md) -## [GetResult](Graph.GetResult.md) +## Graph methods +- [DeallocateGraph](Graph.DeallocateGraph.md) +- [SetGraphOption](Graph.SetGraphOption.md) +- [GetGraphOption](Graph.GetGraphOption.md) +- [LoadTensor](Graph.LoadTensor.md) +- [GetResult](Graph.GetResult.md) diff --git a/docs/py_api/GraphOption.md b/docs/py_api/GraphOption.md index 190bcaa..05eee91 100644 --- a/docs/py_api/GraphOption.md +++ b/docs/py_api/GraphOption.md @@ -1,11 +1,11 @@ -# GraphOption enumeration class +# GraphOption Enumeration Class |Info | Value | |----------|---------------| |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also | Graph.SetGraphOption()
Graph.GetGraphOption()| +|See also | [Graph.SetGraphOption()](Graph.SetGraphOption.md)
[Graph.GetGraphOption()](Graph.GetGraphOption.md)| @@ -15,6 +15,6 @@ The GraphOption class is an enumeration class that defines the options that are enum| option type | possible values|get/set|Description --- | ----------- | -------------- |-------|----------- -DONTBLOCK |integer |0 or 1|get/set|0: LoadTensor and GetResult Block
1: LoadTensor returns BUSY instead of blocking. GetResult will return NODATA instead of blocking. +DONTBLOCK |int |0 or 1|get/set|0: Calls to Graph.LoadTensor() and Graph.GetResult() will block, i.e., they won't return until the action is completed (default).
1: Calls to those functions don't block (they return immediately). If the action couldn't be completed, the return value will indicate why: Graph.LoadTensor() will return MVNC_BUSY when the NCS is too busy to perform the action (try again later), and Graph.GetResult() will return MVNC_NO_DATA until a completed inference is ready to be returned (try again later, and the results will be returned once an inference has completed). TIMETAKEN |string |any|get|Return a NumPy float array [numpy.array()] of inference times per layer in float data type. -DEBUGINFO | string |any|get|Return a string with the error text as returned by the device. +DEBUGINFO |string |any|get|Return a string with the error text as returned by the device. diff --git a/docs/py_api/SetGlobalOption.md index abfa611..09959eb 100644 --- a/docs/py_api/SetGlobalOption.md +++ b/docs/py_api/SetGlobalOption.md @@ -5,10 +5,10 @@ |Package | mvnc | |Module | mvncapi | |Version | 1.0 | -|See also | GlobalOption
GetGlobalOption()| +|See also | [GlobalOption](GlobalOption.md)
[GetGlobalOption()](GetGlobalOption.md)| ## Overview -This function is used to set a global option. The available Global options and possible values can be found in the documentation for the GlobalOption enumeration. +This function is used to set a Global option. The available Global options and possible values can be found in the documentation for the [GlobalOption](GlobalOption.md) enumeration. ## Syntax @@ -20,8 +20,8 @@ SetGlobalOption(option, value) |Parameter | Description | |---------------|---------------| -|option|Member of the GlobalOptions enumeration which specifies which option to set.| -|value |The new value to which the option will be set. See the GlobalOption enumeration class for the type of value for each option.| +|option|Member of the GlobalOptions enumeration that specifies which option to set.| +|value |The new value to which the option will be set. See the [GlobalOption](GlobalOption.md) enumeration class for the type of value for each option.| ## Known Issues @@ -29,6 +29,6 @@ SetGlobalOption(option, value) ```Python import mvnc.mvncapi as ncs -# set the global logging level to verbose +# Set the global logging level to verbose ncs.SetGlobalOption(ncs.GlobalOption.LOGLEVEL, 2) ``` diff --git a/docs/py_api/Status.md b/docs/py_api/Status.md index ea0d4a2..8713b49 100644 --- a/docs/py_api/Status.md +++ b/docs/py_api/Status.md @@ -1,4 +1,4 @@ -# Status enumeration class +# Status Enumeration Class |Info | Value | |----------|---------------| @@ -24,6 +24,6 @@ MVNC_TIMEOUT |Timeout in the communication with the device MVNC_MVCMD_NOT_FOUND |The file named MvNCAPI.mvcmd is installed in the mvnc directory. This message means that the file has been moved or installer failed. MVNC_NO_DATA |No data to return. MVNC_GONE |The graph or device has been closed during the operation. -MVNC_UNSUPPORTED_GRAPH_FILE |The graph file is corrupt or may have been created with an incompatible prior version of the NCS toolkit. Try to recompile the graph file with the version of the Toolkit that corresponds to the API version. -MVNC_MYRIAD_ERROR |An error has been reported by the Movidius™ VPU. Use MVNC_DEBUGINFO. +MVNC_UNSUPPORTED_GRAPH_FILE |The graph file is corrupt or may have been created with an incompatible prior version of the NCS toolkit. Try to recompile the graph file with the version of the toolkit that corresponds to the API version. +MVNC_MYRIAD_ERROR |An error has been reported by the Intel® Movidius™ VPU. Use MVNC_DEBUGINFO. diff --git a/docs/py_api/readme.md b/docs/py_api/readme.md index 169dc3c..c0880d2 100644 --- a/docs/py_api/readme.md +++ b/docs/py_api/readme.md @@ -1,28 +1,28 @@ -# Movidius™ Neural Compute SDK Python API +# Intel® Movidius™ Neural Compute SDK Python API -The SDK comes with a Python3 Language API that enables applications that utilize hardware accelerated Deep Neural Networks via the Movidius™ Neural Compute Stick (NCS.) The API is provided as a single python script (mvncapi.py) which is placed on the development computer when the SDK is installed. Details of the Python API are provided below and within the documents linked from here. +The Intel® Movidius™ Neural Compute SDK comes with a Python3 Language API that enables applications that utilize hardware accelerated Deep Neural Networks via the Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS). The API is provided as a single Python script (mvncapi.py), which is placed on the development computer when the SDK is installed. 
Details of the Python API are provided below and within the documents linked from here. ## Class Diagram -The Python class diagram follows. In this diagram it is shown that the entire python api is in the mvnc Python package. Within the mvnc package there is one Python module which is mvncapi. The mvncapi module is defined by the contents of the mvncapi.py file. +The Python class diagram follows. In this diagram, it is shown that the entire Python API is in the mvnc Python package. Within the mvnc package, there is one Python module, which is mvncapi. The mvncapi module is defined by the contents of the mvncapi.py file. ![](../images/python_api_class_diagram.jpg) -In the sections that follow, the details of the elements within the mvncapi module are provided. This includes module level enumerations, functions that are global to the module, and the classes defined in the module. +The details of the elements within the mvncapi module are provided in the lists that follow. These include module level enumerations, functions that are global to the module, and the classes defined in the module. ## Enumerations -### [GlobalOption](GlobalOption.md) -### [DeviceOption](DeviceOption.md) -### [GraphOption](GraphOption.md) -### [Status](Status.md) +- [GlobalOption](GlobalOption.md) +- [DeviceOption](DeviceOption.md) +- [GraphOption](GraphOption.md) +- [Status](Status.md) ## Global Functions -### [EnumerateDevices](EnumerateDevices.md) -### [SetGlobalOption](SetGlobalOption.md) -### [GetGlobalOption](GetGlobalOption.md) +- [EnumerateDevices](EnumerateDevices.md) +- [SetGlobalOption](SetGlobalOption.md) +- [GetGlobalOption](GetGlobalOption.md) ## Classes -### [Device](Device.md) -### [Graph](Graph.md) +- [Device](Device.md) +- [Graph](Graph.md) diff --git a/docs/readme.md b/docs/readme.md new file mode 100644 index 0000000..0ed35b0 --- /dev/null +++ b/docs/readme.md @@ -0,0 +1,104 @@ + + +# Introduction +The Intel® Movidius™ Neural Compute SDK (NCSDK) and Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS) enable rapid prototyping, validation, and deployment of deep neural networks (DNNs). + +The NCS is used in two primary scenarios: +- Profiling, tuning, and compiling a DNN on a development computer (host system) with the tools provided in the Intel Movidius Neural Compute SDK. In this scenario, the host system is typically a desktop or laptop machine running Ubuntu 16.04 desktop (x86, 64 bit), but you can use any supported platform for these steps. + +- Prototyping a user application on a development computer (host system), which accesses the hardware of the Intel Movidius NCS to accelerate DNN inferences via the API provided with the Intel Movidius Neural Compute SDK. In this scenario, the host system can be a developer workstation or any developer system that runs an operating system compatible with the API. + +The following diagram shows the typical workflow for development with the Intel Movidius NCS: +![](images/ncs_workflow.jpg) + +The training phase does not utilize the Intel Movidius NCS hardware or NCSDK, while the subsequent phases of “profiling, tuning, and compiling” and “prototyping” do require the Intel Movidius NCS hardware and the accompanying Intel Movidius Neural Compute SDK. + +The NCSDK contains a set of software tools to compile, profile, and check validity of your DNN as well as an API for both the C and Python programming languages. 
The API is provided to allow users to create software that offloads the neural network computation onto the Intel Movidius Neural Compute Stick. + +For more information on how the Intel Movidius Neural Compute Stick works, see the [architecture](ncs1arch.md) documentation. + + # Frameworks +The Neural Compute SDK currently supports two deep learning frameworks: +1. [Caffe](Caffe.md): Caffe is a deep learning framework from Berkeley Vision Labs. +2. [TensorFlow™](TensorFlow.md): TensorFlow™ is a deep learning framework from Google. + +[See how to use networks from these supported frameworks with Intel Movidius NCS.](configure_network.md) + + + # Installation and Examples +The following commands install the NCSDK and run the examples. Detailed [installation and configuration](install.md) instructions are also available: + +``` +git clone http://github.com/Movidius/ncsdk && cd ncsdk && make install && make examples + +``` + # Intel® Movidius™ Neural Compute SDK Tools +The SDK comes with a set of tools to assist in development and deployment of applications that utilize hardware-accelerated Deep Neural Networks via the Intel Movidius Neural Compute Stick. Each tool and its usage is described below: + +* [mvNCCompile](tools/compile.md): Converts Caffe/TF network and weights to Intel Movidius technology internal compiled format + +* [mvNCProfile](tools/profile.md): Provides layer-by-layer statistics to evaluate the performance of Caffe/TF networks on the NCS + +* [mvNCCheck](tools/check.md): Compares the results from an inference by running the network on the NCS and Caffe/TF + + # Neural Compute API +Applications for inferencing with the Neural Compute SDK can be developed either in C/C++ or Python. The API provides a software interface to Open/Close Neural Compute Sticks, load graphs into the Intel Movidius NCS, and run inferences on the stick. + +* [C API](c_api/readme.md) +* [Python API](py_api/readme.md) + + # Intel® Movidius™ Neural Compute Stick User Forum + +There is an active user forum in which users of the Intel Movidius Neural Compute Stick discuss ideas and issues they have with regard to the Intel Movidius NCS. Access the Intel Movidius NCS User Forum with the following link: + +[https://ncsforum.movidius.com](https://ncsforum.movidius.com) + +The forum is a good place to go if you need help troubleshooting an issue. You may find other people who have figured out the issue, or get ideas for how to fix it. The forum is also monitored by Intel Movidius product engineers, who provide solutions. + + # Examples + +There are several examples, including the following at GitHub: +* Caffe + * GoogLeNet + * AlexNet + * SqueezeNet +* TensorFlow™ + * Inception V1 + * Inception V3 +* Apps + * hello_ncs_py + * hello_ncs_cpp + * multistick_cpp + +The examples demonstrate compiling, profiling, and running inferences using the network on the Intel Movidius Neural Compute Stick. +Each example contains a Makefile. Running 'make help' in the example's base directory will give possible make targets. + +``` + +git clone http://github.com/Movidius/ncsdk # Already done during installation +(cd ncsdk/examples && make) # Run all examples +(cd ncsdk/examples/caffe/GoogLeNet && make) # Run just one example + +``` + + # Neural Compute App Zoo +The Neural Compute App Zoo is a GitHub repository at [http://github.com/Movidius/ncappzoo](http://github.com/Movidius/ncappzoo), which is designed for developers to contribute networks and applications written for the Intel Movidius Neural Compute Stick to the Intel Movidius NCS community. 
+ +See [The Neural Compute App Zoo README](https://github.com/Movidius/ncappzoo/blob/master/README.md) for more information. + + # Troubleshooting and Tech Support +Be sure to check the [NCS Troubleshooting Guide](https://ncsforum.movidius.com/discussion/370/intel-ncs-troubleshooting-help-and-guidelines#latest) if you run into any issues with the NCS or NCSDK. + +Also, for general tech support issues, the [NCS User Forum](https://developer.movidius.com/forums) is recommended and contains community discussions on many issues and resolutions. + + # Release Notes +See the latest [Release Notes](release_notes.md) for detailed information about this release. diff --git a/docs/release_notes.md index 1472bf7..f8987ea 100644 --- a/docs/release_notes.md +++ b/docs/release_notes.md @@ -1,24 +1,18 @@ ============================================================ # Movidius Neural Compute SDK Release Notes -# V1.09.00 2017-10-10 +# V1.12.00 2018-01-10 ============================================================ ## SDK Notes: -SDK has been refactored and contains many new features and structural changes. It is recommended you read the documentation to familiarize with the new features and contents. A partial list of new features: - -1. New, unified, faster installer and uninstaller. -2. Now supports complete SDK installation on Raspberry Pi. -3. System installation of tools and API libraries. -4. API support for Python 2.7. -5. Source code included for API, for porting to other architectures or Linux distributions. -6. Tools support for Raspberry Pi. -7. Tensorflow R1.3 support for tools (only on Ubuntu 16.04 LTS currently). -8. More network support, see documentation for details! -9. Support for SDK on Ubuntu 16.04 LTS as guest OS, and Win10, OSX, and Ubuntu 16.04 as host OS. See docs/VirtualMachineConfig. +### New features: +1. Improved compiler support for custom networks that use variable batch size via TensorFlow. +2. Improved description of how to use TensorFlow networks that were built for training. Please see "Guidance for Compiling TensorFlow Networks" in the SDK documentation. +#### Networks: +1. Facenet based on inception-resnet-v1 (see erratum #12) +#### Layers: ## API Notes: -1. API supported on both python 2.7 and python 3.5. -2. Some APIs deprecated, will emit the "deprecated" warning if used. Users expected to move to using new APIs for these functions. +1. No change ## Network Notes: Support for the following networks has been tested. @@ -31,27 +25,54 @@ Support for the following networks has been tested. 5. VGG (Sousmith VGG_A) 6. Alexnet 7. TinyYolo v1 +8. VGG 16 +9. Resnet 50 +10. Resnet-18 +11. SSD Mobilenet v1 + + ### Tensorflow r1.3 1. inception-v1 -2. inception-v3 -3. inception-v4 -4. Inception ResNet v2 -5. Mobilenet_V1_1.0_224 (preview -- see erratum #3.) +2. inception-v2 +3. inception-v3 +4. inception-v4 +5. Inception ResNet v2 +6. VGG 16 +7. Mobilenet_V1_1.0 variants: + - MobileNet_v1_1.0_224 + - MobileNet_v1_1.0_192 + - MobileNet_v1_1.0_160 + - MobileNet_v1_1.0_128 + - MobileNet_v1_0.75_224 + - MobileNet_v1_0.75_192 + - MobileNet_v1_0.75_160 + - MobileNet_v1_0.75_128 + - MobileNet_v1_0.5_224 + - MobileNet_v1_0.5_192 + - MobileNet_v1_0.5_160 + - MobileNet_v1_0.5_128 + - MobileNet_v1_0.25_224 + - MobileNet_v1_0.25_192 + - MobileNet_v1_0.25_160 + - MobileNet_v1_0.25_128 +8. TinyYolo v2 via Darkflow transformation +9. Facenet based on inception-resnet-v1 (See erratum #12) ## Firmware Features: 1. Convolutions - NxN Convolution with Stride S. 
- The following cases have been extensively tested: 1x1s1,3x3s1,5x5s1,7x7s1, 7x7s2, 7x7s4 - Group convolution - - Depth Convolution (limited support -- see erratum #10.) + - Depth Convolution + - Dilated convolution 2. Max Pooling Radix NxM with Stride S -3. Average Pooling Radix NxM with Stride S +3. Average Pooling: Radix NxM with Stride S, Global average pooling 4. Local Response Normalization -5. Relu, Relu-X, Prelu +5. Relu, Relu-X, Prelu (see erratum #10) 6. Softmax 7. Sigmoid -8. Tanh +8. Tanh (see erratum #10) 9. Deconvolution 10. Slice 11. Scale @@ -62,20 +83,27 @@ Support for the following networks has been tested. 16. Power 17. Crop 18. ELU +19. Batch Normalization +20. L2 Normalization +21. Input Layer ## Bug Fixes: -1. USB protocol bug fixes, for expanded compatibility with hubs and hosts. In particular, fix for devices with maxpacket of 64. -2. Fixed -- when a graph execution fails, the result for a previous execution is erroneously returned. - +1. Fixed: TensorFlow FusedBatchNorm doesn't support fully connected layer inputs +2. Fixed: Mobilenets on TensorFlow 1.4 provide incorrect classification +3. Fixed: Resnet-18 on Caffe providing NaN results + ## Errata: 1. Python 2.7 is fully supported for making user applications, but only the helloworld_py example runs as-is in both python 2.7 and 3.5 due to dependencies on modules. 2. SDK tools for tensorflow on Rasbpian Stretch are not supported for this release, due to lack of an integrated tensorflow installer for Rasbpian in the SDK. TF examples are provided with pre-compiled graph files to allow them to run on Rasperry Pi, however the compile, profile, and check functions will not be available on Raspberry Pi, and 'make examples' will generate failures for the tensorflow examples on Raspberry Pi. -3. Depth-wise convolution is not optimized, leading to low performance of Mobilenet, and does not support channel multiplier >1. +3. Depth-wise convolution may not be supported if channel multiplier > 1. 4. If working behind proxy, proper proxy settings must be applied for the installer to succeed. 5. Although improved, the installer is known to take a long time on Raspberry Pi. Date/time must be correct for SDK installation to succeed on Raspberry Pi. -6. Default system virtual memory swap file size is too small to compile AlexNet on Raspberry Pi. -7. Raspberry Pi users will need to upgrade to Raspbian Stretch for this release. -8. Fully Connected Layers may produce erroneous results if input size is not a multiple of 8. -9. Convolution may fail to find a solution for very large inputs. -10. Depth convolution is tested for 3x3 kernels. -11. TensorFlow-like padding not correctly supported in some convolution cases, such as when stride=2 and even input size for 3x3 convolution. +6. Default system virtual memory swap file size is too small to compile AlexNet on Raspberry Pi. VGG 16 not verified to compile on Pi. +7. Raspberry Pi users will need to upgrade to Raspbian Stretch for releases after 1.09. +8. Convolution may fail to find a solution for very large inputs. +9. Depth convolution is tested for 3x3 kernels. +10. A TanH layer's "top" & "bottom" blobs must have different names. This is different from a ReLU layer, whose "top" & "bottom" should be named the same as its previous layer. +11. On upgrade from previous versions of the SDK, the installer will detect if openCV 3.3.0 was installed, for example from http://github.com/movidius/ncappzoo/apps/stream_ty_gn/install-opencv-from_source.sh. 
For this release, the installer will prompt to uninstall this specific version of openCV. This is required for ssd-caffe to run correctly. After 1.11 installation is complete, openCV 3.3.0 can be re-installed and the ssd-caffe will continue to function. +12. Facenet requires L2 Normalization to be inserted in order to be used; please see the support forum for a saver script example. +13. Although mvNCCheck shows per-pixel error for some metrics for mobilenet_v1_224, classification results are not impacted. +14. Initial validation has been done on SSD Mobilenet v1 and TinyYolo v2 but more thorough evaluation is underway. diff --git a/docs/tf_compile_guidance.md b/docs/tf_compile_guidance.md new file mode 100644 index 0000000..e01768d --- /dev/null +++ b/docs/tf_compile_guidance.md @@ -0,0 +1,143 @@ +# Guidance for Compiling TensorFlow™ Networks +Below you will find general guidance for compiling a TensorFlow™ network that was built for training rather than inference. The general guidance is illustrated with changes to make to the [mnist_deep.py available from the tensorflow github repository](https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_deep.py). The changes are shown as typical diff output where a '-' at the front of a line indicates the line is removed, and a '+' at the front of a line indicates the line should be added. Lines without a '-' or '+' are unchanged and provided for context. + +In order to compile a TensorFlow™ network for the NCS, you will need to save a version of the network that is specific to deployment/inference and omits the training features. The following list of steps includes what users need to do to compile a typical TensorFlow™ network for the NCS. Every step may not apply to every network, but the list should be taken as general guidance. + + +- Make sure there is a name set for the first layer of the network. This is not strictly required, but it makes compiling much easier: if you don't explicitly name the first and last layers, you will need to determine what names those layers were given and provide them to the compiler. For mnist_deep.py you would make the following change for the first node to give it the name "input": + +```python + # Create the model +- x = tf.placeholder(tf.float32, [None, 784]) ++ x = tf.placeholder(tf.float32, [None, 784], name="input") +``` + +- Add TensorFlow code to save the trained network. For mnist_deep.py the change to save the trained network is: + +```python ++ saver = tf.train.Saver() ++ + with tf.Session() as sess: +... + + print('test accuracy %g' % accuracy.eval(feed_dict={ + x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) ++ ++ graph_location = "." ++ save_path = saver.save(sess, graph_location + "/mnist_model") +``` + +- Run the code to train the network and make sure saver.save() is called to save the trained network. After the program completes, if it was successful, saver.save() will have created the following files: + - mnist_model.index + - mnist_model.data-00000-of-00001 + - mnist_model.meta + +- Remove training-specific code from the network, and add code to read in the previously saved network to create an inference-only version. For this step it's advised that you copy the original TensorFlow code to a new file and modify the new file. For example, if you are working with mnist_deep.py you could copy it to mnist_deep_inference.py. 
Things to remove from the inference code are: - Dropout layers - Training-specific code - Reading or importing training and testing data - Cross entropy/accuracy code - Placeholders except the input tensor. + +The NCSDK compiler does not resolve unknown placeholders. Extra placeholders are often used for training-specific variables, so they are not necessary for inference. Placeholder variables that cannot be removed should be replaced by constants in the inference graph. + + +For mnist_deep.py you would make the following changes: + +```python +import tempfile +- from tensorflow.examples.tutorials.mnist import input_data + +... +- # Dropout - controls the complexity of the model, prevents co-adaptation of +- # features. +- with tf.name_scope('dropout'): +- keep_prob = tf.placeholder(tf.float32) +- h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) + +... + +- y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 +- return y_conv, keep_prob ++ y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2 ++ return y_conv + +... + +- # Import data +- mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True) + +... + +- # Define loss and optimizer +- y_ = tf.placeholder(tf.float32, [None, 10]) + +... + + # Build the graph for the deep net +- y_conv, keep_prob = deepnn(x) ++ # No longer need keep_prob since removing dropout layers. ++ y_conv = deepnn(x) + +... + +- with tf.name_scope('loss'): +- cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, +- logits=y_conv) +- cross_entropy = tf.reduce_mean(cross_entropy) +- +- with tf.name_scope('adam_optimizer'): +- train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) +- +- with tf.name_scope('accuracy'): +- correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1)) +- correct_prediction = tf.cast(correct_prediction, tf.float32) +- accuracy = tf.reduce_mean(correct_prediction) +- +- graph_location = tempfile.mkdtemp() +- print('Saving graph to: %s' % graph_location) +- train_writer = tf.summary.FileWriter(graph_location) +- train_writer.add_graph(tf.get_default_graph()) ++ ++ saver = tf.train.Saver(tf.global_variables()) ++ + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) ++ sess.run(tf.local_variables_initializer()) ++ # read the previously saved network. ++ saver.restore(sess, '.' + '/mnist_model') ++ # save a version of the network that can be compiled for the NCS ++ saver.save(sess, '.' + '/mnist_inference') + +- for i in range(5000): +- batch = mnist.train.next_batch(50) +- if i % 100 == 0: +- train_accuracy = accuracy.eval(feed_dict={ +- x: batch[0], y_: batch[1], keep_prob: 1.0}) +- print('step %d, training accuracy %g' % (i, train_accuracy)) +- train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) +- +- print('test accuracy %g' % accuracy.eval(feed_dict={ +- x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) +- save_path = saver.save(sess, "./model.ckpt") +``` + +- Make sure the last node is named. As with the first node, this is not strictly required, but you need to know the name to compile the network. This is the change to make to mnist_deep.py in order to have a last softmax layer with a node name of "output": + +```python + # Build the graph for the deep net +- y_conv, keep_prob = deepnn(x) ++ y_conv = deepnn(x) ++ output = tf.nn.softmax(y_conv, name='output') +``` + +- Run the inference version of the code to save a session that is suitable for compiling via the NCSDK compiler. 
This will only take a second since it's not actually training the network, just resaving it in an NCS-friendly way. After you run it, if successful, the following files will be created: + - mnist_inference.index + - mnist_inference.data-00000-of-00001 + - mnist_inference.meta + +- Compile the final saved network with the following command; if it all works, you should see the mnist_inference.graph file created in the current directory. Note that you pass in only the weights file prefix "mnist_inference" for the -w option for a TensorFlow network on the compile command line. The full command is below. + +```bash +mvNCCompile mnist_inference.meta -w mnist_inference -s 12 -in input -on output -o mnist_inference.graph +``` diff --git a/docs/tools/check.md index 6335736..ec39978 100644 --- a/docs/tools/check.md +++ b/docs/tools/check.md @@ -8,9 +8,8 @@ Revision|1.08 See also| [mvNCCompile](compile.md), [mvNCProfile](profile.md), [TensorFlow™ Info](../TensorFlowUsage.md) ## Overview -This commandline tool compiles the provided network, runs the network on the connected Neural Compute Stick (NCS) and creates a text/HTML profiling output. The profiling data contains layer by layer stats about the performance of the input network. This is very helpful in determining how much time is spent on each layer and is helpful in determining changes to the network to improve the total inference time for a network on the Neural Compute Stick. +This command line tool validates (checks) a Caffe or TensorFlow™ neural network on the Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS). The check is done by running an inference on the NCS and also on the host computer in software using the supplied network and appropriate framework libraries. The results for both inferences (NCS results vs. framework's expected results) are compared to determine whether the network passes or fails, and the top 5 inference results are provided as output. - -The weights file is not required when profiling a network to determine bottlenecks. ## Syntax @@ -27,17 +26,18 @@ mvNCCheck network.meta [-s Max Number of Shaves] [-in Input Node Name] [-on Outp Argument|Description ------------ | ------------- network.prototxt(caffe)
network.meta(TensorFlow™)|Name of the network file. -[-w weights_file]|Weights filename from training (Only applies to Caffe, not to be used with TensorFlow™.) If omitted zero weights will be used. -[-s Max # of Shaves]|Default: 1

Selects the maximum number of SHAVEs (1,2,4,8 or 12.) to use for network layers.

Note: The NCS runtime code may use less than the MAX SHAVE value for some layers where measurements have typically shown no inference performance degradation (and consequently a power benefit) of using fewer SHAVEs. -[-in Input Node Name]|By default the network is processed from the input tensor. This option allows a user to select an alternative start point in the network.

This enables partial network processing. When used together with the -on option a user can isolate one or more layers in a network for analysis. -[-on Output Node Name]|By default the network is processed through to the output tensor. This option allows a user to select an alternative end point in the network.

This enables partial network processing. When used together with the -in option a user can isolate one or more layers in a network for analysis. +[-h --help] | Display help for the command. +[-w weights_file]|Weights filename from training. For Caffe this is the .caffemodel file name. For TensorFlow™ it must be the network name prefix. If omitted, zero weights will be used for Caffe models. +[-s Max # of Shaves]|Default: 1

Selects the maximum number of SHAVEs (1,2,4,8, or 12) to use for network layers.

Note: The NCS runtime code may use less than the MAX SHAVE value for some layers where measurements have typically shown no inference performance degradation (and consequently a power benefit) of using fewer SHAVEs. +[-in Input Node Name]|By default the network is processed from the input tensor. This option allows a user to select an alternative start point in the network.

This enables partial network processing. When used together with the -on option, a user can isolate one or more layers in a network for analysis. +[-on Output Node Name]|By default the network is processed through to the output tensor. This option allows a user to select an alternative end point in the network.

This enables partial network processing. When used together with the -in option, a user can isolate one or more layers in a network for analysis. [-is Input-Width Input-Height]|Input size is typically described as a part of the network. For networks that do not have dimension constraints on the input tensor, this option can be used to set the desired input dimensions.

Only two dimensions are defined because the batch size is always 1 and the number of color planes is assumed to be 3. [-o Output Graph Filename]|Default: "graph"

Output graph container filename. If not provided, “graph” will be used. -[-i image filename]|Image to use as input to validation run.
If not set, a randomly generated image will be used -[-id Top-1 Validation ID]|Expected id for Top-1 validation -[-S scale factor]|Scale each value of the input by this amount.
E.g. if the network expects input values in the range 0-255, put 255 here (1 is default, as the range 0-1 is the default). -[-M Mean Subtraction Number or npy filename]|Subtract this from the input (applied after scale). E.g. If the network expects a mean file to be subtracted from the input image, put it here. -[-cs Color Sequence]|Color Sequence of Input channels
2,1,0: BGR (Default)
0,1,2 : RGB +[-i image filename]|Image to use as input to validation run.
If not set, a randomly generated image will be used. +[-id Top-1 Validation ID]|Expected id for Top-1 validation. +[-S scale factor]|Scale each value of the input by this amount.
E.g., if the network expects input values in the range 0-255, put 255 here (1 is default, as the range 0-1 is the default). +[-M Mean Subtraction Number or npy filename]|Subtract this from the input (applied after scale). E.g., if the network expects a mean file to be subtracted from the input image, put it here. +[-cs Color Sequence]|Color Sequence of Input channels:
2,1,0: BGR (Default)
0,1,2: RGB
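Taken together, these options describe a complete validation run. As an illustrative sketch only (the model, weights, and image file names below are placeholders, not files shipped with the NCSDK), a Caffe model could be checked against an expected Top-1 class like this:

```bash
# Compare NCS inference results against the host framework: scale input
# pixels by 255, subtract a mean of 110, and expect class id 281 (Top-1).
mvNCCheck network.prototxt -w network.caffemodel -i cat.jpg -s 12 -id 281 -S 255 -M 110
```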
## Known Issues diff --git a/docs/tools/compile.md b/docs/tools/compile.md index 4374028..d1d89ea 100644 --- a/docs/tools/compile.md +++ b/docs/tools/compile.md @@ -8,27 +8,29 @@ Revision|1.08 See also| [mvNCProfile](profile.md), [mvNCCheck](check.md), [TensorFlow™ Info](../TensorFlow.md) ## Overview -This commandline tool compiles and converts the network file and weights file described in Caffe or TensorFlow™ into Movidius™ internal Graphfile format. The graph file is loaded into the Neural Compute Stick during runtime using the NCSDK API. The graph file then can be executed by sending an image to the NCS for inferencing. +This command line tool compiles and converts the network file and weights file described in Caffe or TensorFlow™ into Intel® Movidius™ internal Graphfile format. The graph file is loaded into the Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS) +during runtime using the NCSDK API. The graph file can then be executed by sending an image to the Intel Movidius NCS for inferencing. ## Syntax ### Caffe ```bash -mvNCCompile network.prototxt [-w weights_file] [-s Max Number of Shaves] [-in Input Node Name] [-on Output Node Name] [-is Input-Width Input-Height] [-o Output Graph Filename] +mvNCCompile network.prototxt [-w network.caffemodel] [-s Max Number of Shaves] [-in Input Node Name] [-on Output Node Name] [-is Input-Width Input-Height] [-o Output Graph Filename] ``` ### TensorFlow™ ```bash -mvNCCompile network.meta [-s Max Number of Shaves] [-in Input Node Name] [-on Output Node Name] [-is Input-Width Input-Height] [-o Output Graph Filename] +mvNCCompile network.meta [-w network] [-s Max Number of Shaves] [-in Input Node Name] [-on Output Node Name] [-is Input-Width Input-Height] [-o Output Graph Filename] ``` Argument|Description ------------ | ------------- network.prototxt(Caffe)
network.meta(TensorFlow™)|Name of the network file. -[-w weights_file]|Weights filename from training (only applies to Caffe, not to be used with TensorFlow™.) If omitted zero weights will be used. -[-s Max # of Shaves]|Default: 1

Selects the maximum number of SHAVEs (1, 2, 4, 8 or 12.) to use for network layers.

Note: The NCS runtime code may use less than the MAX SHAVE value for some layers where measurements have typically shown no inference performance degradation (and consequently a power benefit) of using fewer SHAVEs. -[-in Input Node Name]|By default the network is processed from the input tensor. This option allows a user to select an alternative start point in the network.

This enables partial network processing. When used together with the -on option a user can isolate one or more layers in a network for analysis. -[-on Output Node Name]|By default the network is processed through to the output tensor. This option allows a user to select an alternative end point in the network.

This enables partial network processing. When used together with the -in option a user can isolate one or more layers in a network for analysis. Note: beware that the parser stops at the first instance of the output node name (e.g., a Relu following a Conv will not be processed if it shares the same name). -[-is Input-Width Input-Height]|Input size is typically described as a part of the network. For networks that do not have dimension constraints on the input tensor, this option can be used to set the desired input dimensions.

Only two dimensions are defined because the batch size is always 1 and the number of color planes is assumed to be 3. +[-w weights_file]|Weights filename from training. For Caffe this is typically the .caffemodel file. For TensorFlow™ this is typically the network.data-xxx-of-yyy file, but you should supply only the prefix of that file, i.e., "network". If omitted, zero weights will be used. +[-s Max # of Shaves]|Default: 1

Selects the maximum number of SHAVEs (1, 2, 4, 8, or 12) to use for network layers.

Note: The NCS runtime code may use less than the MAX SHAVE value for some layers where measurements have typically shown no inference performance degradation (and consequently a power benefit) from using fewer SHAVEs. +[-in Input Node Name]|By default the network is processed from the input tensor. This option allows a user to select an alternative start point in the network.

This enables partial network processing. When used together with the -on option, a user can isolate one or more layers in a network for analysis. Use the name parameter, available for most TensorFlow™ layers, to name the first node when creating your network, and pass that name to this option. For example, if your first layer is x = tf.placeholder(tf.float32, [1, 784], name='input'), then you can use "-in input" (see the example command after this table). +[-on Output Node Name]|By default the network is processed through to the output tensor. This option allows a user to select an alternative end point in the network.

This enables partial network processing. When used together with the -in option, a user can isolate one or more layers in a network for analysis. Note: Beware that the parser stops at the first instance of the output node name (e.g., a Relu following a Conv will not be processed if it shares the same name). Use the name parameter, available for most TensorFlow™ layers, to specify the name of the node. To add an output node with a known name that does not change the network's behavior, you can use the following: +output = tensorflow.identity(prev_tensor, name='output') +[-is Input-Width Input-Height]|Input size is typically described as a part of the network. For networks that do not have dimension constraints on the input tensor, this option can be used to set the desired input dimensions.

Only two dimensions are defined because the batch size is always 1 and the number of color planes is assumed to be 3. If the number of color planes for the network is not 3, this must be described as part of the network, and the -is option cannot be used. [-o Output Graph Filename]|Default: "graph"

Output graph container filename. If not provided, "graph" will be used.
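Tying the -in/-on discussion together, here is an illustrative sketch of the TensorFlow™ flow (the file and node names are placeholders taken from the tf.placeholder and tensorflow.identity snippets above). Once the first and last nodes are named 'input' and 'output', the subgraph between them can be compiled on its own:

```bash
# Compile only the subgraph between the named nodes, using up to 12 SHAVEs;
# the weights prefix "network" matches the network.data-xxx-of-yyy files.
mvNCCompile network.meta -w network -s 12 -in input -on output -o graph
```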
## Known Issues diff --git a/docs/tools/profile.md b/docs/tools/profile.md index a55a256..c0c67cc 100644 --- a/docs/tools/profile.md +++ b/docs/tools/profile.md @@ -8,7 +8,7 @@ Revision|1.08 See also| [mvNCCompile](compile.md), [mvNCCheck](check.md), [TensorFlow™ info](../TensorFlow.md) ## Overview -This commandline tool compiles the provided network, runs the network on the connected Neural Compute Stick (NCS) and creates a text/HTML profiling output. The profiling data contains layer by layer stats about the performance of the input network. This is very helpful in determining how much time is spent on each layer and is helpful in determining changes to the network to improve the total inference time for a network on the Neural Compute Stick. +This command line tool compiles the provided network, runs the network on the connected Intel® Movidius™ Neural Compute Stick (Intel® Movidius™ NCS), and creates a text/HTML profiling output. The profiling data contains layer-by-layer stats about the performance of the input network. This is very helpful for seeing how much time is spent on each layer and for identifying changes to the network that improve total inference time on the Intel Movidius NCS. The weights file is not required when profiling a network to determine bottlenecks. @@ -26,10 +26,10 @@ mvNCProfile network.meta [-s Max Number of Shaves] [-in Input Node Name] [-on Ou Argument|Description ------------ | ------------- network.prototxt(caffe)
network.meta(TensorFlow™)|Name of the network file. -[-w weights_file]|Weights filename from training (Only applies to Caffe, not to be used with TensorFlow™.) If omitted zero weights will be used. -[-s Max # of Shaves]|Default: 1

Selects the maximum number of SHAVEs (1,2,4,8 or 12.) to use for network layers.

Note: The NCS runtime code may use less than the MAX SHAVE value for some layers where measurements have typically shown no inference performance degradation (and consequently a power benefit) of using fewer SHAVEs. +[-w weights_file]|Weights filename from training. (Only applies to Caffe, not to be used with TensorFlow™.) If omitted, zero weights will be used. +[-s Max # of Shaves]|Default: 1

Selects the maximum number of SHAVEs (1, 2, 4, 8, or 12) to use for network layers.

Note: The NCS runtime code may use less than the MAX SHAVE value for some layers where measurements have typically shown no inference performance degradation (and consequently a power benefit) from using fewer SHAVEs. [-in Input Node Name]|By default the network is processed from the input tensor. This option allows a user to select an alternative start point in the network.

This enables partial network processing. When used together with the -on option a user can isolate one or more layers in a network for analysis. -[-on Output Node Name]|By default the network is processed through to the output tensor. This option allows a user to select an alternative end point in the network.

This enables partial network processing. When used together with the -in option a user can isolate one or more layers in a network for analysis. +[-on Output Node Name]|By default the network is processed through to the output tensor. This option allows a user to select an alternative end point in the network.

This enables partial network processing. When used together with the -in option, a user can isolate one or more layers in a network for analysis. [-is Input-Width Input-Height]|Input size is typically described as a part of the network. For networks that do not have dimension constraints on the input tensor, this option can be used to set the desired input dimensions.

Only two dimensions are defined because the batch size is always 1 and the number of color planes is assumed to be 3. [-o Output Graph Filename]|Default: "graph"

Output graph container filename. If not provided, "graph" will be used. @@ -141,7 +141,7 @@ Layer Name MFLOPs Bandwidth MB/s ``` ## Graphical Format -The mvNCProfile also creates the output_report.html and output.gv.svg files which contain a graphcial representation of the profile information as shown below. +The mvNCProfile also creates the output_report.html and output.gv.svg files, which contain a graphical representation of the profile information as shown below. ![](../images/GoogLeNet_gv.png)
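Putting the profiling options together, a minimal illustrative invocation follows (the file names are placeholders); a run like this would produce the text report along with the output_report.html and output.gv.svg files described above:

```bash
# Profile a Caffe network on the attached NCS, allowing up to 12 SHAVEs;
# emits layer-by-layer timing plus the HTML and SVG reports.
mvNCProfile network.prototxt -w network.caffemodel -s 12
```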
diff --git a/examples/Makefile b/examples/Makefile index 691d6f1..c1dc89e 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif TOPTARGETS := all clean check profile compile run diff --git a/examples/apps/Makefile b/examples/apps/Makefile index a53de79..3eebe57 100644 --- a/examples/apps/Makefile +++ b/examples/apps/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif TOPTARGETS := all clean diff --git a/examples/caffe/AlexNet/Makefile b/examples/caffe/AlexNet/Makefile index 29ad0e7..b738de5 100644 --- a/examples/caffe/AlexNet/Makefile +++ b/examples/caffe/AlexNet/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif NCCOMPILE = mvNCCompile @@ -83,7 +83,7 @@ compile: prototxt caffemodel check: prototxt caffemodel @echo "\nmaking check" #-${NCCHECK} -w ${CAFFEMODEL_FILENAME} -i ../../data/images/cat.jpg -s 12 -id 281 ${PROTOTXT_FILENAME} -S 255 -M ../../data/ilsvrc12/ilsvrc_2012_mean.npy - -${NCCHECK} -w ${CAFFEMODEL_FILENAME} -i ../../data/images/cat.jpg -s 12 -id 281 ${PROTOTXT_FILENAME} -M 110 -S 255 + -${NCCHECK} -w ${CAFFEMODEL_FILENAME} -i ../../data/images/cat.jpg -s 12 -id 281 ${PROTOTXT_FILENAME} -M 110 -S 255 -metric top1 .PHONY: run run: compile diff --git a/examples/caffe/GoogLeNet/Makefile b/examples/caffe/GoogLeNet/Makefile index c330c15..8b0b918 100644 --- a/examples/caffe/GoogLeNet/Makefile +++ b/examples/caffe/GoogLeNet/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif NCCOMPILE = mvNCCompile @@ -84,7 +84,7 @@ compile: prototxt caffemodel check: prototxt caffemodel @echo "\nmaking check" #-${NCCHECK} -w ${CAFFEMODEL_FILENAME} -i ../../data/images/cat.jpg -s 12 -id 281 ${PROTOTXT_FILENAME} -S 255 -M ../../data/ilsvrc12/ilsvrc_2012_mean.npy - ${NCCHECK} -w ${CAFFEMODEL_FILENAME} -i ../../data/images/nps_electric_guitar.png -s 12 -id 546 ${PROTOTXT_FILENAME} -S 255 -M 110 + ${NCCHECK} -w ${CAFFEMODEL_FILENAME} -i ../../data/images/nps_electric_guitar.png -s 12 -id 546 ${PROTOTXT_FILENAME} -S 255 -M 110 -metric top1 .PHONY: run run: compile diff --git a/examples/caffe/Makefile b/examples/caffe/Makefile index 719db55..9bb8bf1 100644 --- a/examples/caffe/Makefile +++ b/examples/caffe/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif TOPTARGETS := all clean check compile profile run diff --git a/examples/caffe/SqueezeNet/Makefile b/examples/caffe/SqueezeNet/Makefile index af22a3c..4d42b16 100644 --- a/examples/caffe/SqueezeNet/Makefile +++ b/examples/caffe/SqueezeNet/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif NCCOMPILE = mvNCCompile @@ -92,7 +92,7 @@ compile: prototxt caffemodel check: prototxt caffemodel @echo "\nmaking check" #-${NCCHECK} -w ${CAFFEMODEL_FILENAME} -i ../../data/images/cat.jpg -s 12 -id 281 ${PROTOTXT_FILENAME} -S 255 -M ../../data/ilsvrc12/ilsvrc_2012_mean.npy - ${NCCHECK} -w ${CAFFEMODEL_FILENAME} -i ../../data/images/cat.jpg -s 12 -id 281 ${PROTOTXT_FILENAME} -S 255 -M 120 + ${NCCHECK} -w ${CAFFEMODEL_FILENAME} -i ../../data/images/cat.jpg -s 12 -id 281 ${PROTOTXT_FILENAME} -S 255 -M 120 -metric top1 .PHONY: run run: compile diff --git a/examples/data/Makefile b/examples/data/Makefile index ed0a976..1e6f99d 100644 --- a/examples/data/Makefile +++ b/examples/data/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif .PHONY: help diff --git a/examples/tensorflow/Makefile b/examples/tensorflow/Makefile index 6f0af84..0262179 100644 --- a/examples/tensorflow/Makefile +++ b/examples/tensorflow/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif diff --git a/examples/tensorflow/inception_v1/Makefile b/examples/tensorflow/inception_v1/Makefile index bba4dbf..0b9c057 100644 --- a/examples/tensorflow/inception_v1/Makefile +++ b/examples/tensorflow/inception_v1/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif NCCOMPILE = mvNCCompile @@ -52,7 +52,7 @@ compile: weights .PHONY: check check: weights - -${NCCHECK} -s 12 ${MODEL_FILENAME} ${INPUT_NODE_FLAG} ${OUTPUT_NODE_FLAG} -i ../../data/images/cat.jpg -id 829 -S 2 -M 128 -cs 0,1,2 + -${NCCHECK} -s 12 ${MODEL_FILENAME} ${INPUT_NODE_FLAG} ${OUTPUT_NODE_FLAG} -i ../../data/images/cat.jpg -id 829 -S 2 -M 128 -cs 0,1,2 -metric top1 .PHONY: run run: compile diff --git a/examples/tensorflow/inception_v1/graph b/examples/tensorflow/inception_v1/graph index 3f18501..e5d5ae4 100644 Binary files a/examples/tensorflow/inception_v1/graph and b/examples/tensorflow/inception_v1/graph differ diff --git a/examples/tensorflow/inception_v3/Makefile b/examples/tensorflow/inception_v3/Makefile index da84808..bde24c4 100644 --- a/examples/tensorflow/inception_v3/Makefile +++ b/examples/tensorflow/inception_v3/Makefile @@ -1,6 +1,6 @@ ifneq ($(findstring movidius, $(PYTHONPATH)), movidius) - export PYTHONPATH:=/opt/movidius/caffe/python:/opt/movidius/mvnc/python:$(PYTHONPATH) + export PYTHONPATH:=/opt/movidius/caffe/python:$(PYTHONPATH) endif NCCOMPILE = mvNCCompile @@ -48,7 +48,7 @@ compile: weights
.PHONY: check check: weights - -${NCCHECK} -s 12 ${MODEL_FILENAME} ${INPUT_NODE_FLAG} ${OUTPUT_NODE_FLAG} -i ../../data/images/cat.jpg -id 917 -M 128 -S 2 -cs 0,1,2 + -${NCCHECK} -s 12 ${MODEL_FILENAME} ${INPUT_NODE_FLAG} ${OUTPUT_NODE_FLAG} -i ../../data/images/cat.jpg -id 917 -M 128 -S 2 -cs 0,1,2 -metric top1 .PHONY: run run: compile diff --git a/examples/tensorflow/inception_v3/graph b/examples/tensorflow/inception_v3/graph index 35c4648..3e8ead8 100644 Binary files a/examples/tensorflow/inception_v3/graph and b/examples/tensorflow/inception_v3/graph differ diff --git a/examples/tensorflow/inception_v3/inception-v3.py b/examples/tensorflow/inception_v3/inception-v3.py index 42c186b..7154f04 100644 --- a/examples/tensorflow/inception_v3/inception-v3.py +++ b/examples/tensorflow/inception_v3/inception-v3.py @@ -8,16 +8,16 @@ slim = tf.contrib.slim def run(name, image_size, num_classes): - with tf.Graph().as_default(): - image = tf.placeholder("float", [1, image_size, image_size, 3], name="input") - with slim.arg_scope(inception.inception_v3_arg_scope()): - logits, _ = inception.inception_v3(image, num_classes, is_training=False, spatial_squeeze=False) - probabilities = tf.nn.softmax(logits) - init_fn = slim.assign_from_checkpoint_fn('inception_v3.ckpt', slim.get_model_variables('InceptionV3')) + with tf.Graph().as_default(): + image = tf.placeholder("float", [1, image_size, image_size, 3], name="input") + with slim.arg_scope(inception.inception_v3_arg_scope()): + logits, _ = inception.inception_v3(image, num_classes, is_training=False, spatial_squeeze=False) + probabilities = tf.nn.softmax(logits) + init_fn = slim.assign_from_checkpoint_fn('inception_v3.ckpt', slim.get_model_variables('InceptionV3')) - with tf.Session() as sess: - init_fn(sess) - saver = tf.train.Saver(tf.global_variables()) - saver.save(sess, "output/"+name) + with tf.Session() as sess: + init_fn(sess) + saver = tf.train.Saver(tf.global_variables()) + saver.save(sess, "output/"+name) run('inception-v3', 299, 1001) diff --git a/install-opencv.sh b/install-opencv.sh old mode 100644 new mode 100755 diff --git a/install.sh b/install.sh old mode 100644 new mode 100755 index 1547d8b..9976f01 --- a/install.sh +++ b/install.sh @@ -11,10 +11,10 @@ then cd /tmp else cd /tmp - wget --no-cache http://ncs-forum-uploads.s3.amazonaws.com/ncsdk/ncsdk_01_09/ncsdk_redirector.txt + wget --no-cache http://ncs-forum-uploads.s3.amazonaws.com/ncsdk/ncsdk_01_12/ncsdk_redirector.txt fi -download_filename=NCSDK-1.09.tar.gz +download_filename=NCSDK-1.12.tar.gz # redirector is the url from redirector text file redirector=$( /dev/null 2>&1 +if [ $? -eq 0 ] ; +then + echo "" + echo "************************ Please confirm *******************************" + echo " NCSDK 1.11 requires that previous installations of OpenCV" + echo " be uninstalled before proceeding with NCSDK installation." + echo " Note that if you installed opencv via pip3 or from source into the" + echo " home directory, it will be uninstalled." + read -p " Continue uninstalling OpenCV (y/n) ? 
" CONTINUE + if [[ "$CONTINUE" == "y" || "$CONTINUE" == "Y" ]]; then + echo ""; + echo "OpenCV already setup for python"; + echo ""; + echo "Uninstalling opencv pip installation"; + sudo pip3 uninstall opencv-contrib-python + sudo pip3 uninstall opencv-python + + echo "Looking for opencv source installation"; + if [ -d "$HOME/opencv-3.3.0" ]; then + echo "opencv-3.3.0 directory exists" + if [ -e "$HOME/opencv-3.3.0/build/Makefile" ]; then + echo "opencv-3.3.0 Makefile exists, attempting to uninstall opencv-3.3.0" + cd "$HOME/opencv-3.3.0/build" + sudo make uninstall &> /dev/null + echo "done." + fi + fi + else + echo " Not removing opencv, quitting." + exit 1 + fi +fi diff --git a/uninstall.sh b/uninstall.sh old mode 100644 new mode 100755