added component with tf slim model generator and pipeline adjustments #335
```diff
@@ -1,7 +1,9 @@
 #!/usr/bin/python3
 
 import argparse
 import subprocess
 import re
+from shutil import copyfile
+import os
 
 
 def is_insecure_path(path):
@@ -33,7 +35,6 @@ def main():
         '--output_path', type=str, help='GCS path of output folder')
     args = parser.parse_args()
 
-    bin_path = "../tools/google-cloud-sdk/bin/"
     # Validate parameters
 
     if is_insecure_path(args.input_path):
@@ -48,24 +49,28 @@ def main():
         print("Invalid model optimizer options")
         exit(1)
 
-    # Initialize gsutil creds
-    command = bin_path + "gcloud auth activate-service-account " \
+    # Initialize gsutil creds if needed
+    if "GOOGLE_APPLICATION_CREDENTIALS" in os.environ:
+        command = "gcloud auth activate-service-account " \
                   "--key-file=${GOOGLE_APPLICATION_CREDENTIALS}"
-    print("auth command", command)
-    return_code = subprocess.call(command, shell=True)
-    print("return code", return_code)
+        print("auth command", command)
+        return_code = subprocess.call(command, shell=True)
+        print("return code", return_code)
 
-    # Downloading input model
-    command = bin_path + "gsutil cp -r " + args.input_path + " ."
+    # Downloading input model or GCS folder with a model to current folder
+    command = "gsutil cp -r " + args.input_path + " ."
     print("gsutil download command", command)
     return_code = subprocess.call(command, shell=True)
     print("return code", return_code)
     if return_code:
         exit(1)
 
     # Executing model optimization
-    command = "python3 ../mo.py " + args.mo_options
-    print("mo command", command)
+    command = "mo.py " + args.mo_options
+    print("Starting model optimization:", command)
     output = subprocess.run(command, shell=True, stdout=subprocess.PIPE,
                             universal_newlines=True)
     print("Model optimization output",output.stdout)
     XML = ""
     BIN = ""
     for line in output.stdout.splitlines():
@@ -77,19 +82,25 @@ def main():
         print("Error, model optimization failed")
         exit(1)
 
+    # copy generated model file to use them as workflow artifacts
+    copyfile(BIN, "/tmp/model.bin")
+    copyfile(XML, "/tmp/model.xml")
 
-    command = bin_path + "gsutil cp " + XML + " " + args.output_path
+    command = "gsutil cp " + XML + " " + os.path.join(args.output_path, os.path.split(XML)[1])
     print("gsutil upload command", command)
     return_code = subprocess.call(command, shell=True)
     print("return code", return_code)
-    command = bin_path + "gsutil cp " + BIN + " " + args.output_path
+    command = "gsutil cp " + BIN + " " + os.path.join(args.output_path, os.path.split(BIN)[1])
     print("gsutil upload command", command)
     return_code = subprocess.call(command, shell=True)
     print("return code", return_code)
     if return_code:
         exit(1)
 
     with open('/tmp/output_path.txt', 'w') as f:
         f.write(args.output_path)
+    with open('/tmp/bin_path.txt', 'w') as f:
+        f.write(os.path.join(args.output_path, os.path.split(BIN)[1]))
+    with open('/tmp/xml_path.txt', 'w') as f:
+        f.write(os.path.join(args.output_path, os.path.split(XML)[1]))
 
     print("Model successfully generated and uploaded to ", args.output_path)
 
 
 if __name__ == "__main__":
     main()
```
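The `/tmp/output_path.txt`, `/tmp/bin_path.txt`, and `/tmp/xml_path.txt` files written at the end of the script exist so the generated GCS paths can be exposed as pipeline outputs and consumed by downstream steps. A minimal sketch of how that wiring could look in the Kubeflow Pipelines DSL is shown below; the image names, entrypoint, and downstream step are placeholders, not this component's actual definition:

```python
# Hypothetical pipeline wiring for the converter component; image names,
# entrypoint, and the downstream step are placeholders.
import kfp.dsl as dsl


@dsl.pipeline(name="OpenVINO model conversion",
              description="Sketch of consuming the converter outputs")
def convert_pipeline(input_path, mo_options, output_path):
    convert = dsl.ContainerOp(
        name="model-converter",
        image="<registry>/openvino-model-converter:latest",
        command=["python3", "convert_model.py"],
        arguments=[
            "--input_path", input_path,
            "--mo_options", mo_options,
            "--output_path", output_path,
        ],
        # The /tmp files written by the script become named outputs here.
        file_outputs={
            "output-path": "/tmp/output_path.txt",
            "bin-path": "/tmp/bin_path.txt",
            "xml-path": "/tmp/xml_path.txt",
        },
    )
    # A downstream step can pick up the uploaded IR files by output name.
    dsl.ContainerOp(
        name="predict",
        image="<registry>/openvino-predict:latest",
        arguments=["--model_xml", convert.outputs["xml-path"],
                   "--model_bin", convert.outputs["bin-path"]],
    )
```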
@@ -0,0 +1,79 @@
# Slim models generator

This component automates the generation of [slim models](https://github.com/tensorflow/models/blob/master/research/slim).
It can create a graph from the slim models zoo, load the variables from a pre-trained checkpoint, and export the model
as a TensorFlow `frozen graph` and `saved model`.

The results of the component can be saved to a local path or to GCS cloud storage. They can be used by other ML pipeline
components such as the OpenVINO model optimizer, OpenVINO predict, or OpenVINO Model Server.

## Building

```bash
docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy .
```

## Using the component

```bash
python slim_model.py --help
usage: slim_model.py [-h] [--model_name MODEL_NAME] [--export_dir EXPORT_DIR]
                     [--batch_size BATCH_SIZE]
                     [--checkpoint_url CHECKPOINT_URL]
                     [--num_classes NUM_CLASSES]

Slim model generator

optional arguments:
  -h, --help            show this help message and exit
  --model_name MODEL_NAME
  --export_dir EXPORT_DIR
                        GCS or local path to save the generated model
  --batch_size BATCH_SIZE
                        batch size to be used in the exported model
  --checkpoint_url CHECKPOINT_URL
                        URL to the pretrained compressed checkpoint
  --num_classes NUM_CLASSES
                        number of model classes
```

*Model name* can be any model defined in the slim repository. The name needs to match a key name from
[nets_factory.py](https://github.com/tensorflow/models/blob/master/research/slim/nets/nets_factory.py#L39).
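If in doubt about which names are valid, the factory module exposes the mapping directly. A quick way to list the accepted names (assuming `models/research/slim` is on `PYTHONPATH`, as it is in this image):

```python
# Print the model names accepted by --model_name (the keys of the slim nets factory).
from nets import nets_factory

print(sorted(nets_factory.networks_map.keys()))
# e.g. ['alexnet_v2', ..., 'inception_v4', 'mobilenet_v1_050', 'resnet_v1_50', 'vgg_19', ...]
```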
*Export dir* can be a local path in the container or a GCS path where the generated files are stored:
- model graph file in pb format
- frozen graph including weights from the provided checkpoint
- event file which can be imported into TensorBoard
- saved model which will be stored in a subfolder called `1`

*Batch size* represents the batch size used in the exported models. It can be a natural number for a fixed batch size,
or `-1` for a dynamic batch size.

*Checkpoint url* is the URL to a pre-trained compressed checkpoint from
https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models.
It must match the model specified in the `model_name` parameter.

*Num classes* is the model-specific number of classes in the outputs. For slim models it should be `1000` or `1001`,
and it must match the number of classes used by the requested model.
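For orientation, the export flow these parameters drive can be sketched roughly with TF 1.x and the slim nets factory. This is an illustrative outline, not the component's actual `slim_model.py` code; the checkpoint path, output node name, and file names below are assumptions:

```python
import tensorflow as tf
from nets import nets_factory

model_name, num_classes, batch_size = "resnet_v1_50", 1000, -1   # assumed example values
ckpt_path = "checkpoints/resnet_v1_50.ckpt"                      # assumed local checkpoint path

# Build the inference graph for the requested slim model.
network_fn = nets_factory.get_network_fn(model_name, num_classes=num_classes, is_training=False)
size = network_fn.default_image_size
# -1 maps to a dynamic (None) batch dimension in the placeholder.
batch = None if batch_size == -1 else batch_size
images = tf.placeholder(tf.float32, [batch, size, size, 3], name="input")
logits, _ = network_fn(images)
output = tf.identity(logits, name="output")

with tf.Session() as sess:
    # Load the pre-trained variables downloaded from checkpoint_url.
    tf.train.Saver().restore(sess, ckpt_path)
    # Freeze the graph: fold the restored variables into constants.
    frozen = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["output"])
    with tf.gfile.GFile("frozen_graph.pb", "wb") as f:
        f.write(frozen.SerializeToString())
    # Export a SavedModel into the "1" subfolder, as described above.
    tf.saved_model.simple_save(sess, "1", inputs={"input": images}, outputs={"output": output})
```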
## Examples

```bash
python slim_model.py --model_name mobilenet_v1_050 --export_dir /tmp/mobilnet \
--batch_size 1 --num_classes=1001 \
--checkpoint_url http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160.tgz

python slim_model.py --model_name resnet_v1_50 --export_dir gs://<bucket_name>/resnet \
--batch_size -1 --num_classes=1000 \
--checkpoint_url http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz

python slim_model.py --model_name inception_v4 --export_dir gs://<bucket_name>/inception \
--batch_size -1 --num_classes=1001 \
--checkpoint_url http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz

python slim_model.py --model_name vgg_19 --export_dir /tmp/vgg \
--batch_size 1 --num_classes=1000 \
--checkpoint_url http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz
```
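After the export, the `saved model` written into the `1` subfolder can be loaded back for a quick sanity check. A minimal sketch, assuming the local export dir from the last example and input/output tensor names `input:0` and `output:0` (both are assumptions about the generated signature, so inspect the SavedModel to confirm them):

```python
import numpy as np
import tensorflow as tf

export_dir = "/tmp/vgg/1"   # assumed local export dir from the vgg_19 example above

with tf.Session(graph=tf.Graph()) as sess:
    # Load the exported SavedModel and run a dummy batch through it.
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    images = sess.graph.get_tensor_by_name("input:0")    # assumed input tensor name
    logits = sess.graph.get_tensor_by_name("output:0")   # assumed output tensor name
    dummy = np.zeros((1, 224, 224, 3), dtype=np.float32)
    print(sess.run(logits, feed_dict={images: dummy}).shape)
```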
@@ -0,0 +1,74 @@
```dockerfile
FROM intelpython/intelpython3_core as BUILD

RUN apt-get update && apt-get install -y --no-install-recommends \
            openjdk-8-jdk \
            openjdk-8-jre-headless \
            build-essential \
            curl \
            git \
            libcurl3-dev \
            libfreetype6-dev \
            libhdf5-serial-dev \
            libpng-dev \
            libzmq3-dev \
            pkg-config \
            rsync \
            software-properties-common \
            unzip \
            zip \
            zlib1g-dev && \
        apt-get clean

RUN git clone --depth 1 https://github.com/tensorflow/tensorflow


RUN conda create --name myenv -y
ENV PATH /opt/conda/envs/myenv/bin:$PATH

# Set up Bazel.

# Running bazel inside a `docker build` command causes trouble, cf:
# https://github.com/bazelbuild/bazel/issues/134
# The easiest solution is to set up a bazelrc file forcing --batch.
RUN echo "startup --batch" >>/etc/bazel.bazelrc
# Similarly, we need to workaround sandboxing issues:
# https://github.com/bazelbuild/bazel/issues/418
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
    >>/etc/bazel.bazelrc
# Install the most recent bazel release.
ENV BAZEL_VERSION 0.15.0
WORKDIR /
RUN mkdir /bazel && \
    cd /bazel && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
    chmod +x bazel-*.sh && \
    ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    cd / && \
    rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh

RUN cd tensorflow && bazel build tensorflow/tools/graph_transforms:summarize_graph

FROM intelpython/intelpython3_core as PROD
RUN apt-get update && apt-get install -y --no-install-recommends \
            git && \
        apt-get clean

WORKDIR /slim

RUN git clone --depth 1 https://github.com/tensorflow/models && rm -Rf models/.git && \
    git clone --depth 1 https://github.com/tensorflow/tensorflow && rm -Rf tensorflow/.git

RUN conda create --name myenv -y
ENV PATH /opt/conda/envs/myenv/bin:$PATH

RUN pip install --no-cache-dir tensorflow validators google-cloud-storage
ENV PYTHONPATH=models/research/slim:tensorflow/python/tools

COPY --from=BUILD /tensorflow/bazel-bin/tensorflow/tools/graph_transforms/summarize_graph summarize_graph
COPY --from=BUILD /root/.cache/bazel/_bazel_root/*/execroot/org_tensorflow/bazel-out/k8-opt/bin/_solib_k8/_U_S_Stensorflow_Stools_Sgraph_Utransforms_Csummarize_Ugraph___Utensorflow/libtensorflow_framework.so libtensorflow_framework.so
COPY slim_model.py .
```

Review comments on this file:

> On the `curl ... bazel-$BAZEL_VERSION-installer-linux-x86_64.sh` line: Why does this need the User Agent header? There should be a comment about this.

> On the `git clone --depth 1 https://github.com/tensorflow/models` line: Using master is fragile. We should have an ARG and specify some release tag for it.