Commit
Merge pull request #34 from gsunner/seldon-core-examples
seldon-core-examples repo added to main project
gsunner authored Jan 15, 2018
2 parents f8f09ea + 382a876 commit 14d5e44
Showing 36 changed files with 1,017 additions and 0 deletions.
19 changes: 19 additions & 0 deletions examples/README.md
@@ -0,0 +1,19 @@
# Content

The seldon-core-examples repository provides out-of-the-box machine learning model examples to deploy with [seldon-core](https://github.com/SeldonIO/seldon-core). Since seldon-core deploys Dockerized versions of your models, the repository also includes wrapping scripts that build Docker images of these models ready for deployment with seldon-core.

## Wrapping scripts

The repository currently contains two wrapping scripts:
* wrap-model-in-host : if you are using Docker on your machine, this script builds a Docker image of your model locally.
* wrap-model-in-minikube : if you are using Minikube, this script builds a Docker image of your model directly on your Minikube cluster (for usage, see the [seldon-core docs](https://github.com/SeldonIO/seldon-core/blob/master/docs/wrappers/readme.md)).

## Examples

The examples in the "models" folder are out-of-the-box machine learning models packaged as required by the Seldon wrappers. Each model folder usually includes a script to create and save the model, a Python model file, and a requirements file.
As an example, here is the content of the "models/sklearn_iris" folder; see the [seldon wrappers guidelines](https://github.com/SeldonIO/seldon-core/blob/master/docs/wrappers/readme.md) for more details about packaging models. A minimal sketch of the model class these wrappers expect is shown after the list below.

* train_iris.py : Script to train and save a scikit-learn iris classifier.
* IrisClassifier.py : The Python class used by the Seldon wrappers to load and serve the saved model.
* requirements.txt : A list of packages required by your model.
* sklearn_iris_deployment.json : A JSON configuration file used to deploy your model with [seldon-core](https://github.com/SeldonIO/seldon-core#quick-start).
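Every model in this repository follows the same minimal interface expected by the Seldon wrappers: a class whose constructor loads the saved model artifact and whose `predict` method takes a batch of feature rows together with the feature names. The sketch below is illustrative only; `MyModel` and `model.pkl` are hypothetical names, and the pickled estimator is assumed to expose `predict_proba` as scikit-learn models do.

```python
# Illustrative sketch of the model class the Seldon wrappers expect
# (MyModel and model.pkl are hypothetical names).
import pickle


class MyModel(object):

    def __init__(self):
        # Load whatever artifact the training script saved.
        with open("model.pkl", "rb") as f:
            self.model = pickle.load(f)

    def predict(self, X, feature_names):
        # X is a batch of feature rows; feature_names lists the column names.
        # Return an array-like of predictions or class probabilities.
        return self.model.predict_proba(X)
```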
Empty file added examples/models/.keep
Empty file.
18 changes: 18 additions & 0 deletions examples/models/deep_mnist/DeepMnist.py
@@ -0,0 +1,18 @@
import tensorflow as tf

class DeepMnist(object):
    def __init__(self):
        # Human-readable names for the ten digit classes.
        self.class_names = ["class:{}".format(str(i)) for i in range(10)]
        # Restore the graph and weights saved by create_model.py.
        self.sess = tf.Session()
        saver = tf.train.import_meta_graph("model/deep_mnist_model.meta")
        saver.restore(self.sess, tf.train.latest_checkpoint("./model/"))

        graph = tf.get_default_graph()
        self.x = graph.get_tensor_by_name("x:0")
        self.y = graph.get_tensor_by_name("y:0")

    def predict(self, X, feature_names):
        # Run the softmax output for a batch of flattened 28x28 images.
        predictions = self.sess.run(self.y, feed_dict={self.x: X})
        return predictions


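A quick local smoke test (not part of this commit) can exercise the class above once create_model.py has written the checkpoint under model/; the input shape (n, 784) and value range [0, 1] match the contract.json below. A minimal sketch, assuming it is run from examples/models/deep_mnist:

```python
# Minimal smoke test for DeepMnist (assumes the checkpoint written by
# create_model.py exists under ./model/; not part of the original commit).
import numpy as np
from DeepMnist import DeepMnist

model = DeepMnist()
X = np.random.rand(2, 784).astype(np.float32)  # two fake flattened images in [0, 1]
feature_names = ["x"]                          # single repeated feature, as in contract.json
probs = model.predict(X, feature_names)
print(probs.shape)                             # one row of 10 class probabilities per image
```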
22 changes: 22 additions & 0 deletions examples/models/deep_mnist/contract.json
@@ -0,0 +1,22 @@
{
    "features": [
        {
            "name": "x",
            "dtype": "FLOAT",
            "ftype": "continuous",
            "range": [0, 1],
            "repeat": 784
        }
    ],
    "targets": [
        {
            "name": "class",
            "dtype": "FLOAT",
            "ftype": "continuous",
            "range": [0, 1],
            "repeat": 10
        }
    ]
}


36 changes: 36 additions & 0 deletions examples/models/deep_mnist/create_model.py
@@ -0,0 +1,36 @@
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

if __name__ == '__main__':

    # Softmax regression: y = softmax(xW + b), with named tensors so the
    # serving wrapper (DeepMnist.py) can look them up by name.
    x = tf.placeholder(tf.float32, [None, 784], name="x")

    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    y = tf.nn.softmax(tf.matmul(x, W) + b, name="y")

    y_ = tf.placeholder(tf.float32, [None, 10])

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    init = tf.initialize_all_variables()

    sess = tf.Session()
    sess.run(init)

    # Train for 1000 steps on mini-batches of 100 images.
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Report test-set accuracy, then save the checkpoint that DeepMnist.py loads.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))

    saver = tf.train.Saver()
    saver.save(sess, "model/deep_mnist_model")
1 change: 1 addition & 0 deletions examples/models/deep_mnist/requirements.txt
@@ -0,0 +1 @@
tensorflow==1.0.1
35 changes: 35 additions & 0 deletions examples/models/h2o_example/H2oModel.py
@@ -0,0 +1,35 @@
import numpy as np
import pandas as pd
import h2o
from h2o.frame import H2OFrame

MODEL_PATH = '/microservice/glm_fit1'

def _to_frame(X, features_names):
    """Create an H2OFrame object from lists."""
    return H2OFrame(X, column_names=features_names)

def _from_frame(frame):
    """Create a numpy array of probabilities from an H2OFrame object."""
    preds = h2o.as_list(frame, use_pandas=False)
    preds.pop(0)                 # drop the header row
    [r.pop(0) for r in preds]    # drop the predicted-label column, keep the probabilities
    return np.asarray(preds, dtype=np.float)

class H2oModel():

    def __init__(self):

        print('Starting Java virtual machine')
        h2o.init(nthreads=-1, max_mem_size=8)
        print('Machine started!')

        print('Loading model from %s...' % MODEL_PATH)
        self.model = h2o.load_model(MODEL_PATH)
        print('Model loaded')

    def predict(self, X, features_names):
        return _from_frame(self.model.predict(_to_frame(X, features_names)))



54 changes: 54 additions & 0 deletions examples/models/h2o_example/h2o_badloans_deployment.json
@@ -0,0 +1,54 @@
{
    "apiVersion": "machinelearning.seldon.io/v1alpha1",
    "kind": "SeldonDeployment",
    "metadata": {
        "labels": {
            "app": "seldon"
        },
        "name": "seldon-deployment-example"
    },
    "spec": {
        "annotations": {
            "project_name": "Bad loans prediction",
            "deployment_version": "0.1"
        },
        "name": "h2o-bad-loans-deployment",
        "oauth_key": "oauth-key",
        "oauth_secret": "oauth-secret",
        "predictors": [
            {
                "componentSpec": {
                    "spec": {
                        "containers": [
                            {
                                "image": "seldonio/h2omodel:0.1",
                                "imagePullPolicy": "IfNotPresent",
                                "name": "h2o-bad-loans-classifier",
                                "resources": {
                                    "requests": {
                                        "memory": "1Mi"
                                    }
                                }
                            }
                        ],
                        "terminationGracePeriodSeconds": 20
                    }
                },
                "graph": {
                    "children": [],
                    "name": "h2o-bad-loans-classifier",
                    "endpoint": {
                        "type": "REST"
                    },
                    "subtype": "MICROSERVICE",
                    "type": "MODEL"
                },
                "name": "h2o-bad-loans-predictor",
                "replicas": 1,
                "annotations": {
                    "predictor_version": "0.1"
                }
            }
        ]
    }
}
14 changes: 14 additions & 0 deletions examples/models/h2o_example/requirements.txt
@@ -0,0 +1,14 @@
numpy==1.11.2
pandas==0.18.1
grpc==0.3.post19
grpcio==1.1.3
Flask==0.11.1
futures
redis==2.10.5

requests
tabulate
scikit-learn
colorama
http://h2o-release.s3.amazonaws.com/h2o/rel-wheeler/2/Python/h2o-3.16.0.2-py2.py3-none-any.whl

36 changes: 36 additions & 0 deletions examples/models/h2o_example/train_model.py
@@ -0,0 +1,36 @@
"""This script run the code in https://github.com/h2oai/h2o-tutorials/blob/master/h2o-open-tour-2016/chicago/intro-to-h2o.ipynb
and save the trained model in a file glm_fit1 in the same directory of the script.
Data is not split into train and test sets as it is irrelevant for the purpose of this example.
Instead, training is performed on the whole dataset.
"""

# Load the H2O library and start up the H2O cluter locally on your machine
import h2o
# Import H2O GLM:
from h2o.estimators.glm import H2OGeneralizedLinearEstimator

if __name__=="__main__":

# Number of threads, nthreads = -1, means use all cores on your machine
# max_mem_size is the maximum memory (in GB) to allocate to H2O
h2o.init(nthreads = -1, max_mem_size = 8)

#loan_csv = "/Volumes/H2OTOUR/loan.csv" # modify this for your machine
# Alternatively, you can import the data directly from a URL
loan_csv = "https://raw.githubusercontent.com/h2oai/app-consumer-loan/master/data/loan.csv"
data = h2o.import_file(loan_csv) # 163,987 rows x 15 columns
data['bad_loan'] = data['bad_loan'].asfactor() #encode the binary repsonse as a factor
#data['bad_loan'].levels() #optional: after encoding, this shows the two factor levels, '0' and '1'

y = 'bad_loan'
x = list(data.columns)
x.remove(y) #remove the response
x.remove('int_rate') #remove the interest rate column because it's correlated with the outcome

# Initialize the GLM estimator:
# Similar to R's glm() and H2O's R GLM, H2O's GLM has the "family" argument
glm_fit1 = H2OGeneralizedLinearEstimator(family='binomial', model_id='glm_fit1')
glm_fit1.train(x=x, y=y, training_frame=data)

model_path = h2o.save_model(model=glm_fit1, path="", force=True)
14 changes: 14 additions & 0 deletions examples/models/keras_mnist/MnistClassifier.py
@@ -0,0 +1,14 @@
from keras.models import load_model

class MnistClassifier(object):

    def __init__(self):
        # Load the trained Keras model saved as MnistClassifier.h5
        self.model = load_model('MnistClassifier.h5')

    def predict(self, X, features_names):
        assert X.shape[0] >= 1, 'wrong shape 0'
        if X.shape[0] == 784:
            # A single flattened image: reshape to a batch of one 28x28x1 image
            X = X.reshape(1, 28, 28, 1)
        else:
            # A batch of flattened images: reshape each row to 28x28x1
            X = X.reshape(X.shape[0], 28, 28, 1)
        return self.model.predict(X)
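For context (not part of this commit), the reshape logic above accepts either a single flattened image or a batch of flattened rows. A minimal sketch of both call patterns, assuming a trained MnistClassifier.h5 exists in the working directory and using a hypothetical feature name:

```python
# Checks the two input shapes MnistClassifier accepts (assumes a trained
# MnistClassifier.h5 is present; not part of the original commit).
import numpy as np
from MnistClassifier import MnistClassifier

clf = MnistClassifier()
single = np.random.rand(784)    # one flattened 28x28 image -> reshaped to (1, 28, 28, 1)
batch = np.random.rand(5, 784)  # five flattened images -> reshaped to (5, 28, 28, 1)
print(clf.predict(single, ["x"]).shape)  # one row of class probabilities
print(clf.predict(batch, ["x"]).shape)   # five rows of class probabilities
```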
24 changes: 24 additions & 0 deletions examples/models/keras_mnist/contract.json
@@ -0,0 +1,24 @@
{
    "features": [
        {
            "name": "loan_amnt",
            "dtype": "STRING",
            "ftype": "",
            "range": [0, 1],
            "repeat": 784
        }
    ],
    "targets": [
        {
            "name": "proba",
            "dtype": "FLOAT",
            "ftype": "continuous",
            "values": [0, 1],
            "repeat": 2
        }
    ]
}
54 changes: 54 additions & 0 deletions examples/models/keras_mnist/keras_mnist_deployment.json
@@ -0,0 +1,54 @@
{
    "apiVersion": "machinelearning.seldon.io/v1alpha1",
    "kind": "SeldonDeployment",
    "metadata": {
        "labels": {
            "app": "seldon"
        },
        "name": "seldon-deployment-example"
    },
    "spec": {
        "annotations": {
            "project_name": "Digits classification",
            "deployment_version": "0.0"
        },
        "name": "keras-mnist-deployment",
        "oauth_key": "oauth-key",
        "oauth_secret": "oauth-secret",
        "predictors": [
            {
                "componentSpec": {
                    "spec": {
                        "containers": [
                            {
                                "image": "seldonio/mnistclassifier:0.0",
                                "imagePullPolicy": "IfNotPresent",
                                "name": "keras-mnist-classifier",
                                "resources": {
                                    "requests": {
                                        "memory": "1Mi"
                                    }
                                }
                            }
                        ],
                        "terminationGracePeriodSeconds": 20
                    }
                },
                "graph": {
                    "children": [],
                    "name": "keras-mnist-classifier",
                    "endpoint": {
                        "type": "REST"
                    },
                    "subtype": "MICROSERVICE",
                    "type": "MODEL"
                },
                "name": "keras-mnist-predictor",
                "replicas": 1,
                "annotations": {
                    "predictor_version": "0.0"
                }
            }
        ]
    }
}
5 changes: 5 additions & 0 deletions examples/models/keras_mnist/requirements.txt
@@ -0,0 +1,5 @@
scikit-learn==0.17.1
scipy==0.18.1
keras==2.0.6
tensorflow==1.0.1
h5py