Merge pull request #63 from gsunner/release-0.1
updates into Release 0.1
gsunner authored Jan 23, 2018
2 parents a6b7e7b + 4675c6e commit d6d918f
Showing 78 changed files with 1,928 additions and 281 deletions.
52 changes: 40 additions & 12 deletions .travis.yml
@@ -1,18 +1,46 @@
sudo: required

services:
- docker

- docker
branches:
only:
- master
- master
- release-0.1
- travis-update
jobs:
include:
- stage: build-components
script:
- echo Building api-frontend
- |
set -x && \
docker run --rm -it \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ${HOME}/.m2:/root/.m2 \
-v $(pwd):/work \
seldonio/core-builder:0.2 bash -c 'cd api-frontend && make -f Makefile.ci build'
- docker images | grep 'seldonio/apife'
- script:
- echo Building engine
- |
set -x && \
docker run --rm -it \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ${HOME}/.m2:/root/.m2 \
-v $(pwd):/work \
seldonio/core-builder:0.2 bash -c 'cd engine && make -f Makefile.ci build'
- docker images | grep 'seldonio/engine'
- script:
- echo Building cluster-manager
- |
set -x && \
docker run --rm -it \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ${HOME}/.m2:/root/.m2 \
-v $(pwd):/work \
seldonio/core-builder:0.2 bash -c 'cd cluster-manager && make -f Makefile.ci build'
- docker images | grep 'seldonio/cluster-manager'

script:
- echo Building api-frontend
- |
docker run --rm -it \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ${HOME}/.m2:/root/.m2 \
-v $(pwd):/work \
seldonio/core-builder:0.1 bash -c 'cd api-frontend && make -f Makefile.ci build'
notifications:
slack:
secure: HE0+D/nkZxEes5EStfbSG5x7/+DFjH06b21YysiyR7eApi0Oj6szCT0OLYyQbV3Rs8Pz9oJZIJvG/cBiQGHGVKHPna3o4Ny7y66WBJrthUGc53tWDf6TGbqIBmUrEOQlxzzZ03oVYdwsS0dCbRA8IZCgVKEBxii2AQ1IQyetGhMY0YxjbqCKFlgwaUvplugXT7ZkGpxr8icygeEDJnii7g+TqKGCVpdPnBJPjd6ey5O/biADEBGsrYMiNQ+hshjwmf1sViNFdclnUpPNLrD8WES84DHvE5h8sgRnVk+fgNBpVN5ItQFNyK4BIWxTkRY5KJV89otrYeyodPPPd/dEm0LfNqw0B/wd5dnU1uxUAtFZguZ4mnZohWp3vhCx/SWn1qWS3ojr7sIhKu7GbCgj2uzBO2n7DiE8q4hlDI8yfh5ny+2pLQNaJlnZ8bwHRksv1tz7BPpHUHyMBH9gy8SwXd0oRpgotDSQN6QsLVLBLsM70dGSLqQJ9M3rAMCyv8lxgOdn59GeauEAv48LBfUBijn/0wC/GiulG34XA6FopLQ0RPdwjy7mV+U2urTdw5EybtwREvhKILaVEI9ul+GHXHhiZFMmAXd7ZGzrm9w3XZI2mMAecIwvD2WboUwvswb3Gpdxs9a/krD/XU/DtVnS7TAgX/JPMJO+eDZmPjqENn4=

14 changes: 7 additions & 7 deletions api-frontend/src/main/java/io/seldon/apife/pb/QuantityUtils.java
@@ -54,13 +54,13 @@ public static class QuantityParser implements TypeParser {
@Override
public void merge(JsonElement json, Builder builder) throws InvalidProtocolBufferException {
if (json instanceof JsonPrimitive) {
JsonPrimitive primitive = (JsonPrimitive) json;
if (primitive.isString())
{
Quantity.Builder b = Quantity.newBuilder().setString(primitive.getAsString());
builder.mergeFrom(b.build().toByteArray());
}
else throw new InvalidProtocolBufferException("Can't decode io.kubernetes.client.proto.resource.Quantity from "+json.toString());
JsonPrimitive primitive = (JsonPrimitive) json;
if (primitive.isString())
{
Quantity.Builder b = Quantity.newBuilder().setString(primitive.getAsString());
builder.mergeFrom(b.build().toByteArray());
}
else throw new InvalidProtocolBufferException("Can't decode io.kubernetes.client.proto.resource.Quantity from "+json.toString());
}
}

@@ -340,12 +340,21 @@ private void checkPredictiveUnitsMicroservices(PredictiveUnit pu,PredictorSpec p
checkPredictiveUnitsMicroservices(child,p);
}

private void checkTypeAndSubType(PredictiveUnit pu) throws SeldonDeploymentException
/*
* If implementation is specified, ignore the rest
* if not, implementation defaults to UNKNOWN_IMPLEMENTATION and
* if type is specified ignore the rest
* if not, type defaults to UNKNOWN_TYPE and
* if methods is not specified, raise an error (we are in the case when none of implementation, type, methods has been specified)
*/
private void checkTypeMethodAndImpl(PredictiveUnit pu) throws SeldonDeploymentException
{
if (!pu.hasType())
throw new SeldonDeploymentException(String.format("Predictive unit %s has no type",pu.getName()));
if ((!pu.hasImplementation() || pu.getImplementation().getNumber() == PredictiveUnitImplementation.UNKNOWN_IMPLEMENTATION_VALUE) &&
(!pu.hasType() || pu.getType().getNumber() == PredictiveUnitType.UNKNOWN_TYPE_VALUE) &&
pu.getMethodsCount() == 0)
throw new SeldonDeploymentException(String.format("Predictive unit %s has no methods specified",pu.getName()));
for(PredictiveUnit child : pu.getChildrenList())
checkTypeAndSubType(child);
checkTypeMethodAndImpl(child);
}

@Override
@@ -354,7 +363,7 @@ public void validate(SeldonDeployment mlDep) throws SeldonDeploymentException {
for(PredictorSpec p : mlDep.getSpec().getPredictorsList())
{
checkPredictiveUnitsMicroservices(p.getGraph(),p);
checkTypeAndSubType(p.getGraph());
checkTypeMethodAndImpl(p.getGraph());
}

}
@@ -119,8 +119,16 @@ public int watchSeldonMLDeployments(int resourceVersion,int resourceVersionProce
{
if (resourceVersionNew > maxResourceVersion)
maxResourceVersion = resourceVersionNew;

this.processWatch(SeldonDeploymentUtils.jsonToSeldonDeployment(jsonInString), item.type);

try
{
this.processWatch(SeldonDeploymentUtils.jsonToSeldonDeployment(jsonInString), item.type);
}
catch (InvalidProtocolBufferException e)
{
//TODO : update status of seldondeployment to show error
logger.warn("Failed to parse SeldonDeployment " + jsonInString, e);
}
}
}
}
@@ -57,8 +57,8 @@ public void merge(JsonElement json, Builder builder) throws InvalidProtocolBuffe
JsonPrimitive primitive = (JsonPrimitive) json;
if (primitive.isString())
{
Quantity.Builder b = Quantity.newBuilder().setString(primitive.getAsString());
builder.mergeFrom(b.build().toByteArray());
Quantity.Builder b = Quantity.newBuilder().setString(primitive.getAsString());
builder.mergeFrom(b.build().toByteArray());
}
else throw new InvalidProtocolBufferException("Can't decode io.kubernetes.client.proto.resource.Quantity from "+json.toString());
}
@@ -45,10 +45,10 @@ public void testBadGraph() throws IOException, SeldonDeploymentException
}

@Test(expected = SeldonDeploymentException.class)
public void testNoType() throws IOException, SeldonDeploymentException
public void testNoMethod() throws IOException, SeldonDeploymentException
{
SeldonDeploymentOperator op = new SeldonDeploymentOperatorImpl(getClusterManagerprops());
String jsonStr = readFile("src/test/resources/model_invalid_no_type.json",StandardCharsets.UTF_8);
String jsonStr = readFile("src/test/resources/model_invalid_no_method.json",StandardCharsets.UTF_8);
SeldonDeployment mlDep = SeldonDeploymentUtils.jsonToSeldonDeployment(jsonStr);
SeldonDeployment mlDep2 = op.defaulting(mlDep);
op.validate(mlDep2);
@@ -40,7 +40,6 @@
"endpoint": {
"type" : "REST"
},
"subtype": "MICROSERVICE",
"type": "MODEL"
},
"name": "fx-market-predictor",
@@ -39,8 +39,7 @@
"name": "mean-classifier",
"endpoint": {
"type" : "REST"
},
"subtype": "MICROSERVICE"
}
},
"name": "fx-market-predictor",
"replicas": 1,
7 changes: 7 additions & 0 deletions core-builder/Dockerfile
@@ -25,6 +25,13 @@ RUN \
update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java && \
apt-get remove -y --auto-remove && apt-get clean -y && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# dependencies for release script
RUN \
apt-get update -y && \
apt-get install -y python-pip && \
pip install pyyaml && \
apt-get remove -y --auto-remove && apt-get clean -y && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

WORKDIR /work

# Define default command.
2 changes: 1 addition & 1 deletion core-builder/Makefile
@@ -1,5 +1,5 @@
DOCKER_IMAGE_NAME=seldonio/core-builder
DOCKER_IMAGE_VERSION=0.1
DOCKER_IMAGE_VERSION=0.2

build_docker_image:
docker build --force-rm=true -t $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_VERSION) .
Binary file added docs/cicd.png
90 changes: 90 additions & 0 deletions docs/crd/readme.md
@@ -0,0 +1,90 @@
# Custom Resource Definitions

## Seldon Deployment

The runtime inference graph for a machine learning deployment is described as a SeldonDeployment Kubernetes resource. The structure of this manifest is defined as a [protocol buffer](../reference/seldon-deployment.md). This doc describes the SeldonDeployment resource in general and explains how to create one for your runtime inference graph.

## Creating your resource definition

The full specification can be found [here](../reference/seldon-deployment.md). Below we highlight various parts and describe their intent.

The core goal is to describe your runtime inference graph(s) and deploy them with appropriate resources and scale. Illustrative example graphs are shown below:

![graph](../reference/graph.png)

The top-level SeldonDeployment has standard Kubernetes metadata and consists of a spec, which is defined by the user, and a status, which is set by the system to represent the current state of the SeldonDeployment.

```proto
message SeldonDeployment {
required string apiVersion = 1;
required string kind = 2;
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3;
required DeploymentSpec spec = 4;
optional DeploymentStatus status = 5;
}
```

The core deployment spec consists of a set of ```predictors```. Each predictor represents a separate runtime serving graph. The set of predictors serves requests as controlled by a load balancer; at present the share of traffic is in proportion to the number of replicas each predictor has. A typical use case for two predictors is a main deployment and a canary, with the main deployment having 9 replicas and the canary 1, so the canary receives 10% of the overall traffic. Each predictor is a separately managed deployment within Kubernetes, so it is safe to add and remove predictors without affecting existing ones. A sketch of such a main/canary configuration is given after the PredictorSpec message below.

To allow an OAuth API to be provisioned you should specify an OAuth key and secret.

```proto
message DeploymentSpec {
optional string name = 1; // A unique name within the namespace.
repeated PredictorSpec predictors = 2; // A list of 1 or more predictors describing runtime machine learning deployment graphs.
optional string oauth_key = 6; // The oauth key for external users to use this deployment via an API.
optional string oauth_secret = 7; // The oauth secret for external users to use this deployment via an API.
map<string,string> annotations = 8; // Arbitrary annotations.
}
```

For each predictor you should at a minimum specify:

* A unique name
* A PredictiveUnit graph that describes the tree of components to deploy.
* A componentSpec which describes the set of images for the parts of your container graph that will be instantiated as microservice containers. These containers will have been wrapped to work within the [internal API](../reference/internal-api.md). This component spec is a standard [PodTemplateSpec](https://kubernetes.io/docs/api-reference/extensions/v1beta1/definitions/#_v1_podtemplatespec).
* If you leave the ports empty for each container they will be added automatically and matched to the ports in the graph specification. If you specify the ports manually they should match the ports specified for the matching components in the graph specification.
* The number of replicas of this predictor to deploy.

```proto
message PredictorSpec {
required string name = 1; // A unique name not used by any other predictor in the deployment.
required PredictiveUnit graph = 2; // A graph describing how the predictive units are connected together.
required k8s.io.api.core.v1.PodTemplateSpec componentSpec = 3; // A description of the set of containers used by the graph. One for each microservice defined in the graph.
optional int32 replicas = 4; // The number of replicas of the predictor to create.
map<string,string> annotations = 5; // Arbitrary annotations.
}
```
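
To make the main/canary pattern described above concrete, below is a minimal sketch of the predictors section in YAML form. This is illustrative only: the predictor names, container image and graph node are hypothetical, and fields not discussed here are omitted.

```yaml
predictors:
- name: main                        # receives ~90% of traffic (9 of 10 total replicas)
  replicas: 9
  componentSpec:                    # standard PodTemplateSpec
    spec:
      containers:
      - name: classifier            # must match the graph node name below
        image: myorg/classifier:1.0 # hypothetical wrapped model image
  graph:
    name: classifier
    type: MODEL
    endpoint:
      type: REST
- name: canary                      # receives ~10% of traffic (1 of 10 total replicas)
  replicas: 1
  componentSpec:
    spec:
      containers:
      - name: classifier
        image: myorg/classifier:1.1 # hypothetical candidate image
  graph:
    name: classifier
    type: MODEL
    endpoint:
      type: REST
```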

The predictive unit graph is a tree. Each node is of a particular type. If the implementation is not specified then a microservice is assumed and you must define a matching named container within the componentSpec above. Each type of PredictiveUnit has a standard set of methods it is expected to manage, see [here](../reference/seldon-deployment.md).

For each node in the graph:

* A unique name. If the node describes a microservice then it must match a named container within the componentSpec.
* The children nodes.
* The type of the predictive unit: MODEL, ROUTER, COMBINER, TRANSFORMER or OUTPUT_TRANSFORMER.
* The implementation. This can be left blank if the node will be a microservice, as this is the default; otherwise choose from the appropriate implementations provided internally.
* Methods. This can be left blank if you wish to follow the standard methods for your PredictiveUnit type: see [here](../reference/seldon-deployment.md).
* Endpoint. If this node is a microservice, you should at a minimum specify whether the PredictiveUnit will use REST or gRPC. Ports will be defined automatically if not specified.
* Parameters. Specify any parameters you wish to pass to the PredictiveUnit. These will be passed in an environment variable called PREDICTIVE_UNIT_PARAMETERS as a JSON list. (A minimal example node is sketched after the message definition below.)

```proto
message PredictiveUnit {
required string name = 1; //must match container name of component if no implementation
repeated PredictiveUnit children = 2; // The child predictive units.
optional PredictiveUnitType type = 3;
optional PredictiveUnitImplementation implementation = 4;
repeated PredictiveUnitMethod methods = 5;
optional Endpoint endpoint = 6; // The exposed endpoint for this unit.
repeated Parameter parameters = 7; // Customer parameter to pass to the unit.
}
```
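
As an example, a single microservice MODEL node that follows the list above might look like the sketch below. The node name matches one of the test resources in this change set, but the parameter is hypothetical and the field names assume the Parameter message provides name, value and type.

```yaml
graph:
  name: mean-classifier        # must match a container name in the componentSpec
  type: MODEL                  # implementation left unset, so a microservice is assumed
  endpoint:
    type: REST                 # port is assigned automatically if not specified
  parameters:
  - name: threshold            # hypothetical parameter, passed via PREDICTIVE_UNIT_PARAMETERS
    value: "0.5"
    type: FLOAT
  children: []
```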
21 changes: 21 additions & 0 deletions docs/deploying.md
@@ -0,0 +1,21 @@
# Deployment

You can manage your deployments via the standard Kubernetes CLI, kubectl, e.g.

```bash
kubectl apply -f my_ml_deployment.yaml
```
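
For orientation, a minimal my_ml_deployment.yaml might look like the sketch below. This is an assumption-laden illustration rather than a canonical manifest: the apiVersion, names, OAuth values and container image are placeholders and should be adjusted to match your installation and the proto definitions described in the CRD docs.

```yaml
apiVersion: machinelearning.seldon.io/v1alpha1   # assumed group/version; check the CRD installed in your cluster
kind: SeldonDeployment
metadata:
  name: example-deployment
spec:
  name: example-deployment
  oauth_key: example-oauth-key        # placeholder credentials for the external API
  oauth_secret: example-oauth-secret
  predictors:
  - name: main
    replicas: 1
    componentSpec:
      spec:
        containers:
        - name: classifier
          image: myorg/classifier:1.0 # hypothetical wrapped model image
    graph:
      name: classifier                # must match the container name above
      type: MODEL
      endpoint:
        type: REST
```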

# Production Integration
For production settings you will want to incorporate your ML infrastructure and ML code into a continuous integration and deployment pipeline. One realization of such a pipeline is shown below:

![Production Pipelines](./cicd.png)

The pipeline consists of:

* A model code repo (in Git) where training and runtime ML components are stored
* A continuous integration pipeline that trains and tests the model and wraps it (using Seldon's built-in wrappers or custom wrappers)
* An image repository where the final runtime inference model image is stored.
* A Git repo for the infrastructure, storing the ML deployment graph described as a SeldonDeployment
* A tool that either monitors the infrastructure repo and applies changes to the production Kubernetes cluster, or allows DevOps to push updated infrastructure manually.

12 changes: 12 additions & 0 deletions docs/developer/readme.md
@@ -0,0 +1,12 @@
# Developer

We welcome new contributors. Please read the [code of conduct](../../CODE_OF_CONDUCT.md) and [contributing guidelines](../../CONTRIBUTING.md).

## Release process

To be completed.

## Tools we use

- [github-changelog-generator](https://github.com/skywinder/github-changelog-generator)
- [Grip - Local Markdown viewer](https://github.com/joeyespo/grip)