Components release 2ed60100d1db9efeb38c6c358f90b21c144179be #694

Merged
merged 1 commit on Jan 17, 2019
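This release PR pins every component and sample container image to the new commit tag 2ed60100d1db9efeb38c6c358f90b21c144179be, replacing the previous tag d3c4add0a95e930c70a330466d0923827784eb9a in each file listed below. As a rough sketch only (this script is an assumption for illustration, not the project's actual release tooling), a bulk tag update of this kind could be done with a few lines of Python:

# Hypothetical helper, not part of this repository: rewrite the pinned
# image tag in Python, YAML, and notebook files under the current directory.
import pathlib

OLD_SHA = 'd3c4add0a95e930c70a330466d0923827784eb9a'
NEW_SHA = '2ed60100d1db9efeb38c6c358f90b21c144179be'

for path in pathlib.Path('.').rglob('*'):
    if not path.is_file() or path.suffix not in {'.py', '.yaml', '.ipynb'}:
        continue
    text = path.read_text()
    if OLD_SHA in text:
        path.write_text(text.replace(OLD_SHA, NEW_SHA))
        print(f'updated {path}')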
2 changes: 1 addition & 1 deletion components/kubeflow/launcher/kubeflow_tfjob_launcher_op.py
@@ -17,7 +17,7 @@
def kubeflow_tfjob_launcher_op(container_image, command, number_of_workers: int, number_of_parameter_servers: int, tfjob_timeout_minutes: int, output_dir=None, step_name='TFJob-launcher'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--workers', number_of_workers,
'--pss', number_of_parameter_servers,
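For context, the launcher op above is meant to be used as a step inside a KFP pipeline. The sketch below is illustrative only and is not part of this PR: the pipeline wrapper, training image, command, and output path are hypothetical placeholders, and only the kubeflow_tfjob_launcher_op signature is taken from the file shown above.

# Minimal usage sketch (assumed import path; the launcher module must be importable).
import kfp.dsl as dsl
from kubeflow_tfjob_launcher_op import kubeflow_tfjob_launcher_op

@dsl.pipeline(name='tfjob-launcher-demo', description='Launch a TFJob from a pipeline step')
def tfjob_demo_pipeline():
    kubeflow_tfjob_launcher_op(
        container_image='gcr.io/my-project/my-trainer:latest',  # hypothetical trainer image
        command=['python', '-m', 'trainer.task'],               # hypothetical entrypoint
        number_of_workers=2,
        number_of_parameter_servers=1,
        tfjob_timeout_minutes=60,
        output_dir='gs://my-bucket/tfjob-output')               # hypothetical GCS path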
6 changes: 3 additions & 3 deletions components/kubeflow/launcher/src/train.template.yaml
@@ -26,7 +26,7 @@ spec:
spec:
containers:
- name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d3c4add0a95e930c70a330466d0923827784eb9a
+ image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:2ed60100d1db9efeb38c6c358f90b21c144179be
command:
- python
- -m
@@ -49,7 +49,7 @@
spec:
containers:
- name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d3c4add0a95e930c70a330466d0923827784eb9a
+ image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:2ed60100d1db9efeb38c6c358f90b21c144179be
command:
- python
- -m
@@ -72,7 +72,7 @@
spec:
containers:
- name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d3c4add0a95e930c70a330466d0923827784eb9a
+ image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:2ed60100d1db9efeb38c6c358f90b21c144179be
command:
- python
- -m
10 changes: 5 additions & 5 deletions samples/kubeflow-tf/kubeflow-training-classification.py
@@ -21,7 +21,7 @@
def dataflow_tf_transform_op(train_data: 'GcsUri', evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', project: 'GcpProject', preprocess_mode, preprocess_module: 'GcsUri[text/code/python]', transform_output: 'GcsUri[Directory]', step_name='preprocess'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--train', train_data,
'--eval', evaluation_data,
@@ -38,7 +38,7 @@ def dataflow_tf_transform_op(train_data: 'GcsUri', evaluation_data: 'GcsUri', sc
def kubeflow_tf_training_op(transformed_data_dir, schema: 'GcsUri[text/json]', learning_rate: float, hidden_layer_size: int, steps: int, target, preprocess_module: 'GcsUri[text/code/python]', training_output: 'GcsUri[Directory]', step_name='training', use_gpu=False):
kubeflow_tf_training_op = dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--transformed-data-dir', transformed_data_dir,
'--schema', schema,
@@ -52,15 +52,15 @@ def kubeflow_tf_training_op(transformed_data_dir, schema: 'GcsUri[text/json]', l
file_outputs = {'train': '/output.txt'}
)
if use_gpu:
- kubeflow_tf_training_op.image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:d3c4add0a95e930c70a330466d0923827784eb9a'
+ kubeflow_tf_training_op.image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:2ed60100d1db9efeb38c6c358f90b21c144179be'
kubeflow_tf_training_op.set_gpu_limit(1)

return kubeflow_tf_training_op

def dataflow_tf_predict_op(evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', target: str, model: 'TensorFlow model', predict_mode, project: 'GcpProject', prediction_output: 'GcsUri', step_name='prediction'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--data', evaluation_data,
'--schema', schema,
@@ -76,7 +76,7 @@ def dataflow_tf_predict_op(evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]
def confusion_matrix_op(predictions, output, step_name='confusionmatrix'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--predictions', predictions,
'--output', output,
@@ -43,13 +43,13 @@
"EVAL_DATA = 'gs://ml-pipeline-playground/tfx/taxi-cab-classification/eval.csv'\n",
"HIDDEN_LAYER_SIZE = '1500'\n",
"STEPS = 3000\n",
"DATAFLOW_TFDV_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:d3c4add0a95e930c70a330466d0923827784eb9a'\n",
"DATAFLOW_TFT_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:d3c4add0a95e930c70a330466d0923827784eb9a'\n",
"DATAFLOW_TFMA_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:d3c4add0a95e930c70a330466d0923827784eb9a'\n",
"DATAFLOW_TF_PREDICT_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:d3c4add0a95e930c70a330466d0923827784eb9a'\n",
"KUBEFLOW_TF_TRAINER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d3c4add0a95e930c70a330466d0923827784eb9a'\n",
"KUBEFLOW_TF_TRAINER_GPU_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:d3c4add0a95e930c70a330466d0923827784eb9a'\n",
"KUBEFLOW_DEPLOYER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:d3c4add0a95e930c70a330466d0923827784eb9a'\n",
"DATAFLOW_TFDV_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:2ed60100d1db9efeb38c6c358f90b21c144179be'\n",
"DATAFLOW_TFT_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:2ed60100d1db9efeb38c6c358f90b21c144179be'\n",
"DATAFLOW_TFMA_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:2ed60100d1db9efeb38c6c358f90b21c144179be'\n",
"DATAFLOW_TF_PREDICT_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:2ed60100d1db9efeb38c6c358f90b21c144179be'\n",
"KUBEFLOW_TF_TRAINER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:2ed60100d1db9efeb38c6c358f90b21c144179be'\n",
"KUBEFLOW_TF_TRAINER_GPU_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:2ed60100d1db9efeb38c6c358f90b21c144179be'\n",
"KUBEFLOW_DEPLOYER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:2ed60100d1db9efeb38c6c358f90b21c144179be'\n",
"DEPLOYER_MODEL = 'notebook_tfx_taxi'\n",
"DEPLOYER_VERSION_DEV = 'dev'\n",
"DEPLOYER_VERSION_PROD = 'prod'"
6 changes: 3 additions & 3 deletions samples/resnet-cmle/resnet-train-pipeline.py
@@ -22,7 +22,7 @@ def resnet_preprocess_op(project_id: 'GcpProject', output: 'GcsUri', train_csv:
validation_csv: 'GcsUri[text/csv]', labels, step_name='preprocess'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/resnet-preprocess:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/resnet-preprocess:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--project_id', project_id,
'--output', output,
@@ -38,7 +38,7 @@ def resnet_train_op(data_dir, output: 'GcsUri', region: 'GcpRegion', depth: int,
num_eval_images: int, num_label_classes: int, tf_version, step_name='train'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/resnet-train:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/resnet-train:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--data_dir', data_dir,
'--output', output,
@@ -60,7 +60,7 @@ def resnet_deploy_op(model_dir, model, version, project_id: 'GcpProject', region
tf_version, step_name='deploy'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/resnet-deploy:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/resnet-deploy:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--model', model,
'--version', version,
16 changes: 8 additions & 8 deletions samples/tfx/taxi-cab-classification-pipeline.py
@@ -21,7 +21,7 @@
def dataflow_tf_data_validation_op(inference_data: 'GcsUri', validation_data: 'GcsUri', column_names: 'GcsUri[text/json]', key_columns, project: 'GcpProject', mode, validation_output: 'GcsUri[Directory]', step_name='validation'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--csv-data-for-inference', inference_data,
'--csv-data-to-validate', validation_data,
@@ -40,7 +40,7 @@ def dataflow_tf_data_validation_op(inference_data: 'GcsUri', validation_data: 'G
def dataflow_tf_transform_op(train_data: 'GcsUri', evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', project: 'GcpProject', preprocess_mode, preprocess_module: 'GcsUri[text/code/python]', transform_output: 'GcsUri[Directory]', step_name='preprocess'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--train', train_data,
'--eval', evaluation_data,
@@ -57,7 +57,7 @@ def dataflow_tf_transform_op(train_data: 'GcsUri', evaluation_data: 'GcsUri', sc
def tf_train_op(transformed_data_dir, schema: 'GcsUri[text/json]', learning_rate: float, hidden_layer_size: int, steps: int, target: str, preprocess_module: 'GcsUri[text/code/python]', training_output: 'GcsUri[Directory]', step_name='training'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--transformed-data-dir', transformed_data_dir,
'--schema', schema,
@@ -74,7 +74,7 @@ def tf_train_op(transformed_data_dir, schema: 'GcsUri[text/json]', learning_rate
def dataflow_tf_model_analyze_op(model: 'TensorFlow model', evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', project: 'GcpProject', analyze_mode, analyze_slice_column, analysis_output: 'GcsUri', step_name='analysis'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--model', model,
'--eval', evaluation_data,
@@ -91,7 +91,7 @@ def dataflow_tf_model_analyze_op(model: 'TensorFlow model', evaluation_data: 'Gc
def dataflow_tf_predict_op(evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]', target: str, model: 'TensorFlow model', predict_mode, project: 'GcpProject', prediction_output: 'GcsUri', step_name='prediction'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--data', evaluation_data,
'--schema', schema,
@@ -108,7 +108,7 @@ def dataflow_tf_predict_op(evaluation_data: 'GcsUri', schema: 'GcsUri[text/json]
def confusion_matrix_op(predictions: 'GcsUri', output: 'GcsUri', step_name='confusion_matrix'):
return dsl.ContainerOp(
name=step_name,
- image='gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--output', '%s/{{workflow.name}}/confusionmatrix' % output,
'--predictions', predictions,
@@ -119,7 +119,7 @@ def confusion_matrix_op(predictions: 'GcsUri', output: 'GcsUri', step_name='conf
def roc_op(predictions: 'GcsUri', output: 'GcsUri', step_name='roc'):
return dsl.ContainerOp(
name=step_name,
- image='gcr.io/ml-pipeline/ml-pipeline-local-roc:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-local-roc:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--output', '%s/{{workflow.name}}/roc' % output,
'--predictions', predictions,
@@ -130,7 +130,7 @@ def roc_op(predictions: 'GcsUri', output: 'GcsUri', step_name='roc'):
def kubeflow_deploy_op(model: 'TensorFlow model', tf_server_name, step_name='deploy'):
return dsl.ContainerOp(
name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments = [
'--model-path', model,
'--server-name', tf_server_name
16 changes: 8 additions & 8 deletions samples/xgboost-spark/xgboost-training-cm.py
@@ -26,7 +26,7 @@ class CreateClusterOp(dsl.ContainerOp):
def __init__(self, name, project, region, staging):
super(CreateClusterOp, self).__init__(
name=name,
- image='gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--project', project,
'--region', region,
@@ -41,7 +41,7 @@ class DeleteClusterOp(dsl.ContainerOp):
def __init__(self, name, project, region):
super(DeleteClusterOp, self).__init__(
name=name,
- image='gcr.io/ml-pipeline/ml-pipeline-dataproc-delete-cluster:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-dataproc-delete-cluster:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--project', project,
'--region', region,
@@ -55,7 +55,7 @@ class AnalyzeOp(dsl.ContainerOp):
def __init__(self, name, project, region, cluster_name, schema, train_data, output):
super(AnalyzeOp, self).__init__(
name=name,
- image='gcr.io/ml-pipeline/ml-pipeline-dataproc-analyze:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-dataproc-analyze:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--project', project,
'--region', region,
@@ -73,7 +73,7 @@ def __init__(self, name, project, region, cluster_name, train_data, eval_data,
target, analysis, output):
super(TransformOp, self).__init__(
name=name,
- image='gcr.io/ml-pipeline/ml-pipeline-dataproc-transform:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-dataproc-transform:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--project', project,
'--region', region,
@@ -98,7 +98,7 @@ def __init__(self, name, project, region, cluster_name, train_data, eval_data,

super(TrainerOp, self).__init__(
name=name,
- image='gcr.io/ml-pipeline/ml-pipeline-dataproc-train:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-dataproc-train:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--project', project,
'--region', region,
@@ -121,7 +121,7 @@ class PredictOp(dsl.ContainerOp):
def __init__(self, name, project, region, cluster_name, data, model, target, analysis, output):
super(PredictOp, self).__init__(
name=name,
- image='gcr.io/ml-pipeline/ml-pipeline-dataproc-predict:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-dataproc-predict:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--project', project,
'--region', region,
@@ -141,7 +141,7 @@ class ConfusionMatrixOp(dsl.ContainerOp):
def __init__(self, name, predictions, output):
super(ConfusionMatrixOp, self).__init__(
name=name,
- image='gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--output', output,
'--predictions', predictions
@@ -153,7 +153,7 @@ class RocOp(dsl.ContainerOp):
def __init__(self, name, predictions, trueclass, output):
super(RocOp, self).__init__(
name=name,
- image='gcr.io/ml-pipeline/ml-pipeline-local-roc:d3c4add0a95e930c70a330466d0923827784eb9a',
+ image='gcr.io/ml-pipeline/ml-pipeline-local-roc:2ed60100d1db9efeb38c6c358f90b21c144179be',
arguments=[
'--output', output,
'--predictions', predictions,