jobUpdateTasks) {
}
}
}
+ }
+ /**
+ * Drain the given job. If this is successful, the job will start the draining process. When the
+ * draining process is complete, the job will be cleaned up and removed.
+ *
+ * Batch jobs will be cancelled, as draining these jobs is not supported by Beam.
+ *
+ * @param id feast-internal id of a job
+ */
+ public void abortJob(String id) {
+ Optional<JobInfo> jobOptional = jobInfoRepository.findById(id);
+ if (!jobOptional.isPresent()) {
+ throw new RetrievalException(String.format("Unable to retrieve job with id %s", id));
+ }
+ JobInfo job = jobOptional.get();
+ if (JobStatus.getTerminalState().contains(job.getStatus())) {
+ throw new IllegalStateException("Unable to stop job already in terminal state");
+ }
ready.removeAll(pending);
ready.forEach(
fs -> {
diff --git a/core/src/main/java/feast/core/service/JobStatusService.java b/core/src/main/java/feast/core/service/JobStatusService.java
new file mode 100644
index 0000000000..26d81647fa
--- /dev/null
+++ b/core/src/main/java/feast/core/service/JobStatusService.java
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ * Copyright 2018-2019 The Feast Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package feast.core.service;
+
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.stereotype.Service;
+
+@Slf4j
+@Service
+public class JobStatusService {
+ //
+ // private JobInfoRepository jobInfoRepository;
+ // private MetricsRepository metricsRepository;
+ //
+ // @Autowired
+ // public JobStatusService(
+ // JobInfoRepository jobInfoRepository,
+ // MetricsRepository metricsRepository) {
+ // this.jobInfoRepository = jobInfoRepository;
+ // this.metricsRepository = metricsRepository;
+ // }
+ //
+ // /**
+ // * Lists all jobs registered to the db, sorted by provided orderBy
+ // *
+ // * @param orderBy list order
+ // * @return list of JobDetails
+ // */
+ // @Transactional
+ // public List listJobs(Sort orderBy) {
+ // List jobs = jobInfoRepository.findAll(orderBy);
+ // return jobs.stream().map(JobInfo::getJobDetail).collect(Collectors.toList());
+ // }
+ //
+ // /**
+ // * Lists all jobs registered to the db, sorted chronologically by creation time
+ // *
+ // * @return list of JobDetails
+ // */
+ // @Transactional
+ // public List listJobs() {
+ // return listJobs(Sort.by(Sort.Direction.ASC, "created"));
+ // }
+ //
+ // /**
+ // * Gets information regarding a single job.
+ // *
+ // * @param id feast-internal job id
+ // * @return JobDetail for that job
+ // */
+ // @Transactional
+ // public JobDetail getJob(String id) {
+ // Optional job = jobInfoRepository.findById(id);
+ // if (!job.isPresent()) {
+ // throw new RetrievalException(String.format("Unable to retrieve job with id %s",
+ // id));
+ // }
+ // JobDetail.Builder jobDetailBuilder = job.get().getJobDetail().toBuilder();
+ // List metrics = metricsRepository.findByJobInfo_Id(id);
+ // for (Metrics metric : metrics) {
+ // jobDetailBuilder.putMetrics(metric.getName(), metric.getValue());
+ // }
+ // return jobDetailBuilder.build();
+ // }
+
+}
diff --git a/core/src/main/java/feast/core/util/TypeConversion.java b/core/src/main/java/feast/core/util/TypeConversion.java
index e01a551135..a7dd2b0d2a 100644
--- a/core/src/main/java/feast/core/util/TypeConversion.java
+++ b/core/src/main/java/feast/core/util/TypeConversion.java
@@ -85,7 +85,7 @@ public static String convertMapToJsonString(Map<String, String> map) {
public static String[] convertMapToArgs(Map<String, String> map) {
List<String> args = new ArrayList<>();
for (Entry<String, String> arg : map.entrySet()) {
- args.add(Strings.lenientFormat("--%s=%s", arg.getKey(), arg.getValue()));
+ args.add(String.format("--%s=%s", arg.getKey(), arg.getValue()));
}
return args.toArray(new String[] {});
}
diff --git a/core/src/test/java/feast/core/validators/MatchersTest.java b/core/src/test/java/feast/core/validators/MatchersTest.java
index 774e58c7a8..13c9e006a4 100644
--- a/core/src/test/java/feast/core/validators/MatchersTest.java
+++ b/core/src/test/java/feast/core/validators/MatchersTest.java
@@ -43,7 +43,7 @@ public void checkUpperSnakeCaseShouldPassForLegitUpperSnakeCaseWithNumbers() {
public void checkUpperSnakeCaseShouldThrowIllegalArgumentExceptionWithFieldForInvalidString() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage(
- Strings.lenientFormat(
+ String.format(
"invalid value for field %s: %s",
"someField",
"argument must be in upper snake case, and cannot include any special characters."));
@@ -61,7 +61,7 @@ public void checkLowerSnakeCaseShouldPassForLegitLowerSnakeCase() {
public void checkLowerSnakeCaseShouldThrowIllegalArgumentExceptionWithFieldForInvalidString() {
exception.expect(IllegalArgumentException.class);
exception.expectMessage(
- Strings.lenientFormat(
+ String.format(
"invalid value for field %s: %s",
"someField",
"argument must be in lower snake case, and cannot include any special characters."));
diff --git a/docs/contributing.md b/docs/contributing.md
index 38caffd654..0ca107ff10 100644
--- a/docs/contributing.md
+++ b/docs/contributing.md
@@ -6,13 +6,13 @@ The following guide will help you quickly run Feast in your local machine.
The main components of Feast are:
-* **Feast Core** handles FeatureSpec registration, starts and monitors Ingestion
+* **Feast Core** handles FeatureSpec registration, starts and monitors Ingestion
jobs and ensures that Feast internal metadata is consistent.
* **Feast Ingestion** subscribes to streams of FeatureRow and writes the feature
- values to registered Stores.
+ values to registered Stores.
* **Feast Serving** handles requests for features values retrieval from the end users.
@@ -29,13 +29,13 @@ The main components of Feast are:
> **Assumptions:**
>
-> 1. Postgres is running in "localhost:5432" and has a database called "postgres" which
+> 1. Postgres is running in "localhost:5432" and has a database called "postgres" which
>
-> can be accessed with credentials user "postgres" and password "password".
+> can be accessed with credentials user "postgres" and password "password".
>
-> To use different database name and credentials, please update
+> To use different database name and credentials, please update
>
-> "$FEAST\_HOME/core/src/main/resources/application.yml"
+> "$FEAST\_HOME/core/src/main/resources/application.yml"
>
> or set these environment variables: DB\_HOST, DB\_USERNAME, DB\_PASSWORD.
>
@@ -52,16 +52,17 @@ cd feast
#### Starting Feast Core
```text
-# Please check the default configuration for Feast Core in
+# Please check the default configuration for Feast Core in
# "$FEAST_HOME/core/src/main/resources/application.yml" and update it accordingly.
-#
+#
# Start Feast Core GRPC server on localhost:6565
mvn --projects core spring-boot:run
# If Feast Core starts successfully, verify the correct Stores are registered
# correctly, for example by using grpc_cli.
-grpc_cli call localhost:6565 GetStores ''
+grpc_cli call localhost:6565 ListStores ''
# Should return something similar to the following.
# Note that you should change BigQuery projectId and datasetId accordingly
# in "$FEAST_HOME/core/src/main/resources/application.yml"
@@ -91,12 +92,19 @@ store {
project_id: "my-google-project-id"
dataset_id: "my-bigquery-dataset-id"
}
+# Should return something similar to the following if you have not updated any stores
+{
+ "store": []
}
```
#### Starting Feast Serving
-Feast Serving requires administrators to provide an **existing** store name in Feast. An instance of Feast Serving can only retrieve features from a **single** store.
+Feast Serving requires administrators to provide an **existing** store name in Feast.
+An instance of Feast Serving can only retrieve features from a **single** store.
+> In order to retrieve features from multiple stores you must start **multiple**
+instances of Feast serving. If you start multiple Feast serving on a single host,
+make sure that they are listening on different ports.
-> In order to retrieve features from multiple stores you must start **multiple** instances of Feast serving. If you start multiple Feast serving on a single host, make sure that they are listening on different ports.
@@ -111,12 +119,76 @@ grpc_cli call localhost:6566 GetFeastServingType ''
type: FEAST_SERVING_TYPE_ONLINE
```
+#### Updating a store
+
+Create a new Store by sending a request to Feast Core.
+
+```
+# Example of updating a redis store
+
+grpc_cli call localhost:6565 UpdateStore '
+store {
+ name: "SERVING"
+ type: REDIS
+ subscriptions {
+ name: "*"
+ version: ">0"
+ }
+ redis_config {
+ host: "localhost"
+ port: 6379
+ }
+}
+'
+
+# Other supported stores examples (replacing redis_config):
+# BigQuery
+bigquery_config {
+ project_id: "my-google-project-id"
+ dataset_id: "my-bigquery-dataset-id"
+}
+
+# Cassandra: two options in cassandra depending on replication strategy
+# See details: https://docs.datastax.com/en/cassandra/3.0/cassandra/architecture/archDataDistributeReplication.html
+#
+# Please note that table name must be "feature_store" as is specified in the @Table annotation of the
+# datastax object mapper
+
+# SimpleStrategy
+cassandra_config {
+ bootstrap_hosts: "localhost"
+ port: 9042
+ keyspace: "feast"
+ table_name: "feature_store"
+ replication_options {
+ class: "SimpleStrategy"
+ replication_factor: 1
+ }
+}
+
+# NetworkTopologyStrategy
+cassandra_config {
+ bootstrap_hosts: "localhost"
+ port: 9042
+ keyspace: "feast"
+ table_name: "feature_store"
+ replication_options {
+ class: "NetworkTopologyStrategy"
+ east: 2
+ west: 2
+ }
+}
+
+# To check that the store has been updated correctly.
+grpc_cli call localhost:6565 ListStores ''
+```
+
#### Registering a FeatureSet
Create a new FeatureSet on Feast by sending a request to Feast Core. When a feature set is successfully registered, Feast Core will start an **ingestion** job that listens for new features in the FeatureSet. Note that Feast currently only supports source of type "KAFKA", so you must have access to a running Kafka broker to register a FeatureSet successfully.
```text
-# Example of registering a new driver feature set
+# Example of registering a new driver feature set
# Note the source value, it assumes that you have access to a Kafka broker
# running on localhost:9092
@@ -156,7 +228,7 @@ grpc_cli call localhost:6565 GetFeatureSets ''
# and written to the registered stores.
# Make sure the value here is the topic assigned to the feature set
# ... producer.send("feast-driver-features" ...)
-#
+#
# Install Python SDK to help writing FeatureRow messages to Kafka
cd $FEAST_HOME/sdk/python
pip3 install -e .
diff --git a/infra/charts/feast/charts/feast-serving/values.yaml b/infra/charts/feast/charts/feast-serving/values.yaml
index d489a48748..d50cd30885 100644
--- a/infra/charts/feast/charts/feast-serving/values.yaml
+++ b/infra/charts/feast/charts/feast-serving/values.yaml
@@ -56,6 +56,15 @@ application.yaml:
config-path: /etc/feast/feast-serving/store.yaml
redis-pool-max-size: 128
redis-pool-max-idle: 64
+ cassandra-pool-core-local-connections: 1
+ cassandra-pool-max-local-connections: 1
+ cassandra-pool-core-remote-connections: 1
+ cassandra-pool-max-remote-connections: 1
+ cassandra-pool-max-requests-local-connection: 32768
+ cassandra-pool-max-requests-remote-connection: 2048
+ cassandra-pool-new-local-connection-threshold: 30000
+ cassandra-pool-new-remote-connection-threshold: 400
+ cassandra-pool-timeout-millis: 0
jobs:
staging-location: ""
store-type: ""
diff --git a/ingestion/pom.xml b/ingestion/pom.xml
index c829674a64..a9e982290e 100644
--- a/ingestion/pom.xml
+++ b/ingestion/pom.xml
@@ -215,6 +215,12 @@
${org.apache.beam.version}