diff --git a/video/README.md b/video/README.md
index 5975d3aeb6b..03595140697 100644
--- a/video/README.md
+++ b/video/README.md
@@ -1,3 +1,36 @@
-# Video Intelligence Samples have been moved
+# [Cloud Video Intelligence: Java Samples](https://github.com/GoogleCloudPlatform/java-docs-samples/tree/main/video)
-[https://github.com/googleapis/java-video-intelligence](https://github.com/googleapis/java-video-intelligence/tree/main/samples).
\ No newline at end of file
+[![Open in Cloud Shell][shell_img]][shell_link]
+
+
+
+## Table of Contents
+
+* [Build the sample](#build-the-sample)
+* [Samples](#samples)
+
+
+## Build the sample
+
+Install [Maven](http://maven.apache.org/).
+
+Build your project with:
+
+```
+mvn clean package -DskipTests=true
+```
+
+## Samples
+
+Please follow [Before you begin](https://cloud.google.com/video-intelligence/docs/annotate-video-client-libraries#before-you-begin) for project and auth setup before you run the samples.
+
+
+## Run
+Run all tests:
+```
+mvn clean verify
+```
+
+[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png
+[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/java-docs-samples&page=editor&open_in_editor=video/README.md
+[product-docs]: https://cloud.google.com/video-intelligence/docs/
diff --git a/video/pom.xml b/video/pom.xml
new file mode 100644
index 00000000000..161d75787b4
--- /dev/null
+++ b/video/pom.xml
@@ -0,0 +1,71 @@
+
+
+ 4.0.0
+ com.google.cloud
+ videointelligence-snippets
+ jar
+ Google Cloud Video Intelligence Snippets
+ https://github.com/GoogleCloudPlatform/java-docs-samples/tree/main/video
+
+
+
+ com.google.cloud.samples
+ shared-configuration
+ 1.2.0
+
+
+
+ 1.8
+ 1.8
+ UTF-8
+
+
+
+
+
+
+
+ com.google.cloud
+ libraries-bom
+ 26.1.3
+ pom
+ import
+
+
+
+
+
+
+ com.google.cloud
+ google-cloud-video-intelligence
+
+
+ com.google.cloud
+ google-cloud-storage
+
+
+
+
+ junit
+ junit
+ 4.13.2
+ test
+
+
+ com.google.truth
+ truth
+ 1.1.3
+ test
+
+
+ com.google.cloud
+ google-cloud-core
+ 2.8.22
+ test
+ tests
+
+
+
diff --git a/video/resources/cat.mp4 b/video/resources/cat.mp4
new file mode 100644
index 00000000000..0e071b9ec67
Binary files /dev/null and b/video/resources/cat.mp4 differ
diff --git a/video/resources/googlework_short.mp4 b/video/resources/googlework_short.mp4
new file mode 100644
index 00000000000..be0f40f8ad6
Binary files /dev/null and b/video/resources/googlework_short.mp4 differ
diff --git a/video/src/main/java/beta/video/Detect.java b/video/src/main/java/beta/video/Detect.java
new file mode 100644
index 00000000000..e87cdb147d4
--- /dev/null
+++ b/video/src/main/java/beta/video/Detect.java
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p1beta1.Feature;
+import com.google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.videointelligence.v1p1beta1.SpeechTranscription;
+import com.google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig;
+import com.google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p1beta1.VideoContext;
+import com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1p1beta1.WordInfo;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+public class Detect {
+ /**
+ * Detects video transcription using the Video Intelligence API
+ *
+ * @param args specifies features to detect and the path to the video on Google Cloud Storage.
+ */
+ public static void main(String[] args) {
+ try {
+ argsHelper(args);
+ } catch (Exception e) {
+ System.out.println("Exception while running:\n" + e.getMessage() + "\n");
+ e.printStackTrace(System.out);
+ }
+ }
+
+ /**
+ * Helper that handles the input passed to the program.
+ *
+ * @param args specifies features to detect and the path to the video on Google Cloud Storage.
+ * @throws IOException on Input/Output errors.
+ */
+ public static void argsHelper(String[] args) throws Exception {
+ if (args.length < 1) {
+ System.out.println("Usage:");
+ System.out.printf(
+ "\tjava %s \"\" \"\"\n"
+ + "Commands:\n"
+ + "\tspeech-transcription\n"
+ + "Path:\n\tA URI for a Cloud Storage resource (gs://...)\n"
+ + "Examples: ",
+ Detect.class.getCanonicalName());
+ return;
+ }
+ String command = args[0];
+ String path = args.length > 1 ? args[1] : "";
+
+ if (command.equals("speech-transcription")) {
+ speechTranscription(path);
+ }
+ }
+
+ // [START video_speech_transcription_gcs_beta]
+ /**
+ * Transcribe speech from a video stored on GCS.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static void speechTranscription(String gcsUri) throws Exception {
+ // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Set the language code
+ SpeechTranscriptionConfig config =
+ SpeechTranscriptionConfig.newBuilder()
+ .setLanguageCode("en-US")
+ .setEnableAutomaticPunctuation(true)
+ .build();
+
+ // Set the video context with the above configuration
+ VideoContext context = VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build();
+
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.SPEECH_TRANSCRIPTION)
+ .setVideoContext(context)
+ .build();
+
+ // asynchronously perform speech transcription on videos
+ OperationFuture response =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // Display the results
+ for (VideoAnnotationResults results :
+ response.get(300, TimeUnit.SECONDS).getAnnotationResultsList()) {
+ for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
+ try {
+ // Print the transcription
+ if (speechTranscription.getAlternativesCount() > 0) {
+ SpeechRecognitionAlternative alternative = speechTranscription.getAlternatives(0);
+
+ System.out.printf("Transcript: %s\n", alternative.getTranscript());
+ System.out.printf("Confidence: %.2f\n", alternative.getConfidence());
+
+ System.out.println("Word level information:");
+ for (WordInfo wordInfo : alternative.getWordsList()) {
+ double startTime =
+ wordInfo.getStartTime().getSeconds() + wordInfo.getStartTime().getNanos() / 1e9;
+ double endTime =
+ wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
+ System.out.printf(
+ "\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
+ }
+ } else {
+ System.out.println("No transcription found");
+ }
+ } catch (IndexOutOfBoundsException ioe) {
+ System.out.println("Could not retrieve frame: " + ioe.getMessage());
+ }
+ }
+ }
+ }
+ }
+ // [END video_speech_transcription_gcs_beta]
+}
diff --git a/video/src/main/java/beta/video/DetectLogo.java b/video/src/main/java/beta/video/DetectLogo.java
new file mode 100644
index 00000000000..4df53415318
--- /dev/null
+++ b/video/src/main/java/beta/video/DetectLogo.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_detect_logo_beta]
+
+import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1p3beta1.Entity;
+import com.google.cloud.videointelligence.v1p3beta1.Feature;
+import com.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation;
+import com.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
+import com.google.cloud.videointelligence.v1p3beta1.Track;
+import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Duration;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.ExecutionException;
+
+public class DetectLogo {
+
+ public void detectLogo() throws IOException, ExecutionException, InterruptedException {
+ String filePath = "path/to/your/video.mp4";
+ detectLogo(filePath);
+ }
+
+ public static void detectLogo(String localFilePath)
+ throws IOException, ExecutionException, InterruptedException {
+ // Initialize client that will be used to send requests. This client only needs to be created
+ // once, and can be reused for multiple requests. After completing all of your requests, call
+ // the "close" method on the client to safely clean up any remaining background resources.
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Read the files contents
+ Path path = Paths.get(localFilePath);
+ byte[] data = Files.readAllBytes(path);
+ ByteString inputContent = ByteString.copyFrom(data);
+
+ // Build the request with the inputContent and set the Feature
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputContent(inputContent)
+ .addFeatures(Feature.LOGO_RECOGNITION)
+ .build();
+
+ // Make the asynchronous request
+ AnnotateVideoResponse response = client.annotateVideoAsync(request).get();
+
+ // Get the first response, since we sent only one video.
+ VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
+
+ // Annotations for list of logos detected, tracked and recognized in the video.
+ for (LogoRecognitionAnnotation logoRecognitionAnnotation :
+ annotationResult.getLogoRecognitionAnnotationsList()) {
+
+ Entity entity = logoRecognitionAnnotation.getEntity();
+ // Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
+ // API](https://developers.google.com/knowledge-graph/).
+ System.out.printf("Entity Id: %s\n", entity.getEntityId());
+ System.out.printf("Description: %s\n", entity.getDescription());
+
+ // All logo tracks where the recognized logo appears. Each track corresponds to one logo
+ // instance appearing in consecutive frames.
+ for (Track track : logoRecognitionAnnotation.getTracksList()) {
+
+ // Video segment of a track.
+ VideoSegment segment = track.getSegment();
+ Duration segmentStartTimeOffset = segment.getStartTimeOffset();
+ System.out.printf(
+ "\n\tStart Time Offset: %s.%s\n",
+ segmentStartTimeOffset.getSeconds(), segmentStartTimeOffset.getNanos());
+ Duration segmentEndTimeOffset = segment.getEndTimeOffset();
+ System.out.printf(
+ "\tEnd Time Offset: %s.%s\n",
+ segmentEndTimeOffset.getSeconds(), segmentEndTimeOffset.getNanos());
+ System.out.printf("\tConfidence: %s\n", track.getConfidence());
+
+ // The object with timestamp and attributes per frame in the track.
+ for (TimestampedObject timestampedObject : track.getTimestampedObjectsList()) {
+
+ // Normalized Bounding box in a frame, where the object is located.
+ NormalizedBoundingBox normalizedBoundingBox =
+ timestampedObject.getNormalizedBoundingBox();
+ System.out.printf("\n\t\tLeft: %s\n", normalizedBoundingBox.getLeft());
+ System.out.printf("\t\tTop: %s\n", normalizedBoundingBox.getTop());
+ System.out.printf("\t\tRight: %s\n", normalizedBoundingBox.getRight());
+ System.out.printf("\t\tBottom: %s\n", normalizedBoundingBox.getBottom());
+
+ // Optional. The attributes of the object in the bounding box.
+ for (DetectedAttribute attribute : timestampedObject.getAttributesList()) {
+ System.out.printf("\n\t\t\tName: %s\n", attribute.getName());
+ System.out.printf("\t\t\tConfidence: %s\n", attribute.getConfidence());
+ System.out.printf("\t\t\tValue: %s\n", attribute.getValue());
+ }
+ }
+
+ // Optional. Attributes in the track level.
+ for (DetectedAttribute trackAttribute : track.getAttributesList()) {
+ System.out.printf("\n\t\tName : %s\n", trackAttribute.getName());
+ System.out.printf("\t\tConfidence : %s\n", trackAttribute.getConfidence());
+ System.out.printf("\t\tValue : %s\n", trackAttribute.getValue());
+ }
+ }
+
+ // All video segments where the recognized logo appears. There might be multiple instances
+ // of the same logo class appearing in one VideoSegment.
+ for (VideoSegment logoRecognitionAnnotationSegment :
+ logoRecognitionAnnotation.getSegmentsList()) {
+ Duration logoRecognitionAnnotationSegmentStartTimeOffset =
+ logoRecognitionAnnotationSegment.getStartTimeOffset();
+ System.out.printf(
+ "\n\tStart Time Offset : %s.%s\n",
+ logoRecognitionAnnotationSegmentStartTimeOffset.getSeconds(),
+ logoRecognitionAnnotationSegmentStartTimeOffset.getNanos());
+ Duration logoRecognitionAnnotationSegmentEndTimeOffset =
+ logoRecognitionAnnotationSegment.getEndTimeOffset();
+ System.out.printf(
+ "\tEnd Time Offset : %s.%s\n",
+ logoRecognitionAnnotationSegmentEndTimeOffset.getSeconds(),
+ logoRecognitionAnnotationSegmentEndTimeOffset.getNanos());
+ }
+ }
+ }
+ }
+}
+// [END video_detect_logo_beta]
diff --git a/video/src/main/java/beta/video/DetectLogoGcs.java b/video/src/main/java/beta/video/DetectLogoGcs.java
new file mode 100644
index 00000000000..ee8fd0e5ce7
--- /dev/null
+++ b/video/src/main/java/beta/video/DetectLogoGcs.java
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_detect_logo_gcs_beta]
+
+import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1p3beta1.Entity;
+import com.google.cloud.videointelligence.v1p3beta1.Feature;
+import com.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation;
+import com.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
+import com.google.cloud.videointelligence.v1p3beta1.Track;
+import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
+import com.google.protobuf.Duration;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+
+public class DetectLogoGcs {
+
+ public void detectLogo() throws IOException, ExecutionException, InterruptedException {
+ String inputUri = "gs://cloud-samples-data/video/googlework_short.mp4";
+ detectLogoGcs(inputUri);
+ }
+
+ public static void detectLogoGcs(String inputUri)
+ throws IOException, ExecutionException, InterruptedException {
+ // Initialize client that will be used to send requests. This client only needs to be created
+ // once, and can be reused for multiple requests. After completing all of your requests, call
+ // the "close" method on the client to safely clean up any remaining background resources.
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Build the request with the inputUri and set the Feature
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(inputUri)
+ .addFeatures(Feature.LOGO_RECOGNITION)
+ .build();
+
+ // Make the asynchronous request
+ AnnotateVideoResponse response = client.annotateVideoAsync(request).get();
+
+ // Get the first response, since we sent only one video.
+ VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
+
+ // Annotations for list of logos detected, tracked and recognized in the video.
+ for (LogoRecognitionAnnotation logoRecognitionAnnotation :
+ annotationResult.getLogoRecognitionAnnotationsList()) {
+
+ Entity entity = logoRecognitionAnnotation.getEntity();
+ // Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search
+ // API](https://developers.google.com/knowledge-graph/).
+ System.out.printf("Entity Id: %s\n", entity.getEntityId());
+ System.out.printf("Description: %s\n", entity.getDescription());
+
+ // All logo tracks where the recognized logo appears. Each track corresponds to one logo
+ // instance appearing in consecutive frames.
+ for (Track track : logoRecognitionAnnotation.getTracksList()) {
+
+ // Video segment of a track.
+ VideoSegment segment = track.getSegment();
+ Duration segmentStartTimeOffset = segment.getStartTimeOffset();
+ System.out.printf(
+ "\n\tStart Time Offset: %s.%s\n",
+ segmentStartTimeOffset.getSeconds(), segmentStartTimeOffset.getNanos());
+ Duration segmentEndTimeOffset = segment.getEndTimeOffset();
+ System.out.printf(
+ "\tEnd Time Offset: %s.%s\n",
+ segmentEndTimeOffset.getSeconds(), segmentEndTimeOffset.getNanos());
+ System.out.printf("\tConfidence: %s\n", track.getConfidence());
+
+ // The object with timestamp and attributes per frame in the track.
+ for (TimestampedObject timestampedObject : track.getTimestampedObjectsList()) {
+
+ // Normalized Bounding box in a frame, where the object is located.
+ NormalizedBoundingBox normalizedBoundingBox =
+ timestampedObject.getNormalizedBoundingBox();
+ System.out.printf("\n\t\tLeft: %s\n", normalizedBoundingBox.getLeft());
+ System.out.printf("\t\tTop: %s\n", normalizedBoundingBox.getTop());
+ System.out.printf("\t\tRight: %s\n", normalizedBoundingBox.getRight());
+ System.out.printf("\t\tBottom: %s\n", normalizedBoundingBox.getBottom());
+
+ // Optional. The attributes of the object in the bounding box.
+ for (DetectedAttribute attribute : timestampedObject.getAttributesList()) {
+ System.out.printf("\n\t\t\tName: %s\n", attribute.getName());
+ System.out.printf("\t\t\tConfidence: %s\n", attribute.getConfidence());
+ System.out.printf("\t\t\tValue: %s\n", attribute.getValue());
+ }
+ }
+
+ // Optional. Attributes in the track level.
+ for (DetectedAttribute trackAttribute : track.getAttributesList()) {
+ System.out.printf("\n\t\tName : %s\n", trackAttribute.getName());
+ System.out.printf("\t\tConfidence : %s\n", trackAttribute.getConfidence());
+ System.out.printf("\t\tValue : %s\n", trackAttribute.getValue());
+ }
+ }
+
+ // All video segments where the recognized logo appears. There might be multiple instances
+ // of the same logo class appearing in one VideoSegment.
+ for (VideoSegment logoRecognitionAnnotationSegment :
+ logoRecognitionAnnotation.getSegmentsList()) {
+ Duration logoRecognitionAnnotationSegmentStartTimeOffset =
+ logoRecognitionAnnotationSegment.getStartTimeOffset();
+ System.out.printf(
+ "\n\tStart Time Offset : %s.%s\n",
+ logoRecognitionAnnotationSegmentStartTimeOffset.getSeconds(),
+ logoRecognitionAnnotationSegmentStartTimeOffset.getNanos());
+ Duration logoRecognitionAnnotationSegmentEndTimeOffset =
+ logoRecognitionAnnotationSegment.getEndTimeOffset();
+ System.out.printf(
+ "\tEnd Time Offset : %s.%s\n",
+ logoRecognitionAnnotationSegmentEndTimeOffset.getSeconds(),
+ logoRecognitionAnnotationSegmentEndTimeOffset.getNanos());
+ }
+ }
+ }
+ }
+}
+// [END video_detect_logo_gcs_beta]
diff --git a/video/src/main/java/beta/video/StreamingAnnotationToStorage.java b/video/src/main/java/beta/video/StreamingAnnotationToStorage.java
new file mode 100644
index 00000000000..ab4834f5b50
--- /dev/null
+++ b/video/src/main/java/beta/video/StreamingAnnotationToStorage.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_streaming_annotation_to_storage_beta]
+
+import com.google.api.gax.rpc.BidiStream;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
+import com.google.protobuf.ByteString;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+public class StreamingAnnotationToStorage {
+
+ // Perform streaming video detection for explicit content
+ static void streamingAnnotationToStorage(String filePath, String gcsUri)
+ throws IOException, TimeoutException, StatusRuntimeException {
+ // String filePath = "path_to_your_video_file";
+ // String gcsUri = "gs://BUCKET_ID";
+
+ try (StreamingVideoIntelligenceServiceClient client =
+ StreamingVideoIntelligenceServiceClient.create()) {
+
+ Path path = Paths.get(filePath);
+ byte[] data = Files.readAllBytes(path);
+ // Set the chunk size to 5MB (recommended less than 10MB).
+ int chunkSize = 5 * 1024 * 1024;
+ int numChunks = (int) Math.ceil((double) data.length / chunkSize);
+
+ StreamingStorageConfig streamingStorageConfig =
+ StreamingStorageConfig.newBuilder()
+ .setEnableStorageAnnotationResult(true)
+ .setAnnotationResultStorageDirectory(gcsUri)
+ .build();
+
+ StreamingLabelDetectionConfig labelConfig =
+ StreamingLabelDetectionConfig.newBuilder().setStationaryCamera(false).build();
+
+ StreamingVideoConfig streamingVideoConfig =
+ StreamingVideoConfig.newBuilder()
+ .setFeature(StreamingFeature.STREAMING_LABEL_DETECTION)
+ .setLabelDetectionConfig(labelConfig)
+ .setStorageConfig(streamingStorageConfig)
+ .build();
+
+ BidiStream call =
+ client.streamingAnnotateVideoCallable().call();
+
+ // The first request must **only** contain the audio configuration:
+ call.send(
+ StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
+
+ // Subsequent requests must **only** contain the audio data.
+ // Send the requests in chunks
+ for (int i = 0; i < numChunks; i++) {
+ call.send(
+ StreamingAnnotateVideoRequest.newBuilder()
+ .setInputContent(
+ ByteString.copyFrom(
+ Arrays.copyOfRange(data, i * chunkSize, i * chunkSize + chunkSize)))
+ .build());
+ }
+
+ // Tell the service you are done sending data
+ call.closeSend();
+
+ for (StreamingAnnotateVideoResponse response : call) {
+ System.out.format("Storage Uri: %s\n", response.getAnnotationResultsUri());
+ }
+ }
+ }
+}
+// [END video_streaming_annotation_to_storage_beta]
diff --git a/video/src/main/java/beta/video/StreamingAutoMlActionRecognition.java b/video/src/main/java/beta/video/StreamingAutoMlActionRecognition.java
new file mode 100644
index 00000000000..c06ff324cc0
--- /dev/null
+++ b/video/src/main/java/beta/video/StreamingAutoMlActionRecognition.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_streaming_automl_action_recognition_beta]
+
+import com.google.api.gax.rpc.BidiStream;
+import com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation;
+import com.google.cloud.videointelligence.v1p3beta1.LabelFrame;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
+import com.google.protobuf.ByteString;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+class StreamingAutoMlActionRecognition {
+
+ // Perform streaming video action recognition
+ static void streamingAutoMlActionRecognition(String filePath, String projectId, String modelId)
+ throws IOException, TimeoutException, StatusRuntimeException {
+
+ try (StreamingVideoIntelligenceServiceClient client =
+ StreamingVideoIntelligenceServiceClient.create()) {
+
+ Path path = Paths.get(filePath);
+ byte[] data = Files.readAllBytes(path);
+ // Set the chunk size to 5MB (recommended less than 10MB).
+ int chunkSize = 5 * 1024 * 1024;
+ int numChunks = (int) Math.ceil((double) data.length / chunkSize);
+
+ String modelPath =
+ String.format("projects/%s/locations/us-central1/models/%s", projectId, modelId);
+
+ System.out.println(modelPath);
+
+ StreamingAutomlActionRecognitionConfig streamingAutomlActionRecognitionConfig =
+ StreamingAutomlActionRecognitionConfig.newBuilder().setModelName(modelPath).build();
+
+ StreamingVideoConfig streamingVideoConfig =
+ StreamingVideoConfig.newBuilder()
+ .setFeature(StreamingFeature.STREAMING_AUTOML_ACTION_RECOGNITION)
+ .setAutomlActionRecognitionConfig(streamingAutomlActionRecognitionConfig)
+ .build();
+
+ BidiStream call =
+ client.streamingAnnotateVideoCallable().call();
+
+ // The first request must **only** contain the video configuration:
+ call.send(
+ StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
+
+ // Subsequent requests must **only** contain the video data.
+ // Send the requests in chunks
+ for (int i = 0; i < numChunks; i++) {
+ call.send(
+ StreamingAnnotateVideoRequest.newBuilder()
+ .setInputContent(
+ ByteString.copyFrom(
+ Arrays.copyOfRange(data, i * chunkSize, i * chunkSize + chunkSize)))
+ .build());
+ }
+
+ // Tell the service you are done sending data
+ call.closeSend();
+
+ for (StreamingAnnotateVideoResponse response : call) {
+ if (response.hasError()) {
+ System.out.println(response.getError().getMessage());
+ break;
+ }
+
+ StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
+
+ for (LabelAnnotation annotation : annotationResults.getLabelAnnotationsList()) {
+ String entity = annotation.getEntity().getDescription();
+
+ // There is only one frame per annotation
+ LabelFrame labelFrame = annotation.getFrames(0);
+ double offset =
+ labelFrame.getTimeOffset().getSeconds() + labelFrame.getTimeOffset().getNanos() / 1e9;
+ float confidence = labelFrame.getConfidence();
+
+ System.out.format("At %fs segment: %s (%f)\n", offset, entity, confidence);
+ }
+ }
+ System.out.println("Video streamed successfully.");
+ }
+ }
+}
+// [END video_streaming_automl_action_recognition_beta]
diff --git a/video/src/main/java/beta/video/StreamingAutoMlClassification.java b/video/src/main/java/beta/video/StreamingAutoMlClassification.java
new file mode 100644
index 00000000000..5a01c581505
--- /dev/null
+++ b/video/src/main/java/beta/video/StreamingAutoMlClassification.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_streaming_automl_classification_beta]
+
+import com.google.api.gax.rpc.BidiStream;
+import com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation;
+import com.google.cloud.videointelligence.v1p3beta1.LabelFrame;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
+import com.google.protobuf.ByteString;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+class StreamingAutoMlClassification {
+
+  /**
+   * Performs streaming video classification of a local file with a trained AutoML model.
+   *
+   * @param filePath path to the local video file to annotate
+   * @param projectId Google Cloud project that owns the AutoML model
+   * @param modelId ID of the AutoML classification model to use
+   */
+  static void streamingAutoMlClassification(String filePath, String projectId, String modelId)
+      throws TimeoutException, StatusRuntimeException, IOException {
+    // String filePath = "path_to_your_video_file";
+    // String projectId = "YOUR_GCP_PROJECT_ID";
+    // String modelId = "YOUR_AUTO_ML_CLASSIFICATION_MODEL_ID";
+
+    try (StreamingVideoIntelligenceServiceClient client =
+        StreamingVideoIntelligenceServiceClient.create()) {
+
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+      // Set the chunk size to 5MB (recommended less than 10MB).
+      int chunkSize = 5 * 1024 * 1024;
+      int numChunks = (int) Math.ceil((double) data.length / chunkSize);
+
+      String modelPath =
+          String.format("projects/%s/locations/us-central1/models/%s", projectId, modelId);
+
+      System.out.println(modelPath);
+
+      StreamingAutomlClassificationConfig streamingAutomlClassificationConfig =
+          StreamingAutomlClassificationConfig.newBuilder().setModelName(modelPath).build();
+
+      StreamingVideoConfig streamingVideoConfig =
+          StreamingVideoConfig.newBuilder()
+              .setFeature(StreamingFeature.STREAMING_AUTOML_CLASSIFICATION)
+              .setAutomlClassificationConfig(streamingAutomlClassificationConfig)
+              .build();
+
+      BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
+          client.streamingAnnotateVideoCallable().call();
+
+      // The first request must **only** contain the video configuration:
+      call.send(
+          StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
+
+      // Subsequent requests must **only** contain the video data.
+      // Send the requests in chunks; clamp the final chunk to the end of the
+      // array so it is not zero-padded past the actual video bytes.
+      for (int i = 0; i < numChunks; i++) {
+        call.send(
+            StreamingAnnotateVideoRequest.newBuilder()
+                .setInputContent(
+                    ByteString.copyFrom(
+                        Arrays.copyOfRange(
+                            data, i * chunkSize, Math.min(data.length, (i + 1) * chunkSize))))
+                .build());
+      }
+
+      // Tell the service you are done sending data
+      call.closeSend();
+
+      for (StreamingAnnotateVideoResponse response : call) {
+        if (response.hasError()) {
+          System.out.println(response.getError().getMessage());
+          break;
+        }
+
+        StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
+
+        for (LabelAnnotation annotation : annotationResults.getLabelAnnotationsList()) {
+          String entity = annotation.getEntity().getDescription();
+
+          // There is only one frame per annotation
+          LabelFrame labelFrame = annotation.getFrames(0);
+          double offset =
+              labelFrame.getTimeOffset().getSeconds() + labelFrame.getTimeOffset().getNanos() / 1e9;
+          float confidence = labelFrame.getConfidence();
+
+          System.out.format("At %fs segment: %s (%f)\n", offset, entity, confidence);
+        }
+      }
+      System.out.println("Video streamed successfully.");
+    }
+  }
+}
+// [END video_streaming_automl_classification_beta]
diff --git a/video/src/main/java/beta/video/StreamingAutoMlObjectTracking.java b/video/src/main/java/beta/video/StreamingAutoMlObjectTracking.java
new file mode 100644
index 00000000000..7258c341b76
--- /dev/null
+++ b/video/src/main/java/beta/video/StreamingAutoMlObjectTracking.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_streaming_automl_object_tracking_beta]
+
+import com.google.api.gax.rpc.BidiStream;
+import com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation;
+import com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
+import com.google.protobuf.ByteString;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+
+class StreamingAutoMlObjectTracking {
+
+  public static void main(String[] args) throws IOException {
+    // TODO(developer): Replace these variables before running the sample.
+    String filePath = "YOUR_VIDEO_FILE";
+    String projectId = "YOUR_PROJECT_ID";
+    String modelId = "YOUR_AUTOML_OBJECT_TRACKING_MODEL_ID";
+    streamingAutoMlObjectTracking(filePath, projectId, modelId);
+  }
+
+  /**
+   * Performs streaming video object tracking of a local file with a trained AutoML model.
+   *
+   * @param filePath path to the local video file to annotate
+   * @param projectId Google Cloud project that owns the AutoML model
+   * @param modelId ID of the AutoML object tracking model to use
+   */
+  static void streamingAutoMlObjectTracking(String filePath, String projectId, String modelId)
+      throws StatusRuntimeException, IOException {
+
+    try (StreamingVideoIntelligenceServiceClient client =
+        StreamingVideoIntelligenceServiceClient.create()) {
+
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+      // Set the chunk size to 5MB (recommended less than 10MB).
+      int chunkSize = 5 * 1024 * 1024;
+      int numChunks = (int) Math.ceil((double) data.length / chunkSize);
+
+      String modelPath =
+          String.format("projects/%s/locations/us-central1/models/%s", projectId, modelId);
+
+      StreamingAutomlObjectTrackingConfig streamingAutomlObjectTrackingConfig =
+          StreamingAutomlObjectTrackingConfig.newBuilder().setModelName(modelPath).build();
+
+      StreamingVideoConfig streamingVideoConfig =
+          StreamingVideoConfig.newBuilder()
+              .setFeature(StreamingFeature.STREAMING_AUTOML_OBJECT_TRACKING)
+              .setAutomlObjectTrackingConfig(streamingAutomlObjectTrackingConfig)
+              .build();
+
+      BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
+          client.streamingAnnotateVideoCallable().call();
+
+      // The first request must **only** contain the video configuration:
+      call.send(
+          StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
+
+      // Subsequent requests must **only** contain the video data.
+      // Send the requests in chunks; clamp the final chunk to the end of the
+      // array so it is not zero-padded past the actual video bytes.
+      for (int i = 0; i < numChunks; i++) {
+        call.send(
+            StreamingAnnotateVideoRequest.newBuilder()
+                .setInputContent(
+                    ByteString.copyFrom(
+                        Arrays.copyOfRange(
+                            data, i * chunkSize, Math.min(data.length, (i + 1) * chunkSize))))
+                .build());
+      }
+
+      // Tell the service you are done sending data
+      call.closeSend();
+
+      for (StreamingAnnotateVideoResponse response : call) {
+        StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
+
+        for (ObjectTrackingAnnotation objectAnnotations :
+            annotationResults.getObjectAnnotationsList()) {
+
+          String entity = objectAnnotations.getEntity().getDescription();
+          float confidence = objectAnnotations.getConfidence();
+          long trackId = objectAnnotations.getTrackId();
+          System.out.format("%s: %f (ID: %d)\n", entity, confidence, trackId);
+
+          // In streaming, there is always one frame.
+          ObjectTrackingFrame frame = objectAnnotations.getFrames(0);
+          double offset =
+              frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
+          System.out.format("Offset: %f\n", offset);
+
+          System.out.println("Bounding Box:");
+          System.out.format("\tLeft: %f\n", frame.getNormalizedBoundingBox().getLeft());
+          System.out.format("\tTop: %f\n", frame.getNormalizedBoundingBox().getTop());
+          System.out.format("\tRight: %f\n", frame.getNormalizedBoundingBox().getRight());
+          System.out.format("\tBottom: %f\n", frame.getNormalizedBoundingBox().getBottom());
+        }
+      }
+      System.out.println("Video streamed successfully.");
+    }
+  }
+}
+// [END video_streaming_automl_object_tracking_beta]
diff --git a/video/src/main/java/beta/video/StreamingExplicitContentDetection.java b/video/src/main/java/beta/video/StreamingExplicitContentDetection.java
new file mode 100644
index 00000000000..2ce1c2d30f7
--- /dev/null
+++ b/video/src/main/java/beta/video/StreamingExplicitContentDetection.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_streaming_explicit_content_detection_beta]
+
+import com.google.api.gax.rpc.BidiStream;
+import com.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
+import com.google.protobuf.ByteString;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+class StreamingExplicitContentDetection {
+
+  /**
+   * Performs streaming explicit content detection on a local video file.
+   *
+   * @param filePath path to the local video file to annotate
+   */
+  static void streamingExplicitContentDetection(String filePath)
+      throws IOException, TimeoutException, StatusRuntimeException {
+    // String filePath = "path_to_your_video_file";
+
+    try (StreamingVideoIntelligenceServiceClient client =
+        StreamingVideoIntelligenceServiceClient.create()) {
+
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+      // Set the chunk size to 5MB (recommended less than 10MB).
+      int chunkSize = 5 * 1024 * 1024;
+      int numChunks = (int) Math.ceil((double) data.length / chunkSize);
+
+      StreamingLabelDetectionConfig labelConfig =
+          StreamingLabelDetectionConfig.newBuilder().setStationaryCamera(false).build();
+
+      StreamingVideoConfig streamingVideoConfig =
+          StreamingVideoConfig.newBuilder()
+              .setFeature(StreamingFeature.STREAMING_EXPLICIT_CONTENT_DETECTION)
+              .setLabelDetectionConfig(labelConfig)
+              .build();
+
+      BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
+          client.streamingAnnotateVideoCallable().call();
+
+      // The first request must **only** contain the video configuration:
+      call.send(
+          StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
+
+      // Subsequent requests must **only** contain the video data.
+      // Send the requests in chunks; clamp the final chunk to the end of the
+      // array so it is not zero-padded past the actual video bytes.
+      for (int i = 0; i < numChunks; i++) {
+        call.send(
+            StreamingAnnotateVideoRequest.newBuilder()
+                .setInputContent(
+                    ByteString.copyFrom(
+                        Arrays.copyOfRange(
+                            data, i * chunkSize, Math.min(data.length, (i + 1) * chunkSize))))
+                .build());
+      }
+
+      // Tell the service you are done sending data
+      call.closeSend();
+
+      for (StreamingAnnotateVideoResponse response : call) {
+        StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
+
+        for (ExplicitContentFrame frame :
+            annotationResults.getExplicitAnnotation().getFramesList()) {
+
+          double offset =
+              frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
+
+          System.out.format("Offset: %f\n", offset);
+          System.out.format("\tPornography: %s", frame.getPornographyLikelihood());
+        }
+      }
+    }
+  }
+}
+// [END video_streaming_explicit_content_detection_beta]
diff --git a/video/src/main/java/beta/video/StreamingLabelDetection.java b/video/src/main/java/beta/video/StreamingLabelDetection.java
new file mode 100644
index 00000000000..4b26534ecd2
--- /dev/null
+++ b/video/src/main/java/beta/video/StreamingLabelDetection.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_streaming_label_detection_beta]
+
+import com.google.api.gax.rpc.BidiStream;
+import com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation;
+import com.google.cloud.videointelligence.v1p3beta1.LabelFrame;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
+import com.google.protobuf.ByteString;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+class StreamingLabelDetection {
+
+  /**
+   * Performs streaming label detection on a local video file.
+   *
+   * @param filePath path to the local video file to annotate
+   */
+  static void streamingLabelDetection(String filePath)
+      throws IOException, TimeoutException, StatusRuntimeException {
+    // String filePath = "path_to_your_video_file";
+
+    try (StreamingVideoIntelligenceServiceClient client =
+        StreamingVideoIntelligenceServiceClient.create()) {
+
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+      // Set the chunk size to 5MB (recommended less than 10MB).
+      int chunkSize = 5 * 1024 * 1024;
+      int numChunks = (int) Math.ceil((double) data.length / chunkSize);
+
+      StreamingLabelDetectionConfig labelConfig =
+          StreamingLabelDetectionConfig.newBuilder().setStationaryCamera(false).build();
+
+      StreamingVideoConfig streamingVideoConfig =
+          StreamingVideoConfig.newBuilder()
+              .setFeature(StreamingFeature.STREAMING_LABEL_DETECTION)
+              .setLabelDetectionConfig(labelConfig)
+              .build();
+
+      BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
+          client.streamingAnnotateVideoCallable().call();
+
+      // The first request must **only** contain the video configuration:
+      call.send(
+          StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
+
+      // Subsequent requests must **only** contain the video data.
+      // Send the requests in chunks; clamp the final chunk to the end of the
+      // array so it is not zero-padded past the actual video bytes.
+      for (int i = 0; i < numChunks; i++) {
+        call.send(
+            StreamingAnnotateVideoRequest.newBuilder()
+                .setInputContent(
+                    ByteString.copyFrom(
+                        Arrays.copyOfRange(
+                            data, i * chunkSize, Math.min(data.length, (i + 1) * chunkSize))))
+                .build());
+      }
+
+      // Tell the service you are done sending data
+      call.closeSend();
+
+      for (StreamingAnnotateVideoResponse response : call) {
+        StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
+
+        for (LabelAnnotation annotation : annotationResults.getLabelAnnotationsList()) {
+          String entity = annotation.getEntity().getDescription();
+
+          // There is only one frame per annotation
+          LabelFrame labelFrame = annotation.getFrames(0);
+          double offset =
+              labelFrame.getTimeOffset().getSeconds() + labelFrame.getTimeOffset().getNanos() / 1e9;
+          float confidence = labelFrame.getConfidence();
+
+          System.out.format("%fs: %s (%f)\n", offset, entity, confidence);
+        }
+      }
+    }
+  }
+}
+// [END video_streaming_label_detection_beta]
diff --git a/video/src/main/java/beta/video/StreamingObjectTracking.java b/video/src/main/java/beta/video/StreamingObjectTracking.java
new file mode 100644
index 00000000000..0fe458f28a8
--- /dev/null
+++ b/video/src/main/java/beta/video/StreamingObjectTracking.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_streaming_object_tracking_beta]
+
+import com.google.api.gax.rpc.BidiStream;
+import com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation;
+import com.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
+import com.google.protobuf.ByteString;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+class StreamingObjectTracking {
+
+  /**
+   * Performs streaming object tracking on a local video file.
+   *
+   * @param filePath path to the local video file to annotate
+   */
+  static void streamingObjectTracking(String filePath)
+      throws IOException, TimeoutException, StatusRuntimeException {
+    // String filePath = "path_to_your_video_file";
+
+    try (StreamingVideoIntelligenceServiceClient client =
+        StreamingVideoIntelligenceServiceClient.create()) {
+
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+      // Set the chunk size to 5MB (recommended less than 10MB).
+      int chunkSize = 5 * 1024 * 1024;
+      int numChunks = (int) Math.ceil((double) data.length / chunkSize);
+
+      StreamingLabelDetectionConfig labelConfig =
+          StreamingLabelDetectionConfig.newBuilder().setStationaryCamera(false).build();
+
+      StreamingVideoConfig streamingVideoConfig =
+          StreamingVideoConfig.newBuilder()
+              .setFeature(StreamingFeature.STREAMING_OBJECT_TRACKING)
+              .setLabelDetectionConfig(labelConfig)
+              .build();
+
+      BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
+          client.streamingAnnotateVideoCallable().call();
+
+      // The first request must **only** contain the video configuration:
+      call.send(
+          StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
+
+      // Subsequent requests must **only** contain the video data.
+      // Send the requests in chunks; clamp the final chunk to the end of the
+      // array so it is not zero-padded past the actual video bytes.
+      for (int i = 0; i < numChunks; i++) {
+        call.send(
+            StreamingAnnotateVideoRequest.newBuilder()
+                .setInputContent(
+                    ByteString.copyFrom(
+                        Arrays.copyOfRange(
+                            data, i * chunkSize, Math.min(data.length, (i + 1) * chunkSize))))
+                .build());
+      }
+
+      // Tell the service you are done sending data
+      call.closeSend();
+
+      for (StreamingAnnotateVideoResponse response : call) {
+        StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
+
+        for (ObjectTrackingAnnotation objectAnnotations :
+            annotationResults.getObjectAnnotationsList()) {
+
+          String entity = objectAnnotations.getEntity().getDescription();
+          float confidence = objectAnnotations.getConfidence();
+          long trackId = objectAnnotations.getTrackId();
+          System.out.format("%s: %f (ID: %d)\n", entity, confidence, trackId);
+
+          // In streaming, there is always one frame.
+          ObjectTrackingFrame frame = objectAnnotations.getFrames(0);
+          double offset =
+              frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
+          System.out.format("Offset: %f\n", offset);
+
+          System.out.println("Bounding Box:");
+          System.out.format("\tLeft: %f\n", frame.getNormalizedBoundingBox().getLeft());
+          System.out.format("\tTop: %f\n", frame.getNormalizedBoundingBox().getTop());
+          System.out.format("\tRight: %f\n", frame.getNormalizedBoundingBox().getRight());
+          System.out.format("\tBottom: %f\n", frame.getNormalizedBoundingBox().getBottom());
+        }
+      }
+    }
+  }
+}
+// [END video_streaming_object_tracking_beta]
diff --git a/video/src/main/java/beta/video/StreamingShotChangeDetection.java b/video/src/main/java/beta/video/StreamingShotChangeDetection.java
new file mode 100644
index 00000000000..8ce99489bf7
--- /dev/null
+++ b/video/src/main/java/beta/video/StreamingShotChangeDetection.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+// [START video_streaming_shot_change_detection_beta]
+
+import com.google.api.gax.rpc.BidiStream;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
+import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
+import com.google.protobuf.ByteString;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+class StreamingShotChangeDetection {
+
+  /**
+   * Performs streaming shot change detection on a local video file.
+   *
+   * @param filePath path to the local video file to annotate
+   */
+  static void streamingShotChangeDetection(String filePath)
+      throws IOException, TimeoutException, StatusRuntimeException {
+    // String filePath = "path_to_your_video_file";
+
+    try (StreamingVideoIntelligenceServiceClient client =
+        StreamingVideoIntelligenceServiceClient.create()) {
+
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+      // Set the chunk size to 5MB (recommended less than 10MB).
+      int chunkSize = 5 * 1024 * 1024;
+      int numChunks = (int) Math.ceil((double) data.length / chunkSize);
+
+      StreamingLabelDetectionConfig labelConfig =
+          StreamingLabelDetectionConfig.newBuilder().setStationaryCamera(false).build();
+
+      StreamingVideoConfig streamingVideoConfig =
+          StreamingVideoConfig.newBuilder()
+              .setFeature(StreamingFeature.STREAMING_SHOT_CHANGE_DETECTION)
+              .setLabelDetectionConfig(labelConfig)
+              .build();
+
+      BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
+          client.streamingAnnotateVideoCallable().call();
+
+      // The first request must **only** contain the video configuration:
+      call.send(
+          StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
+
+      // Subsequent requests must **only** contain the video data.
+      // Send the requests in chunks; clamp the final chunk to the end of the
+      // array so it is not zero-padded past the actual video bytes.
+      for (int i = 0; i < numChunks; i++) {
+        call.send(
+            StreamingAnnotateVideoRequest.newBuilder()
+                .setInputContent(
+                    ByteString.copyFrom(
+                        Arrays.copyOfRange(
+                            data, i * chunkSize, Math.min(data.length, (i + 1) * chunkSize))))
+                .build());
+      }
+
+      // Tell the service you are done sending data
+      call.closeSend();
+
+      for (StreamingAnnotateVideoResponse response : call) {
+        // Report server-side errors before attempting to read results from the response.
+        if (response.hasError()) {
+          System.out.println(response.getError().getMessage());
+          System.out.format(
+              "Error occurred with the following status: %s\n", response.getError());
+          continue;
+        }
+        StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();
+        for (VideoSegment segment : annotationResults.getShotAnnotationsList()) {
+          double startTimeOffset =
+              segment.getStartTimeOffset().getSeconds()
+                  + segment.getStartTimeOffset().getNanos() / 1e9;
+          double endTimeOffset =
+              segment.getEndTimeOffset().getSeconds() + segment.getEndTimeOffset().getNanos() / 1e9;
+
+          System.out.format("Shot: %fs to %fs\n", startTimeOffset, endTimeOffset);
+        }
+      }
+    }
+  }
+}
+// [END video_streaming_shot_change_detection_beta]
diff --git a/video/src/main/java/beta/video/TextDetection.java b/video/src/main/java/beta/video/TextDetection.java
new file mode 100644
index 00000000000..2574a776589
--- /dev/null
+++ b/video/src/main/java/beta/video/TextDetection.java
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p2beta1.Feature;
+import com.google.cloud.videointelligence.v1p2beta1.NormalizedVertex;
+import com.google.cloud.videointelligence.v1p2beta1.TextAnnotation;
+import com.google.cloud.videointelligence.v1p2beta1.TextFrame;
+import com.google.cloud.videointelligence.v1p2beta1.TextSegment;
+import com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p2beta1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1p2beta1.VideoSegment;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Duration;
+import io.grpc.StatusRuntimeException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class TextDetection {
+
+  // [START video_detect_text_beta]
+
+  /**
+   * Detect text in a local video file.
+   *
+   * @param filePath the path to the video file to analyze.
+   * @return the annotation results for the first (only) video processed.
+   */
+  public static VideoAnnotationResults detectText(String filePath)
+      throws IOException, StatusRuntimeException, TimeoutException, ExecutionException,
+          InterruptedException {
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Read file
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+
+      // Create the request
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputContent(ByteString.copyFrom(data))
+              .addFeatures(Feature.TEXT_DETECTION)
+              .build();
+
+      // asynchronously perform text detection on the video
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // The first result is retrieved because a single video was processed.
+      AnnotateVideoResponse response = future.get(600, TimeUnit.SECONDS);
+      VideoAnnotationResults results = response.getAnnotationResults(0);
+
+      // Get only the first annotation for demo purposes.
+      TextAnnotation annotation = results.getTextAnnotations(0);
+      System.out.println("Text: " + annotation.getText());
+
+      // Get the first text segment.
+      TextSegment textSegment = annotation.getSegments(0);
+      System.out.println("Confidence: " + textSegment.getConfidence());
+      // For the text segment display its time offset
+      VideoSegment videoSegment = textSegment.getSegment();
+      Duration startTimeOffset = videoSegment.getStartTimeOffset();
+      Duration endTimeOffset = videoSegment.getEndTimeOffset();
+      // Display the offset times in seconds, 1e9 is part of the formula to convert nanos to seconds
+      System.out.println(
+          String.format(
+              "Start time: %.2f", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
+      System.out.println(
+          String.format(
+              "End time: %.2f", endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+
+      // Show the first result for the first frame in the segment.
+      TextFrame textFrame = textSegment.getFrames(0);
+      Duration timeOffset = textFrame.getTimeOffset();
+      System.out.println(
+          String.format(
+              "Time offset for the first frame: %.2f",
+              timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+      // Display the rotated bounding box for where the text is on the frame.
+      System.out.println("Rotated Bounding Box Vertices:");
+      List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
+      for (NormalizedVertex normalizedVertex : vertices) {
+        System.out.println(
+            String.format(
+                "\tVertex.x: %.2f, Vertex.y: %.2f",
+                normalizedVertex.getX(), normalizedVertex.getY()));
+      }
+      return results;
+    }
+  }
+  // [END video_detect_text_beta]
+
+  // [START video_detect_text_gcs_beta]
+
+  /**
+   * Detect text in a video stored in Cloud Storage.
+   *
+   * @param gcsUri the GCS URI (gs://...) of the video file to analyze.
+   * @return the annotation results for the first (only) video processed.
+   */
+  public static VideoAnnotationResults detectTextGcs(String gcsUri) throws Exception {
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Create the request
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.TEXT_DETECTION)
+              .build();
+
+      // asynchronously perform text detection on the video
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // The first result is retrieved because a single video was processed.
+      AnnotateVideoResponse response = future.get(600, TimeUnit.SECONDS);
+      VideoAnnotationResults results = response.getAnnotationResults(0);
+
+      // Get only the first annotation for demo purposes.
+      TextAnnotation annotation = results.getTextAnnotations(0);
+      System.out.println("Text: " + annotation.getText());
+
+      // Get the first text segment.
+      TextSegment textSegment = annotation.getSegments(0);
+      System.out.println("Confidence: " + textSegment.getConfidence());
+      // For the text segment display its time offset
+      VideoSegment videoSegment = textSegment.getSegment();
+      Duration startTimeOffset = videoSegment.getStartTimeOffset();
+      Duration endTimeOffset = videoSegment.getEndTimeOffset();
+      // Display the offset times in seconds, 1e9 is part of the formula to convert nanos to seconds
+      System.out.println(
+          String.format(
+              "Start time: %.2f", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
+      System.out.println(
+          String.format(
+              "End time: %.2f", endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+
+      // Show the first result for the first frame in the segment.
+      TextFrame textFrame = textSegment.getFrames(0);
+      Duration timeOffset = textFrame.getTimeOffset();
+      System.out.println(
+          String.format(
+              "Time offset for the first frame: %.2f",
+              timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+      // Display the rotated bounding box for where the text is on the frame.
+      System.out.println("Rotated Bounding Box Vertices:");
+      List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
+      for (NormalizedVertex normalizedVertex : vertices) {
+        System.out.println(
+            String.format(
+                "\tVertex.x: %.2f, Vertex.y: %.2f",
+                normalizedVertex.getX(), normalizedVertex.getY()));
+      }
+      return results;
+    }
+  }
+  // [END video_detect_text_gcs_beta]
+}
diff --git a/video/src/main/java/beta/video/TrackObjects.java b/video/src/main/java/beta/video/TrackObjects.java
new file mode 100644
index 00000000000..30ba1ca0013
--- /dev/null
+++ b/video/src/main/java/beta/video/TrackObjects.java
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p2beta1.Entity;
+import com.google.cloud.videointelligence.v1p2beta1.Feature;
+import com.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation;
+import com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame;
+import com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p2beta1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1p2beta1.VideoSegment;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Duration;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.TimeUnit;
+
+public class TrackObjects {
+
+ // [START video_object_tracking_beta]
+ /**
+ * Track objects in a video.
+ *
+ * @param filePath the path to the video file to analyze.
+ */
+ public static VideoAnnotationResults trackObjects(String filePath) throws Exception {
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Read file
+ Path path = Paths.get(filePath);
+ byte[] data = Files.readAllBytes(path);
+
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputContent(ByteString.copyFrom(data))
+ .addFeatures(Feature.OBJECT_TRACKING)
+ .setLocationId("us-east1")
+ .build();
+
+ // asynchronously perform object tracking on videos
+ OperationFuture future =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // The first result is retrieved because a single video was processed.
+ AnnotateVideoResponse response = future.get(600, TimeUnit.SECONDS);
+ VideoAnnotationResults results = response.getAnnotationResults(0);
+
+ // Get only the first annotation for demo purposes.
+ ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
+ System.out.println("Confidence: " + annotation.getConfidence());
+
+ if (annotation.hasEntity()) {
+ Entity entity = annotation.getEntity();
+ System.out.println("Entity description: " + entity.getDescription());
+ System.out.println("Entity id:: " + entity.getEntityId());
+ }
+
+ if (annotation.hasSegment()) {
+ VideoSegment videoSegment = annotation.getSegment();
+ Duration startTimeOffset = videoSegment.getStartTimeOffset();
+ Duration endTimeOffset = videoSegment.getEndTimeOffset();
+ // Display the segment time in seconds, 1e9 converts nanos to seconds
+ System.out.println(
+ String.format(
+ "Segment: %.2fs to %.2fs",
+ startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
+ endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+ }
+
+ // Here we print only the bounding box of the first frame in this segment.
+ ObjectTrackingFrame frame = annotation.getFrames(0);
+ // Display the offset time in seconds, 1e9 converts nanos to seconds
+ Duration timeOffset = frame.getTimeOffset();
+ System.out.println(
+ String.format(
+ "Time offset of the first frame: %.2fs",
+ timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+ // Display the bounding box of the detected object
+ NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
+ System.out.println("Bounding box position:");
+ System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
+ System.out.println("\ttop: " + normalizedBoundingBox.getTop());
+ System.out.println("\tright: " + normalizedBoundingBox.getRight());
+ System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
+ return results;
+ }
+ }
+ // [END video_object_tracking_beta]
+
+ // [START video_object_tracking_gcs_beta]
+ /**
+ * Track objects in a video.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static VideoAnnotationResults trackObjectsGcs(String gcsUri) throws Exception {
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.OBJECT_TRACKING)
+ .setLocationId("us-east1")
+ .build();
+
+ // asynchronously perform object tracking on videos
+ OperationFuture future =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // The first result is retrieved because a single video was processed.
+ AnnotateVideoResponse response = future.get(450, TimeUnit.SECONDS);
+ VideoAnnotationResults results = response.getAnnotationResults(0);
+
+ // Get only the first annotation for demo purposes.
+ ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
+ System.out.println("Confidence: " + annotation.getConfidence());
+
+ if (annotation.hasEntity()) {
+ Entity entity = annotation.getEntity();
+ System.out.println("Entity description: " + entity.getDescription());
+ System.out.println("Entity id:: " + entity.getEntityId());
+ }
+
+ if (annotation.hasSegment()) {
+ VideoSegment videoSegment = annotation.getSegment();
+ Duration startTimeOffset = videoSegment.getStartTimeOffset();
+ Duration endTimeOffset = videoSegment.getEndTimeOffset();
+ // Display the segment time in seconds, 1e9 converts nanos to seconds
+ System.out.println(
+ String.format(
+ "Segment: %.2fs to %.2fs",
+ startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
+ endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+ }
+
+ // Here we print only the bounding box of the first frame in this segment.
+ ObjectTrackingFrame frame = annotation.getFrames(0);
+ // Display the offset time in seconds, 1e9 converts nanos to seconds
+ Duration timeOffset = frame.getTimeOffset();
+ System.out.println(
+ String.format(
+ "Time offset of the first frame: %.2fs",
+ timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+ // Display the bounding box of the detected object
+ NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
+ System.out.println("Bounding box position:");
+ System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
+ System.out.println("\ttop: " + normalizedBoundingBox.getTop());
+ System.out.println("\tright: " + normalizedBoundingBox.getRight());
+ System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
+ return results;
+ }
+ }
+ // [END video_object_tracking_gcs_beta]
+}
diff --git a/video/src/main/java/com/example/video/Detect.java b/video/src/main/java/com/example/video/Detect.java
new file mode 100644
index 00000000000..ec643387222
--- /dev/null
+++ b/video/src/main/java/com/example/video/Detect.java
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.ExplicitContentFrame;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.LabelAnnotation;
+import com.google.cloud.videointelligence.v1.LabelSegment;
+import com.google.cloud.videointelligence.v1.SpeechRecognitionAlternative;
+import com.google.cloud.videointelligence.v1.SpeechTranscription;
+import com.google.cloud.videointelligence.v1.SpeechTranscriptionConfig;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoContext;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.cloud.videointelligence.v1.WordInfo;
+import com.google.protobuf.ByteString;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.TimeUnit;
+
+public class Detect {
+  /**
+   * Detects labels, shots, and explicit content in a video using the Video Intelligence API
+   *
+   * @param args specifies features to detect and the path to the video on Google Cloud Storage.
+   */
+  public static void main(String[] args) {
+    try {
+      argsHelper(args);
+    } catch (Exception e) {
+      System.out.println("Exception while running:\n" + e.getMessage() + "\n");
+      e.printStackTrace(System.out);
+    }
+  }
+
+  /**
+   * Helper that handles the input passed to the program.
+   *
+   * @param args specifies features to detect and the path to the video on Google Cloud Storage.
+   * @throws IOException on Input/Output errors.
+   */
+  public static void argsHelper(String[] args) throws Exception {
+    if (args.length < 1) {
+      System.out.println("Usage:");
+      System.out.printf(
+          // Placeholders restored: the angle-bracket tokens were stripped from this
+          // literal, leaving empty "\"\"" quotes in the usage text.
+          "\tjava %s \"<command>\" \"<path-to-video>\"\n"
+              + "Commands:\n"
+              + "\tlabels | shots\n"
+              + "Path:\n\tA URI for a Cloud Storage resource (gs://...)\n"
+              + "Examples: ",
+          Detect.class.getCanonicalName());
+      return;
+    }
+    String command = args[0];
+    String path = args.length > 1 ? args[1] : "";
+
+    if (command.equals("labels")) {
+      analyzeLabels(path);
+    }
+    if (command.equals("labels-file")) {
+      analyzeLabelsFile(path);
+    }
+    if (command.equals("shots")) {
+      analyzeShots(path);
+    }
+    if (command.equals("explicit-content")) {
+      analyzeExplicitContent(path);
+    }
+    if (command.equals("speech-transcription")) {
+      speechTranscription(path);
+    }
+  }
+
+  /**
+   * Performs label analysis on the video at the provided Cloud Storage path.
+   *
+   * @param gcsUri the path to the video file to analyze.
+   */
+  public static void analyzeLabels(String gcsUri) throws Exception {
+    // [START video_analyze_labels_gcs]
+    // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Provide path to file hosted on GCS as "gs://bucket-name/..."
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.LABEL_DETECTION)
+              .build();
+      // Create an operation that will contain the response when the operation completes.
+      // Generic type arguments restored: the raw OperationFuture does not compile, and
+      // AnnotateVideoProgress is imported solely for this metadata type parameter.
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
+        // process video / segment level label annotations
+        System.out.println("Locations: ");
+        for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
+          System.out.println("Video label: " + labelAnnotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+            System.out.println("Video label category: " + categoryEntity.getDescription());
+          }
+          // segments
+          for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
+            System.out.println("Confidence: " + segment.getConfidence());
+          }
+        }
+
+        // process shot label annotations
+        for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
+          System.out.println("Shot label: " + labelAnnotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+            System.out.println("Shot label category: " + categoryEntity.getDescription());
+          }
+          // segments
+          for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
+            System.out.println("Confidence: " + segment.getConfidence());
+          }
+        }
+
+        // process frame label annotations
+        for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
+          System.out.println("Frame label: " + labelAnnotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+            System.out.println("Frame label category: " + categoryEntity.getDescription());
+          }
+          // segments
+          for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            // NOTE(review): uses %.2f for endTime while the segment/shot loops above use
+            // %.3f — confirm whether the narrower precision here is intentional.
+            System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
+            System.out.println("Confidence: " + segment.getConfidence());
+          }
+        }
+      }
+    }
+    // [END video_analyze_labels_gcs]
+  }
+
+  /**
+   * Performs label analysis on the video at the provided file path.
+   *
+   * @param filePath the path to the video file to analyze.
+   */
+  public static void analyzeLabelsFile(String filePath) throws Exception {
+    // [START video_analyze_labels]
+    // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Read file and encode into Base64
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputContent(ByteString.copyFrom(data))
+              .addFeatures(Feature.LABEL_DETECTION)
+              .build();
+      // Create an operation that will contain the response when the operation completes.
+      // Generic type arguments restored (see analyzeLabels).
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
+        // process video / segment level label annotations
+        System.out.println("Locations: ");
+        for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
+          System.out.println("Video label: " + labelAnnotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+            System.out.println("Video label category: " + categoryEntity.getDescription());
+          }
+          // segments
+          for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
+            System.out.println("Confidence: " + segment.getConfidence());
+          }
+        }
+
+        // process shot label annotations
+        for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
+          System.out.println("Shot label: " + labelAnnotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+            System.out.println("Shot label category: " + categoryEntity.getDescription());
+          }
+          // segments
+          for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
+            System.out.println("Confidence: " + segment.getConfidence());
+          }
+        }
+
+        // process frame label annotations
+        for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
+          System.out.println("Frame label: " + labelAnnotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+            System.out.println("Frame label category: " + categoryEntity.getDescription());
+          }
+          // segments
+          for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
+            System.out.println("Confidence: " + segment.getConfidence());
+          }
+        }
+      }
+    }
+    // [END video_analyze_labels]
+  }
+
+  /**
+   * Performs shot analysis on the video at the provided Cloud Storage path.
+   *
+   * @param gcsUri the path to the video file to analyze.
+   */
+  public static void analyzeShots(String gcsUri) throws Exception {
+    // [START video_analyze_shots]
+    // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Provide path to file hosted on GCS as "gs://bucket-name/..."
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.SHOT_CHANGE_DETECTION)
+              .build();
+
+      // Create an operation that will contain the response when the operation completes.
+      // Generic type arguments restored (see analyzeLabels).
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // Print detected shot changes and their location ranges in the analyzed video.
+      for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
+        if (result.getShotAnnotationsCount() > 0) {
+          System.out.println("Shots: ");
+          for (VideoSegment segment : result.getShotAnnotationsList()) {
+            double startTime =
+                segment.getStartTimeOffset().getSeconds()
+                    + segment.getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getEndTimeOffset().getSeconds()
+                    + segment.getEndTimeOffset().getNanos() / 1e9;
+            System.out.printf("Location: %.3f:%.3f\n", startTime, endTime);
+          }
+        } else {
+          System.out.println("No shot changes detected in " + gcsUri);
+        }
+      }
+    }
+    // [END video_analyze_shots]
+  }
+
+  /**
+   * Performs explicit content analysis on the video at the provided Cloud Storage path.
+   *
+   * @param gcsUri the path to the video file to analyze.
+   */
+  public static void analyzeExplicitContent(String gcsUri) throws Exception {
+    // [START video_analyze_explicit_content]
+    // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Create an operation that will contain the response when the operation completes.
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.EXPLICIT_CONTENT_DETECTION)
+              .build();
+
+      // Generic type arguments restored (see analyzeLabels).
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // Print detected annotations and their positions in the analyzed video.
+      for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
+        for (ExplicitContentFrame frame : result.getExplicitAnnotation().getFramesList()) {
+          double frameTime =
+              frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
+          System.out.printf("Location: %.3fs\n", frameTime);
+          System.out.println("Adult: " + frame.getPornographyLikelihood());
+        }
+      }
+      // [END video_analyze_explicit_content]
+    }
+  }
+
+  /**
+   * Transcribe speech from a video stored on GCS.
+   *
+   * @param gcsUri the path to the video file to analyze.
+   */
+  public static void speechTranscription(String gcsUri) throws Exception {
+    // [START video_speech_transcription_gcs]
+    // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Set the language code
+      SpeechTranscriptionConfig config =
+          SpeechTranscriptionConfig.newBuilder()
+              .setLanguageCode("en-US")
+              .setEnableAutomaticPunctuation(true)
+              .build();
+
+      // Set the video context with the above configuration
+      VideoContext context = VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build();
+
+      // Create the request
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.SPEECH_TRANSCRIPTION)
+              .setVideoContext(context)
+              .build();
+
+      // asynchronously perform speech transcription on videos
+      // Generic type arguments restored (see analyzeLabels).
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // Display the results
+      for (VideoAnnotationResults results :
+          response.get(600, TimeUnit.SECONDS).getAnnotationResultsList()) {
+        for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
+          try {
+            // Print the transcription
+            if (speechTranscription.getAlternativesCount() > 0) {
+              SpeechRecognitionAlternative alternative = speechTranscription.getAlternatives(0);
+
+              System.out.printf("Transcript: %s\n", alternative.getTranscript());
+              System.out.printf("Confidence: %.2f\n", alternative.getConfidence());
+
+              System.out.println("Word level information:");
+              for (WordInfo wordInfo : alternative.getWordsList()) {
+                double startTime =
+                    wordInfo.getStartTime().getSeconds() + wordInfo.getStartTime().getNanos() / 1e9;
+                double endTime =
+                    wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
+                System.out.printf(
+                    "\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
+              }
+            } else {
+              System.out.println("No transcription found");
+            }
+          } catch (IndexOutOfBoundsException ioe) {
+            System.out.println("Could not retrieve frame: " + ioe.getMessage());
+          }
+        }
+      }
+    }
+    // [END video_speech_transcription_gcs]
+  }
+}
diff --git a/video/src/main/java/com/example/video/LogoDetection.java b/video/src/main/java/com/example/video/LogoDetection.java
new file mode 100644
index 00000000000..09a8fed0a87
--- /dev/null
+++ b/video/src/main/java/com/example/video/LogoDetection.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+// [START video_detect_logo]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation;
+import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Duration;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class LogoDetection {
+
+  /** Convenience overload with a placeholder path for the runnable sample. */
+  public static void detectLogo() throws Exception {
+    // TODO(developer): Replace these variables before running the sample.
+    String localFilePath = "path/to/your/video.mp4";
+    detectLogo(localFilePath);
+  }
+
+  /**
+   * Detects, tracks, and recognizes logos in a local video file and prints each recognized
+   * logo's entity, tracks (with per-frame bounding boxes and attributes), and segments.
+   *
+   * @param filePath path of the local video file to analyze.
+   */
+  public static void detectLogo(String filePath)
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests. After completing all of your requests, call
+    // the "close" method on the client to safely clean up any remaining background resources.
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Read file
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+      // Create the request
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputContent(ByteString.copyFrom(data))
+              .addFeatures(Feature.LOGO_RECOGNITION)
+              .build();
+
+      // asynchronously perform object tracking on videos
+      // Generic type arguments restored: the raw OperationFuture does not compile, and
+      // AnnotateVideoProgress is imported solely for this metadata type parameter.
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // The first result is retrieved because a single video was processed.
+      AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
+      VideoAnnotationResults annotationResult = response.getAnnotationResults(0);
+
+      // Annotations for list of logos detected, tracked and recognized in video.
+      for (LogoRecognitionAnnotation logoRecognitionAnnotation :
+          annotationResult.getLogoRecognitionAnnotationsList()) {
+        Entity entity = logoRecognitionAnnotation.getEntity();
+        // Opaque entity ID. Some IDs may be available in
+        // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
+        System.out.printf("Entity Id : %s\n", entity.getEntityId());
+        System.out.printf("Description : %s\n", entity.getDescription());
+        // All logo tracks where the recognized logo appears. Each track corresponds to one logo
+        // instance appearing in consecutive frames.
+        for (Track track : logoRecognitionAnnotation.getTracksList()) {
+
+          // Video segment of a track.
+          Duration startTimeOffset = track.getSegment().getStartTimeOffset();
+          System.out.printf(
+              "\n\tStart Time Offset: %s.%s\n",
+              startTimeOffset.getSeconds(), startTimeOffset.getNanos());
+          Duration endTimeOffset = track.getSegment().getEndTimeOffset();
+          System.out.printf(
+              "\tEnd Time Offset: %s.%s\n", endTimeOffset.getSeconds(), endTimeOffset.getNanos());
+          System.out.printf("\tConfidence: %s\n", track.getConfidence());
+
+          // The object with timestamp and attributes per frame in the track.
+          for (TimestampedObject timestampedObject : track.getTimestampedObjectsList()) {
+
+            // Normalized Bounding box in a frame, where the object is located.
+            NormalizedBoundingBox normalizedBoundingBox =
+                timestampedObject.getNormalizedBoundingBox();
+            System.out.printf("\n\t\tLeft: %s\n", normalizedBoundingBox.getLeft());
+            System.out.printf("\t\tTop: %s\n", normalizedBoundingBox.getTop());
+            System.out.printf("\t\tRight: %s\n", normalizedBoundingBox.getRight());
+            System.out.printf("\t\tBottom: %s\n", normalizedBoundingBox.getBottom());
+
+            // Optional. The attributes of the object in the bounding box.
+            for (DetectedAttribute attribute : timestampedObject.getAttributesList()) {
+              System.out.printf("\n\t\t\tName: %s\n", attribute.getName());
+              System.out.printf("\t\t\tConfidence: %s\n", attribute.getConfidence());
+              System.out.printf("\t\t\tValue: %s\n", attribute.getValue());
+            }
+          }
+
+          // Optional. Attributes in the track level.
+          for (DetectedAttribute trackAttribute : track.getAttributesList()) {
+            System.out.printf("\n\t\tName : %s\n", trackAttribute.getName());
+            System.out.printf("\t\tConfidence : %s\n", trackAttribute.getConfidence());
+            System.out.printf("\t\tValue : %s\n", trackAttribute.getValue());
+          }
+        }
+
+        // All video segments where the recognized logo appears. There might be multiple instances
+        // of the same logo class appearing in one VideoSegment.
+        for (VideoSegment segment : logoRecognitionAnnotation.getSegmentsList()) {
+          System.out.printf(
+              "\n\tStart Time Offset : %s.%s\n",
+              segment.getStartTimeOffset().getSeconds(), segment.getStartTimeOffset().getNanos());
+          System.out.printf(
+              "\tEnd Time Offset : %s.%s\n",
+              segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos());
+        }
+      }
+    }
+  }
+}
+// [END video_detect_logo]
diff --git a/video/src/main/java/com/example/video/LogoDetectionGcs.java b/video/src/main/java/com/example/video/LogoDetectionGcs.java
new file mode 100644
index 00000000000..ee054cfc6fd
--- /dev/null
+++ b/video/src/main/java/com/example/video/LogoDetectionGcs.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+// [START video_detect_logo_gcs]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation;
+import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.Duration;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Demonstrates logo recognition with the Video Intelligence API on a video hosted in Google
+ * Cloud Storage.
+ */
+public class LogoDetectionGcs {
+
+  public static void detectLogoGcs() throws Exception {
+    // TODO(developer): Replace these variables before running the sample.
+    String gcsUri = "gs://YOUR_BUCKET_ID/path/to/your/video.mp4";
+    detectLogoGcs(gcsUri);
+  }
+
+  /**
+   * Performs asynchronous logo recognition on the given video and prints each recognized entity,
+   * its tracks (with per-frame bounding boxes and attributes) and the video segments in which the
+   * logo appears.
+   *
+   * @param inputUri the "gs://..." URI of the video to annotate
+   */
+  public static void detectLogoGcs(String inputUri)
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    // Initialize client that will be used to send requests. This client only needs to be created
+    // once, and can be reused for multiple requests. After completing all of your requests, call
+    // the "close" method on the client to safely clean up any remaining background resources.
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Create the request
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(inputUri)
+              .addFeatures(Feature.LOGO_RECOGNITION)
+              .build();
+
+      // Asynchronously perform logo recognition on the video. Type arguments restored here:
+      // the raw OperationFuture had lost its <AnnotateVideoResponse, AnnotateVideoProgress>
+      // parameters, which also left the AnnotateVideoProgress import unused.
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // The first result is retrieved because a single video was processed.
+      AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
+      VideoAnnotationResults annotationResult = response.getAnnotationResults(0);
+
+      // Annotations for list of logos detected, tracked and recognized in video.
+      for (LogoRecognitionAnnotation logoRecognitionAnnotation :
+          annotationResult.getLogoRecognitionAnnotationsList()) {
+        Entity entity = logoRecognitionAnnotation.getEntity();
+        // Opaque entity ID. Some IDs may be available in
+        // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
+        System.out.printf("Entity Id : %s\n", entity.getEntityId());
+        System.out.printf("Description : %s\n", entity.getDescription());
+        // All logo tracks where the recognized logo appears. Each track corresponds to one logo
+        // instance appearing in consecutive frames.
+        for (Track track : logoRecognitionAnnotation.getTracksList()) {
+
+          // Video segment of a track.
+          Duration startTimeOffset = track.getSegment().getStartTimeOffset();
+          System.out.printf(
+              "\n\tStart Time Offset: %s.%s\n",
+              startTimeOffset.getSeconds(), startTimeOffset.getNanos());
+          Duration endTimeOffset = track.getSegment().getEndTimeOffset();
+          System.out.printf(
+              "\tEnd Time Offset: %s.%s\n", endTimeOffset.getSeconds(), endTimeOffset.getNanos());
+          System.out.printf("\tConfidence: %s\n", track.getConfidence());
+
+          // The object with timestamp and attributes per frame in the track.
+          for (TimestampedObject timestampedObject : track.getTimestampedObjectsList()) {
+
+            // Normalized Bounding box in a frame, where the object is located.
+            NormalizedBoundingBox normalizedBoundingBox =
+                timestampedObject.getNormalizedBoundingBox();
+            System.out.printf("\n\t\tLeft: %s\n", normalizedBoundingBox.getLeft());
+            System.out.printf("\t\tTop: %s\n", normalizedBoundingBox.getTop());
+            System.out.printf("\t\tRight: %s\n", normalizedBoundingBox.getRight());
+            System.out.printf("\t\tBottom: %s\n", normalizedBoundingBox.getBottom());
+
+            // Optional. The attributes of the object in the bounding box.
+            for (DetectedAttribute attribute : timestampedObject.getAttributesList()) {
+              System.out.printf("\n\t\t\tName: %s\n", attribute.getName());
+              System.out.printf("\t\t\tConfidence: %s\n", attribute.getConfidence());
+              System.out.printf("\t\t\tValue: %s\n", attribute.getValue());
+            }
+          }
+
+          // Optional. Attributes in the track level.
+          for (DetectedAttribute trackAttribute : track.getAttributesList()) {
+            System.out.printf("\n\t\tName : %s\n", trackAttribute.getName());
+            System.out.printf("\t\tConfidence : %s\n", trackAttribute.getConfidence());
+            System.out.printf("\t\tValue : %s\n", trackAttribute.getValue());
+          }
+        }
+
+        // All video segments where the recognized logo appears. There might be multiple instances
+        // of the same logo class appearing in one VideoSegment.
+        for (VideoSegment segment : logoRecognitionAnnotation.getSegmentsList()) {
+          System.out.printf(
+              "\n\tStart Time Offset : %s.%s\n",
+              segment.getStartTimeOffset().getSeconds(), segment.getStartTimeOffset().getNanos());
+          System.out.printf(
+              "\tEnd Time Offset : %s.%s\n",
+              segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos());
+        }
+      }
+    }
+  }
+}
+// [END video_detect_logo_gcs]
diff --git a/video/src/main/java/com/example/video/QuickstartSample.java b/video/src/main/java/com/example/video/QuickstartSample.java
new file mode 100644
index 00000000000..f46d8d84a1f
--- /dev/null
+++ b/video/src/main/java/com/example/video/QuickstartSample.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+// [START video_quickstart]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.LabelAnnotation;
+import com.google.cloud.videointelligence.v1.LabelSegment;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import java.util.List;
+
+/** Quickstart: detects labels in a GCS-hosted video and prints them with their segments. */
+public class QuickstartSample {
+
+  /** Demonstrates using the video intelligence client to detect labels in a video file. */
+  public static void main(String[] args) throws Exception {
+    // Instantiate a video intelligence client
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // The Google Cloud Storage path to the video to annotate.
+      String gcsUri = "gs://cloud-samples-data/video/cat.mp4";
+
+      // Create an operation that will contain the response when the operation completes.
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.LABEL_DETECTION)
+              .build();
+
+      // Type arguments restored: the raw OperationFuture had lost its
+      // <AnnotateVideoResponse, AnnotateVideoProgress> parameters.
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+
+      // Element type restored for the previously-raw List.
+      List<VideoAnnotationResults> results = response.get().getAnnotationResultsList();
+      if (results.isEmpty()) {
+        System.out.println("No labels detected in " + gcsUri);
+        return;
+      }
+      for (VideoAnnotationResults result : results) {
+        System.out.println("Labels:");
+        // get video segment label annotations
+        for (LabelAnnotation annotation : result.getSegmentLabelAnnotationsList()) {
+          System.out.println(
+              "Video label description : " + annotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : annotation.getCategoryEntitiesList()) {
+            System.out.println("Label Category description : " + categoryEntity.getDescription());
+          }
+          // segments: nanos / 1e9 converts the Duration remainder to fractional seconds
+          for (LabelSegment segment : annotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            System.out.printf("Segment location : %.3f:%.3f\n", startTime, endTime);
+            System.out.println("Confidence : " + segment.getConfidence());
+          }
+        }
+      }
+    }
+  }
+}
+// [END video_quickstart]
diff --git a/video/src/main/java/com/example/video/TextDetection.java b/video/src/main/java/com/example/video/TextDetection.java
new file mode 100644
index 00000000000..dd823298c2a
--- /dev/null
+++ b/video/src/main/java/com/example/video/TextDetection.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.NormalizedVertex;
+import com.google.cloud.videointelligence.v1.TextAnnotation;
+import com.google.cloud.videointelligence.v1.TextFrame;
+import com.google.cloud.videointelligence.v1.TextSegment;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Duration;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/** Demonstrates text detection with the Video Intelligence API, from a local file or from GCS. */
+public class TextDetection {
+
+  // [START video_detect_text]
+  /**
+   * Detect text in a video.
+   *
+   * @param filePath the path to the video file to analyze.
+   */
+  public static VideoAnnotationResults detectText(String filePath) throws Exception {
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Read file
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+
+      // Create the request
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputContent(ByteString.copyFrom(data))
+              .addFeatures(Feature.TEXT_DETECTION)
+              .build();
+
+      // Asynchronously perform text detection on the video. (Comment fixed: this sample runs
+      // TEXT_DETECTION, not object tracking; type arguments restored on the raw OperationFuture.)
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // The first result is retrieved because a single video was processed.
+      AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
+      VideoAnnotationResults results = response.getAnnotationResults(0);
+
+      // Get only the first annotation for demo purposes.
+      TextAnnotation annotation = results.getTextAnnotations(0);
+      System.out.println("Text: " + annotation.getText());
+
+      // Get the first text segment.
+      TextSegment textSegment = annotation.getSegments(0);
+      System.out.println("Confidence: " + textSegment.getConfidence());
+      // For the text segment display its time offset
+      VideoSegment videoSegment = textSegment.getSegment();
+      Duration startTimeOffset = videoSegment.getStartTimeOffset();
+      Duration endTimeOffset = videoSegment.getEndTimeOffset();
+      // Display the offset times in seconds, 1e9 is part of the formula to convert nanos to seconds
+      System.out.println(
+          String.format(
+              "Start time: %.2f", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
+      System.out.println(
+          String.format(
+              "End time: %.2f", endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+
+      // Show the first result for the first frame in the segment.
+      TextFrame textFrame = textSegment.getFrames(0);
+      Duration timeOffset = textFrame.getTimeOffset();
+      System.out.println(
+          String.format(
+              "Time offset for the first frame: %.2f",
+              timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+      // Display the rotated bounding box for where the text is on the frame.
+      // (Element type restored for the previously-raw List.)
+      System.out.println("Rotated Bounding Box Vertices:");
+      List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
+      for (NormalizedVertex normalizedVertex : vertices) {
+        System.out.println(
+            String.format(
+                "\tVertex.x: %.2f, Vertex.y: %.2f",
+                normalizedVertex.getX(), normalizedVertex.getY()));
+      }
+      return results;
+    }
+  }
+  // [END video_detect_text]
+
+  // [START video_detect_text_gcs]
+  /**
+   * Detect Text in a video.
+   *
+   * @param gcsUri the path to the video file to analyze.
+   */
+  public static VideoAnnotationResults detectTextGcs(String gcsUri) throws Exception {
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Create the request
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.TEXT_DETECTION)
+              .build();
+
+      // Asynchronously perform text detection on the video. (Comment fixed: this sample runs
+      // TEXT_DETECTION, not object tracking; type arguments restored on the raw OperationFuture.)
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // The first result is retrieved because a single video was processed.
+      AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
+      VideoAnnotationResults results = response.getAnnotationResults(0);
+
+      // Get only the first annotation for demo purposes.
+      TextAnnotation annotation = results.getTextAnnotations(0);
+      System.out.println("Text: " + annotation.getText());
+
+      // Get the first text segment.
+      TextSegment textSegment = annotation.getSegments(0);
+      System.out.println("Confidence: " + textSegment.getConfidence());
+      // For the text segment display its time offset
+      VideoSegment videoSegment = textSegment.getSegment();
+      Duration startTimeOffset = videoSegment.getStartTimeOffset();
+      Duration endTimeOffset = videoSegment.getEndTimeOffset();
+      // Display the offset times in seconds, 1e9 is part of the formula to convert nanos to seconds
+      System.out.println(
+          String.format(
+              "Start time: %.2f", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
+      System.out.println(
+          String.format(
+              "End time: %.2f", endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+
+      // Show the first result for the first frame in the segment.
+      TextFrame textFrame = textSegment.getFrames(0);
+      Duration timeOffset = textFrame.getTimeOffset();
+      System.out.println(
+          String.format(
+              "Time offset for the first frame: %.2f",
+              timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+      // Display the rotated bounding box for where the text is on the frame.
+      // (Element type restored for the previously-raw List.)
+      System.out.println("Rotated Bounding Box Vertices:");
+      List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
+      for (NormalizedVertex normalizedVertex : vertices) {
+        System.out.println(
+            String.format(
+                "\tVertex.x: %.2f, Vertex.y: %.2f",
+                normalizedVertex.getX(), normalizedVertex.getY()));
+      }
+      return results;
+    }
+  }
+  // [END video_detect_text_gcs]
+}
diff --git a/video/src/main/java/com/example/video/TrackObjects.java b/video/src/main/java/com/example/video/TrackObjects.java
new file mode 100644
index 00000000000..d9c43df8866
--- /dev/null
+++ b/video/src/main/java/com/example/video/TrackObjects.java
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation;
+import com.google.cloud.videointelligence.v1.ObjectTrackingFrame;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Duration;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.TimeUnit;
+
+/** Demonstrates object tracking with the Video Intelligence API, from a local file or from GCS. */
+public class TrackObjects {
+
+  // [START video_object_tracking]
+  /**
+   * Track objects in a video.
+   *
+   * @param filePath the path to the video file to analyze.
+   */
+  public static VideoAnnotationResults trackObjects(String filePath) throws Exception {
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Read file
+      Path path = Paths.get(filePath);
+      byte[] data = Files.readAllBytes(path);
+
+      // Create the request
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputContent(ByteString.copyFrom(data))
+              .addFeatures(Feature.OBJECT_TRACKING)
+              .setLocationId("us-east1")
+              .build();
+
+      // Asynchronously perform object tracking on videos. (Type arguments restored: the raw
+      // OperationFuture had lost its <AnnotateVideoResponse, AnnotateVideoProgress> parameters.)
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // The first result is retrieved because a single video was processed.
+      AnnotateVideoResponse response = future.get(450, TimeUnit.SECONDS);
+      VideoAnnotationResults results = response.getAnnotationResults(0);
+
+      // Get only the first annotation for demo purposes.
+      ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
+      System.out.println("Confidence: " + annotation.getConfidence());
+
+      if (annotation.hasEntity()) {
+        Entity entity = annotation.getEntity();
+        System.out.println("Entity description: " + entity.getDescription());
+        System.out.println("Entity id:: " + entity.getEntityId());
+      }
+
+      if (annotation.hasSegment()) {
+        VideoSegment videoSegment = annotation.getSegment();
+        Duration startTimeOffset = videoSegment.getStartTimeOffset();
+        Duration endTimeOffset = videoSegment.getEndTimeOffset();
+        // Display the segment time in seconds, 1e9 converts nanos to seconds
+        System.out.println(
+            String.format(
+                "Segment: %.2fs to %.2fs",
+                startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
+                endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+      }
+
+      // Here we print only the bounding box of the first frame in this segment.
+      ObjectTrackingFrame frame = annotation.getFrames(0);
+      // Display the offset time in seconds, 1e9 converts nanos to seconds
+      Duration timeOffset = frame.getTimeOffset();
+      System.out.println(
+          String.format(
+              "Time offset of the first frame: %.2fs",
+              timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+      // Display the bounding box of the detected object
+      NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
+      System.out.println("Bounding box position:");
+      System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
+      System.out.println("\ttop: " + normalizedBoundingBox.getTop());
+      System.out.println("\tright: " + normalizedBoundingBox.getRight());
+      System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
+      return results;
+    }
+  }
+  // [END video_object_tracking]
+
+  // [START video_object_tracking_gcs]
+  /**
+   * Track objects in a video.
+   *
+   * @param gcsUri the path to the video file to analyze.
+   */
+  public static VideoAnnotationResults trackObjectsGcs(String gcsUri) throws Exception {
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Create the request
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.OBJECT_TRACKING)
+              .setLocationId("us-east1")
+              .build();
+
+      // Asynchronously perform object tracking on videos. (Type arguments restored: the raw
+      // OperationFuture had lost its <AnnotateVideoResponse, AnnotateVideoProgress> parameters.)
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      // The first result is retrieved because a single video was processed.
+      AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
+      VideoAnnotationResults results = response.getAnnotationResults(0);
+
+      // Get only the first annotation for demo purposes.
+      ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
+      System.out.println("Confidence: " + annotation.getConfidence());
+
+      if (annotation.hasEntity()) {
+        Entity entity = annotation.getEntity();
+        System.out.println("Entity description: " + entity.getDescription());
+        System.out.println("Entity id:: " + entity.getEntityId());
+      }
+
+      if (annotation.hasSegment()) {
+        VideoSegment videoSegment = annotation.getSegment();
+        Duration startTimeOffset = videoSegment.getStartTimeOffset();
+        Duration endTimeOffset = videoSegment.getEndTimeOffset();
+        // Display the segment time in seconds, 1e9 converts nanos to seconds
+        System.out.println(
+            String.format(
+                "Segment: %.2fs to %.2fs",
+                startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
+                endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+      }
+
+      // Here we print only the bounding box of the first frame in this segment.
+      ObjectTrackingFrame frame = annotation.getFrames(0);
+      // Display the offset time in seconds, 1e9 converts nanos to seconds
+      Duration timeOffset = frame.getTimeOffset();
+      System.out.println(
+          String.format(
+              "Time offset of the first frame: %.2fs",
+              timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+      // Display the bounding box of the detected object
+      NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
+      System.out.println("Bounding box position:");
+      System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
+      System.out.println("\ttop: " + normalizedBoundingBox.getTop());
+      System.out.println("\tright: " + normalizedBoundingBox.getRight());
+      System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
+      return results;
+    }
+  }
+  // [END video_object_tracking_gcs]
+}
diff --git a/video/src/main/java/video/Detect.java b/video/src/main/java/video/Detect.java
new file mode 100644
index 00000000000..40e08f5361b
--- /dev/null
+++ b/video/src/main/java/video/Detect.java
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.ExplicitContentFrame;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.LabelAnnotation;
+import com.google.cloud.videointelligence.v1.LabelSegment;
+import com.google.cloud.videointelligence.v1.SpeechRecognitionAlternative;
+import com.google.cloud.videointelligence.v1.SpeechTranscription;
+import com.google.cloud.videointelligence.v1.SpeechTranscriptionConfig;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoContext;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.cloud.videointelligence.v1.WordInfo;
+import com.google.protobuf.ByteString;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.TimeUnit;
+
+public class Detect {
+  /**
+   * Command-line entry point; delegates argument handling to {@link #argsHelper(String[])} and
+   * reports any failure on standard output instead of crashing with a stack trace to stderr.
+   *
+   * @param args specifies features to detect and the path to the video on Google Cloud Storage.
+   */
+  public static void main(String[] args) {
+    try {
+      argsHelper(args);
+    } catch (Exception ex) {
+      System.out.println("Exception while running:\n" + ex.getMessage() + "\n");
+      ex.printStackTrace(System.out);
+    }
+  }
+
+ /**
+ * Helper that handles the input passed to the program.
+ *
+ * @param args specifies features to detect and the path to the video on Google Cloud Storage.
+ * @throws IOException on Input/Output errors.
+ */
+ public static void argsHelper(String[] args) throws Exception {
+ if (args.length < 1) {
+ System.out.println("Usage:");
+ System.out.printf(
+ "\tjava %s \"\" \"\"\n"
+ + "Commands:\n"
+ + "\tlabels | shots\n"
+ + "Path:\n\tA URI for a Cloud Storage resource (gs://...)\n"
+ + "Examples: ",
+ Detect.class.getCanonicalName());
+ return;
+ }
+ String command = args[0];
+ String path = args.length > 1 ? args[1] : "";
+
+ if (command.equals("labels")) {
+ analyzeLabels(path);
+ }
+ if (command.equals("labels-file")) {
+ analyzeLabelsFile(path);
+ }
+ if (command.equals("shots")) {
+ analyzeShots(path);
+ }
+ if (command.equals("explicit-content")) {
+ analyzeExplicitContent(path);
+ }
+ if (command.equals("speech-transcription")) {
+ speechTranscription(path);
+ }
+ }
+
+  /**
+   * Performs label analysis on the video at the provided Cloud Storage path, printing video-,
+   * shot- and frame-level label annotations with their categories, segments and confidences.
+   *
+   * @param gcsUri the path to the video file to analyze.
+   */
+  public static void analyzeLabels(String gcsUri) throws Exception {
+    // [START video_analyze_labels_gcs]
+    // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+    try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+      // Provide path to file hosted on GCS as "gs://bucket-name/..."
+      AnnotateVideoRequest request =
+          AnnotateVideoRequest.newBuilder()
+              .setInputUri(gcsUri)
+              .addFeatures(Feature.LABEL_DETECTION)
+              .build();
+      // Create an operation that will contain the response when the operation completes.
+      // (Type arguments restored: the raw OperationFuture had lost its
+      // <AnnotateVideoResponse, AnnotateVideoProgress> parameters.)
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+          client.annotateVideoAsync(request);
+
+      System.out.println("Waiting for operation to complete...");
+      for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
+        // process video / segment level label annotations
+        System.out.println("Locations: ");
+        for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
+          System.out.println("Video label: " + labelAnnotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+            System.out.println("Video label category: " + categoryEntity.getDescription());
+          }
+          // segments
+          for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
+            System.out.println("Confidence: " + segment.getConfidence());
+          }
+        }
+
+        // process shot label annotations
+        for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
+          System.out.println("Shot label: " + labelAnnotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+            System.out.println("Shot label category: " + categoryEntity.getDescription());
+          }
+          // segments
+          for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
+            System.out.println("Confidence: " + segment.getConfidence());
+          }
+        }
+
+        // process frame label annotations
+        for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
+          System.out.println("Frame label: " + labelAnnotation.getEntity().getDescription());
+          // categories
+          for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+            System.out.println("Frame label category: " + categoryEntity.getDescription());
+          }
+          // segments
+          for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+            double startTime =
+                segment.getSegment().getStartTimeOffset().getSeconds()
+                    + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+            double endTime =
+                segment.getSegment().getEndTimeOffset().getSeconds()
+                    + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+            // Consistency fix: this branch printed "%.3f:%.2f" while the video- and shot-level
+            // branches above print "%.3f:%.3f"; all three now use the same precision.
+            System.out.printf("Segment location: %.3f:%.3f\n", startTime, endTime);
+            System.out.println("Confidence: " + segment.getConfidence());
+          }
+        }
+      }
+    }
+    // [END video_analyze_labels_gcs]
+  }
+
+ /**
+ * Performs label analysis on the video at the provided file path.
+ *
+ * @param filePath the path to the video file to analyze.
+ */
+ public static void analyzeLabelsFile(String filePath) throws Exception {
+ // [START video_analyze_labels]
+ // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Read file and encode into Base64
+ Path path = Paths.get(filePath);
+ byte[] data = Files.readAllBytes(path);
+
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputContent(ByteString.copyFrom(data))
+ .addFeatures(Feature.LABEL_DETECTION)
+ .build();
+ // Create an operation that will contain the response when the operation completes.
+ OperationFuture response =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
+ // process video / segment level label annotations
+ System.out.println("Locations: ");
+ for (LabelAnnotation labelAnnotation : results.getSegmentLabelAnnotationsList()) {
+ System.out.println("Video label: " + labelAnnotation.getEntity().getDescription());
+ // categories
+ for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+ System.out.println("Video label category: " + categoryEntity.getDescription());
+ }
+ // segments
+ for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+ double startTime =
+ segment.getSegment().getStartTimeOffset().getSeconds()
+ + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+ double endTime =
+ segment.getSegment().getEndTimeOffset().getSeconds()
+ + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+ System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
+ System.out.println("Confidence: " + segment.getConfidence());
+ }
+ }
+
+ // process shot label annotations
+ for (LabelAnnotation labelAnnotation : results.getShotLabelAnnotationsList()) {
+ System.out.println("Shot label: " + labelAnnotation.getEntity().getDescription());
+ // categories
+ for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+ System.out.println("Shot label category: " + categoryEntity.getDescription());
+ }
+ // segments
+ for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+ double startTime =
+ segment.getSegment().getStartTimeOffset().getSeconds()
+ + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+ double endTime =
+ segment.getSegment().getEndTimeOffset().getSeconds()
+ + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+ System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
+ System.out.println("Confidence: " + segment.getConfidence());
+ }
+ }
+
+ // process frame label annotations
+ for (LabelAnnotation labelAnnotation : results.getFrameLabelAnnotationsList()) {
+ System.out.println("Frame label: " + labelAnnotation.getEntity().getDescription());
+ // categories
+ for (Entity categoryEntity : labelAnnotation.getCategoryEntitiesList()) {
+ System.out.println("Frame label category: " + categoryEntity.getDescription());
+ }
+ // segments
+ for (LabelSegment segment : labelAnnotation.getSegmentsList()) {
+ double startTime =
+ segment.getSegment().getStartTimeOffset().getSeconds()
+ + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+ double endTime =
+ segment.getSegment().getEndTimeOffset().getSeconds()
+ + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+ System.out.printf("Segment location: %.3f:%.2f\n", startTime, endTime);
+ System.out.println("Confidence: " + segment.getConfidence());
+ }
+ }
+ }
+ }
+ // [END video_analyze_labels]
+ }
+
+ /**
+ * Performs shot analysis on the video at the provided Cloud Storage path.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static void analyzeShots(String gcsUri) throws Exception {
+ // [START video_analyze_shots]
+ // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Provide path to file hosted on GCS as "gs://bucket-name/..."
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.SHOT_CHANGE_DETECTION)
+ .build();
+
+ // Create an operation that will contain the response when the operation completes.
+ OperationFuture response =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // Print detected shot changes and their location ranges in the analyzed video.
+ for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
+ if (result.getShotAnnotationsCount() > 0) {
+ System.out.println("Shots: ");
+ for (VideoSegment segment : result.getShotAnnotationsList()) {
+ double startTime =
+ segment.getStartTimeOffset().getSeconds()
+ + segment.getStartTimeOffset().getNanos() / 1e9;
+ double endTime =
+ segment.getEndTimeOffset().getSeconds()
+ + segment.getEndTimeOffset().getNanos() / 1e9;
+ System.out.printf("Location: %.3f:%.3f\n", startTime, endTime);
+ }
+ } else {
+ System.out.println("No shot changes detected in " + gcsUri);
+ }
+ }
+ }
+ // [END video_analyze_shots]
+ }
+
+ /**
+ * Performs explicit content analysis on the video at the provided Cloud Storage path.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static void analyzeExplicitContent(String gcsUri) throws Exception {
+ // [START video_analyze_explicit_content]
+ // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Create an operation that will contain the response when the operation completes.
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.EXPLICIT_CONTENT_DETECTION)
+ .build();
+
+ OperationFuture response =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // Print detected annotations and their positions in the analyzed video.
+ for (VideoAnnotationResults result : response.get().getAnnotationResultsList()) {
+ for (ExplicitContentFrame frame : result.getExplicitAnnotation().getFramesList()) {
+ double frameTime =
+ frame.getTimeOffset().getSeconds() + frame.getTimeOffset().getNanos() / 1e9;
+ System.out.printf("Location: %.3fs\n", frameTime);
+ System.out.println("Adult: " + frame.getPornographyLikelihood());
+ }
+ }
+ // [END video_analyze_explicit_content]
+ }
+ }
+
+ /**
+ * Transcribe speech from a video stored on GCS.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static void speechTranscription(String gcsUri) throws Exception {
+ // [START video_speech_transcription_gcs]
+ // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Set the language code
+ SpeechTranscriptionConfig config =
+ SpeechTranscriptionConfig.newBuilder()
+ .setLanguageCode("en-US")
+ .setEnableAutomaticPunctuation(true)
+ .build();
+
+ // Set the video context with the above configuration
+ VideoContext context = VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build();
+
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.SPEECH_TRANSCRIPTION)
+ .setVideoContext(context)
+ .build();
+
+ // asynchronously perform speech transcription on videos
+ OperationFuture response =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // Display the results
+ for (VideoAnnotationResults results :
+ response.get(600, TimeUnit.SECONDS).getAnnotationResultsList()) {
+ for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
+ try {
+ // Print the transcription
+ if (speechTranscription.getAlternativesCount() > 0) {
+ SpeechRecognitionAlternative alternative = speechTranscription.getAlternatives(0);
+
+ System.out.printf("Transcript: %s\n", alternative.getTranscript());
+ System.out.printf("Confidence: %.2f\n", alternative.getConfidence());
+
+ System.out.println("Word level information:");
+ for (WordInfo wordInfo : alternative.getWordsList()) {
+ double startTime =
+ wordInfo.getStartTime().getSeconds() + wordInfo.getStartTime().getNanos() / 1e9;
+ double endTime =
+ wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
+ System.out.printf(
+ "\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
+ }
+ } else {
+ System.out.println("No transcription found");
+ }
+ } catch (IndexOutOfBoundsException ioe) {
+ System.out.println("Could not retrieve frame: " + ioe.getMessage());
+ }
+ }
+ }
+ }
+ // [END video_speech_transcription_gcs]
+ }
+}
diff --git a/video/src/main/java/video/DetectFaces.java b/video/src/main/java/video/DetectFaces.java
new file mode 100644
index 00000000000..71e45aec93f
--- /dev/null
+++ b/video/src/main/java/video/DetectFaces.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+// [START video_detect_faces]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.FaceDetectionAnnotation;
+import com.google.cloud.videointelligence.v1.FaceDetectionConfig;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoContext;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+public class DetectFaces {
+
+ public static void detectFaces() throws Exception {
+ // TODO(developer): Replace these variables before running the sample.
+ String localFilePath = "resources/googlework_short.mp4";
+ detectFaces(localFilePath);
+ }
+
+ // Detects faces in a video stored in a local file using the Cloud Video Intelligence API.
+ public static void detectFaces(String localFilePath) throws Exception {
+ try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
+ VideoIntelligenceServiceClient.create()) {
+ // Reads a local video file and converts it to base64.
+ Path path = Paths.get(localFilePath);
+ byte[] data = Files.readAllBytes(path);
+ ByteString inputContent = ByteString.copyFrom(data);
+
+ FaceDetectionConfig faceDetectionConfig =
+ FaceDetectionConfig.newBuilder()
+ // Must set includeBoundingBoxes to true to get facial attributes.
+ .setIncludeBoundingBoxes(true)
+ .setIncludeAttributes(true)
+ .build();
+ VideoContext videoContext =
+ VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
+
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputContent(inputContent)
+ .addFeatures(Feature.FACE_DETECTION)
+ .setVideoContext(videoContext)
+ .build();
+
+ // Detects faces in a video
+ OperationFuture future =
+ videoIntelligenceServiceClient.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ AnnotateVideoResponse response = future.get();
+
+ // Gets annotations for video
+ VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
+
+ // Annotations for list of faces detected, tracked and recognized in video.
+ for (FaceDetectionAnnotation faceDetectionAnnotation :
+ annotationResult.getFaceDetectionAnnotationsList()) {
+ System.out.print("Face detected:\n");
+ for (Track track : faceDetectionAnnotation.getTracksList()) {
+ VideoSegment segment = track.getSegment();
+ System.out.printf(
+ "\tStart: %d.%.0fs\n",
+ segment.getStartTimeOffset().getSeconds(),
+ segment.getStartTimeOffset().getNanos() / 1e6);
+ System.out.printf(
+ "\tEnd: %d.%.0fs\n",
+ segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);
+
+ // Each segment includes timestamped objects that
+ // include characteristics of the face detected.
+ TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
+
+ for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
+ // Attributes include glasses, headwear, smiling, direction of gaze
+ System.out.printf(
+ "\tAttribute %s: %s %s\n",
+ attribute.getName(), attribute.getValue(), attribute.getConfidence());
+ }
+ }
+ }
+ }
+ }
+}
+// [END video_detect_faces]
diff --git a/video/src/main/java/video/DetectFacesGcs.java b/video/src/main/java/video/DetectFacesGcs.java
new file mode 100644
index 00000000000..ec41d6d572b
--- /dev/null
+++ b/video/src/main/java/video/DetectFacesGcs.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+// [START video_detect_faces_gcs]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.FaceDetectionAnnotation;
+import com.google.cloud.videointelligence.v1.FaceDetectionConfig;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoContext;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+
+public class DetectFacesGcs {
+
+ public static void detectFacesGcs() throws Exception {
+ // TODO(developer): Replace these variables before running the sample.
+ String gcsUri = "gs://cloud-samples-data/video/googlework_short.mp4";
+ detectFacesGcs(gcsUri);
+ }
+
+ // Detects faces in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.
+ public static void detectFacesGcs(String gcsUri) throws Exception {
+ try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
+ VideoIntelligenceServiceClient.create()) {
+
+ FaceDetectionConfig faceDetectionConfig =
+ FaceDetectionConfig.newBuilder()
+ // Must set includeBoundingBoxes to true to get facial attributes.
+ .setIncludeBoundingBoxes(true)
+ .setIncludeAttributes(true)
+ .build();
+ VideoContext videoContext =
+ VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
+
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.FACE_DETECTION)
+ .setVideoContext(videoContext)
+ .build();
+
+ // Detects faces in a video
+ OperationFuture future =
+ videoIntelligenceServiceClient.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ AnnotateVideoResponse response = future.get();
+
+ // Gets annotations for video
+ VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
+
+ // Annotations for list of people detected, tracked and recognized in video.
+ for (FaceDetectionAnnotation faceDetectionAnnotation :
+ annotationResult.getFaceDetectionAnnotationsList()) {
+ System.out.print("Face detected:\n");
+ for (Track track : faceDetectionAnnotation.getTracksList()) {
+ VideoSegment segment = track.getSegment();
+ System.out.printf(
+ "\tStart: %d.%.0fs\n",
+ segment.getStartTimeOffset().getSeconds(),
+ segment.getStartTimeOffset().getNanos() / 1e6);
+ System.out.printf(
+ "\tEnd: %d.%.0fs\n",
+ segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);
+
+ // Each segment includes timestamped objects that
+ // include characteristics of the face detected.
+ TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
+
+ for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
+ // Attributes include glasses, headwear, smiling, direction of gaze
+ System.out.printf(
+ "\tAttribute %s: %s %s\n",
+ attribute.getName(), attribute.getValue(), attribute.getConfidence());
+ }
+ }
+ }
+ }
+ }
+}
+// [END video_detect_faces_gcs]
diff --git a/video/src/main/java/video/DetectPerson.java b/video/src/main/java/video/DetectPerson.java
new file mode 100644
index 00000000000..29653db79d1
--- /dev/null
+++ b/video/src/main/java/video/DetectPerson.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+// [START video_detect_person]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.DetectedLandmark;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.PersonDetectionAnnotation;
+import com.google.cloud.videointelligence.v1.PersonDetectionConfig;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoContext;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.ByteString;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+public class DetectPerson {
+
+ public static void detectPerson() throws Exception {
+ // TODO(developer): Replace these variables before running the sample.
+ String localFilePath = "resources/googlework_short.mp4";
+ detectPerson(localFilePath);
+ }
+
+ // Detects people in a video stored in a local file using the Cloud Video Intelligence API.
+ public static void detectPerson(String localFilePath) throws Exception {
+ try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
+ VideoIntelligenceServiceClient.create()) {
+ // Reads a local video file and converts it to base64.
+ Path path = Paths.get(localFilePath);
+ byte[] data = Files.readAllBytes(path);
+ ByteString inputContent = ByteString.copyFrom(data);
+
+ PersonDetectionConfig personDetectionConfig =
+ PersonDetectionConfig.newBuilder()
+ // Must set includeBoundingBoxes to true to get poses and attributes.
+ .setIncludeBoundingBoxes(true)
+ .setIncludePoseLandmarks(true)
+ .setIncludeAttributes(true)
+ .build();
+ VideoContext videoContext =
+ VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
+
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputContent(inputContent)
+ .addFeatures(Feature.PERSON_DETECTION)
+ .setVideoContext(videoContext)
+ .build();
+
+ // Detects people in a video
+ // We get the first result because only one video is processed.
+ OperationFuture future =
+ videoIntelligenceServiceClient.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ AnnotateVideoResponse response = future.get();
+
+ // Gets annotations for video
+ VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
+
+ // Annotations for list of people detected, tracked and recognized in video.
+ for (PersonDetectionAnnotation personDetectionAnnotation :
+ annotationResult.getPersonDetectionAnnotationsList()) {
+ System.out.print("Person detected:\n");
+ for (Track track : personDetectionAnnotation.getTracksList()) {
+ VideoSegment segment = track.getSegment();
+ System.out.printf(
+ "\tStart: %d.%.0fs\n",
+ segment.getStartTimeOffset().getSeconds(),
+ segment.getStartTimeOffset().getNanos() / 1e6);
+ System.out.printf(
+ "\tEnd: %d.%.0fs\n",
+ segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);
+
+ // Each segment includes timestamped objects that include characteristic--e.g. clothes,
+ // posture of the person detected.
+ TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
+
+ // Attributes include unique pieces of clothing, poses (i.e., body landmarks)
+ // of the person detected.
+ for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
+ System.out.printf(
+ "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
+ }
+
+ // Landmarks in person detection include body parts.
+ for (DetectedLandmark attribute : firstTimestampedObject.getLandmarksList()) {
+ System.out.printf(
+ "\tLandmark: %s; Vertex: %f, %f\n",
+ attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
+ }
+ }
+ }
+ }
+ }
+}
+// [END video_detect_person]
diff --git a/video/src/main/java/video/DetectPersonGcs.java b/video/src/main/java/video/DetectPersonGcs.java
new file mode 100644
index 00000000000..df4fdd07824
--- /dev/null
+++ b/video/src/main/java/video/DetectPersonGcs.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+// [START video_detect_person_gcs]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.DetectedLandmark;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.PersonDetectionAnnotation;
+import com.google.cloud.videointelligence.v1.PersonDetectionConfig;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoContext;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+
+public class DetectPersonGcs {
+
+ public static void detectPersonGcs() throws Exception {
+ // TODO(developer): Replace these variables before running the sample.
+ String gcsUri = "gs://cloud-samples-data/video/googlework_short.mp4";
+ detectPersonGcs(gcsUri);
+ }
+
+ // Detects people in a video stored in Google Cloud Storage using
+ // the Cloud Video Intelligence API.
+ public static void detectPersonGcs(String gcsUri) throws Exception {
+ try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
+ VideoIntelligenceServiceClient.create()) {
+ // Reads a local video file and converts it to base64.
+
+ PersonDetectionConfig personDetectionConfig =
+ PersonDetectionConfig.newBuilder()
+ // Must set includeBoundingBoxes to true to get poses and attributes.
+ .setIncludeBoundingBoxes(true)
+ .setIncludePoseLandmarks(true)
+ .setIncludeAttributes(true)
+ .build();
+ VideoContext videoContext =
+ VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
+
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.PERSON_DETECTION)
+ .setVideoContext(videoContext)
+ .build();
+
+ // Detects people in a video
+ OperationFuture future =
+ videoIntelligenceServiceClient.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ AnnotateVideoResponse response = future.get();
+ // Get the first response, since we sent only one video.
+ VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
+
+ // Annotations for list of people detected, tracked and recognized in video.
+ for (PersonDetectionAnnotation personDetectionAnnotation :
+ annotationResult.getPersonDetectionAnnotationsList()) {
+ System.out.print("Person detected:\n");
+ for (Track track : personDetectionAnnotation.getTracksList()) {
+ VideoSegment segment = track.getSegment();
+ System.out.printf(
+ "\tStart: %d.%.0fs\n",
+ segment.getStartTimeOffset().getSeconds(),
+ segment.getStartTimeOffset().getNanos() / 1e6);
+ System.out.printf(
+ "\tEnd: %d.%.0fs\n",
+ segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);
+
+ // Each segment includes timestamped objects that include characteristic--e.g. clothes,
+ // posture of the person detected.
+ TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
+
+ // Attributes include unique pieces of clothing, poses (i.e., body landmarks)
+ // of the person detected.
+ for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
+ System.out.printf(
+ "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
+ }
+
+ // Landmarks in person detection include body parts.
+ for (DetectedLandmark attribute : firstTimestampedObject.getLandmarksList()) {
+ System.out.printf(
+ "\tLandmark: %s; Vertex: %f, %f\n",
+ attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
+ }
+ }
+ }
+ }
+ }
+}
+// [END video_detect_person_gcs]
diff --git a/video/src/main/java/video/LogoDetection.java b/video/src/main/java/video/LogoDetection.java
new file mode 100644
index 00000000000..4a110e4a351
--- /dev/null
+++ b/video/src/main/java/video/LogoDetection.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+// [START video_detect_logo]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation;
+import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Duration;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class LogoDetection {
+
+ public static void detectLogo() throws Exception {
+ // TODO(developer): Replace these variables before running the sample.
+ String localFilePath = "path/to/your/video.mp4";
+ detectLogo(localFilePath);
+ }
+
+ public static void detectLogo(String filePath)
+ throws IOException, ExecutionException, InterruptedException, TimeoutException {
+ // Initialize client that will be used to send requests. This client only needs to be created
+ // once, and can be reused for multiple requests. After completing all of your requests, call
+ // the "close" method on the client to safely clean up any remaining background resources.
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Read file
+ Path path = Paths.get(filePath);
+ byte[] data = Files.readAllBytes(path);
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputContent(ByteString.copyFrom(data))
+ .addFeatures(Feature.LOGO_RECOGNITION)
+ .build();
+
+ // asynchronously perform object tracking on videos
+ OperationFuture future =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // The first result is retrieved because a single video was processed.
+ AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
+ VideoAnnotationResults annotationResult = response.getAnnotationResults(0);
+
+ // Annotations for list of logos detected, tracked and recognized in video.
+ for (LogoRecognitionAnnotation logoRecognitionAnnotation :
+ annotationResult.getLogoRecognitionAnnotationsList()) {
+ Entity entity = logoRecognitionAnnotation.getEntity();
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
+ System.out.printf("Entity Id : %s\n", entity.getEntityId());
+ System.out.printf("Description : %s\n", entity.getDescription());
+ // All logo tracks where the recognized logo appears. Each track corresponds to one logo
+ // instance appearing in consecutive frames.
+ for (Track track : logoRecognitionAnnotation.getTracksList()) {
+
+ // Video segment of a track.
+ Duration startTimeOffset = track.getSegment().getStartTimeOffset();
+ System.out.printf(
+ "\n\tStart Time Offset: %s.%s\n",
+ startTimeOffset.getSeconds(), startTimeOffset.getNanos());
+ Duration endTimeOffset = track.getSegment().getEndTimeOffset();
+ System.out.printf(
+ "\tEnd Time Offset: %s.%s\n", endTimeOffset.getSeconds(), endTimeOffset.getNanos());
+ System.out.printf("\tConfidence: %s\n", track.getConfidence());
+
+ // The object with timestamp and attributes per frame in the track.
+ for (TimestampedObject timestampedObject : track.getTimestampedObjectsList()) {
+
+ // Normalized Bounding box in a frame, where the object is located.
+ NormalizedBoundingBox normalizedBoundingBox =
+ timestampedObject.getNormalizedBoundingBox();
+ System.out.printf("\n\t\tLeft: %s\n", normalizedBoundingBox.getLeft());
+ System.out.printf("\t\tTop: %s\n", normalizedBoundingBox.getTop());
+ System.out.printf("\t\tRight: %s\n", normalizedBoundingBox.getRight());
+ System.out.printf("\t\tBottom: %s\n", normalizedBoundingBox.getBottom());
+
+ // Optional. The attributes of the object in the bounding box.
+ for (DetectedAttribute attribute : timestampedObject.getAttributesList()) {
+ System.out.printf("\n\t\t\tName: %s\n", attribute.getName());
+ System.out.printf("\t\t\tConfidence: %s\n", attribute.getConfidence());
+ System.out.printf("\t\t\tValue: %s\n", attribute.getValue());
+ }
+ }
+
+ // Optional. Attributes in the track level.
+ for (DetectedAttribute trackAttribute : track.getAttributesList()) {
+ System.out.printf("\n\t\tName : %s\n", trackAttribute.getName());
+ System.out.printf("\t\tConfidence : %s\n", trackAttribute.getConfidence());
+ System.out.printf("\t\tValue : %s\n", trackAttribute.getValue());
+ }
+ }
+
+ // All video segments where the recognized logo appears. There might be multiple instances
+ // of the same logo class appearing in one VideoSegment.
+ for (VideoSegment segment : logoRecognitionAnnotation.getSegmentsList()) {
+ System.out.printf(
+ "\n\tStart Time Offset : %s.%s\n",
+ segment.getStartTimeOffset().getSeconds(), segment.getStartTimeOffset().getNanos());
+ System.out.printf(
+ "\tEnd Time Offset : %s.%s\n",
+ segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos());
+ }
+ }
+ }
+ }
+}
+// [END video_detect_logo]
diff --git a/video/src/main/java/video/LogoDetectionGcs.java b/video/src/main/java/video/LogoDetectionGcs.java
new file mode 100644
index 00000000000..714c308c58a
--- /dev/null
+++ b/video/src/main/java/video/LogoDetectionGcs.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+// [START video_detect_logo_gcs]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.DetectedAttribute;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.LogoRecognitionAnnotation;
+import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1.TimestampedObject;
+import com.google.cloud.videointelligence.v1.Track;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.Duration;
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class LogoDetectionGcs {
+
+ public static void detectLogoGcs() throws Exception {
+ // TODO(developer): Replace these variables before running the sample.
+ String gcsUri = "gs://YOUR_BUCKET_ID/path/to/your/video.mp4";
+ detectLogoGcs(gcsUri);
+ }
+
+ public static void detectLogoGcs(String inputUri)
+ throws IOException, ExecutionException, InterruptedException, TimeoutException {
+ // Initialize client that will be used to send requests. This client only needs to be created
+ // once, and can be reused for multiple requests. After completing all of your requests, call
+ // the "close" method on the client to safely clean up any remaining background resources.
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(inputUri)
+ .addFeatures(Feature.LOGO_RECOGNITION)
+ .build();
+
+ // asynchronously perform object tracking on videos
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // The first result is retrieved because a single video was processed.
+ AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
+ VideoAnnotationResults annotationResult = response.getAnnotationResults(0);
+
+ // Annotations for list of logos detected, tracked and recognized in video.
+ for (LogoRecognitionAnnotation logoRecognitionAnnotation :
+ annotationResult.getLogoRecognitionAnnotationsList()) {
+ Entity entity = logoRecognitionAnnotation.getEntity();
+ // Opaque entity ID. Some IDs may be available in
+ // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
+ System.out.printf("Entity Id : %s\n", entity.getEntityId());
+ System.out.printf("Description : %s\n", entity.getDescription());
+ // All logo tracks where the recognized logo appears. Each track corresponds to one logo
+ // instance appearing in consecutive frames.
+ for (Track track : logoRecognitionAnnotation.getTracksList()) {
+
+ // Video segment of a track.
+ Duration startTimeOffset = track.getSegment().getStartTimeOffset();
+ System.out.printf(
+ "\n\tStart Time Offset: %s.%s\n",
+ startTimeOffset.getSeconds(), startTimeOffset.getNanos());
+ Duration endTimeOffset = track.getSegment().getEndTimeOffset();
+ System.out.printf(
+ "\tEnd Time Offset: %s.%s\n", endTimeOffset.getSeconds(), endTimeOffset.getNanos());
+ System.out.printf("\tConfidence: %s\n", track.getConfidence());
+
+ // The object with timestamp and attributes per frame in the track.
+ for (TimestampedObject timestampedObject : track.getTimestampedObjectsList()) {
+
+ // Normalized Bounding box in a frame, where the object is located.
+ NormalizedBoundingBox normalizedBoundingBox =
+ timestampedObject.getNormalizedBoundingBox();
+ System.out.printf("\n\t\tLeft: %s\n", normalizedBoundingBox.getLeft());
+ System.out.printf("\t\tTop: %s\n", normalizedBoundingBox.getTop());
+ System.out.printf("\t\tRight: %s\n", normalizedBoundingBox.getRight());
+ System.out.printf("\t\tBottom: %s\n", normalizedBoundingBox.getBottom());
+
+ // Optional. The attributes of the object in the bounding box.
+ for (DetectedAttribute attribute : timestampedObject.getAttributesList()) {
+ System.out.printf("\n\t\t\tName: %s\n", attribute.getName());
+ System.out.printf("\t\t\tConfidence: %s\n", attribute.getConfidence());
+ System.out.printf("\t\t\tValue: %s\n", attribute.getValue());
+ }
+ }
+
+ // Optional. Attributes in the track level.
+ for (DetectedAttribute trackAttribute : track.getAttributesList()) {
+ System.out.printf("\n\t\tName : %s\n", trackAttribute.getName());
+ System.out.printf("\t\tConfidence : %s\n", trackAttribute.getConfidence());
+ System.out.printf("\t\tValue : %s\n", trackAttribute.getValue());
+ }
+ }
+
+ // All video segments where the recognized logo appears. There might be multiple instances
+ // of the same logo class appearing in one VideoSegment.
+ for (VideoSegment segment : logoRecognitionAnnotation.getSegmentsList()) {
+ System.out.printf(
+ "\n\tStart Time Offset : %s.%s\n",
+ segment.getStartTimeOffset().getSeconds(), segment.getStartTimeOffset().getNanos());
+ System.out.printf(
+ "\tEnd Time Offset : %s.%s\n",
+ segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos());
+ }
+ }
+ }
+ }
+}
+// [END video_detect_logo_gcs]
diff --git a/video/src/main/java/video/QuickstartSample.java b/video/src/main/java/video/QuickstartSample.java
new file mode 100644
index 00000000000..9f0bd88f67e
--- /dev/null
+++ b/video/src/main/java/video/QuickstartSample.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+// [START video_quickstart]
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.LabelAnnotation;
+import com.google.cloud.videointelligence.v1.LabelSegment;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import java.util.List;
+
+public class QuickstartSample {
+
+ /** Demonstrates using the video intelligence client to detect labels in a video file. */
+ public static void main(String[] args) throws Exception {
+ // Instantiate a video intelligence client
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // The Google Cloud Storage path to the video to annotate.
+ String gcsUri = "gs://cloud-samples-data/video/cat.mp4";
+
+ // Create an operation that will contain the response when the operation completes.
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.LABEL_DETECTION)
+ .build();
+
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+          client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+
+      List<VideoAnnotationResults> results = response.get().getAnnotationResultsList();
+ if (results.isEmpty()) {
+ System.out.println("No labels detected in " + gcsUri);
+ return;
+ }
+ for (VideoAnnotationResults result : results) {
+ System.out.println("Labels:");
+ // get video segment label annotations
+ for (LabelAnnotation annotation : result.getSegmentLabelAnnotationsList()) {
+ System.out.println(
+ "Video label description : " + annotation.getEntity().getDescription());
+ // categories
+ for (Entity categoryEntity : annotation.getCategoryEntitiesList()) {
+ System.out.println("Label Category description : " + categoryEntity.getDescription());
+ }
+ // segments
+ for (LabelSegment segment : annotation.getSegmentsList()) {
+ double startTime =
+ segment.getSegment().getStartTimeOffset().getSeconds()
+ + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+ double endTime =
+ segment.getSegment().getEndTimeOffset().getSeconds()
+ + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+ System.out.printf("Segment location : %.3f:%.3f\n", startTime, endTime);
+ System.out.println("Confidence : " + segment.getConfidence());
+ }
+ }
+ }
+ }
+ }
+}
+// [END video_quickstart]
diff --git a/video/src/main/java/video/TextDetection.java b/video/src/main/java/video/TextDetection.java
new file mode 100644
index 00000000000..aa8d1d929ba
--- /dev/null
+++ b/video/src/main/java/video/TextDetection.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.NormalizedVertex;
+import com.google.cloud.videointelligence.v1.TextAnnotation;
+import com.google.cloud.videointelligence.v1.TextFrame;
+import com.google.cloud.videointelligence.v1.TextSegment;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Duration;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+public class TextDetection {
+
+ // [START video_detect_text]
+ /**
+ * Detect text in a video.
+ *
+ * @param filePath the path to the video file to analyze.
+ */
+ public static VideoAnnotationResults detectText(String filePath) throws Exception {
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Read file
+ Path path = Paths.get(filePath);
+ byte[] data = Files.readAllBytes(path);
+
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputContent(ByteString.copyFrom(data))
+ .addFeatures(Feature.TEXT_DETECTION)
+ .build();
+
+ // asynchronously perform object tracking on videos
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // The first result is retrieved because a single video was processed.
+ AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
+ VideoAnnotationResults results = response.getAnnotationResults(0);
+
+ // Get only the first annotation for demo purposes.
+ TextAnnotation annotation = results.getTextAnnotations(0);
+ System.out.println("Text: " + annotation.getText());
+
+ // Get the first text segment.
+ TextSegment textSegment = annotation.getSegments(0);
+ System.out.println("Confidence: " + textSegment.getConfidence());
+      // For the text segment display its time offset
+ VideoSegment videoSegment = textSegment.getSegment();
+ Duration startTimeOffset = videoSegment.getStartTimeOffset();
+ Duration endTimeOffset = videoSegment.getEndTimeOffset();
+ // Display the offset times in seconds, 1e9 is part of the formula to convert nanos to seconds
+ System.out.println(
+ String.format(
+ "Start time: %.2f", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
+ System.out.println(
+ String.format(
+ "End time: %.2f", endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+
+ // Show the first result for the first frame in the segment.
+ TextFrame textFrame = textSegment.getFrames(0);
+ Duration timeOffset = textFrame.getTimeOffset();
+ System.out.println(
+ String.format(
+ "Time offset for the first frame: %.2f",
+ timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+ // Display the rotated bounding box for where the text is on the frame.
+ System.out.println("Rotated Bounding Box Vertices:");
+      List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
+ for (NormalizedVertex normalizedVertex : vertices) {
+ System.out.println(
+ String.format(
+ "\tVertex.x: %.2f, Vertex.y: %.2f",
+ normalizedVertex.getX(), normalizedVertex.getY()));
+ }
+ return results;
+ }
+ }
+ // [END video_detect_text]
+
+ // [START video_detect_text_gcs]
+ /**
+ * Detect Text in a video.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static VideoAnnotationResults detectTextGcs(String gcsUri) throws Exception {
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.TEXT_DETECTION)
+ .build();
+
+ // asynchronously perform object tracking on videos
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // The first result is retrieved because a single video was processed.
+ AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
+ VideoAnnotationResults results = response.getAnnotationResults(0);
+
+ // Get only the first annotation for demo purposes.
+ TextAnnotation annotation = results.getTextAnnotations(0);
+ System.out.println("Text: " + annotation.getText());
+
+ // Get the first text segment.
+ TextSegment textSegment = annotation.getSegments(0);
+ System.out.println("Confidence: " + textSegment.getConfidence());
+      // For the text segment display its time offset
+ VideoSegment videoSegment = textSegment.getSegment();
+ Duration startTimeOffset = videoSegment.getStartTimeOffset();
+ Duration endTimeOffset = videoSegment.getEndTimeOffset();
+ // Display the offset times in seconds, 1e9 is part of the formula to convert nanos to seconds
+ System.out.println(
+ String.format(
+ "Start time: %.2f", startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
+ System.out.println(
+ String.format(
+ "End time: %.2f", endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+
+ // Show the first result for the first frame in the segment.
+ TextFrame textFrame = textSegment.getFrames(0);
+ Duration timeOffset = textFrame.getTimeOffset();
+ System.out.println(
+ String.format(
+ "Time offset for the first frame: %.2f",
+ timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+ // Display the rotated bounding box for where the text is on the frame.
+ System.out.println("Rotated Bounding Box Vertices:");
+      List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
+ for (NormalizedVertex normalizedVertex : vertices) {
+ System.out.println(
+ String.format(
+ "\tVertex.x: %.2f, Vertex.y: %.2f",
+ normalizedVertex.getX(), normalizedVertex.getY()));
+ }
+ return results;
+ }
+ }
+ // [END video_detect_text_gcs]
+}
diff --git a/video/src/main/java/video/TrackObjects.java b/video/src/main/java/video/TrackObjects.java
new file mode 100644
index 00000000000..211948eeb75
--- /dev/null
+++ b/video/src/main/java/video/TrackObjects.java
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1.Entity;
+import com.google.cloud.videointelligence.v1.Feature;
+import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation;
+import com.google.cloud.videointelligence.v1.ObjectTrackingFrame;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1.VideoSegment;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.Duration;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.TimeUnit;
+
+public class TrackObjects {
+
+ // [START video_object_tracking]
+ /**
+ * Track objects in a video.
+ *
+ * @param filePath the path to the video file to analyze.
+ */
+ public static VideoAnnotationResults trackObjects(String filePath) throws Exception {
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Read file
+ Path path = Paths.get(filePath);
+ byte[] data = Files.readAllBytes(path);
+
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputContent(ByteString.copyFrom(data))
+ .addFeatures(Feature.OBJECT_TRACKING)
+ .setLocationId("us-east1")
+ .build();
+
+ // asynchronously perform object tracking on videos
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // The first result is retrieved because a single video was processed.
+ AnnotateVideoResponse response = future.get(450, TimeUnit.SECONDS);
+ VideoAnnotationResults results = response.getAnnotationResults(0);
+
+ // Get only the first annotation for demo purposes.
+ ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
+ System.out.println("Confidence: " + annotation.getConfidence());
+
+ if (annotation.hasEntity()) {
+ Entity entity = annotation.getEntity();
+ System.out.println("Entity description: " + entity.getDescription());
+ System.out.println("Entity id:: " + entity.getEntityId());
+ }
+
+ if (annotation.hasSegment()) {
+ VideoSegment videoSegment = annotation.getSegment();
+ Duration startTimeOffset = videoSegment.getStartTimeOffset();
+ Duration endTimeOffset = videoSegment.getEndTimeOffset();
+ // Display the segment time in seconds, 1e9 converts nanos to seconds
+ System.out.println(
+ String.format(
+ "Segment: %.2fs to %.2fs",
+ startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
+ endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+ }
+
+ // Here we print only the bounding box of the first frame in this segment.
+ ObjectTrackingFrame frame = annotation.getFrames(0);
+ // Display the offset time in seconds, 1e9 converts nanos to seconds
+ Duration timeOffset = frame.getTimeOffset();
+ System.out.println(
+ String.format(
+ "Time offset of the first frame: %.2fs",
+ timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+ // Display the bounding box of the detected object
+ NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
+ System.out.println("Bounding box position:");
+ System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
+ System.out.println("\ttop: " + normalizedBoundingBox.getTop());
+ System.out.println("\tright: " + normalizedBoundingBox.getRight());
+ System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
+ return results;
+ }
+ }
+ // [END video_object_tracking]
+
+ // [START video_object_tracking_gcs]
+ /**
+ * Track objects in a video.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static VideoAnnotationResults trackObjectsGcs(String gcsUri) throws Exception {
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Create the request
+ AnnotateVideoRequest request =
+ AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.OBJECT_TRACKING)
+ .setLocationId("us-east1")
+ .build();
+
+ // asynchronously perform object tracking on videos
+      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
+          client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // The first result is retrieved because a single video was processed.
+ AnnotateVideoResponse response = future.get(450, TimeUnit.SECONDS);
+ VideoAnnotationResults results = response.getAnnotationResults(0);
+
+ // Get only the first annotation for demo purposes.
+ ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
+ System.out.println("Confidence: " + annotation.getConfidence());
+
+ if (annotation.hasEntity()) {
+ Entity entity = annotation.getEntity();
+ System.out.println("Entity description: " + entity.getDescription());
+ System.out.println("Entity id:: " + entity.getEntityId());
+ }
+
+ if (annotation.hasSegment()) {
+ VideoSegment videoSegment = annotation.getSegment();
+ Duration startTimeOffset = videoSegment.getStartTimeOffset();
+ Duration endTimeOffset = videoSegment.getEndTimeOffset();
+ // Display the segment time in seconds, 1e9 converts nanos to seconds
+ System.out.println(
+ String.format(
+ "Segment: %.2fs to %.2fs",
+ startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
+ endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
+ }
+
+ // Here we print only the bounding box of the first frame in this segment.
+ ObjectTrackingFrame frame = annotation.getFrames(0);
+ // Display the offset time in seconds, 1e9 converts nanos to seconds
+ Duration timeOffset = frame.getTimeOffset();
+ System.out.println(
+ String.format(
+ "Time offset of the first frame: %.2fs",
+ timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));
+
+ // Display the bounding box of the detected object
+ NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
+ System.out.println("Bounding box position:");
+ System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
+ System.out.println("\ttop: " + normalizedBoundingBox.getTop());
+ System.out.println("\tright: " + normalizedBoundingBox.getRight());
+ System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
+ return results;
+ }
+ }
+ // [END video_object_tracking_gcs]
+}
diff --git a/video/src/test/java/beta/video/DetectIT.java b/video/src/test/java/beta/video/DetectIT.java
new file mode 100644
index 00000000000..11f97a342f2
--- /dev/null
+++ b/video/src/test/java/beta/video/DetectIT.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.cloud.testing.junit4.MultipleAttemptsRule;
+import com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation;
+import com.google.cloud.videointelligence.v1p2beta1.TextAnnotation;
+import com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for video analysis sample. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class DetectIT {
+
+ static final String FILE_LOCATION = "gs://java-docs-samples-testing/video/googlework_short.mp4";
+  private static final List<String> POSSIBLE_TEXTS =
+      Arrays.asList(
+ "Google",
+ "SUR",
+ "SUR",
+ "ROTO",
+ "Vice President",
+ "58oo9",
+ "LONDRES",
+ "OMAR",
+ "PARIS",
+ "METRO",
+ "RUE",
+ "CARLO");
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Rule public MultipleAttemptsRule multipleAttemptsRule = new MultipleAttemptsRule(3);
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testSpeechTranscription() throws Exception {
+ String[] args = {"speech-transcription", FILE_LOCATION};
+ Detect.argsHelper(args);
+ String got = bout.toString();
+
+ assertThat(got).contains("cultural");
+ }
+
+ @Test
+ public void testTrackObjects() throws Exception {
+ TrackObjects.trackObjects("resources/googlework_short.mp4");
+
+ String got = bout.toString();
+
+ assertThat(got).contains("Entity id");
+ }
+
+ @Test
+ public void testTrackObjectsGcs() throws Exception {
+ VideoAnnotationResults result =
+ TrackObjects.trackObjectsGcs("gs://cloud-samples-data/video/cat.mp4");
+
+ boolean textExists = false;
+ for (ObjectTrackingAnnotation objectTrackingAnnotation : result.getObjectAnnotationsList()) {
+ if (objectTrackingAnnotation.getEntity().getDescription().toUpperCase().contains("CAT")) {
+ textExists = true;
+ break;
+ }
+ }
+
+ assertThat(textExists).isTrue();
+ }
+
+ @Test
+ public void testTextDetection() throws Exception {
+ try {
+ VideoAnnotationResults result = TextDetection.detectText("resources/googlework_short.mp4");
+ boolean textExists = false;
+ for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
+ for (String possibleText : POSSIBLE_TEXTS) {
+ if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
+ textExists = true;
+ break;
+ }
+ }
+ }
+
+ assertThat(textExists).isTrue();
+
+ } catch (TimeoutException ex) {
+ Assert.assertTrue(ex.getMessage().contains("Waited"));
+ }
+ }
+
+ @Test
+ public void testTextDetectionGcs() throws Exception {
+ VideoAnnotationResults result = TextDetection.detectTextGcs(FILE_LOCATION);
+
+ boolean textExists = false;
+ for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
+ for (String possibleText : POSSIBLE_TEXTS) {
+ if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
+ textExists = true;
+ break;
+ }
+ }
+ }
+
+ assertThat(textExists).isTrue();
+ }
+}
diff --git a/video/src/test/java/beta/video/DetectLogoGcsTest.java b/video/src/test/java/beta/video/DetectLogoGcsTest.java
new file mode 100644
index 00000000000..3e80e4695ee
--- /dev/null
+++ b/video/src/test/java/beta/video/DetectLogoGcsTest.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DetectLogoGcsTest {
+
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testDetectFaces() throws Exception {
+ DetectLogoGcs.detectLogoGcs("gs://cloud-samples-data/video/googlework_short.mp4");
+ String got = bout.toString();
+ assertThat(got).contains("Entity Id");
+ }
+}
diff --git a/video/src/test/java/beta/video/DetectLogoTest.java b/video/src/test/java/beta/video/DetectLogoTest.java
new file mode 100644
index 00000000000..64a56092a2d
--- /dev/null
+++ b/video/src/test/java/beta/video/DetectLogoTest.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DetectLogoTest {
+
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testDetectFaces() throws Exception {
+ DetectLogo.detectLogo("resources/googlework_short.mp4");
+ String got = bout.toString();
+ assertThat(got).contains("Entity Id");
+ }
+}
diff --git a/video/src/test/java/beta/video/StreamingAnnotationToStorageIT.java b/video/src/test/java/beta/video/StreamingAnnotationToStorageIT.java
new file mode 100644
index 00000000000..9b0f9ef1098
--- /dev/null
+++ b/video/src/test/java/beta/video/StreamingAnnotationToStorageIT.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.api.gax.paging.Page;
+import com.google.cloud.storage.Blob;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.Storage.BlobListOption;
+import com.google.cloud.storage.StorageOptions;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Integration (system) tests for {@link StreamingAnnotationToStorage}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class StreamingAnnotationToStorageIT {
+  private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT");
+  private static final String OUTPUT_PREFIX = "VIDEO_STREAMING_TEST_OUTPUT";
+
+  private ByteArrayOutputStream bout;
+  private PrintStream out;
+  private PrintStream originalPrintStream;
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalPrintStream = System.out;
+    System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    // Restore System.out so later tests and the runner print to the real console.
+    System.out.flush();
+    System.setOut(originalPrintStream);
+  }
+
+  @Test
+  public void testStreamingAnnotationToStorage() throws IOException, TimeoutException {
+    String gcsUri = String.format("gs://%s/%s", PROJECT_ID, OUTPUT_PREFIX);
+    StreamingAnnotationToStorage.streamingAnnotationToStorage("resources/cat.mp4", gcsUri);
+    String got = bout.toString();
+
+    assertThat(got).contains(String.format("Storage Uri: %s", gcsUri));
+
+    Storage storage = StorageOptions.getDefaultInstance().getService();
+
+    Page<Blob> blobs =
+        storage.list(
+            PROJECT_ID,
+            BlobListOption.currentDirectory(),
+            BlobListOption.prefix(OUTPUT_PREFIX + "/"));
+
+    deleteDirectory(storage, blobs);
+  }
+
+  private void deleteDirectory(Storage storage, Page<Blob> blobs) {
+    for (Blob blob : blobs.iterateAll()) {
+      System.out.println(blob.getName());
+      if (!blob.delete()) {
+        Page<Blob> subBlobs =
+            storage.list(
+                PROJECT_ID,
+                BlobListOption.currentDirectory(),
+                BlobListOption.prefix(blob.getName()));
+
+        deleteDirectory(storage, subBlobs);
+      }
+    }
+  }
+}
diff --git a/video/src/test/java/beta/video/StreamingAutoMlActionRecognitionIT.java b/video/src/test/java/beta/video/StreamingAutoMlActionRecognitionIT.java
new file mode 100644
index 00000000000..b0fd040a5b3
--- /dev/null
+++ b/video/src/test/java/beta/video/StreamingAutoMlActionRecognitionIT.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Integration (system) tests for {@link StreamingAutoMlActionRecognition}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class StreamingAutoMlActionRecognitionIT {
+
+  private static final String PROJECT_ID = System.getenv().get("GOOGLE_CLOUD_PROJECT");
+  private static final String MODEL_ID = "2787930479481847808";
+
+  private ByteArrayOutputStream bout;
+  private PrintStream out;
+  private PrintStream originalPrintStream;
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalPrintStream = System.out;
+    System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    // Restore System.out so later tests and the runner print to the real console.
+    System.out.flush();
+    System.setOut(originalPrintStream);
+  }
+
+  @Test
+  public void testStreamingAutoMlActionRecognition() {
+    // Bad Gateway sporadically occurs
+    int tryCount = 0;
+    int maxTries = 3;
+    while (tryCount < maxTries) {
+      try {
+        StreamingAutoMlActionRecognition.streamingAutoMlActionRecognition(
+            "resources/cat.mp4", PROJECT_ID, MODEL_ID);
+        assertThat(bout.toString()).contains("Video streamed successfully.");
+
+        break;
+      } catch (StatusRuntimeException ex) {
+        if (ex.getStatus().getCode() == Status.Code.UNAVAILABLE) {
+          assertThat(ex.getMessage()).contains("Bad Gateway");
+        }
+        tryCount++; // count every failed attempt so an unexpected status cannot loop forever
+      } catch (Exception e) {
+        e.printStackTrace(); tryCount++; // count the attempt instead of retrying endlessly
+      }
+    }
+  }
+}
diff --git a/video/src/test/java/beta/video/StreamingAutoMlClassificationIT.java b/video/src/test/java/beta/video/StreamingAutoMlClassificationIT.java
new file mode 100644
index 00000000000..e412833ae5d
--- /dev/null
+++ b/video/src/test/java/beta/video/StreamingAutoMlClassificationIT.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Integration (system) tests for {@link StreamingAutoMlClassification}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class StreamingAutoMlClassificationIT {
+
+  private static final String PROJECT_ID = System.getenv().get("GOOGLE_CLOUD_PROJECT");
+  private static final String MODEL_ID = "VCN6455760532254228480";
+
+  private ByteArrayOutputStream bout;
+  private PrintStream out;
+  private PrintStream originalPrintStream;
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalPrintStream = System.out;
+    System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    // Restore System.out so later tests and the runner print to the real console.
+    System.out.flush();
+    System.setOut(originalPrintStream);
+  }
+
+  @Test
+  public void testStreamingAutoMlClassification() {
+    // Bad Gateway sporadically occurs
+    int tryCount = 0;
+    int maxTries = 3;
+    while (tryCount < maxTries) {
+      try {
+        StreamingAutoMlClassification.streamingAutoMlClassification(
+            "resources/cat.mp4", PROJECT_ID, MODEL_ID);
+        assertThat(bout.toString()).contains("Video streamed successfully.");
+
+        break;
+      } catch (StatusRuntimeException ex) {
+        if (ex.getStatus().getCode() == Status.Code.UNAVAILABLE) {
+          assertThat(ex.getMessage()).contains("Bad Gateway");
+        }
+        tryCount++; // count every failed attempt so an unexpected status cannot loop forever
+      } catch (Exception e) {
+        e.printStackTrace(); tryCount++; // count the attempt instead of retrying endlessly
+      }
+    }
+  }
+}
diff --git a/video/src/test/java/beta/video/StreamingAutoMlObjectTrackingIT.java b/video/src/test/java/beta/video/StreamingAutoMlObjectTrackingIT.java
new file mode 100644
index 00000000000..2d7f082fe2c
--- /dev/null
+++ b/video/src/test/java/beta/video/StreamingAutoMlObjectTrackingIT.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Integration (system) tests for {@link StreamingAutoMlObjectTracking}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class StreamingAutoMlObjectTrackingIT {
+
+  private static final String PROJECT_ID = System.getenv().get("GOOGLE_CLOUD_PROJECT");
+  private static final String MODEL_ID = System.getenv().get("VIDEO_OBJECT_TRACKING_MODEL_ID");
+
+  private ByteArrayOutputStream bout;
+  private PrintStream out;
+  private PrintStream originalPrintStream;
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalPrintStream = System.out;
+    System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    // Restore System.out so later tests and the runner print to the real console.
+    System.out.flush();
+    System.setOut(originalPrintStream);
+  }
+
+  @Test
+  public void testStreamingAutoMlObjectTracking() {
+    // Bad Gateway sporadically occurs
+    int tryCount = 0;
+    int maxTries = 3;
+    while (tryCount < maxTries) {
+      try {
+        StreamingAutoMlObjectTracking.streamingAutoMlObjectTracking(
+            "resources/cat.mp4", PROJECT_ID, MODEL_ID);
+        assertThat(bout.toString()).contains("Video streamed successfully.");
+
+        break;
+      } catch (StatusRuntimeException ex) {
+        if (ex.getStatus().getCode() == Status.Code.UNAVAILABLE) {
+          assertThat(ex.getMessage()).contains("Bad Gateway");
+        }
+        tryCount++; // count every failed attempt so an unexpected status cannot loop forever
+      } catch (Exception e) {
+        e.printStackTrace(); tryCount++; // count the attempt instead of retrying endlessly
+      }
+    }
+  }
+}
diff --git a/video/src/test/java/beta/video/StreamingExplicitContentDetectionIT.java b/video/src/test/java/beta/video/StreamingExplicitContentDetectionIT.java
new file mode 100644
index 00000000000..6ba53cba575
--- /dev/null
+++ b/video/src/test/java/beta/video/StreamingExplicitContentDetectionIT.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Integration (system) tests for {@link StreamingExplicitContentDetection}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class StreamingExplicitContentDetectionIT {
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+    // Restore System.out so output from later tests goes to the real console.
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testStreamingExplicitContent() throws IOException, TimeoutException {
+ StreamingExplicitContentDetection.streamingExplicitContentDetection("resources/cat.mp4");
+ String got = bout.toString();
+
+ assertThat(got).contains("UNLIKELY");
+ }
+}
diff --git a/video/src/test/java/beta/video/StreamingLabelDetectionIT.java b/video/src/test/java/beta/video/StreamingLabelDetectionIT.java
new file mode 100644
index 00000000000..e779725b9d9
--- /dev/null
+++ b/video/src/test/java/beta/video/StreamingLabelDetectionIT.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Integration (system) tests for {@link StreamingLabelDetection}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class StreamingLabelDetectionIT {
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+    // Restore System.out so output from later tests goes to the real console.
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testStreamingLabelDetection() throws IOException, TimeoutException {
+ StreamingLabelDetection.streamingLabelDetection("resources/cat.mp4");
+ String got = bout.toString();
+
+ assertThat(got).contains("cat");
+ }
+}
diff --git a/video/src/test/java/beta/video/StreamingObjectTrackingIT.java b/video/src/test/java/beta/video/StreamingObjectTrackingIT.java
new file mode 100644
index 00000000000..2f16c1bd6d7
--- /dev/null
+++ b/video/src/test/java/beta/video/StreamingObjectTrackingIT.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Integration (system) tests for {@link StreamingObjectTracking}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class StreamingObjectTrackingIT {
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+    // Restore System.out so output from later tests goes to the real console.
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testStreamingObjectTracking() throws IOException, TimeoutException {
+ StreamingObjectTracking.streamingObjectTracking("resources/cat.mp4");
+ String got = bout.toString();
+
+ assertThat(got).contains("cat");
+ assertThat(got).contains("Left: 0.1");
+ assertThat(got).contains("Top: 0.2");
+ assertThat(got).contains("Right: 0.7");
+ assertThat(got).contains("Bottom: 0.8");
+ }
+}
diff --git a/video/src/test/java/beta/video/StreamingShotChangeDetectionIT.java b/video/src/test/java/beta/video/StreamingShotChangeDetectionIT.java
new file mode 100644
index 00000000000..6387cba1c8e
--- /dev/null
+++ b/video/src/test/java/beta/video/StreamingShotChangeDetectionIT.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package beta.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.cloud.testing.junit4.MultipleAttemptsRule;
+import io.grpc.StatusRuntimeException;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Integration (system) tests for {@link StreamingShotChangeDetection}. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class StreamingShotChangeDetectionIT {
+ @Rule public final MultipleAttemptsRule multipleAttemptsRule = new MultipleAttemptsRule(3);
+
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+    // Restore System.out so output from later tests goes to the real console.
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testStreamingShotChangeDetection()
+ throws IOException, TimeoutException, StatusRuntimeException {
+ StreamingShotChangeDetection.streamingShotChangeDetection("resources/cat.mp4");
+ String got = bout.toString();
+
+ assertThat(got).contains("Shot: 0.0");
+ assertThat(got).contains("to 14.8");
+ }
+}
diff --git a/video/src/test/java/com/example/video/DetectIT.java b/video/src/test/java/com/example/video/DetectIT.java
new file mode 100644
index 00000000000..d48ad18cd38
--- /dev/null
+++ b/video/src/test/java/com/example/video/DetectIT.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.cloud.videointelligence.v1.TextAnnotation;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.List;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for video analysis sample. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class DetectIT {
+  static final String LABEL_GCS_LOCATION = "gs://cloud-samples-data/video/cat.mp4";
+  static final String LABEL_FILE_LOCATION = "./resources/googlework_short.mp4";
+  static final String SHOTS_FILE_LOCATION = "gs://cloud-samples-data/video/gbikes_dinosaur.mp4";
+  static final String EXPLICIT_CONTENT_LOCATION = "gs://cloud-samples-data/video/cat.mp4";
+  static final String SPEECH_GCS_LOCATION =
+      "gs://java-docs-samples-testing/video/googlework_short.mp4";
+  private static final List<String> POSSIBLE_TEXTS =
+      Arrays.asList(
+          "Google",
+          "SUR",
+          "SUR",
+          "ROTO",
+          "Vice President",
+          "58oo9",
+          "LONDRES",
+          "OMAR",
+          "PARIS",
+          "METRO",
+          "RUE",
+          "CARLO");
+  private ByteArrayOutputStream bout;
+  private PrintStream out, originalOut; // originalOut saved in setUp, restored in tearDown
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalOut = System.out; System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    System.setOut(originalOut); // setOut(null) would make any later println throw NPE
+  }
+
+  @Test
+  public void testLabels() throws Exception {
+    String[] args = {"labels", LABEL_GCS_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+    assertThat(got).contains("Video label");
+  }
+
+  @Test
+  public void testLabelsFile() throws Exception {
+    String[] args = {"labels-file", LABEL_FILE_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+    assertThat(got).contains("Video label");
+  }
+
+  @Test
+  public void testExplicitContent() throws Exception {
+    String[] args = {"explicit-content", EXPLICIT_CONTENT_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+    assertThat(got).contains("Adult:");
+  }
+
+  @Test
+  public void testShots() throws Exception {
+    String[] args = {"shots", SHOTS_FILE_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+    assertThat(got).contains("Shots:");
+    assertThat(got).contains("Location:");
+  }
+
+  @Test
+  public void testSpeechTranscription() throws Exception {
+    String[] args = {"speech-transcription", SPEECH_GCS_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+
+    assertThat(got).contains("Transcript");
+  }
+
+  @Test
+  public void testTrackObjects() throws Exception {
+    TrackObjects.trackObjects("resources/googlework_short.mp4");
+
+    String got = bout.toString();
+
+    assertThat(got).contains("Entity id");
+  }
+
+  @Test
+  public void testTrackObjectsGcs() throws Exception {
+    TrackObjects.trackObjectsGcs(LABEL_GCS_LOCATION); // return value unused; assert on output
+
+    String got = bout.toString();
+    assertThat(got).contains("Entity id");
+  }
+
+  @Test
+  public void testTextDetection() throws Exception {
+    VideoAnnotationResults result = TextDetection.detectText("resources/googlework_short.mp4");
+
+    boolean textExists = false;
+    for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
+      for (String possibleText : POSSIBLE_TEXTS) {
+        if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
+          textExists = true;
+          break;
+        }
+      }
+    }
+
+    assertThat(textExists).isTrue();
+  }
+
+  @Test
+  public void testTextDetectionGcs() throws Exception {
+    VideoAnnotationResults result = TextDetection.detectTextGcs(SPEECH_GCS_LOCATION);
+
+    boolean textExists = false;
+    for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
+      for (String possibleText : POSSIBLE_TEXTS) {
+        if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
+          textExists = true;
+          break;
+        }
+      }
+    }
+
+    assertThat(textExists).isTrue();
+  }
+}
diff --git a/video/src/test/java/com/example/video/DetectLogoGcsTest.java b/video/src/test/java/com/example/video/DetectLogoGcsTest.java
new file mode 100644
index 00000000000..11715715503
--- /dev/null
+++ b/video/src/test/java/com/example/video/DetectLogoGcsTest.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.cloud.testing.junit4.MultipleAttemptsRule;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class DetectLogoGcsTest {
+  private ByteArrayOutputStream bout;
+  private PrintStream out, originalOut; // originalOut saved in setUp, restored in tearDown
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalOut = System.out; System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    System.setOut(originalOut); // setOut(null) would make any later println throw NPE
+  }
+
+  @Rule public MultipleAttemptsRule multipleAttemptsRule = new MultipleAttemptsRule(3);
+
+  @Test
+  public void testLogoDetectGcs()
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    LogoDetectionGcs.detectLogoGcs("gs://cloud-samples-data/video/googlework_tiny.mp4");
+    String got = bout.toString();
+
+    assertThat(got).contains("Description");
+    assertThat(got).contains("Confidence");
+    assertThat(got).contains("Start Time Offset");
+    assertThat(got).contains("End Time Offset");
+  }
+}
diff --git a/video/src/test/java/com/example/video/DetectLogoTest.java b/video/src/test/java/com/example/video/DetectLogoTest.java
new file mode 100644
index 00000000000..cec71886f0d
--- /dev/null
+++ b/video/src/test/java/com/example/video/DetectLogoTest.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DetectLogoTest {
+  private ByteArrayOutputStream bout;
+  private PrintStream out, originalOut; // originalOut saved in setUp, restored in tearDown
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalOut = System.out; System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    System.setOut(originalOut); // setOut(null) would make any later println throw NPE
+  }
+
+  @Test
+  public void testLogoDetect()
+      throws IOException, ExecutionException, InterruptedException, TimeoutException {
+    LogoDetection.detectLogo("resources/googlework_short.mp4");
+    String got = bout.toString();
+
+    assertThat(got).contains("Description");
+    assertThat(got).contains("Confidence");
+    assertThat(got).contains("Start Time Offset");
+    assertThat(got).contains("End Time Offset");
+  }
+}
diff --git a/video/src/test/java/com/example/video/QuickstartIT.java b/video/src/test/java/com/example/video/QuickstartIT.java
new file mode 100644
index 00000000000..75fdac01658
--- /dev/null
+++ b/video/src/test/java/com/example/video/QuickstartIT.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for video analysis sample. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class QuickstartIT {
+  private ByteArrayOutputStream bout;
+  private PrintStream out, originalOut; // originalOut saved in setUp, restored in tearDown
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalOut = System.out; System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    System.setOut(originalOut); // setOut(null) would make any later println throw NPE
+  }
+
+  @Test
+  public void test() throws Exception {
+    QuickstartSample.main(new String[0]);
+    String got = bout.toString();
+
+    // Test that the video with a cat has the whiskers label (may change).
+    assertThat(got.toUpperCase()).contains("VIDEO LABEL DESCRIPTION");
+    assertThat(got.toUpperCase()).contains("CONFIDENCE");
+  }
+}
diff --git a/video/src/test/java/video/DetectFacesGcsIT.java b/video/src/test/java/video/DetectFacesGcsIT.java
new file mode 100644
index 00000000000..d5e204a5b2a
--- /dev/null
+++ b/video/src/test/java/video/DetectFacesGcsIT.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
+public class DetectFacesGcsIT {
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testDetectFacesGcs() throws Exception {
+ DetectFacesGcs.detectFacesGcs("gs://cloud-samples-data/video/googlework_short.mp4");
+ String got = bout.toString();
+ assertThat(got).contains("Face detected:");
+ }
+}
diff --git a/video/src/test/java/video/DetectFacesIT.java b/video/src/test/java/video/DetectFacesIT.java
new file mode 100644
index 00000000000..da47d74efce
--- /dev/null
+++ b/video/src/test/java/video/DetectFacesIT.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
+public class DetectFacesIT {
+
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testDetectFaces() throws Exception {
+ DetectFaces.detectFaces("resources/googlework_short.mp4");
+ String got = bout.toString();
+ assertThat(got).contains("Face detected:");
+ }
+}
diff --git a/video/src/test/java/video/DetectIT.java b/video/src/test/java/video/DetectIT.java
new file mode 100644
index 00000000000..7837cca4ae2
--- /dev/null
+++ b/video/src/test/java/video/DetectIT.java
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for video analysis sample. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class DetectIT {
+  static final String LABEL_GCS_LOCATION = "gs://cloud-samples-data/video/cat.mp4";
+  static final String LABEL_FILE_LOCATION = "./resources/googlework_short.mp4";
+  static final String SHOTS_FILE_LOCATION = "gs://cloud-samples-data/video/gbikes_dinosaur.mp4";
+  static final String EXPLICIT_CONTENT_LOCATION = "gs://cloud-samples-data/video/cat.mp4";
+  static final String SPEECH_GCS_LOCATION =
+      "gs://java-docs-samples-testing/video/googlework_short.mp4";
+
+  private ByteArrayOutputStream bout;
+  private PrintStream out;
+  private PrintStream originalPrintStream;
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalPrintStream = System.out;
+    System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    // restores print statements in the original method
+    System.out.flush();
+    System.setOut(originalPrintStream);
+  }
+
+  @Test
+  public void testLabels() throws Exception {
+    String[] args = {"labels", LABEL_GCS_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+    assertThat(got).contains("Video label");
+  }
+
+  @Test
+  public void testLabelsFile() throws Exception {
+    String[] args = {"labels-file", LABEL_FILE_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+    assertThat(got).contains("Video label");
+  }
+
+  @Test
+  public void testExplicitContent() throws Exception {
+    String[] args = {"explicit-content", EXPLICIT_CONTENT_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+    assertThat(got).contains("Adult:");
+  }
+
+  @Test
+  public void testShots() throws Exception {
+    String[] args = {"shots", SHOTS_FILE_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+    assertThat(got).contains("Shots:");
+    assertThat(got).contains("Location:");
+  }
+
+  @Test
+  public void testSpeechTranscription() throws Exception {
+    String[] args = {"speech-transcription", SPEECH_GCS_LOCATION};
+    Detect.argsHelper(args);
+    String got = bout.toString();
+
+    assertThat(got).contains("Transcript");
+  }
+
+  @Test
+  public void testTrackObjects() throws Exception {
+    TrackObjects.trackObjects("resources/googlework_short.mp4");
+
+    String got = bout.toString();
+
+    assertThat(got).contains("Entity id");
+  }
+
+  @Test
+  public void testTrackObjectsGcs() throws Exception {
+    // Return value intentionally ignored; the sample prints its results to stdout.
+    TrackObjects.trackObjectsGcs(LABEL_GCS_LOCATION);
+
+    String got = bout.toString();
+    assertThat(got).contains("Entity id");
+  }
+}
diff --git a/video/src/test/java/video/DetectLogoGcsTest.java b/video/src/test/java/video/DetectLogoGcsTest.java
new file mode 100644
index 00000000000..e1774aad680
--- /dev/null
+++ b/video/src/test/java/video/DetectLogoGcsTest.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DetectLogoGcsTest {
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testLogoDetectGcs()
+ throws IOException, ExecutionException, InterruptedException, TimeoutException {
+ LogoDetectionGcs.detectLogoGcs("gs://cloud-samples-data/video/googlework_tiny.mp4");
+ String got = bout.toString();
+
+ assertThat(got).contains("Description");
+ assertThat(got).contains("Confidence");
+ assertThat(got).contains("Start Time Offset");
+ assertThat(got).contains("End Time Offset");
+ }
+}
diff --git a/video/src/test/java/video/DetectLogoTest.java b/video/src/test/java/video/DetectLogoTest.java
new file mode 100644
index 00000000000..0d91849bb4a
--- /dev/null
+++ b/video/src/test/java/video/DetectLogoTest.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DetectLogoTest {
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testLogoDetect()
+ throws IOException, ExecutionException, InterruptedException, TimeoutException {
+ LogoDetection.detectLogo("resources/googlework_short.mp4");
+ String got = bout.toString();
+
+ assertThat(got).contains("Description");
+ assertThat(got).contains("Confidence");
+ assertThat(got).contains("Start Time Offset");
+ assertThat(got).contains("End Time Offset");
+ }
+}
diff --git a/video/src/test/java/video/DetectPersonGcsIT.java b/video/src/test/java/video/DetectPersonGcsIT.java
new file mode 100644
index 00000000000..1ffa8fe40cc
--- /dev/null
+++ b/video/src/test/java/video/DetectPersonGcsIT.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
+public class DetectPersonGcsIT {
+
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testDetectPersonGcs() throws Exception {
+ DetectPersonGcs.detectPersonGcs("gs://cloud-samples-data/video/googlework_short.mp4");
+ String got = bout.toString();
+ assertThat(got).contains("Landmark");
+ }
+}
diff --git a/video/src/test/java/video/DetectPersonIT.java b/video/src/test/java/video/DetectPersonIT.java
new file mode 100644
index 00000000000..12551afedd8
--- /dev/null
+++ b/video/src/test/java/video/DetectPersonIT.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
+public class DetectPersonIT {
+
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void testDetectPerson() throws Exception {
+ DetectPerson.detectPerson("resources/googlework_short.mp4");
+ String got = bout.toString();
+ assertThat(got).contains("Landmark");
+ }
+}
diff --git a/video/src/test/java/video/DetectTextTest.java b/video/src/test/java/video/DetectTextTest.java
new file mode 100644
index 00000000000..7663860afbf
--- /dev/null
+++ b/video/src/test/java/video/DetectTextTest.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.cloud.testing.junit4.MultipleAttemptsRule;
+import com.google.cloud.videointelligence.v1.TextAnnotation;
+import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.List;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class DetectTextTest {
+  static final String SPEECH_GCS_LOCATION =
+      "gs://java-docs-samples-testing/video/googlework_short.mp4";
+  private static final List<String> POSSIBLE_TEXTS =
+      Arrays.asList(
+          "Google",
+          "SUR",
+          "ROTO",
+          "Vice President",
+          "58oo9",
+          "LONDRES",
+          "OMAR",
+          "PARIS",
+          "METRO",
+          "RUE",
+          "CARLO");
+  @Rule public MultipleAttemptsRule multipleAttemptsRule = new MultipleAttemptsRule(3);
+  private ByteArrayOutputStream bout;
+  private PrintStream out;
+  private PrintStream originalPrintStream;
+
+  @Before
+  public void setUp() {
+    bout = new ByteArrayOutputStream();
+    out = new PrintStream(bout);
+    originalPrintStream = System.out;
+    System.setOut(out);
+  }
+
+  @After
+  public void tearDown() {
+    // restores print statements in the original method
+    System.out.flush();
+    System.setOut(originalPrintStream);
+  }
+
+  @Test
+  public void testTextDetection() throws Exception {
+    VideoAnnotationResults result = TextDetection.detectText("resources/googlework_short.mp4");
+
+    boolean textExists = false;
+    for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
+      for (String possibleText : POSSIBLE_TEXTS) {
+        if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
+          textExists = true;
+          break;
+        }
+      }
+    }
+
+    assertThat(textExists).isTrue();
+  }
+
+  @Test
+  public void testTextDetectionGcs() throws Exception {
+    VideoAnnotationResults result = TextDetection.detectTextGcs(SPEECH_GCS_LOCATION);
+
+    boolean textExists = false;
+    for (TextAnnotation textAnnotation : result.getTextAnnotationsList()) {
+      for (String possibleText : POSSIBLE_TEXTS) {
+        if (textAnnotation.getText().toUpperCase().contains(possibleText.toUpperCase())) {
+          textExists = true;
+          break;
+        }
+      }
+    }
+
+    assertThat(textExists).isTrue();
+  }
+}
diff --git a/video/src/test/java/video/QuickstartIT.java b/video/src/test/java/video/QuickstartIT.java
new file mode 100644
index 00000000000..dcee2841596
--- /dev/null
+++ b/video/src/test/java/video/QuickstartIT.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for video analysis sample. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class QuickstartIT {
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+ private PrintStream originalPrintStream;
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ originalPrintStream = System.out;
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ // restores print statements in the original method
+ System.out.flush();
+ System.setOut(originalPrintStream);
+ }
+
+ @Test
+ public void test() throws Exception {
+ QuickstartSample.main(new String[0]);
+ String got = bout.toString();
+
+ // Test that the video with a cat has the whiskers label (may change).
+ assertThat(got.toUpperCase()).contains("VIDEO LABEL DESCRIPTION");
+ assertThat(got.toUpperCase()).contains("CONFIDENCE");
+ }
+}