Video v1beta2 (#1088)
* update analyze_safe_search

* update analyze_shots

* update explicit_content_detection and test

* update face detection

* update label detection (path)

* update label detection (file)

* flake

* safe search --> explicit content

* update faces tutorial

* update client library quickstart

* update shotchange tutorial

* update labels tutorial

* correct spelling

* correction start_time_offset

* import order

* rebased
dizcology authored and Jon Wayne Parrott committed Sep 19, 2017
1 parent b93397f commit a75e025
Showing 11 changed files with 249 additions and 151 deletions.
12 changes: 6 additions & 6 deletions video/cloud-client/analyze/README.rst
@@ -59,25 +59,25 @@ To run this sample:
$ python analyze.py
usage: analyze.py [-h] {faces,labels,labels_file,safe_search,shots} ...
usage: analyze.py [-h] {faces,labels,labels_file,explicit_content,shots} ...
This application demonstrates face detection, label detection, safe search,
and shot change detection using the Google Cloud API.
This application demonstrates face detection, label detection,
explicit content, and shot change detection using the Google Cloud API.
Usage Examples:
python analyze.py faces gs://demomaker/google_gmail.mp4
python analyze.py labels gs://cloud-ml-sandbox/video/chicago.mp4
python analyze.py labels_file resources/cat.mp4
python analyze.py shots gs://demomaker/gbikes_dinosaur.mp4
python analyze.py safe_search gs://demomaker/gbikes_dinosaur.mp4
python analyze.py explicit_content gs://demomaker/gbikes_dinosaur.mp4
positional arguments:
{faces,labels,labels_file,safe_search,shots}
{faces,labels,labels_file,explicit_content,shots}
faces Detects faces given a GCS path.
labels Detects labels given a GCS path.
labels_file Detects labels given a file path.
safe_search Detects safe search features the GCS path to a video.
explicit_content Detects explicit content from the GCS path to a video.
shots Detects camera shot changes.
optional arguments:
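The renamed explicit_content subcommand in this usage text is registered in analyze.py, whose diff is collapsed below. As a hypothetical sketch only (parser variable and handler names are assumed, not taken from the commit), the argparse wiring presumably follows the same pattern as the other subcommands:

import argparse


def analyze_explicit_content(path):
    """ Detects explicit content from the GCS path to a video. """
    # Stub standing in for the real sample function; a fuller sketch of it
    # follows the analyze.py entry below.
    pass


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(dest='command')

    explicit_content_parser = subparsers.add_parser(
        'explicit_content', help=analyze_explicit_content.__doc__)
    explicit_content_parser.add_argument('path')

    args = parser.parse_args()
    if args.command == 'explicit_content':
        analyze_explicit_content(args.path)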
242 changes: 169 additions & 73 deletions video/cloud-client/analyze/analyze.py

Large diffs are not rendered by default.
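Because the analyze.py diff is not rendered here, the following is a minimal sketch of what the renamed explicit-content sample likely looks like, reconstructed from the README and tests in this commit and assuming the v1beta2 response shape (explicit_annotation.frames, each with a time_offset Duration and a pornography_likelihood value); it is not the actual diff:

from google.cloud import videointelligence_v1beta2
from google.cloud.videointelligence_v1beta2 import enums


def analyze_explicit_content(path):
    """ Detects explicit content from the GCS path to a video. """
    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
    features = [enums.Feature.EXPLICIT_CONTENT_DETECTION]
    operation = video_client.annotate_video(path, features)
    print('\nProcessing video for explicit content annotations:')

    # result() blocks until the long-running operation completes; the real
    # samples poll in a loop and print progress dots instead.
    explicit_annotation = (operation.result().annotation_results[0].
                           explicit_annotation)

    likely_string = ('Unknown', 'Very unlikely', 'Unlikely', 'Possible',
                     'Likely', 'Very likely')
    for frame in explicit_annotation.frames:
        frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
        print('Time: {}s'.format(frame_time))
        print('\tpornography: {}'.format(
            likely_string[frame.pornography_likelihood]))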

24 changes: 11 additions & 13 deletions video/cloud-client/analyze/analyze_test.py
@@ -15,46 +15,44 @@
# limitations under the License.

import os

import pytest

import analyze
import pytest


BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
LABELS_FILE_PATH = '/video/cat.mp4'
FACES_FILE_PATH = '/video/googlework.mp4'
SAFE_SEARCH_FILE_PATH = '/video/cat.mp4'
EXPLICIT_CONTENT_FILE_PATH = '/video/cat.mp4'
SHOTS_FILE_PATH = '/video/gbikes_dinosaur.mp4'


@pytest.mark.slow
def test_cat_video_shots(capsys):
def test_analyze_shots(capsys):
analyze.analyze_shots(
'gs://{}{}'.format(BUCKET, SHOTS_FILE_PATH))
out, _ = capsys.readouterr()
assert 'Scene 1:' in out
assert 'Shot 1:' in out


@pytest.mark.slow
def test_work_video_faces(capsys):
def test_analyze_faces(capsys):
analyze.analyze_faces(
'gs://{}{}'.format(BUCKET, FACES_FILE_PATH))
out, _ = capsys.readouterr()
assert 'Thumbnail' in out


@pytest.mark.slow
def test_dino_video_labels(capsys):
def test_analyze_labels(capsys):
analyze.analyze_labels(
'gs://{}{}'.format(BUCKET, LABELS_FILE_PATH))
out, _ = capsys.readouterr()
assert 'Whiskers' in out
assert 'label description: cat' in out


@pytest.mark.slow
def test_cat_safe_search(capsys):
analyze.analyze_safe_search(
'gs://{}{}'.format(BUCKET, SAFE_SEARCH_FILE_PATH))
def test_analyze_explicit_content(capsys):
analyze.analyze_explicit_content(
'gs://{}{}'.format(BUCKET, EXPLICIT_CONTENT_FILE_PATH))
out, _ = capsys.readouterr()
assert 'medical' in out
assert 'pornography' in out
18 changes: 9 additions & 9 deletions video/cloud-client/faces/faces.py
@@ -32,17 +32,15 @@
import sys
import time

from google.cloud.gapic.videointelligence.v1beta1 import enums
from google.cloud.gapic.videointelligence.v1beta1 import (
video_intelligence_service_client)
from google.cloud import videointelligence_v1beta2
from google.cloud.videointelligence_v1beta2 import enums
# [END imports]


def analyze_faces(path):
# [START construct_request]
""" Detects faces given a GCS path. """
video_client = (video_intelligence_service_client.
VideoIntelligenceServiceClient())
video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
features = [enums.Feature.FACE_DETECTION]
operation = video_client.annotate_video(path, features)
# [END construct_request]
@@ -66,10 +64,12 @@ def analyze_faces(path):
print('Thumbnail size: {}'.format(len(face.thumbnail)))

for segment_id, segment in enumerate(face.segments):
print('Track {}: {} to {}'.format(
segment_id,
segment.start_time_offset,
segment.end_time_offset))
start_time = (segment.segment.start_time_offset.seconds +
segment.segment.start_time_offset.nanos / 1e9)
end_time = (segment.segment.end_time_offset.seconds +
segment.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
print('\tSegment {}: {}'.format(segment_id, positions))
# [END parse_response]
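The v1beta2 offsets above are protobuf Duration messages (seconds plus nanos) rather than the old integer offsets, so each sample now repeats the same arithmetic. Purely as an illustration, and not part of this commit, the conversion could be factored into a small helper:

def duration_to_seconds(duration):
    """ Returns a protobuf Duration (seconds + nanos) as float seconds. """
    return duration.seconds + duration.nanos / 1e9


# Hypothetical usage inside the loop above, e.g. for a face segment:
# positions = '{}s to {}s'.format(
#     duration_to_seconds(segment.segment.start_time_offset),
#     duration_to_seconds(segment.segment.end_time_offset))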


3 changes: 1 addition & 2 deletions video/cloud-client/faces/faces_test.py
@@ -15,10 +15,9 @@
# limitations under the License.

import os

import faces
import pytest

import faces

BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
FACES_FILE_PATH = '/video/googlework.mp4'
34 changes: 20 additions & 14 deletions video/cloud-client/labels/labels.py
@@ -32,17 +32,15 @@
import sys
import time

from google.cloud.gapic.videointelligence.v1beta1 import enums
from google.cloud.gapic.videointelligence.v1beta1 import (
video_intelligence_service_client)
from google.cloud import videointelligence_v1beta2
from google.cloud.videointelligence_v1beta2 import enums
# [END imports]


def analyze_labels(path):
""" Detects labels given a GCS path. """
# [START construct_request]
video_client = (video_intelligence_service_client.
VideoIntelligenceServiceClient())
video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
features = [enums.Feature.LABEL_DETECTION]
operation = video_client.annotate_video(path, features)
# [END construct_request]
@@ -60,15 +58,23 @@ def analyze_labels(path):
# [START parse_response]
results = operation.result().annotation_results[0]

for label in results.label_annotations:
print('Label description: {}'.format(label.description))
print('Locations:')

for l, location in enumerate(label.locations):
print('\t{}: {} to {}'.format(
l,
location.segment.start_time_offset,
location.segment.end_time_offset))
for i, segment_label in enumerate(results.segment_label_annotations):
print('Video label description: {}'.format(
segment_label.entity.description))
for category_entity in segment_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))

for i, segment in enumerate(segment_label.segments):
start_time = (segment.segment.start_time_offset.seconds +
segment.segment.start_time_offset.nanos / 1e9)
end_time = (segment.segment.end_time_offset.seconds +
segment.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
confidence = segment.confidence
print('\tSegment {}: {}'.format(i, positions))
print('\tConfidence: {}'.format(confidence))
print('\n')
# [END parse_response]
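Besides the segment-level labels parsed above, the v1beta2 response also exposes shot- and frame-level label annotations. As a hypothetical helper, with field names taken from the v1beta2 protos rather than from this diff, the shot-level list can be walked the same way:

def print_shot_labels(results):
    """ Prints shot-level labels from a v1beta2 VideoAnnotationResults. """
    for shot_label in results.shot_label_annotations:
        print('Shot label description: {}'.format(
            shot_label.entity.description))
        for shot_segment in shot_label.segments:
            start_time = (shot_segment.segment.start_time_offset.seconds +
                          shot_segment.segment.start_time_offset.nanos / 1e9)
            end_time = (shot_segment.segment.end_time_offset.seconds +
                        shot_segment.segment.end_time_offset.nanos / 1e9)
            print('\tShot segment: {}s to {}s (confidence: {})'.format(
                start_time, end_time, shot_segment.confidence))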


5 changes: 2 additions & 3 deletions video/cloud-client/labels/labels_test.py
@@ -15,10 +15,9 @@
# limitations under the License.

import os

import labels
import pytest

import labels

BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
LABELS_FILE_PATH = '/video/cat.mp4'
@@ -29,4 +28,4 @@ def test_feline_video_labels(capsys):
labels.analyze_labels(
'gs://{}{}'.format(BUCKET, LABELS_FILE_PATH))
out, _ = capsys.readouterr()
assert 'Whiskers' in out
assert 'Video label description: cat' in out
35 changes: 18 additions & 17 deletions video/cloud-client/quickstart/quickstart.py
@@ -26,12 +26,10 @@ def run_quickstart():
import sys
import time

from google.cloud.gapic.videointelligence.v1beta1 import enums
from google.cloud.gapic.videointelligence.v1beta1 import (
video_intelligence_service_client)
from google.cloud import videointelligence_v1beta2
from google.cloud.videointelligence_v1beta2 import enums

video_client = (video_intelligence_service_client.
VideoIntelligenceServiceClient())
video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
features = [enums.Feature.LABEL_DETECTION]
operation = video_client.annotate_video('gs://demomaker/cat.mp4', features)
print('\nProcessing video for label annotations:')
@@ -46,19 +44,22 @@ def run_quickstart():
# first result is retrieved because a single video was processed
results = operation.result().annotation_results[0]

for label in results.label_annotations:
print('Label description: {}'.format(label.description))
print('Locations:')

for l, location in enumerate(label.locations):
positions = 'Entire video'
if (location.segment.start_time_offset != -1 or
location.segment.end_time_offset != -1):
positions = '{} to {}'.format(
location.segment.start_time_offset / 1000000.0,
location.segment.end_time_offset / 1000000.0)
print('\t{}: {}'.format(l, positions))
for i, segment_label in enumerate(results.segment_label_annotations):
print('Video label description: {}'.format(
segment_label.entity.description))
for category_entity in segment_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))

for i, segment in enumerate(segment_label.segments):
start_time = (segment.segment.start_time_offset.seconds +
segment.segment.start_time_offset.nanos / 1e9)
end_time = (segment.segment.end_time_offset.seconds +
segment.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
confidence = segment.confidence
print('\tSegment {}: {}'.format(i, positions))
print('\tConfidence: {}'.format(confidence))
print('\n')
# [END videointelligence_quickstart]

2 changes: 1 addition & 1 deletion video/cloud-client/quickstart/quickstart_test.py
@@ -23,4 +23,4 @@
def test_quickstart(capsys):
quickstart.run_quickstart()
out, _ = capsys.readouterr()
assert 'Whiskers' in out
assert 'Video label description: cat' in out
23 changes: 11 additions & 12 deletions video/cloud-client/shotchange/shotchange.py
@@ -32,17 +32,15 @@
import sys
import time

from google.cloud.gapic.videointelligence.v1beta1 import enums
from google.cloud.gapic.videointelligence.v1beta1 import (
video_intelligence_service_client)
from google.cloud import videointelligence_v1beta2
from google.cloud.videointelligence_v1beta2 import enums
# [END imports]


def analyze_shots(path):
""" Detects camera shot changes. """
# [START construct_request]
video_client = (video_intelligence_service_client.
VideoIntelligenceServiceClient())
video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
features = [enums.Feature.SHOT_CHANGE_DETECTION]
operation = video_client.annotate_video(path, features)
# [END construct_request]
@@ -58,13 +56,14 @@ def analyze_shots(path):
# [END check_operation]

# [START parse_response]
shots = operation.result().annotation_results[0]

for note, shot in enumerate(shots.shot_annotations):
print('Scene {}: {} to {}'.format(
note,
shot.start_time_offset,
shot.end_time_offset))
shots = operation.result().annotation_results[0].shot_annotations

for i, shot in enumerate(shots):
start_time = (shot.start_time_offset.seconds +
shot.start_time_offset.nanos / 1e9)
end_time = (shot.end_time_offset.seconds +
shot.end_time_offset.nanos / 1e9)
print('\tShot {}: {} to {}'.format(i, start_time, end_time))
# [END parse_response]


2 changes: 1 addition & 1 deletion video/cloud-client/shotchange/shotchange_test.py
@@ -29,4 +29,4 @@ def test_shots_dino(capsys):
shotchange.analyze_shots(
'gs://{}{}'.format(BUCKET, SHOTS_FILE_PATH))
out, _ = capsys.readouterr()
assert 'Scene 1:' in out
assert 'Shot 1:' in out
