From bf14f71949639c906cc5a2754484afb0f5487ae8 Mon Sep 17 00:00:00 2001 From: Averi Kitsch Date: Fri, 27 Mar 2020 12:00:25 -0700 Subject: [PATCH] samples: update shared config (#2443) * update shared config * Update to 1.0.13 * lint * Fix linting * lint * fix imports Co-authored-by: Les Vogel --- .../speech/InfiniteStreamRecognize.java | 61 ++++++++----------- .../InfiniteStreamRecognizeOptions.java | 15 +++-- .../com/example/speech/QuickstartSample.java | 19 +++--- .../java/com/example/speech/Recognize.java | 2 - .../speech/TranscribeContextClasses.java | 1 - .../example/speech/TranscribeDiarization.java | 19 +++--- .../speech/TranscribeDiarizationGcs.java | 37 ++++++----- .../example/speech/QuickstartSampleIT.java | 4 +- .../speech/TranscribeContextClassesTests.java | 1 - .../speech/TranscribeDiarizationIT.java | 8 +-- 10 files changed, 72 insertions(+), 95 deletions(-) diff --git a/speech/snippets/src/main/java/com/example/speech/InfiniteStreamRecognize.java b/speech/snippets/src/main/java/com/example/speech/InfiniteStreamRecognize.java index fd4d460ddff..fa045ae0772 100644 --- a/speech/snippets/src/main/java/com/example/speech/InfiniteStreamRecognize.java +++ b/speech/snippets/src/main/java/com/example/speech/InfiniteStreamRecognize.java @@ -30,8 +30,6 @@ import com.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse; import com.google.protobuf.ByteString; import com.google.protobuf.Duration; - -import java.lang.Math; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.concurrent.BlockingQueue; @@ -87,11 +85,12 @@ public static String convertMillisToDate(double milliSeconds) { long millis = (long) milliSeconds; DecimalFormat format = new DecimalFormat(); format.setMinimumIntegerDigits(2); - return String.format("%s:%s /", - format.format(TimeUnit.MILLISECONDS.toMinutes(millis)), - format.format(TimeUnit.MILLISECONDS.toSeconds(millis) - - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(millis))) - ); + return String.format( + "%s:%s /", + format.format(TimeUnit.MILLISECONDS.toMinutes(millis)), + format.format( + TimeUnit.MILLISECONDS.toSeconds(millis) + - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(millis)))); } /** Performs infinite streaming speech recognition */ @@ -139,37 +138,35 @@ public void onResponse(StreamingRecognizeResponse response) { responses.add(response); StreamingRecognitionResult result = response.getResultsList().get(0); Duration resultEndTime = result.getResultEndTime(); - resultEndTimeInMS = (int) ((resultEndTime.getSeconds() * 1000) - + (resultEndTime.getNanos() / 1000000)); - double correctedTime = resultEndTimeInMS - bridgingOffset - + (STREAMING_LIMIT * restartCounter); + resultEndTimeInMS = + (int) + ((resultEndTime.getSeconds() * 1000) + (resultEndTime.getNanos() / 1000000)); + double correctedTime = + resultEndTimeInMS - bridgingOffset + (STREAMING_LIMIT * restartCounter); SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); if (result.getIsFinal()) { System.out.print(GREEN); System.out.print("\033[2K\r"); - System.out.printf("%s: %s [confidence: %.2f]\n", - convertMillisToDate(correctedTime), - alternative.getTranscript(), - alternative.getConfidence() - ); + System.out.printf( + "%s: %s [confidence: %.2f]\n", + convertMillisToDate(correctedTime), + alternative.getTranscript(), + alternative.getConfidence()); isFinalEndTime = resultEndTimeInMS; lastTranscriptWasFinal = true; } else { System.out.print(RED); System.out.print("\033[2K\r"); - System.out.printf("%s: %s", 
convertMillisToDate(correctedTime), - alternative.getTranscript() - ); + System.out.printf( + "%s: %s", convertMillisToDate(correctedTime), alternative.getTranscript()); lastTranscriptWasFinal = false; } } - public void onComplete() { - } + public void onComplete() {} - public void onError(Throwable t) { - } + public void onError(Throwable t) {} }; clientStream = client.streamingRecognizeCallable().splitCall(responseObserver); @@ -244,8 +241,8 @@ public void onError(Throwable t) { request = StreamingRecognizeRequest.newBuilder() - .setStreamingConfig(streamingRecognitionConfig) - .build(); + .setStreamingConfig(streamingRecognitionConfig) + .build(); System.out.println(YELLOW); System.out.printf("%d: RESTARTING REQUEST\n", restartCounter * STREAMING_LIMIT); @@ -269,11 +266,11 @@ public void onError(Throwable t) { if (bridgingOffset > finalRequestEndTime) { bridgingOffset = finalRequestEndTime; } - int chunksFromMS = (int) Math.floor((finalRequestEndTime - - bridgingOffset) / chunkTime); + int chunksFromMS = + (int) Math.floor((finalRequestEndTime - bridgingOffset) / chunkTime); // chunks from MS is number of chunks to resend - bridgingOffset = (int) Math.floor((lastAudioInput.size() - - chunksFromMS) * chunkTime); + bridgingOffset = + (int) Math.floor((lastAudioInput.size() - chunksFromMS) * chunkTime); // set bridging offset for next request for (int i = chunksFromMS; i < lastAudioInput.size(); i++) { request = @@ -289,12 +286,9 @@ public void onError(Throwable t) { tempByteString = ByteString.copyFrom(sharedQueue.take()); request = - StreamingRecognizeRequest.newBuilder() - .setAudioContent(tempByteString) - .build(); + StreamingRecognizeRequest.newBuilder().setAudioContent(tempByteString).build(); audioInput.add(tempByteString); - } clientStream.send(request); @@ -304,6 +298,5 @@ public void onError(Throwable t) { } } } - } // [END speech_transcribe_infinite_streaming] diff --git a/speech/snippets/src/main/java/com/example/speech/InfiniteStreamRecognizeOptions.java b/speech/snippets/src/main/java/com/example/speech/InfiniteStreamRecognizeOptions.java index 5966c151b9e..909ff2be08c 100644 --- a/speech/snippets/src/main/java/com/example/speech/InfiniteStreamRecognizeOptions.java +++ b/speech/snippets/src/main/java/com/example/speech/InfiniteStreamRecognizeOptions.java @@ -24,18 +24,18 @@ import org.apache.commons.cli.ParseException; public class InfiniteStreamRecognizeOptions { - String langCode = "en-US"; //by default english US + String langCode = "en-US"; // by default english US /** Construct an InfiniteStreamRecognizeOptions class from command line flags. 
*/ public static InfiniteStreamRecognizeOptions fromFlags(String[] args) { Options options = new Options(); options.addOption( - Option.builder() - .type(String.class) - .longOpt("lang_code") - .hasArg() - .desc("Language code") - .build()); + Option.builder() + .type(String.class) + .longOpt("lang_code") + .hasArg() + .desc("Language code") + .build()); CommandLineParser parser = new DefaultParser(); CommandLine commandLine; @@ -52,5 +52,4 @@ public static InfiniteStreamRecognizeOptions fromFlags(String[] args) { return null; } } - } diff --git a/speech/snippets/src/main/java/com/example/speech/QuickstartSample.java b/speech/snippets/src/main/java/com/example/speech/QuickstartSample.java index 1b1a4ee820a..8c4c17af81b 100644 --- a/speech/snippets/src/main/java/com/example/speech/QuickstartSample.java +++ b/speech/snippets/src/main/java/com/example/speech/QuickstartSample.java @@ -33,9 +33,7 @@ public class QuickstartSample { - /** - * Demonstrates using the Speech API to transcribe an audio file. - */ + /** Demonstrates using the Speech API to transcribe an audio file. */ public static void main(String... args) throws Exception { // Instantiates a client try (SpeechClient speechClient = SpeechClient.create()) { @@ -49,14 +47,13 @@ public static void main(String... args) throws Exception { ByteString audioBytes = ByteString.copyFrom(data); // Builds the sync recognize request - RecognitionConfig config = RecognitionConfig.newBuilder() - .setEncoding(AudioEncoding.LINEAR16) - .setSampleRateHertz(16000) - .setLanguageCode("en-US") - .build(); - RecognitionAudio audio = RecognitionAudio.newBuilder() - .setContent(audioBytes) - .build(); + RecognitionConfig config = + RecognitionConfig.newBuilder() + .setEncoding(AudioEncoding.LINEAR16) + .setSampleRateHertz(16000) + .setLanguageCode("en-US") + .build(); + RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build(); // Performs speech recognition on the audio file RecognizeResponse response = speechClient.recognize(config, audio); diff --git a/speech/snippets/src/main/java/com/example/speech/Recognize.java b/speech/snippets/src/main/java/com/example/speech/Recognize.java index 9c35e2b53ae..629974ad415 100644 --- a/speech/snippets/src/main/java/com/example/speech/Recognize.java +++ b/speech/snippets/src/main/java/com/example/speech/Recognize.java @@ -38,14 +38,12 @@ import com.google.cloud.speech.v1.WordInfo; import com.google.common.util.concurrent.SettableFuture; import com.google.protobuf.ByteString; - import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; - import javax.sound.sampled.AudioFormat; import javax.sound.sampled.AudioInputStream; import javax.sound.sampled.AudioSystem; diff --git a/speech/snippets/src/main/java/com/example/speech/TranscribeContextClasses.java b/speech/snippets/src/main/java/com/example/speech/TranscribeContextClasses.java index 04cdfb80cdc..b75013ea358 100644 --- a/speech/snippets/src/main/java/com/example/speech/TranscribeContextClasses.java +++ b/speech/snippets/src/main/java/com/example/speech/TranscribeContextClasses.java @@ -25,7 +25,6 @@ import com.google.cloud.speech.v1.SpeechContext; import com.google.cloud.speech.v1.SpeechRecognitionAlternative; import com.google.cloud.speech.v1.SpeechRecognitionResult; - import java.io.IOException; class TranscribeContextClasses { diff --git a/speech/snippets/src/main/java/com/example/speech/TranscribeDiarization.java 
b/speech/snippets/src/main/java/com/example/speech/TranscribeDiarization.java index 5a590d132d6..6778f4c5907 100644 --- a/speech/snippets/src/main/java/com/example/speech/TranscribeDiarization.java +++ b/speech/snippets/src/main/java/com/example/speech/TranscribeDiarization.java @@ -26,7 +26,6 @@ import com.google.cloud.speech.v1.SpeechRecognitionAlternative; import com.google.cloud.speech.v1.WordInfo; import com.google.protobuf.ByteString; - import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -51,14 +50,16 @@ static void transcribeDiarization(String fileName) throws IOException { try (SpeechClient client = SpeechClient.create()) { // Get the contents of the local audio file RecognitionAudio recognitionAudio = - RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build(); - SpeakerDiarizationConfig speakerDiarizationConfig = SpeakerDiarizationConfig.newBuilder() + RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build(); + SpeakerDiarizationConfig speakerDiarizationConfig = + SpeakerDiarizationConfig.newBuilder() .setEnableSpeakerDiarization(true) .setMinSpeakerCount(2) .setMaxSpeakerCount(2) .build(); // Configure request to enable Speaker diarization - RecognitionConfig config = RecognitionConfig.newBuilder() + RecognitionConfig config = + RecognitionConfig.newBuilder() .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16) .setLanguageCode("en-US") .setSampleRateHertz(8000) @@ -70,14 +71,14 @@ static void transcribeDiarization(String fileName) throws IOException { // Speaker Tags are only included in the last result object, which has only one alternative. SpeechRecognitionAlternative alternative = - recognizeResponse.getResults( - recognizeResponse.getResultsCount() - 1).getAlternatives(0); + recognizeResponse.getResults(recognizeResponse.getResultsCount() - 1).getAlternatives(0); // The alternative is made up of WordInfo objects that contain the speaker_tag. WordInfo wordInfo = alternative.getWords(0); int currentSpeakerTag = wordInfo.getSpeakerTag(); // For each word, get all the words associated with one speaker, once the speaker changes, // add a new line with the new speaker and their spoken words. 
- StringBuilder speakerWords = new StringBuilder( + StringBuilder speakerWords = + new StringBuilder( String.format("Speaker %d: %s", wordInfo.getSpeakerTag(), wordInfo.getWord())); for (int i = 1; i < alternative.getWordsCount(); i++) { wordInfo = alternative.getWords(i); @@ -86,9 +87,7 @@ static void transcribeDiarization(String fileName) throws IOException { speakerWords.append(wordInfo.getWord()); } else { speakerWords.append( - String.format("\nSpeaker %d: %s", - wordInfo.getSpeakerTag(), - wordInfo.getWord())); + String.format("\nSpeaker %d: %s", wordInfo.getSpeakerTag(), wordInfo.getWord())); currentSpeakerTag = wordInfo.getSpeakerTag(); } } diff --git a/speech/snippets/src/main/java/com/example/speech/TranscribeDiarizationGcs.java b/speech/snippets/src/main/java/com/example/speech/TranscribeDiarizationGcs.java index de55cc44ea6..de7245b9a21 100644 --- a/speech/snippets/src/main/java/com/example/speech/TranscribeDiarizationGcs.java +++ b/speech/snippets/src/main/java/com/example/speech/TranscribeDiarizationGcs.java @@ -27,59 +27,58 @@ import com.google.cloud.speech.v1.SpeechClient; import com.google.cloud.speech.v1.SpeechRecognitionAlternative; import com.google.cloud.speech.v1.WordInfo; - import java.io.IOException; import java.util.concurrent.ExecutionException; public class TranscribeDiarizationGcs { - static void transcribeDiarizationGcs() throws IOException, ExecutionException, - InterruptedException { + static void transcribeDiarizationGcs() + throws IOException, ExecutionException, InterruptedException { // TODO(developer): Replace these variables before running the sample. String gcsUri = "gs://cloud-samples-data/speech/commercial_mono.wav"; transcribeDiarizationGcs(gcsUri); } // Transcribe the give gcs file using speaker diarization - public static void transcribeDiarizationGcs(String gcsUri) throws IOException, - ExecutionException, InterruptedException { + public static void transcribeDiarizationGcs(String gcsUri) + throws IOException, ExecutionException, InterruptedException { // Initialize client that will be used to send requests. This client only needs to be created // once, and can be reused for multiple requests. After completing all of your requests, call // the "close" method on the client to safely clean up any remaining background resources. 
     try (SpeechClient speechClient = SpeechClient.create()) {
-      SpeakerDiarizationConfig speakerDiarizationConfig = SpeakerDiarizationConfig.newBuilder()
+      SpeakerDiarizationConfig speakerDiarizationConfig =
+          SpeakerDiarizationConfig.newBuilder()
               .setEnableSpeakerDiarization(true)
               .setMinSpeakerCount(2)
               .setMaxSpeakerCount(2)
               .build();
       // Configure request to enable Speaker diarization
       RecognitionConfig config =
-          RecognitionConfig.newBuilder()
-              .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
-              .setLanguageCode("en-US")
-              .setSampleRateHertz(8000)
-              .setDiarizationConfig(speakerDiarizationConfig)
-              .build();
+          RecognitionConfig.newBuilder()
+              .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
+              .setLanguageCode("en-US")
+              .setSampleRateHertz(8000)
+              .setDiarizationConfig(speakerDiarizationConfig)
+              .build();
       // Set the remote path for the audio file
       RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(gcsUri).build();
       // Use non-blocking call for getting file transcription
       OperationFuture<LongRunningRecognizeResponse, LongRunningRecognizeMetadata> future =
-          speechClient.longRunningRecognizeAsync(config, audio);
+          speechClient.longRunningRecognizeAsync(config, audio);
       System.out.println("Waiting for response...");
       // Speaker Tags are only included in the last result object, which has only one alternative.
       LongRunningRecognizeResponse response = future.get();
       SpeechRecognitionAlternative alternative =
-          response.getResults(
-              response.getResultsCount() - 1)
-              .getAlternatives(0);
+          response.getResults(response.getResultsCount() - 1).getAlternatives(0);
       // The alternative is made up of WordInfo objects that contain the speaker_tag.
       WordInfo wordInfo = alternative.getWords(0);
       int currentSpeakerTag = wordInfo.getSpeakerTag();
       // For each word, get all the words associated with one speaker, once the speaker changes,
       // add a new line with the new speaker and their spoken words.
-      StringBuilder speakerWords = new StringBuilder(
+      StringBuilder speakerWords =
+          new StringBuilder(
               String.format("Speaker %d: %s", wordInfo.getSpeakerTag(), wordInfo.getWord()));
       for (int i = 1; i < alternative.getWordsCount(); i++) {
         wordInfo = alternative.getWords(i);
@@ -88,9 +87,7 @@ public static void transcribeDiarizationGcs(String gcsUri) throws IOException,
           speakerWords.append(wordInfo.getWord());
         } else {
           speakerWords.append(
-              String.format("\nSpeaker %d: %s",
-                  wordInfo.getSpeakerTag(),
-                  wordInfo.getWord()));
+              String.format("\nSpeaker %d: %s", wordInfo.getSpeakerTag(), wordInfo.getWord()));
           currentSpeakerTag = wordInfo.getSpeakerTag();
         }
       }
diff --git a/speech/snippets/src/test/java/com/example/speech/QuickstartSampleIT.java b/speech/snippets/src/test/java/com/example/speech/QuickstartSampleIT.java
index 22be282b8e1..ed739930161 100644
--- a/speech/snippets/src/test/java/com/example/speech/QuickstartSampleIT.java
+++ b/speech/snippets/src/test/java/com/example/speech/QuickstartSampleIT.java
@@ -26,9 +26,7 @@
 import org.junit.runner.RunWith;
 import org.junit.runners.JUnit4;
 
-/**
- * Tests for quickstart sample.
- */
+/** Tests for quickstart sample. */
 @RunWith(JUnit4.class)
 @SuppressWarnings("checkstyle:abbreviationaswordinname")
 public class QuickstartSampleIT {
diff --git a/speech/snippets/src/test/java/com/example/speech/TranscribeContextClassesTests.java b/speech/snippets/src/test/java/com/example/speech/TranscribeContextClassesTests.java
index fc875ce2fea..1afd71d3517 100644
--- a/speech/snippets/src/test/java/com/example/speech/TranscribeContextClassesTests.java
+++ b/speech/snippets/src/test/java/com/example/speech/TranscribeContextClassesTests.java
@@ -21,7 +21,6 @@
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
diff --git a/speech/snippets/src/test/java/com/example/speech/TranscribeDiarizationIT.java b/speech/snippets/src/test/java/com/example/speech/TranscribeDiarizationIT.java
index cf814c288d5..ce69cdd2286 100644
--- a/speech/snippets/src/test/java/com/example/speech/TranscribeDiarizationIT.java
+++ b/speech/snippets/src/test/java/com/example/speech/TranscribeDiarizationIT.java
@@ -23,7 +23,6 @@
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.concurrent.ExecutionException;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -43,9 +42,8 @@ public class TranscribeDiarizationIT {
 
   private static void requireEnvVar(String varName) {
     assertNotNull(
-        System.getenv(varName),
-        "Environment variable '%s' is required to perform these tests.".format(varName)
-    );
+        System.getenv(varName),
+        "Environment variable '%s' is required to perform these tests.".format(varName));
   }
 
   @BeforeClass
@@ -75,7 +73,7 @@ public void testDiarization() throws IOException {
   @Test
   public void testDiarizationGcs() throws IOException, ExecutionException, InterruptedException {
     TranscribeDiarizationGcs.transcribeDiarizationGcs(
-        "gs://cloud-samples-data/speech/commercial_mono.wav");
+        "gs://cloud-samples-data/speech/commercial_mono.wav");
     String got = bout.toString();
     assertThat(got).contains("Speaker");
   }