From 172e0faf686876a7bf7fd711f378ceda0deb05e1 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Wed, 9 Jan 2019 16:31:48 -0800 Subject: [PATCH] Regenerate speech client --- .../cloud/speech/v1/RecognitionConfig.java | 207 ++++++++++++++---- .../speech/v1/RecognitionConfigOrBuilder.java | 35 ++- .../google/cloud/speech/v1/SpeechProto.java | 121 +++++----- .../v1/SpeechRecognitionAlternative.java | 46 ++++ ...SpeechRecognitionAlternativeOrBuilder.java | 10 + .../speech/v1/SpeechRecognitionResult.java | 91 ++++++++ .../v1/SpeechRecognitionResultOrBuilder.java | 13 ++ .../speech/v1/StreamingRecognitionResult.java | 88 ++++++++ .../StreamingRecognitionResultOrBuilder.java | 13 ++ .../google/cloud/speech/v1/cloud_speech.proto | 40 +++- .../google-cloud-speech/synth.metadata | 41 ++-- 11 files changed, 573 insertions(+), 132 deletions(-) diff --git a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfig.java b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfig.java index b5d9ca4329ee..ddafe0dbc1f3 100644 --- a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfig.java +++ b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfig.java @@ -26,6 +26,7 @@ private RecognitionConfig(com.google.protobuf.GeneratedMessageV3.Builder buil private RecognitionConfig() { encoding_ = 0; sampleRateHertz_ = 0; + enableSeparateRecognitionPerChannel_ = false; languageCode_ = ""; maxAlternatives_ = 0; profanityFilter_ = false; @@ -91,10 +92,10 @@ private RecognitionConfig( } case 50: { - if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { speechContexts_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000020; + mutable_bitField0_ |= 0x00000040; } speechContexts_.add( input.readMessage( @@ -111,6 +112,11 @@ private RecognitionConfig( enableAutomaticPunctuation_ = input.readBool(); break; } + case 96: + { + enableSeparateRecognitionPerChannel_ = input.readBool(); + break; + } case 106: { java.lang.String s = input.readStringRequireUtf8(); @@ -137,7 +143,7 @@ private RecognitionConfig( } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_); } this.unknownFields = unknownFields.build(); @@ -527,6 +533,26 @@ public int getSampleRateHertz() { return sampleRateHertz_; } + public static final int ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER = 12; + private boolean enableSeparateRecognitionPerChannel_; + /** + * + * + *
+   * This needs to be set to `true` explicitly and `audio_channel_count` > 1
+   * to get each channel recognized separately. The recognition result will
+   * contain a `channel_tag` field to state which channel that result belongs
+   * to. If this is not true, we will only recognize the first channel. The
+   * request is billed cumulatively for all channels recognized:
+   * `audio_channel_count` multiplied by the length of the audio.
+   * 
+ * + * bool enable_separate_recognition_per_channel = 12; + */ + public boolean getEnableSeparateRecognitionPerChannel() { + return enableSeparateRecognitionPerChannel_; + } + public static final int LANGUAGE_CODE_FIELD_NUMBER = 3; private volatile java.lang.Object languageCode_; /** @@ -841,15 +867,18 @@ public com.google.protobuf.ByteString getModelBytes() { * *
    * *Optional* Set to true to use an enhanced model for speech recognition.
-   * You must also set the `model` field to a valid, enhanced model. If
-   * `use_enhanced` is set to true and the `model` field is not set, then
-   * `use_enhanced` is ignored. If `use_enhanced` is true and an enhanced
-   * version of the specified model does not exist, then the speech is
-   * recognized using the standard version of the specified model.
+   * If `use_enhanced` is set to true and the `model` field is not set, then
+   * an appropriate enhanced model is chosen if:
+   * 1. the project is eligible for requesting enhanced models
+   * 2. an enhanced model exists for the audio
+   * If `use_enhanced` is true and an enhanced version of the specified model
+   * does not exist, then the speech is recognized using the standard version
+   * of the specified model.
    * Enhanced speech models require that you opt-in to data logging using
-   * instructions in the [documentation](/speech-to-text/enable-data-logging).
-   * If you set `use_enhanced` to true and you have not enabled audio logging,
-   * then you will receive an error.
+   * instructions in the
+   * [documentation](/speech-to-text/docs/enable-data-logging). If you set
+   * `use_enhanced` to true and you have not enabled audio logging, then you
+   * will receive an error.
    * 
* * bool use_enhanced = 14; @@ -898,6 +927,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (enableAutomaticPunctuation_ != false) { output.writeBool(11, enableAutomaticPunctuation_); } + if (enableSeparateRecognitionPerChannel_ != false) { + output.writeBool(12, enableSeparateRecognitionPerChannel_); + } if (!getModelBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 13, model_); } @@ -940,6 +972,11 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream.computeBoolSize(11, enableAutomaticPunctuation_); } + if (enableSeparateRecognitionPerChannel_ != false) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 12, enableSeparateRecognitionPerChannel_); + } if (!getModelBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, model_); } @@ -965,6 +1002,10 @@ public boolean equals(final java.lang.Object obj) { boolean result = true; result = result && encoding_ == other.encoding_; result = result && (getSampleRateHertz() == other.getSampleRateHertz()); + result = + result + && (getEnableSeparateRecognitionPerChannel() + == other.getEnableSeparateRecognitionPerChannel()); result = result && getLanguageCode().equals(other.getLanguageCode()); result = result && (getMaxAlternatives() == other.getMaxAlternatives()); result = result && (getProfanityFilter() == other.getProfanityFilter()); @@ -988,6 +1029,10 @@ public int hashCode() { hash = (53 * hash) + encoding_; hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER; hash = (53 * hash) + getSampleRateHertz(); + hash = (37 * hash) + ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashBoolean(getEnableSeparateRecognitionPerChannel()); hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER; hash = (53 * hash) + getLanguageCode().hashCode(); hash = (37 * hash) + MAX_ALTERNATIVES_FIELD_NUMBER; @@ -1158,6 +1203,8 @@ public Builder clear() { sampleRateHertz_ = 0; + enableSeparateRecognitionPerChannel_ = false; + languageCode_ = ""; maxAlternatives_ = 0; @@ -1166,7 +1213,7 @@ public Builder clear() { if (speechContextsBuilder_ == null) { speechContexts_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000040); } else { speechContextsBuilder_.clear(); } @@ -1209,13 +1256,14 @@ public com.google.cloud.speech.v1.RecognitionConfig buildPartial() { int to_bitField0_ = 0; result.encoding_ = encoding_; result.sampleRateHertz_ = sampleRateHertz_; + result.enableSeparateRecognitionPerChannel_ = enableSeparateRecognitionPerChannel_; result.languageCode_ = languageCode_; result.maxAlternatives_ = maxAlternatives_; result.profanityFilter_ = profanityFilter_; if (speechContextsBuilder_ == null) { - if (((bitField0_ & 0x00000020) == 0x00000020)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_); - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000040); } result.speechContexts_ = speechContexts_; } else { @@ -1281,6 +1329,9 @@ public Builder mergeFrom(com.google.cloud.speech.v1.RecognitionConfig other) { if (other.getSampleRateHertz() != 0) { setSampleRateHertz(other.getSampleRateHertz()); } + if (other.getEnableSeparateRecognitionPerChannel() != false) { + setEnableSeparateRecognitionPerChannel(other.getEnableSeparateRecognitionPerChannel()); + } if (!other.getLanguageCode().isEmpty()) { languageCode_ = 
other.languageCode_; onChanged(); @@ -1295,7 +1346,7 @@ public Builder mergeFrom(com.google.cloud.speech.v1.RecognitionConfig other) { if (!other.speechContexts_.isEmpty()) { if (speechContexts_.isEmpty()) { speechContexts_ = other.speechContexts_; - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000040); } else { ensureSpeechContextsIsMutable(); speechContexts_.addAll(other.speechContexts_); @@ -1308,7 +1359,7 @@ public Builder mergeFrom(com.google.cloud.speech.v1.RecognitionConfig other) { speechContextsBuilder_.dispose(); speechContextsBuilder_ = null; speechContexts_ = other.speechContexts_; - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000040); speechContextsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getSpeechContextsFieldBuilder() @@ -1512,6 +1563,65 @@ public Builder clearSampleRateHertz() { return this; } + private boolean enableSeparateRecognitionPerChannel_; + /** + * + * + *
+     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
+     * to get each channel recognized separately. The recognition result will
+     * contain a `channel_tag` field to state which channel that result belongs
+     * to. If this is not true, we will only recognize the first channel. The
+     * request is billed cumulatively for all channels recognized:
+     * `audio_channel_count` multiplied by the length of the audio.
+     * 
+ * + * bool enable_separate_recognition_per_channel = 12; + */ + public boolean getEnableSeparateRecognitionPerChannel() { + return enableSeparateRecognitionPerChannel_; + } + /** + * + * + *
+     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
+     * to get each channel recognized separately. The recognition result will
+     * contain a `channel_tag` field to state which channel that result belongs
+     * to. If this is not true, we will only recognize the first channel. The
+     * request is billed cumulatively for all channels recognized:
+     * `audio_channel_count` multiplied by the length of the audio.
+     * 
+ * + * bool enable_separate_recognition_per_channel = 12; + */ + public Builder setEnableSeparateRecognitionPerChannel(boolean value) { + + enableSeparateRecognitionPerChannel_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
+     * to get each channel recognized separately. The recognition result will
+     * contain a `channel_tag` field to state which channel that result belongs
+     * to. If this is not true, we will only recognize the first channel. The
+     * request is billed cumulatively for all channels recognized:
+     * `audio_channel_count` multiplied by the length of the audio.
+     * 
+ * + * bool enable_separate_recognition_per_channel = 12; + */ + public Builder clearEnableSeparateRecognitionPerChannel() { + + enableSeparateRecognitionPerChannel_ = false; + onChanged(); + return this; + } + private java.lang.Object languageCode_ = ""; /** * @@ -1742,10 +1852,10 @@ public Builder clearProfanityFilter() { java.util.Collections.emptyList(); private void ensureSpeechContextsIsMutable() { - if (!((bitField0_ & 0x00000020) == 0x00000020)) { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { speechContexts_ = new java.util.ArrayList(speechContexts_); - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000040; } } @@ -1983,7 +2093,7 @@ public Builder addAllSpeechContexts( public Builder clearSpeechContexts() { if (speechContextsBuilder_ == null) { speechContexts_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000040); onChanged(); } else { speechContextsBuilder_.clear(); @@ -2120,7 +2230,7 @@ public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder com.google.cloud.speech.v1.SpeechContext.Builder, com.google.cloud.speech.v1.SpeechContextOrBuilder>( speechContexts_, - ((bitField0_ & 0x00000020) == 0x00000020), + ((bitField0_ & 0x00000040) == 0x00000040), getParentForChildren(), isClean()); speechContexts_ = null; @@ -2498,15 +2608,18 @@ public Builder setModelBytes(com.google.protobuf.ByteString value) { * *
      * *Optional* Set to true to use an enhanced model for speech recognition.
-     * You must also set the `model` field to a valid, enhanced model. If
-     * `use_enhanced` is set to true and the `model` field is not set, then
-     * `use_enhanced` is ignored. If `use_enhanced` is true and an enhanced
-     * version of the specified model does not exist, then the speech is
-     * recognized using the standard version of the specified model.
+     * If `use_enhanced` is set to true and the `model` field is not set, then
+     * an appropriate enhanced model is chosen if:
+     * 1. the project is eligible for requesting enhanced models
+     * 2. an enhanced model exists for the audio
+     * If `use_enhanced` is true and an enhanced version of the specified model
+     * does not exist, then the speech is recognized using the standard version
+     * of the specified model.
      * Enhanced speech models require that you opt-in to data logging using
-     * instructions in the [documentation](/speech-to-text/enable-data-logging).
-     * If you set `use_enhanced` to true and you have not enabled audio logging,
-     * then you will receive an error.
+     * instructions in the
+     * [documentation](/speech-to-text/docs/enable-data-logging). If you set
+     * `use_enhanced` to true and you have not enabled audio logging, then you
+     * will receive an error.
      * 
* * bool use_enhanced = 14; @@ -2519,15 +2632,18 @@ public boolean getUseEnhanced() { * *
      * *Optional* Set to true to use an enhanced model for speech recognition.
-     * You must also set the `model` field to a valid, enhanced model. If
-     * `use_enhanced` is set to true and the `model` field is not set, then
-     * `use_enhanced` is ignored. If `use_enhanced` is true and an enhanced
-     * version of the specified model does not exist, then the speech is
-     * recognized using the standard version of the specified model.
+     * If `use_enhanced` is set to true and the `model` field is not set, then
+     * an appropriate enhanced model is chosen if:
+     * 1. the project is eligible for requesting enhanced models
+     * 2. an enhanced model exists for the audio
+     * If `use_enhanced` is true and an enhanced version of the specified model
+     * does not exist, then the speech is recognized using the standard version
+     * of the specified model.
      * Enhanced speech models require that you opt-in to data logging using
-     * instructions in the [documentation](/speech-to-text/enable-data-logging).
-     * If you set `use_enhanced` to true and you have not enabled audio logging,
-     * then you will receive an error.
+     * instructions in the
+     * [documentation](/speech-to-text/docs/enable-data-logging). If you set
+     * `use_enhanced` to true and you have not enabled audio logging, then you
+     * will receive an error.
      * 
* * bool use_enhanced = 14; @@ -2543,15 +2659,18 @@ public Builder setUseEnhanced(boolean value) { * *
      * *Optional* Set to true to use an enhanced model for speech recognition.
-     * You must also set the `model` field to a valid, enhanced model. If
-     * `use_enhanced` is set to true and the `model` field is not set, then
-     * `use_enhanced` is ignored. If `use_enhanced` is true and an enhanced
-     * version of the specified model does not exist, then the speech is
-     * recognized using the standard version of the specified model.
+     * If `use_enhanced` is set to true and the `model` field is not set, then
+     * an appropriate enhanced model is chosen if:
+     * 1. the project is eligible for requesting enhanced models
+     * 2. an enhanced model exists for the audio
+     * If `use_enhanced` is true and an enhanced version of the specified model
+     * does not exist, then the speech is recognized using the standard version
+     * of the specified model.
      * Enhanced speech models require that you opt-in to data logging using
-     * instructions in the [documentation](/speech-to-text/enable-data-logging).
-     * If you set `use_enhanced` to true and you have not enabled audio logging,
-     * then you will receive an error.
+     * instructions in the
+     * [documentation](/speech-to-text/docs/enable-data-logging). If you set
+     * `use_enhanced` to true and you have not enabled audio logging, then you
+     * will receive an error.
      * 
* * bool use_enhanced = 14; diff --git a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfigOrBuilder.java index 1f6a3a7a03dc..7ca833c9c5cc 100644 --- a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/RecognitionConfigOrBuilder.java @@ -50,6 +50,22 @@ public interface RecognitionConfigOrBuilder */ int getSampleRateHertz(); + /** + * + * + *
+   * This needs to be set to `true` explicitly and `audio_channel_count` > 1
+   * to get each channel recognized separately. The recognition result will
+   * contain a `channel_tag` field to state which channel that result belongs
+   * to. If this is not true, we will only recognize the first channel. The
+   * request is billed cumulatively for all channels recognized:
+   * `audio_channel_count` multiplied by the length of the audio.
+   * 
+ * + * bool enable_separate_recognition_per_channel = 12; + */ + boolean getEnableSeparateRecognitionPerChannel(); + /** * * @@ -290,15 +306,18 @@ public interface RecognitionConfigOrBuilder * *
    * *Optional* Set to true to use an enhanced model for speech recognition.
-   * You must also set the `model` field to a valid, enhanced model. If
-   * `use_enhanced` is set to true and the `model` field is not set, then
-   * `use_enhanced` is ignored. If `use_enhanced` is true and an enhanced
-   * version of the specified model does not exist, then the speech is
-   * recognized using the standard version of the specified model.
+   * If `use_enhanced` is set to true and the `model` field is not set, then
+   * an appropriate enhanced model is chosen if:
+   * 1. the project is eligible for requesting enhanced models
+   * 2. an enhanced model exists for the audio
+   * If `use_enhanced` is true and an enhanced version of the specified model
+   * does not exist, then the speech is recognized using the standard version
+   * of the specified model.
    * Enhanced speech models require that you opt-in to data logging using
-   * instructions in the [documentation](/speech-to-text/enable-data-logging).
-   * If you set `use_enhanced` to true and you have not enabled audio logging,
-   * then you will receive an error.
+   * instructions in the
+   * [documentation](/speech-to-text/docs/enable-data-logging). If you set
+   * `use_enhanced` to true and you have not enabled audio logging, then you
+   * will receive an error.
    * 
* * bool use_enhanced = 14; diff --git a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechProto.java b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechProto.java index db8239592103..3dac3ed10b25 100644 --- a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechProto.java +++ b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechProto.java @@ -102,65 +102,67 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "t\"\212\001\n\032StreamingRecognitionConfig\0229\n\006conf" + "ig\030\001 \001(\0132).google.cloud.speech.v1.Recogn" + "itionConfig\022\030\n\020single_utterance\030\002 \001(\010\022\027\n" - + "\017interim_results\030\003 \001(\010\"\377\003\n\021RecognitionCo" + + "\017interim_results\030\003 \001(\010\"\260\004\n\021RecognitionCo" + "nfig\022I\n\010encoding\030\001 \001(\01627.google.cloud.sp" + "eech.v1.RecognitionConfig.AudioEncoding\022" - + "\031\n\021sample_rate_hertz\030\002 \001(\005\022\025\n\rlanguage_c" - + "ode\030\003 \001(\t\022\030\n\020max_alternatives\030\004 \001(\005\022\030\n\020p" - + "rofanity_filter\030\005 \001(\010\022>\n\017speech_contexts" - + "\030\006 \003(\0132%.google.cloud.speech.v1.SpeechCo" - + "ntext\022 \n\030enable_word_time_offsets\030\010 \001(\010\022" - + "$\n\034enable_automatic_punctuation\030\013 \001(\010\022\r\n" - + "\005model\030\r \001(\t\022\024\n\014use_enhanced\030\016 \001(\010\"\213\001\n\rA" - + "udioEncoding\022\030\n\024ENCODING_UNSPECIFIED\020\000\022\014" - + "\n\010LINEAR16\020\001\022\010\n\004FLAC\020\002\022\t\n\005MULAW\020\003\022\007\n\003AMR" - + "\020\004\022\n\n\006AMR_WB\020\005\022\014\n\010OGG_OPUS\020\006\022\032\n\026SPEEX_WI" - + "TH_HEADER_BYTE\020\007\" \n\rSpeechContext\022\017\n\007phr" - + "ases\030\001 \003(\t\"D\n\020RecognitionAudio\022\021\n\007conten" - + "t\030\001 \001(\014H\000\022\r\n\003uri\030\002 \001(\tH\000B\016\n\014audio_source" - + "\"U\n\021RecognizeResponse\022@\n\007results\030\002 \003(\0132/" - + ".google.cloud.speech.v1.SpeechRecognitio" - + "nResult\"`\n\034LongRunningRecognizeResponse\022" - + "@\n\007results\030\002 \003(\0132/.google.cloud.speech.v" - + "1.SpeechRecognitionResult\"\236\001\n\034LongRunnin" - + "gRecognizeMetadata\022\030\n\020progress_percent\030\001" - + " \001(\005\022.\n\nstart_time\030\002 \001(\0132\032.google.protob" - + "uf.Timestamp\0224\n\020last_update_time\030\003 \001(\0132\032" - + ".google.protobuf.Timestamp\"\261\002\n\032Streaming" - + "RecognizeResponse\022!\n\005error\030\001 \001(\0132\022.googl" - + "e.rpc.Status\022C\n\007results\030\002 \003(\01322.google.c" - + "loud.speech.v1.StreamingRecognitionResul" - + "t\022]\n\021speech_event_type\030\004 \001(\0162B.google.cl" - + "oud.speech.v1.StreamingRecognizeResponse" - + ".SpeechEventType\"L\n\017SpeechEventType\022\034\n\030S" - + "PEECH_EVENT_UNSPECIFIED\020\000\022\033\n\027END_OF_SING" - + "LE_UTTERANCE\020\001\"\215\001\n\032StreamingRecognitionR" - + "esult\022J\n\014alternatives\030\001 \003(\01324.google.clo" - + "ud.speech.v1.SpeechRecognitionAlternativ" - + "e\022\020\n\010is_final\030\002 \001(\010\022\021\n\tstability\030\003 \001(\002\"e" - + "\n\027SpeechRecognitionResult\022J\n\014alternative" - + "s\030\001 \003(\01324.google.cloud.speech.v1.SpeechR" - + "ecognitionAlternative\"w\n\034SpeechRecogniti" - + "onAlternative\022\022\n\ntranscript\030\001 \001(\t\022\022\n\ncon" - + 
"fidence\030\002 \001(\002\022/\n\005words\030\003 \003(\0132 .google.cl" - + "oud.speech.v1.WordInfo\"t\n\010WordInfo\022-\n\nst" - + "art_time\030\001 \001(\0132\031.google.protobuf.Duratio" - + "n\022+\n\010end_time\030\002 \001(\0132\031.google.protobuf.Du" - + "ration\022\014\n\004word\030\003 \001(\t2\251\003\n\006Speech\022\201\001\n\tReco" - + "gnize\022(.google.cloud.speech.v1.Recognize" - + "Request\032).google.cloud.speech.v1.Recogni" - + "zeResponse\"\037\202\323\344\223\002\031\"\024/v1/speech:recognize" - + ":\001*\022\226\001\n\024LongRunningRecognize\0223.google.cl" - + "oud.speech.v1.LongRunningRecognizeReques" - + "t\032\035.google.longrunning.Operation\"*\202\323\344\223\002$" - + "\"\037/v1/speech:longrunningrecognize:\001*\022\201\001\n" - + "\022StreamingRecognize\0221.google.cloud.speec" - + "h.v1.StreamingRecognizeRequest\0322.google." - + "cloud.speech.v1.StreamingRecognizeRespon" - + "se\"\000(\0010\001Bl\n\032com.google.cloud.speech.v1B\013" - + "SpeechProtoP\001Z\n\017speech" + + "_contexts\030\006 \003(\0132%.google.cloud.speech.v1" + + ".SpeechContext\022 \n\030enable_word_time_offse" + + "ts\030\010 \001(\010\022$\n\034enable_automatic_punctuation" + + "\030\013 \001(\010\022\r\n\005model\030\r \001(\t\022\024\n\014use_enhanced\030\016 " + + "\001(\010\"\213\001\n\rAudioEncoding\022\030\n\024ENCODING_UNSPEC" + + "IFIED\020\000\022\014\n\010LINEAR16\020\001\022\010\n\004FLAC\020\002\022\t\n\005MULAW" + + "\020\003\022\007\n\003AMR\020\004\022\n\n\006AMR_WB\020\005\022\014\n\010OGG_OPUS\020\006\022\032\n" + + "\026SPEEX_WITH_HEADER_BYTE\020\007\" \n\rSpeechConte" + + "xt\022\017\n\007phrases\030\001 \003(\t\"D\n\020RecognitionAudio\022" + + "\021\n\007content\030\001 \001(\014H\000\022\r\n\003uri\030\002 \001(\tH\000B\016\n\014aud" + + "io_source\"U\n\021RecognizeResponse\022@\n\007result" + + "s\030\002 \003(\0132/.google.cloud.speech.v1.SpeechR" + + "ecognitionResult\"`\n\034LongRunningRecognize" + + "Response\022@\n\007results\030\002 \003(\0132/.google.cloud" + + ".speech.v1.SpeechRecognitionResult\"\236\001\n\034L" + + "ongRunningRecognizeMetadata\022\030\n\020progress_" + + "percent\030\001 \001(\005\022.\n\nstart_time\030\002 \001(\0132\032.goog" + + "le.protobuf.Timestamp\0224\n\020last_update_tim" + + "e\030\003 \001(\0132\032.google.protobuf.Timestamp\"\261\002\n\032" + + "StreamingRecognizeResponse\022!\n\005error\030\001 \001(" + + "\0132\022.google.rpc.Status\022C\n\007results\030\002 \003(\01322" + + ".google.cloud.speech.v1.StreamingRecogni" + + "tionResult\022]\n\021speech_event_type\030\004 \001(\0162B." + + "google.cloud.speech.v1.StreamingRecogniz" + + "eResponse.SpeechEventType\"L\n\017SpeechEvent" + + "Type\022\034\n\030SPEECH_EVENT_UNSPECIFIED\020\000\022\033\n\027EN" + + "D_OF_SINGLE_UTTERANCE\020\001\"\242\001\n\032StreamingRec" + + "ognitionResult\022J\n\014alternatives\030\001 \003(\01324.g" + + "oogle.cloud.speech.v1.SpeechRecognitionA" + + "lternative\022\020\n\010is_final\030\002 \001(\010\022\021\n\tstabilit" + + "y\030\003 \001(\002\022\023\n\013channel_tag\030\005 \001(\005\"z\n\027SpeechRe" + + "cognitionResult\022J\n\014alternatives\030\001 \003(\01324." 
+ + "google.cloud.speech.v1.SpeechRecognition" + + "Alternative\022\023\n\013channel_tag\030\002 \001(\005\"w\n\034Spee" + + "chRecognitionAlternative\022\022\n\ntranscript\030\001" + + " \001(\t\022\022\n\nconfidence\030\002 \001(\002\022/\n\005words\030\003 \003(\0132" + + " .google.cloud.speech.v1.WordInfo\"t\n\010Wor" + + "dInfo\022-\n\nstart_time\030\001 \001(\0132\031.google.proto" + + "buf.Duration\022+\n\010end_time\030\002 \001(\0132\031.google." + + "protobuf.Duration\022\014\n\004word\030\003 \001(\t2\251\003\n\006Spee" + + "ch\022\201\001\n\tRecognize\022(.google.cloud.speech.v" + + "1.RecognizeRequest\032).google.cloud.speech" + + ".v1.RecognizeResponse\"\037\202\323\344\223\002\031\"\024/v1/speec" + + "h:recognize:\001*\022\226\001\n\024LongRunningRecognize\022" + + "3.google.cloud.speech.v1.LongRunningReco" + + "gnizeRequest\032\035.google.longrunning.Operat" + + "ion\"*\202\323\344\223\002$\"\037/v1/speech:longrunningrecog" + + "nize:\001*\022\201\001\n\022StreamingRecognize\0221.google." + + "cloud.speech.v1.StreamingRecognizeReques" + + "t\0322.google.cloud.speech.v1.StreamingReco" + + "gnizeResponse\"\000(\0010\001Bl\n\032com.google.cloud." + + "speech.v1B\013SpeechProtoP\001Z * Output only. A list of word-specific information for each recognized word. + * Note: When `enable_speaker_diarization` is true, you will see all the words + * from the beginning of the audio. * * * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -196,6 +198,8 @@ public java.util.List getWordsList() { * *
    * Output only. A list of word-specific information for each recognized word.
+   * Note: When `enable_speaker_diarization` is true, you will see all the words
+   * from the beginning of the audio.
    * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -209,6 +213,8 @@ public java.util.List getWordsList() { * *
    * Output only. A list of word-specific information for each recognized word.
+   * Note: When `enable_speaker_diarization` is true, you will see all the words
+   * from the beginning of the audio.
    * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -221,6 +227,8 @@ public int getWordsCount() { * *
    * Output only. A list of word-specific information for each recognized word.
+   * Note: When `enable_speaker_diarization` is true, you will see all the words
+   * from the beginning of the audio.
    * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -233,6 +241,8 @@ public com.google.cloud.speech.v1.WordInfo getWords(int index) { * *
    * Output only. A list of word-specific information for each recognized word.
+   * Note: When `enable_speaker_diarization` is true, you will see all the words
+   * from the beginning of the audio.
    * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -816,6 +826,8 @@ private void ensureWordsIsMutable() { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -832,6 +844,8 @@ public java.util.List getWordsList() { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -848,6 +862,8 @@ public int getWordsCount() { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -864,6 +880,8 @@ public com.google.cloud.speech.v1.WordInfo getWords(int index) { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -886,6 +904,8 @@ public Builder setWords(int index, com.google.cloud.speech.v1.WordInfo value) { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -906,6 +926,8 @@ public Builder setWords( * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -928,6 +950,8 @@ public Builder addWords(com.google.cloud.speech.v1.WordInfo value) { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -950,6 +974,8 @@ public Builder addWords(int index, com.google.cloud.speech.v1.WordInfo value) { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -969,6 +995,8 @@ public Builder addWords(com.google.cloud.speech.v1.WordInfo.Builder builderForVa * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -989,6 +1017,8 @@ public Builder addWords( * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -1009,6 +1039,8 @@ public Builder addAllWords( * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -1028,6 +1060,8 @@ public Builder clearWords() { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -1047,6 +1081,8 @@ public Builder removeWords(int index) { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -1059,6 +1095,8 @@ public com.google.cloud.speech.v1.WordInfo.Builder getWordsBuilder(int index) { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -1075,6 +1113,8 @@ public com.google.cloud.speech.v1.WordInfoOrBuilder getWordsOrBuilder(int index) * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -1092,6 +1132,8 @@ public com.google.cloud.speech.v1.WordInfoOrBuilder getWordsOrBuilder(int index) * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -1105,6 +1147,8 @@ public com.google.cloud.speech.v1.WordInfo.Builder addWordsBuilder() { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -1118,6 +1162,8 @@ public com.google.cloud.speech.v1.WordInfo.Builder addWordsBuilder(int index) { * *
      * Output only. A list of word-specific information for each recognized word.
+     * Note: When `enable_speaker_diarization` is true, you will see all the words
+     * from the beginning of the audio.
      * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; diff --git a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionAlternativeOrBuilder.java b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionAlternativeOrBuilder.java index ac3fed75f9ae..8798ff4f991d 100644 --- a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionAlternativeOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionAlternativeOrBuilder.java @@ -51,6 +51,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
+   * Note: When `enable_speaker_diarization` is true, you will see all the words
+   * from the beginning of the audio.
    * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -61,6 +63,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
+   * Note: When `enable_speaker_diarization` is true, you will see all the words
+   * from the beginning of the audio.
    * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -71,6 +75,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
+   * Note: When `enable_speaker_diarization` is true, you will see all the words
+   * from the beginning of the audio.
    * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -81,6 +87,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
+   * Note: When `enable_speaker_diarization` is true, you will see all the words
+   * from the beginning of the audio.
    * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; @@ -91,6 +99,8 @@ public interface SpeechRecognitionAlternativeOrBuilder * *
    * Output only. A list of word-specific information for each recognized word.
+   * Note: When `enable_speaker_diarization` is true, you will see all the words
+   * from the beginning of the audio.
    * 
* * repeated .google.cloud.speech.v1.WordInfo words = 3; diff --git a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResult.java b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResult.java index 307fad4846ae..ecccdf0d1b61 100644 --- a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResult.java +++ b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResult.java @@ -24,6 +24,7 @@ private SpeechRecognitionResult(com.google.protobuf.GeneratedMessageV3.Builder alternatives_; /** @@ -182,6 +189,23 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlter return alternatives_.get(index); } + public static final int CHANNEL_TAG_FIELD_NUMBER = 2; + private int channelTag_; + /** + * + * + *
+   * For multi-channel audio, this is the channel number corresponding to the
+   * recognized result for the audio from that channel.
+   * For audio_channel_count = N, its output values can range from '1' to 'N'.
+   * 
+ * + * int32 channel_tag = 2; + */ + public int getChannelTag() { + return channelTag_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -199,6 +223,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io for (int i = 0; i < alternatives_.size(); i++) { output.writeMessage(1, alternatives_.get(i)); } + if (channelTag_ != 0) { + output.writeInt32(2, channelTag_); + } unknownFields.writeTo(output); } @@ -211,6 +238,9 @@ public int getSerializedSize() { for (int i = 0; i < alternatives_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, alternatives_.get(i)); } + if (channelTag_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, channelTag_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -229,6 +259,7 @@ public boolean equals(final java.lang.Object obj) { boolean result = true; result = result && getAlternativesList().equals(other.getAlternativesList()); + result = result && (getChannelTag() == other.getChannelTag()); result = result && unknownFields.equals(other.unknownFields); return result; } @@ -244,6 +275,8 @@ public int hashCode() { hash = (37 * hash) + ALTERNATIVES_FIELD_NUMBER; hash = (53 * hash) + getAlternativesList().hashCode(); } + hash = (37 * hash) + CHANNEL_TAG_FIELD_NUMBER; + hash = (53 * hash) + getChannelTag(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -397,6 +430,8 @@ public Builder clear() { } else { alternativesBuilder_.clear(); } + channelTag_ = 0; + return this; } @@ -425,6 +460,7 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult buildPartial() { com.google.cloud.speech.v1.SpeechRecognitionResult result = new com.google.cloud.speech.v1.SpeechRecognitionResult(this); int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; if (alternativesBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { alternatives_ = java.util.Collections.unmodifiableList(alternatives_); @@ -434,6 +470,8 @@ public com.google.cloud.speech.v1.SpeechRecognitionResult buildPartial() { } else { result.alternatives_ = alternativesBuilder_.build(); } + result.channelTag_ = channelTag_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -511,6 +549,9 @@ public Builder mergeFrom(com.google.cloud.speech.v1.SpeechRecognitionResult othe } } } + if (other.getChannelTag() != 0) { + setChannelTag(other.getChannelTag()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -964,6 +1005,56 @@ public com.google.cloud.speech.v1.SpeechRecognitionAlternative.Builder addAltern return alternativesBuilder_; } + private int channelTag_; + /** + * + * + *
+     * For multi-channel audio, this is the channel number corresponding to the
+     * recognized result for the audio from that channel.
+     * For audio_channel_count = N, its output values can range from '1' to 'N'.
+     * 
+ * + * int32 channel_tag = 2; + */ + public int getChannelTag() { + return channelTag_; + } + /** + * + * + *
+     * For multi-channel audio, this is the channel number corresponding to the
+     * recognized result for the audio from that channel.
+     * For audio_channel_count = N, its output values can range from '1' to 'N'.
+     * 
+ * + * int32 channel_tag = 2; + */ + public Builder setChannelTag(int value) { + + channelTag_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * For multi-channel audio, this is the channel number corresponding to the
+     * recognized result for the audio from that channel.
+     * For audio_channel_count = N, its output values can range from '1' to 'N'.
+     * 
+ * + * int32 channel_tag = 2; + */ + public Builder clearChannelTag() { + + channelTag_ = 0; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFieldsProto3(unknownFields); diff --git a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResultOrBuilder.java b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResultOrBuilder.java index f5a6b729930d..bde2b700b595 100644 --- a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResultOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/SpeechRecognitionResultOrBuilder.java @@ -75,4 +75,17 @@ public interface SpeechRecognitionResultOrBuilder */ com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlternativesOrBuilder( int index); + + /** + * + * + *
+   * For multi-channel audio, this is the channel number corresponding to the
+   * recognized result for the audio from that channel.
+   * For audio_channel_count = N, its output values can range from '1' to 'N'.
+   * 
+ * + * int32 channel_tag = 2; + */ + int getChannelTag(); } diff --git a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResult.java b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResult.java index a27578f77897..70d70fa14659 100644 --- a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResult.java +++ b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResult.java @@ -27,6 +27,7 @@ private StreamingRecognitionResult() { alternatives_ = java.util.Collections.emptyList(); isFinal_ = false; stability_ = 0F; + channelTag_ = 0; } @java.lang.Override @@ -77,6 +78,11 @@ private StreamingRecognitionResult( stability_ = input.readFloat(); break; } + case 40: + { + channelTag_ = input.readInt32(); + break; + } default: { if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { @@ -234,6 +240,23 @@ public float getStability() { return stability_; } + public static final int CHANNEL_TAG_FIELD_NUMBER = 5; + private int channelTag_; + /** + * + * + *
+   * For multi-channel audio, this is the channel number corresponding to the
+   * recognized result for the audio from that channel.
+   * For audio_channel_count = N, its output values can range from '1' to 'N'.
+   * 
+ * + * int32 channel_tag = 5; + */ + public int getChannelTag() { + return channelTag_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -257,6 +280,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (stability_ != 0F) { output.writeFloat(3, stability_); } + if (channelTag_ != 0) { + output.writeInt32(5, channelTag_); + } unknownFields.writeTo(output); } @@ -275,6 +301,9 @@ public int getSerializedSize() { if (stability_ != 0F) { size += com.google.protobuf.CodedOutputStream.computeFloatSize(3, stability_); } + if (channelTag_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(5, channelTag_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -298,6 +327,7 @@ public boolean equals(final java.lang.Object obj) { result && (java.lang.Float.floatToIntBits(getStability()) == java.lang.Float.floatToIntBits(other.getStability())); + result = result && (getChannelTag() == other.getChannelTag()); result = result && unknownFields.equals(other.unknownFields); return result; } @@ -317,6 +347,8 @@ public int hashCode() { hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsFinal()); hash = (37 * hash) + STABILITY_FIELD_NUMBER; hash = (53 * hash) + java.lang.Float.floatToIntBits(getStability()); + hash = (37 * hash) + CHANNEL_TAG_FIELD_NUMBER; + hash = (53 * hash) + getChannelTag(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -476,6 +508,8 @@ public Builder clear() { stability_ = 0F; + channelTag_ = 0; + return this; } @@ -516,6 +550,7 @@ public com.google.cloud.speech.v1.StreamingRecognitionResult buildPartial() { } result.isFinal_ = isFinal_; result.stability_ = stability_; + result.channelTag_ = channelTag_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -600,6 +635,9 @@ public Builder mergeFrom(com.google.cloud.speech.v1.StreamingRecognitionResult o if (other.getStability() != 0F) { setStability(other.getStability()); } + if (other.getChannelTag() != 0) { + setChannelTag(other.getChannelTag()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1165,6 +1203,56 @@ public Builder clearStability() { return this; } + private int channelTag_; + /** + * + * + *
+     * For multi-channel audio, this is the channel number corresponding to the
+     * recognized result for the audio from that channel.
+     * For audio_channel_count = N, its output values can range from '1' to 'N'.
+     * 
+ * + * int32 channel_tag = 5; + */ + public int getChannelTag() { + return channelTag_; + } + /** + * + * + *
+     * For multi-channel audio, this is the channel number corresponding to the
+     * recognized result for the audio from that channel.
+     * For audio_channel_count = N, its output values can range from '1' to 'N'.
+     * 
+ * + * int32 channel_tag = 5; + */ + public Builder setChannelTag(int value) { + + channelTag_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * For multi-channel audio, this is the channel number corresponding to the
+     * recognized result for the audio from that channel.
+     * For audio_channel_count = N, its output values can range from '1' to 'N'.
+     * 
+ * + * int32 channel_tag = 5; + */ + public Builder clearChannelTag() { + + channelTag_ = 0; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFieldsProto3(unknownFields); diff --git a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResultOrBuilder.java b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResultOrBuilder.java index 1420c3a4136c..ed6952d4bfb6 100644 --- a/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResultOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-speech-v1/src/main/java/com/google/cloud/speech/v1/StreamingRecognitionResultOrBuilder.java @@ -105,4 +105,17 @@ com.google.cloud.speech.v1.SpeechRecognitionAlternativeOrBuilder getAlternatives * float stability = 3; */ float getStability(); + + /** + * + * + *
+   * For multi-channel audio, this is the channel number corresponding to the
+   * recognized result for the audio from that channel.
+   * For audio_channel_count = N, its output values can range from '1' to 'N'.
+   * 
+ * + * int32 channel_tag = 5; + */ + int getChannelTag(); } diff --git a/google-api-grpc/proto-google-cloud-speech-v1/src/main/proto/google/cloud/speech/v1/cloud_speech.proto b/google-api-grpc/proto-google-cloud-speech-v1/src/main/proto/google/cloud/speech/v1/cloud_speech.proto index 001d54b3c17c..fe8e774806d7 100644 --- a/google-api-grpc/proto-google-cloud-speech-v1/src/main/proto/google/cloud/speech/v1/cloud_speech.proto +++ b/google-api-grpc/proto-google-cloud-speech-v1/src/main/proto/google/cloud/speech/v1/cloud_speech.proto @@ -215,6 +215,14 @@ message RecognitionConfig { // for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. int32 sample_rate_hertz = 2; + // This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1 + // to get each channel recognized separately. The recognition result will + // contain a `channel_tag` field to state which channel that result belongs + // to. If this is not true, we will only recognize the first channel. The + // request is billed cumulatively for all channels recognized: + // `audio_channel_count` multiplied by the length of the audio. + bool enable_separate_recognition_per_channel = 12; + // *Required* The language of the supplied audio as a // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. // Example: "en-US". @@ -291,16 +299,20 @@ message RecognitionConfig { string model = 13; // *Optional* Set to true to use an enhanced model for speech recognition. - // You must also set the `model` field to a valid, enhanced model. If - // `use_enhanced` is set to true and the `model` field is not set, then - // `use_enhanced` is ignored. If `use_enhanced` is true and an enhanced - // version of the specified model does not exist, then the speech is - // recognized using the standard version of the specified model. + // If `use_enhanced` is set to true and the `model` field is not set, then + // an appropriate enhanced model is chosen if: + // 1. project is eligible for requesting enhanced models + // 2. an enhanced model exists for the audio + // + // If `use_enhanced` is true and an enhanced version of the specified model + // does not exist, then the speech is recognized using the standard version + // of the specified model. // // Enhanced speech models require that you opt-in to data logging using - // instructions in the [documentation](/speech-to-text/enable-data-logging). - // If you set `use_enhanced` to true and you have not enabled audio logging, - // then you will receive an error. + // instructions in the + // [documentation](/speech-to-text/docs/enable-data-logging). If you set + // `use_enhanced` to true and you have not enabled audio logging, then you + // will receive an error. bool use_enhanced = 14; } @@ -476,6 +488,11 @@ message StreamingRecognitionResult { // This field is only provided for interim results (`is_final=false`). // The default of 0.0 is a sentinel value indicating `stability` was not set. float stability = 3; + + // For multi-channel audio, this is the channel number corresponding to the + // recognized result for the audio from that channel. + // For audio_channel_count = N, its output values can range from '1' to 'N'. + int32 channel_tag = 5; } // A speech recognition result corresponding to a portion of the audio. @@ -485,6 +502,11 @@ message SpeechRecognitionResult { // These alternatives are ordered in terms of accuracy, with the top (first) // alternative being the most probable, as ranked by the recognizer. 
repeated SpeechRecognitionAlternative alternatives = 1; + + // For multi-channel audio, this is the channel number corresponding to the + // recognized result for the audio from that channel. + // For audio_channel_count = N, its output values can range from '1' to 'N'. + int32 channel_tag = 2; } // Alternative hypotheses (a.k.a. n-best list). @@ -502,6 +524,8 @@ message SpeechRecognitionAlternative { float confidence = 2; // Output only. A list of word-specific information for each recognized word. + // Note: When `enable_speaker_diarization` is true, you will see all the words + // from the beginning of the audio. repeated WordInfo words = 3; } diff --git a/google-cloud-clients/google-cloud-speech/synth.metadata b/google-cloud-clients/google-cloud-speech/synth.metadata index 2bd4f07a69a5..3be29e90377f 100644 --- a/google-cloud-clients/google-cloud-speech/synth.metadata +++ b/google-cloud-clients/google-cloud-speech/synth.metadata @@ -1,27 +1,42 @@ { + "updateTime": "2019-01-10T00:31:48.503770Z", "sources": [ + { + "generator": { + "name": "artman", + "version": "0.16.5", + "dockerImage": "googleapis/artman@sha256:5a96c2c5c6f9570cc9556b63dc9ce1838777fd9166b5b64e43ad8e0ecee2fe2c" + } + }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "5a57f0c13a358b2b15452bf2d67453774a5f6d4f", - "internalRef": "221837528" + "sha": "deeb0a29ecc3e746c6fd82f72117360c4ef984cf", + "internalRef": "228610851" } - }, + } + ], + "destinations": [ { - "git": { - "name": "googleapis-private", - "remote": "https://github.com/googleapis/googleapis-private.git", - "sha": "6aa8e1a447bb8d0367150356a28cb4d3f2332641", - "internalRef": "221340946" + "client": { + "source": "googleapis", + "apiName": "speech", + "apiVersion": "v1", + "language": "java", + "generator": "gapic", + "config": "google/cloud/speech/artman_speech_v1.yaml" } }, { - "generator": { - "name": "artman", - "version": "0.16.0", - "dockerImage": "googleapis/artman@sha256:90f9d15e9bad675aeecd586725bce48f5667ffe7d5fc4d1e96d51ff34304815b" + "client": { + "source": "googleapis", + "apiName": "speech", + "apiVersion": "v1p1beta1", + "language": "java", + "generator": "gapic", + "config": "google/cloud/speech/artman_speech_v1p1beta1.yaml" } } ] -} +} \ No newline at end of file
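+
+  // Illustrative read pattern (an assumption, not part of the schema): clients
+  // typically take the first, highest-ranked alternative, e.g. via the
+  // generated Java accessor result.getAlternatives(0).getTranscript().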