From 04e2c391a0554b61e029aa33d21172da66f17df7 Mon Sep 17 00:00:00 2001
From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com>
Date: Tue, 10 Aug 2021 07:12:00 -0700
Subject: [PATCH] feat: add total_billed_time response field (#787)
Committer: @cherba
PiperOrigin-RevId: 389755489
---
.../google/cloud/speech/v1/cloud_speech.proto | 54 ++++++---
packages/google-cloud-node/protos/protos.d.ts | 24 ++++
packages/google-cloud-node/protos/protos.js | 105 ++++++++++++++++++
packages/google-cloud-node/protos/protos.json | 23 +++-
4 files changed, 188 insertions(+), 18 deletions(-)
diff --git a/packages/google-cloud-node/protos/google/cloud/speech/v1/cloud_speech.proto b/packages/google-cloud-node/protos/google/cloud/speech/v1/cloud_speech.proto
index f343fa21da85..495cb6e66c36 100644
--- a/packages/google-cloud-node/protos/google/cloud/speech/v1/cloud_speech.proto
+++ b/packages/google-cloud-node/protos/google/cloud/speech/v1/cloud_speech.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
@@ -24,6 +23,7 @@ import "google/longrunning/operations.proto";
import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
import "google/rpc/status.proto";
option cc_enable_arenas = true;
@@ -136,6 +136,16 @@ message StreamingRecognitionConfig {
// `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
// more than one `StreamingRecognitionResult` with the `is_final` flag set to
// `true`.
+ //
+ // The `single_utterance` field can only be used with specified models,
+ // otherwise an error is returned. The `model` field in [`RecognitionConfig`][]
+ // must be set to one of the following:
+ //
+ // * `command_and_search`
+ // * `phone_call`, with the additional field `useEnhanced` set to `true`
+ // * Left undefined. In this case the API auto-selects a model based on
+ //   any other parameters that you set in
+ //   `RecognitionConfig`.
bool single_utterance = 2;
// If `true`, interim results (tentative hypotheses) may be
@@ -158,7 +168,7 @@ message RecognitionConfig {
// a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
// recognition can be reduced if lossy codecs are used to capture or transmit
// audio, particularly if background noise is present. Lossy codecs include
- // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
+ // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, `MP3`.
//
// The `FLAC` and `WAV` audio file formats include a header that describes the
// included audio content. You can request recognition for `WAV` files that
@@ -274,7 +284,7 @@ message RecognitionConfig {
// A means to provide context to assist the speech recognition. For more
// information, see
// [speech
- // adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
+ // adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
repeated SpeechContext speech_contexts = 6;
// If `true`, the top result includes a list of words and
@@ -287,9 +297,6 @@ message RecognitionConfig {
// This feature is only available in select languages. Setting this for
// requests in other languages has no effect at all.
// The default 'false' value does not add punctuation to result hypotheses.
- // Note: This is currently offered as an experimental service, complimentary
- // to all users. In the future this may be exclusively available as a
- // premium feature.
bool enable_automatic_punctuation = 11;
// Config to enable speaker diarization and set additional
@@ -325,7 +332,7 @@ message RecognitionConfig {
//
//
// video |
- // Best for audio that originated from from video or includes multiple
+ // | Best for audio that originated from video or includes multiple
// speakers. Ideally the audio is recorded at a 16 kHz or greater
// sampling rate. This is a premium model that costs more than the
// standard rate. |
@@ -367,9 +374,11 @@ message SpeakerDiarizationConfig {
// number of speakers. If not set, the default value is 6.
int32 max_speaker_count = 3;
- // Unused.
- int32 speaker_tag = 5
- [(google.api.field_behavior) = OUTPUT_ONLY, deprecated = true];
+ // Output only. Unused.
+ int32 speaker_tag = 5 [
+ deprecated = true,
+ (google.api.field_behavior) = OUTPUT_ONLY
+ ];
}
// Description of audio data to be recognized.
@@ -548,6 +557,9 @@ message RecognizeResponse {
// Sequential list of transcription results corresponding to
// sequential portions of audio.
repeated SpeechRecognitionResult results = 2;
+
+ // When available, billed audio seconds for the corresponding request.
+ google.protobuf.Duration total_billed_time = 3;
}
// The only message returned to the client by the `LongRunningRecognize` method.
@@ -559,6 +571,9 @@ message LongRunningRecognizeResponse {
// Sequential list of transcription results corresponding to
// sequential portions of audio.
repeated SpeechRecognitionResult results = 2;
+
+ // When available, billed audio seconds for the corresponding request.
+ google.protobuf.Duration total_billed_time = 3;
}
// Describes the progress of a long-running `LongRunningRecognize` call. It is
@@ -574,6 +589,10 @@ message LongRunningRecognizeMetadata {
// Time of the most recent processing update.
google.protobuf.Timestamp last_update_time = 3;
+
+ // Output only. The URI of the audio file being transcribed. Empty if the audio was sent
+ // as byte content.
+ string uri = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// `StreamingRecognizeResponse` is the only message returned to the client by
@@ -582,8 +601,8 @@ message LongRunningRecognizeMetadata {
// audio, and `single_utterance` is set to false, then no messages are streamed
// back to the client.
//
-// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
-// be returned while processing audio:
+// Here's an example of a series of `StreamingRecognizeResponse`s that might be
+// returned while processing audio:
//
// 1. results { alternatives { transcript: "tube" } stability: 0.01 }
//
@@ -653,6 +672,10 @@ message StreamingRecognizeResponse {
// Indicates the type of speech event.
SpeechEventType speech_event_type = 4;
+
+ // When available, billed audio seconds for the stream.
+ // Set only if this is the last response in the stream.
+ google.protobuf.Duration total_billed_time = 5;
}
// A streaming speech recognition result corresponding to a portion of the audio
@@ -749,11 +772,10 @@ message WordInfo {
// The word corresponding to this set of information.
string word = 3;
- // A distinct integer value is assigned for every speaker within
+ // Output only. A distinct integer value is assigned for every speaker within
// the audio. This field specifies which one of those speakers was detected to
// have spoken this word. Value ranges from '1' to diarization_speaker_count.
// speaker_tag is set if enable_speaker_diarization = 'true' and only in the
// top alternative.
- int32 speaker_tag = 5
- [(google.api.field_behavior) = OUTPUT_ONLY];
+ int32 speaker_tag = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
}
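
Once this proto lands and the client is regenerated, the unary surface exposes the field directly. A minimal sketch of reading it through `@google-cloud/speech` (the bucket URI and config values are illustrative, not part of this patch):

```ts
import {SpeechClient} from '@google-cloud/speech';

async function recognizeWithBilling(): Promise<void> {
  const client = new SpeechClient();
  const [response] = await client.recognize({
    config: {
      encoding: 'LINEAR16',
      sampleRateHertz: 16000,
      languageCode: 'en-US',
    },
    // Illustrative URI; any gs:// object holding LINEAR16 audio works.
    audio: {uri: 'gs://my-bucket/audio.raw'},
  });

  for (const result of response.results ?? []) {
    console.log(result.alternatives?.[0]?.transcript);
  }

  // New in this patch: a google.protobuf.Duration, set when available.
  // `seconds` may surface as number|Long|string, so coerce before math.
  if (response.totalBilledTime) {
    console.log(`Billed audio: ${Number(response.totalBilledTime.seconds ?? 0)}s`);
  }
}

recognizeWithBilling().catch(console.error);
```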
diff --git a/packages/google-cloud-node/protos/protos.d.ts b/packages/google-cloud-node/protos/protos.d.ts
index 2e838da572a1..9ec2503bc2e5 100644
--- a/packages/google-cloud-node/protos/protos.d.ts
+++ b/packages/google-cloud-node/protos/protos.d.ts
@@ -1165,6 +1165,9 @@ export namespace google {
/** RecognizeResponse results */
results?: (google.cloud.speech.v1.ISpeechRecognitionResult[]|null);
+
+ /** RecognizeResponse totalBilledTime */
+ totalBilledTime?: (google.protobuf.IDuration|null);
}
/** Represents a RecognizeResponse. */
@@ -1179,6 +1182,9 @@ export namespace google {
/** RecognizeResponse results. */
public results: google.cloud.speech.v1.ISpeechRecognitionResult[];
+ /** RecognizeResponse totalBilledTime. */
+ public totalBilledTime?: (google.protobuf.IDuration|null);
+
/**
* Creates a new RecognizeResponse instance using the specified properties.
* @param [properties] Properties to set
@@ -1255,6 +1261,9 @@ export namespace google {
/** LongRunningRecognizeResponse results */
results?: (google.cloud.speech.v1.ISpeechRecognitionResult[]|null);
+
+ /** LongRunningRecognizeResponse totalBilledTime */
+ totalBilledTime?: (google.protobuf.IDuration|null);
}
/** Represents a LongRunningRecognizeResponse. */
@@ -1269,6 +1278,9 @@ export namespace google {
/** LongRunningRecognizeResponse results. */
public results: google.cloud.speech.v1.ISpeechRecognitionResult[];
+ /** LongRunningRecognizeResponse totalBilledTime. */
+ public totalBilledTime?: (google.protobuf.IDuration|null);
+
/**
* Creates a new LongRunningRecognizeResponse instance using the specified properties.
* @param [properties] Properties to set
@@ -1351,6 +1363,9 @@ export namespace google {
/** LongRunningRecognizeMetadata lastUpdateTime */
lastUpdateTime?: (google.protobuf.ITimestamp|null);
+
+ /** LongRunningRecognizeMetadata uri */
+ uri?: (string|null);
}
/** Represents a LongRunningRecognizeMetadata. */
@@ -1371,6 +1386,9 @@ export namespace google {
/** LongRunningRecognizeMetadata lastUpdateTime. */
public lastUpdateTime?: (google.protobuf.ITimestamp|null);
+ /** LongRunningRecognizeMetadata uri. */
+ public uri: string;
+
/**
* Creates a new LongRunningRecognizeMetadata instance using the specified properties.
* @param [properties] Properties to set
@@ -1453,6 +1471,9 @@ export namespace google {
/** StreamingRecognizeResponse speechEventType */
speechEventType?: (google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType|keyof typeof google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType|null);
+
+ /** StreamingRecognizeResponse totalBilledTime */
+ totalBilledTime?: (google.protobuf.IDuration|null);
}
/** Represents a StreamingRecognizeResponse. */
@@ -1473,6 +1494,9 @@ export namespace google {
/** StreamingRecognizeResponse speechEventType. */
public speechEventType: (google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType|keyof typeof google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType);
+ /** StreamingRecognizeResponse totalBilledTime. */
+ public totalBilledTime?: (google.protobuf.IDuration|null);
+
/**
* Creates a new StreamingRecognizeResponse instance using the specified properties.
* @param [properties] Properties to set
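
The same typings drive the long-running path, where the new metadata `uri` shows up on progress events. A sketch under the same assumptions (hypothetical bucket path):

```ts
import {SpeechClient} from '@google-cloud/speech';

async function longRunningWithBilling(): Promise<void> {
  const client = new SpeechClient();
  const [operation] = await client.longRunningRecognize({
    config: {encoding: 'FLAC', languageCode: 'en-US'},
    audio: {uri: 'gs://my-bucket/long-recording.flac'},
  });

  // The new metadata field echoes the input URI while the job runs;
  // per the proto comment, it stays empty for inline byte content.
  operation.on('progress', metadata => {
    console.log(`Transcribing ${metadata.uri}: ${metadata.progressPercent}%`);
  });

  const [response] = await operation.promise();
  console.log(`Billed: ${Number(response.totalBilledTime?.seconds ?? 0)}s`);
}

longRunningWithBilling().catch(console.error);
```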
diff --git a/packages/google-cloud-node/protos/protos.js b/packages/google-cloud-node/protos/protos.js
index 02cfea07abef..12acf6e4156a 100644
--- a/packages/google-cloud-node/protos/protos.js
+++ b/packages/google-cloud-node/protos/protos.js
@@ -2949,6 +2949,7 @@
* @memberof google.cloud.speech.v1
* @interface IRecognizeResponse
* @property {Array.|null} [results] RecognizeResponse results
+ * @property {google.protobuf.IDuration|null} [totalBilledTime] RecognizeResponse totalBilledTime
*/
/**
@@ -2975,6 +2976,14 @@
*/
RecognizeResponse.prototype.results = $util.emptyArray;
+ /**
+ * RecognizeResponse totalBilledTime.
+ * @member {google.protobuf.IDuration|null|undefined} totalBilledTime
+ * @memberof google.cloud.speech.v1.RecognizeResponse
+ * @instance
+ */
+ RecognizeResponse.prototype.totalBilledTime = null;
+
/**
* Creates a new RecognizeResponse instance using the specified properties.
* @function create
@@ -3002,6 +3011,8 @@
if (message.results != null && message.results.length)
for (var i = 0; i < message.results.length; ++i)
$root.google.cloud.speech.v1.SpeechRecognitionResult.encode(message.results[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim();
+ if (message.totalBilledTime != null && Object.hasOwnProperty.call(message, "totalBilledTime"))
+ $root.google.protobuf.Duration.encode(message.totalBilledTime, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim();
return writer;
};
@@ -3041,6 +3052,9 @@
message.results = [];
message.results.push($root.google.cloud.speech.v1.SpeechRecognitionResult.decode(reader, reader.uint32()));
break;
+ case 3:
+ message.totalBilledTime = $root.google.protobuf.Duration.decode(reader, reader.uint32());
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -3085,6 +3099,11 @@
return "results." + error;
}
}
+ if (message.totalBilledTime != null && message.hasOwnProperty("totalBilledTime")) {
+ var error = $root.google.protobuf.Duration.verify(message.totalBilledTime);
+ if (error)
+ return "totalBilledTime." + error;
+ }
return null;
};
@@ -3110,6 +3129,11 @@
message.results[i] = $root.google.cloud.speech.v1.SpeechRecognitionResult.fromObject(object.results[i]);
}
}
+ if (object.totalBilledTime != null) {
+ if (typeof object.totalBilledTime !== "object")
+ throw TypeError(".google.cloud.speech.v1.RecognizeResponse.totalBilledTime: object expected");
+ message.totalBilledTime = $root.google.protobuf.Duration.fromObject(object.totalBilledTime);
+ }
return message;
};
@@ -3128,11 +3152,15 @@
var object = {};
if (options.arrays || options.defaults)
object.results = [];
+ if (options.defaults)
+ object.totalBilledTime = null;
if (message.results && message.results.length) {
object.results = [];
for (var j = 0; j < message.results.length; ++j)
object.results[j] = $root.google.cloud.speech.v1.SpeechRecognitionResult.toObject(message.results[j], options);
}
+ if (message.totalBilledTime != null && message.hasOwnProperty("totalBilledTime"))
+ object.totalBilledTime = $root.google.protobuf.Duration.toObject(message.totalBilledTime, options);
return object;
};
@@ -3157,6 +3185,7 @@
* @memberof google.cloud.speech.v1
* @interface ILongRunningRecognizeResponse
* @property {Array.|null} [results] LongRunningRecognizeResponse results
+ * @property {google.protobuf.IDuration|null} [totalBilledTime] LongRunningRecognizeResponse totalBilledTime
*/
/**
@@ -3183,6 +3212,14 @@
*/
LongRunningRecognizeResponse.prototype.results = $util.emptyArray;
+ /**
+ * LongRunningRecognizeResponse totalBilledTime.
+ * @member {google.protobuf.IDuration|null|undefined} totalBilledTime
+ * @memberof google.cloud.speech.v1.LongRunningRecognizeResponse
+ * @instance
+ */
+ LongRunningRecognizeResponse.prototype.totalBilledTime = null;
+
/**
* Creates a new LongRunningRecognizeResponse instance using the specified properties.
* @function create
@@ -3210,6 +3247,8 @@
if (message.results != null && message.results.length)
for (var i = 0; i < message.results.length; ++i)
$root.google.cloud.speech.v1.SpeechRecognitionResult.encode(message.results[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim();
+ if (message.totalBilledTime != null && Object.hasOwnProperty.call(message, "totalBilledTime"))
+ $root.google.protobuf.Duration.encode(message.totalBilledTime, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim();
return writer;
};
@@ -3249,6 +3288,9 @@
message.results = [];
message.results.push($root.google.cloud.speech.v1.SpeechRecognitionResult.decode(reader, reader.uint32()));
break;
+ case 3:
+ message.totalBilledTime = $root.google.protobuf.Duration.decode(reader, reader.uint32());
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -3293,6 +3335,11 @@
return "results." + error;
}
}
+ if (message.totalBilledTime != null && message.hasOwnProperty("totalBilledTime")) {
+ var error = $root.google.protobuf.Duration.verify(message.totalBilledTime);
+ if (error)
+ return "totalBilledTime." + error;
+ }
return null;
};
@@ -3318,6 +3365,11 @@
message.results[i] = $root.google.cloud.speech.v1.SpeechRecognitionResult.fromObject(object.results[i]);
}
}
+ if (object.totalBilledTime != null) {
+ if (typeof object.totalBilledTime !== "object")
+ throw TypeError(".google.cloud.speech.v1.LongRunningRecognizeResponse.totalBilledTime: object expected");
+ message.totalBilledTime = $root.google.protobuf.Duration.fromObject(object.totalBilledTime);
+ }
return message;
};
@@ -3336,11 +3388,15 @@
var object = {};
if (options.arrays || options.defaults)
object.results = [];
+ if (options.defaults)
+ object.totalBilledTime = null;
if (message.results && message.results.length) {
object.results = [];
for (var j = 0; j < message.results.length; ++j)
object.results[j] = $root.google.cloud.speech.v1.SpeechRecognitionResult.toObject(message.results[j], options);
}
+ if (message.totalBilledTime != null && message.hasOwnProperty("totalBilledTime"))
+ object.totalBilledTime = $root.google.protobuf.Duration.toObject(message.totalBilledTime, options);
return object;
};
@@ -3367,6 +3423,7 @@
* @property {number|null} [progressPercent] LongRunningRecognizeMetadata progressPercent
* @property {google.protobuf.ITimestamp|null} [startTime] LongRunningRecognizeMetadata startTime
* @property {google.protobuf.ITimestamp|null} [lastUpdateTime] LongRunningRecognizeMetadata lastUpdateTime
+ * @property {string|null} [uri] LongRunningRecognizeMetadata uri
*/
/**
@@ -3408,6 +3465,14 @@
*/
LongRunningRecognizeMetadata.prototype.lastUpdateTime = null;
+ /**
+ * LongRunningRecognizeMetadata uri.
+ * @member {string} uri
+ * @memberof google.cloud.speech.v1.LongRunningRecognizeMetadata
+ * @instance
+ */
+ LongRunningRecognizeMetadata.prototype.uri = "";
+
/**
* Creates a new LongRunningRecognizeMetadata instance using the specified properties.
* @function create
@@ -3438,6 +3503,8 @@
$root.google.protobuf.Timestamp.encode(message.startTime, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim();
if (message.lastUpdateTime != null && Object.hasOwnProperty.call(message, "lastUpdateTime"))
$root.google.protobuf.Timestamp.encode(message.lastUpdateTime, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim();
+ if (message.uri != null && Object.hasOwnProperty.call(message, "uri"))
+ writer.uint32(/* id 4, wireType 2 =*/34).string(message.uri);
return writer;
};
@@ -3481,6 +3548,9 @@
case 3:
message.lastUpdateTime = $root.google.protobuf.Timestamp.decode(reader, reader.uint32());
break;
+ case 4:
+ message.uri = reader.string();
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -3529,6 +3599,9 @@
if (error)
return "lastUpdateTime." + error;
}
+ if (message.uri != null && message.hasOwnProperty("uri"))
+ if (!$util.isString(message.uri))
+ return "uri: string expected";
return null;
};
@@ -3556,6 +3629,8 @@
throw TypeError(".google.cloud.speech.v1.LongRunningRecognizeMetadata.lastUpdateTime: object expected");
message.lastUpdateTime = $root.google.protobuf.Timestamp.fromObject(object.lastUpdateTime);
}
+ if (object.uri != null)
+ message.uri = String(object.uri);
return message;
};
@@ -3576,6 +3651,7 @@
object.progressPercent = 0;
object.startTime = null;
object.lastUpdateTime = null;
+ object.uri = "";
}
if (message.progressPercent != null && message.hasOwnProperty("progressPercent"))
object.progressPercent = message.progressPercent;
@@ -3583,6 +3659,8 @@
object.startTime = $root.google.protobuf.Timestamp.toObject(message.startTime, options);
if (message.lastUpdateTime != null && message.hasOwnProperty("lastUpdateTime"))
object.lastUpdateTime = $root.google.protobuf.Timestamp.toObject(message.lastUpdateTime, options);
+ if (message.uri != null && message.hasOwnProperty("uri"))
+ object.uri = message.uri;
return object;
};
@@ -3609,6 +3687,7 @@
* @property {google.rpc.IStatus|null} [error] StreamingRecognizeResponse error
* @property {Array.|null} [results] StreamingRecognizeResponse results
* @property {google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType|null} [speechEventType] StreamingRecognizeResponse speechEventType
+ * @property {google.protobuf.IDuration|null} [totalBilledTime] StreamingRecognizeResponse totalBilledTime
*/
/**
@@ -3651,6 +3730,14 @@
*/
StreamingRecognizeResponse.prototype.speechEventType = 0;
+ /**
+ * StreamingRecognizeResponse totalBilledTime.
+ * @member {google.protobuf.IDuration|null|undefined} totalBilledTime
+ * @memberof google.cloud.speech.v1.StreamingRecognizeResponse
+ * @instance
+ */
+ StreamingRecognizeResponse.prototype.totalBilledTime = null;
+
/**
* Creates a new StreamingRecognizeResponse instance using the specified properties.
* @function create
@@ -3682,6 +3769,8 @@
$root.google.cloud.speech.v1.StreamingRecognitionResult.encode(message.results[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim();
if (message.speechEventType != null && Object.hasOwnProperty.call(message, "speechEventType"))
writer.uint32(/* id 4, wireType 0 =*/32).int32(message.speechEventType);
+ if (message.totalBilledTime != null && Object.hasOwnProperty.call(message, "totalBilledTime"))
+ $root.google.protobuf.Duration.encode(message.totalBilledTime, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim();
return writer;
};
@@ -3727,6 +3816,9 @@
case 4:
message.speechEventType = reader.int32();
break;
+ case 5:
+ message.totalBilledTime = $root.google.protobuf.Duration.decode(reader, reader.uint32());
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -3784,6 +3876,11 @@
case 1:
break;
}
+ if (message.totalBilledTime != null && message.hasOwnProperty("totalBilledTime")) {
+ var error = $root.google.protobuf.Duration.verify(message.totalBilledTime);
+ if (error)
+ return "totalBilledTime." + error;
+ }
return null;
};
@@ -3824,6 +3921,11 @@
message.speechEventType = 1;
break;
}
+ if (object.totalBilledTime != null) {
+ if (typeof object.totalBilledTime !== "object")
+ throw TypeError(".google.cloud.speech.v1.StreamingRecognizeResponse.totalBilledTime: object expected");
+ message.totalBilledTime = $root.google.protobuf.Duration.fromObject(object.totalBilledTime);
+ }
return message;
};
@@ -3845,6 +3947,7 @@
if (options.defaults) {
object.error = null;
object.speechEventType = options.enums === String ? "SPEECH_EVENT_UNSPECIFIED" : 0;
+ object.totalBilledTime = null;
}
if (message.error != null && message.hasOwnProperty("error"))
object.error = $root.google.rpc.Status.toObject(message.error, options);
@@ -3855,6 +3958,8 @@
}
if (message.speechEventType != null && message.hasOwnProperty("speechEventType"))
object.speechEventType = options.enums === String ? $root.google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType[message.speechEventType] : message.speechEventType;
+ if (message.totalBilledTime != null && message.hasOwnProperty("totalBilledTime"))
+ object.totalBilledTime = $root.google.protobuf.Duration.toObject(message.totalBilledTime, options);
return object;
};
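
On the streaming surface the generated code above decodes field 5 on every message, but per the proto comment the server only populates it on the last response. A sketch of consuming it (the input file is illustrative; real uses often pipe a live microphone stream):

```ts
import * as fs from 'fs';
import {SpeechClient} from '@google-cloud/speech';

const client = new SpeechClient();

const recognizeStream = client
  .streamingRecognize({
    config: {
      encoding: 'LINEAR16',
      sampleRateHertz: 16000,
      languageCode: 'en-US',
    },
    interimResults: true,
  })
  .on('error', console.error)
  .on('data', response => {
    for (const result of response.results ?? []) {
      console.log(result.alternatives?.[0]?.transcript);
    }
    // Only the final response in the stream carries the billing duration.
    if (response.totalBilledTime) {
      console.log(`Stream billed: ${Number(response.totalBilledTime.seconds ?? 0)}s`);
    }
  });

fs.createReadStream('audio.raw').pipe(recognizeStream);
```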
diff --git a/packages/google-cloud-node/protos/protos.json b/packages/google-cloud-node/protos/protos.json
index ac3b21a2e2a7..09c87990828b 100644
--- a/packages/google-cloud-node/protos/protos.json
+++ b/packages/google-cloud-node/protos/protos.json
@@ -249,8 +249,8 @@
"type": "int32",
"id": 5,
"options": {
- "(google.api.field_behavior)": "OUTPUT_ONLY",
- "deprecated": true
+ "deprecated": true,
+ "(google.api.field_behavior)": "OUTPUT_ONLY"
}
}
}
@@ -367,6 +367,10 @@
"rule": "repeated",
"type": "SpeechRecognitionResult",
"id": 2
+ },
+ "totalBilledTime": {
+ "type": "google.protobuf.Duration",
+ "id": 3
}
}
},
@@ -376,6 +380,10 @@
"rule": "repeated",
"type": "SpeechRecognitionResult",
"id": 2
+ },
+ "totalBilledTime": {
+ "type": "google.protobuf.Duration",
+ "id": 3
}
}
},
@@ -392,6 +400,13 @@
"lastUpdateTime": {
"type": "google.protobuf.Timestamp",
"id": 3
+ },
+ "uri": {
+ "type": "string",
+ "id": 4,
+ "options": {
+ "(google.api.field_behavior)": "OUTPUT_ONLY"
+ }
}
}
},
@@ -409,6 +424,10 @@
"speechEventType": {
"type": "SpeechEventType",
"id": 4
+ },
+ "totalBilledTime": {
+ "type": "google.protobuf.Duration",
+ "id": 5
}
},
"nested": {