diff --git a/README.md b/README.md index 0b621554f8..b1eadb4443 100644 --- a/README.md +++ b/README.md @@ -50,20 +50,20 @@ If you are using Maven without the BOM, add this to your dependencies: If you are using Gradle 5.x or later, add this to your dependencies: ```Groovy -implementation platform('com.google.cloud:libraries-bom:26.16.0') +implementation platform('com.google.cloud:libraries-bom:26.17.0') implementation 'com.google.cloud:google-cloud-bigquerystorage' ``` If you are using Gradle without BOM, add this to your dependencies: ```Groovy -implementation 'com.google.cloud:google-cloud-bigquerystorage:2.37.2' +implementation 'com.google.cloud:google-cloud-bigquerystorage:2.38.0' ``` If you are using SBT, add this to your dependencies: ```Scala -libraryDependencies += "com.google.cloud" % "google-cloud-bigquerystorage" % "2.37.2" +libraryDependencies += "com.google.cloud" % "google-cloud-bigquerystorage" % "2.38.0" ``` @@ -220,7 +220,7 @@ Java is a registered trademark of Oracle and/or its affiliates. [kokoro-badge-link-5]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java11.html [stability-image]: https://img.shields.io/badge/stability-stable-green [maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-bigquerystorage.svg -[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigquerystorage/2.37.2 +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigquerystorage/2.38.0 [authentication]: https://github.com/googleapis/google-cloud-java#authentication [auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes [predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java index 2c610bfebb..c2a862247c 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java @@ -93,6 +93,7 @@ public void createReadSessionTest() throws Exception { .setReadOptions(ReadSession.TableReadOptions.newBuilder().build()) .addAllStreams(new ArrayList()) .setEstimatedTotalBytesScanned(452788190) + .setEstimatedTotalPhysicalFileSize(938325754) .setEstimatedRowCount(-1745583577) .setTraceId("traceId-1067401920") .build(); @@ -146,6 +147,7 @@ public void createReadSessionTest2() throws Exception { .setReadOptions(ReadSession.TableReadOptions.newBuilder().build()) .addAllStreams(new ArrayList()) .setEstimatedTotalBytesScanned(452788190) + .setEstimatedTotalPhysicalFileSize(938325754) .setEstimatedRowCount(-1745583577) .setTraceId("traceId-1067401920") .build(); diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java index 205a5e551f..54711847d8 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java @@ -1181,11 +1181,11 @@ public interface TableReadOptionsOrBuilder 
* *
      * Optional. Specifies a table sampling percentage. Specifically, the query
-     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This
-     * samples at the file-level. It will randomly choose for each file whether
-     * to include that file in the sample returned. Note, that if the table only
-     * has one file, then TABLESAMPLE SYSTEM will select that file and return
-     * all returnable rows contained within.
+     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+     * sampling percentage is applied at the data block granularity. It will
+     * randomly choose for each data block whether to read the rows in that data
+     * block. For more details, see
+     * https://cloud.google.com/bigquery/docs/table-sampling
      * 
* * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1198,11 +1198,11 @@ public interface TableReadOptionsOrBuilder * *
      * Optional. Specifies a table sampling percentage. Specifically, the query
-     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This
-     * samples at the file-level. It will randomly choose for each file whether
-     * to include that file in the sample returned. Note, that if the table only
-     * has one file, then TABLESAMPLE SYSTEM will select that file and return
-     * all returnable rows contained within.
+     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+     * sampling percentage is applied at the data block granularity. It will
+     * randomly choose for each data block whether to read the rows in that data
+     * block. For more details, see
+     * https://cloud.google.com/bigquery/docs/table-sampling
      * 
* * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1756,11 +1756,11 @@ public boolean hasAvroSerializationOptions() { * *
      * Optional. Specifies a table sampling percentage. Specifically, the query
-     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This
-     * samples at the file-level. It will randomly choose for each file whether
-     * to include that file in the sample returned. Note, that if the table only
-     * has one file, then TABLESAMPLE SYSTEM will select that file and return
-     * all returnable rows contained within.
+     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+     * sampling percentage is applied at the data block granularity. It will
+     * randomly choose for each data block whether to read the rows in that data
+     * block. For more details, see
+     * https://cloud.google.com/bigquery/docs/table-sampling
      * 
* * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -1776,11 +1776,11 @@ public boolean hasSamplePercentage() { * *
      * Optional. Specifies a table sampling percentage. Specifically, the query
-     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This
-     * samples at the file-level. It will randomly choose for each file whether
-     * to include that file in the sample returned. Note, that if the table only
-     * has one file, then TABLESAMPLE SYSTEM will select that file and return
-     * all returnable rows contained within.
+     * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+     * sampling percentage is applied at the data block granularity. It will
+     * randomly choose for each data block whether to read the rows in that data
+     * block. For more details, see
+     * https://cloud.google.com/bigquery/docs/table-sampling
      * 
* * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -3567,11 +3567,11 @@ public Builder clearAvroSerializationOptions() { * *
        * Optional. Specifies a table sampling percentage. Specifically, the query
-       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This
-       * samples at the file-level. It will randomly choose for each file whether
-       * to include that file in the sample returned. Note, that if the table only
-       * has one file, then TABLESAMPLE SYSTEM will select that file and return
-       * all returnable rows contained within.
+       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+       * sampling percentage is applied at the data block granularity. It will
+       * randomly choose for each data block whether to read the rows in that data
+       * block. For more details, see
+       * https://cloud.google.com/bigquery/docs/table-sampling
        * 
* * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -3588,11 +3588,11 @@ public boolean hasSamplePercentage() { * *
        * Optional. Specifies a table sampling percentage. Specifically, the query
-       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This
-       * samples at the file-level. It will randomly choose for each file whether
-       * to include that file in the sample returned. Note, that if the table only
-       * has one file, then TABLESAMPLE SYSTEM will select that file and return
-       * all returnable rows contained within.
+       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+       * sampling percentage is applied at the data block granularity. It will
+       * randomly choose for each data block whether to read the rows in that data
+       * block. For more details, see
+       * https://cloud.google.com/bigquery/docs/table-sampling
        * 
* * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -3609,11 +3609,11 @@ public double getSamplePercentage() { * *
        * Optional. Specifies a table sampling percentage. Specifically, the query
-       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This
-       * samples at the file-level. It will randomly choose for each file whether
-       * to include that file in the sample returned. Note, that if the table only
-       * has one file, then TABLESAMPLE SYSTEM will select that file and return
-       * all returnable rows contained within.
+       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+       * sampling percentage is applied at the data block granularity. It will
+       * randomly choose for each data block whether to read the rows in that data
+       * block. For more details, see
+       * https://cloud.google.com/bigquery/docs/table-sampling
        * 
* * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -3634,11 +3634,11 @@ public Builder setSamplePercentage(double value) { * *
        * Optional. Specifies a table sampling percentage. Specifically, the query
-       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This
-       * samples at the file-level. It will randomly choose for each file whether
-       * to include that file in the sample returned. Note, that if the table only
-       * has one file, then TABLESAMPLE SYSTEM will select that file and return
-       * all returnable rows contained within.
+       * planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
+       * sampling percentage is applied at the data block granularity. It will
+       * randomly choose for each data block whether to read the rows in that data
+       * block. For more details, see
+       * https://cloud.google.com/bigquery/docs/table-sampling
        * 
* * optional double sample_percentage = 5 [(.google.api.field_behavior) = OPTIONAL]; @@ -4336,6 +4336,30 @@ public long getEstimatedTotalBytesScanned() { return estimatedTotalBytesScanned_; } + public static final int ESTIMATED_TOTAL_PHYSICAL_FILE_SIZE_FIELD_NUMBER = 15; + private long estimatedTotalPhysicalFileSize_ = 0L; + /** + * + * + *
+   * Output only. A pre-projected estimate of the total physical size (in bytes)
+   * of files this session will scan when all streams are completely consumed.
+   * This estimate does not depend on the selected columns and can be based on
+   * metadata from the table which might be incomplete or stale. Only set for
+   * BigLake tables.
+   * 
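The new field is a plain proto3 `int64`, so an unset value reads as `0`. A small sketch of interpreting the generated getter, assuming `session` came back from a CreateReadSession response:

```java
import com.google.cloud.bigquery.storage.v1.ReadSession;

final class PhysicalFileSizeCheck {
  // Interprets the proto3 zero-default of the new output-only field.
  static void logEstimate(ReadSession session) {
    long physicalBytes = session.getEstimatedTotalPhysicalFileSize();
    if (physicalBytes == 0L) {
      // 0 means unset: not a BigLake table, or no estimate was produced.
      System.out.println("No physical file size estimate available.");
    } else {
      System.out.println("Session will scan ~" + physicalBytes + " bytes of files.");
    }
  }
}
```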
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The estimatedTotalPhysicalFileSize. + */ + @java.lang.Override + public long getEstimatedTotalPhysicalFileSize() { + return estimatedTotalPhysicalFileSize_; + } + public static final int ESTIMATED_ROW_COUNT_FIELD_NUMBER = 14; private long estimatedRowCount_ = 0L; /** @@ -4468,6 +4492,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (estimatedRowCount_ != 0L) { output.writeInt64(14, estimatedRowCount_); } + if (estimatedTotalPhysicalFileSize_ != 0L) { + output.writeInt64(15, estimatedTotalPhysicalFileSize_); + } getUnknownFields().writeTo(output); } @@ -4519,6 +4546,11 @@ public int getSerializedSize() { if (estimatedRowCount_ != 0L) { size += com.google.protobuf.CodedOutputStream.computeInt64Size(14, estimatedRowCount_); } + if (estimatedTotalPhysicalFileSize_ != 0L) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 15, estimatedTotalPhysicalFileSize_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -4552,6 +4584,8 @@ public boolean equals(final java.lang.Object obj) { } if (!getStreamsList().equals(other.getStreamsList())) return false; if (getEstimatedTotalBytesScanned() != other.getEstimatedTotalBytesScanned()) return false; + if (getEstimatedTotalPhysicalFileSize() != other.getEstimatedTotalPhysicalFileSize()) + return false; if (getEstimatedRowCount() != other.getEstimatedRowCount()) return false; if (!getTraceId().equals(other.getTraceId())) return false; if (!getSchemaCase().equals(other.getSchemaCase())) return false; @@ -4600,6 +4634,8 @@ public int hashCode() { } hash = (37 * hash) + ESTIMATED_TOTAL_BYTES_SCANNED_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEstimatedTotalBytesScanned()); + hash = (37 * hash) + ESTIMATED_TOTAL_PHYSICAL_FILE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEstimatedTotalPhysicalFileSize()); hash = (37 * hash) + ESTIMATED_ROW_COUNT_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEstimatedRowCount()); hash = (37 * hash) + TRACE_ID_FIELD_NUMBER; @@ -4787,6 +4823,7 @@ public Builder clear() { } bitField0_ = (bitField0_ & ~0x00000100); estimatedTotalBytesScanned_ = 0L; + estimatedTotalPhysicalFileSize_ = 0L; estimatedRowCount_ = 0L; traceId_ = ""; schemaCase_ = 0; @@ -4866,9 +4903,12 @@ private void buildPartial0(com.google.cloud.bigquery.storage.v1.ReadSession resu result.estimatedTotalBytesScanned_ = estimatedTotalBytesScanned_; } if (((from_bitField0_ & 0x00000400) != 0)) { - result.estimatedRowCount_ = estimatedRowCount_; + result.estimatedTotalPhysicalFileSize_ = estimatedTotalPhysicalFileSize_; } if (((from_bitField0_ & 0x00000800) != 0)) { + result.estimatedRowCount_ = estimatedRowCount_; + } + if (((from_bitField0_ & 0x00001000) != 0)) { result.traceId_ = traceId_; } } @@ -4982,12 +5022,15 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ReadSession other) if (other.getEstimatedTotalBytesScanned() != 0L) { setEstimatedTotalBytesScanned(other.getEstimatedTotalBytesScanned()); } + if (other.getEstimatedTotalPhysicalFileSize() != 0L) { + setEstimatedTotalPhysicalFileSize(other.getEstimatedTotalPhysicalFileSize()); + } if (other.getEstimatedRowCount() != 0L) { setEstimatedRowCount(other.getEstimatedRowCount()); } if (!other.getTraceId().isEmpty()) { traceId_ = other.traceId_; - bitField0_ |= 0x00000800; + bitField0_ |= 
0x00001000; onChanged(); } switch (other.getSchemaCase()) { @@ -5103,15 +5146,21 @@ public Builder mergeFrom( case 106: { traceId_ = input.readStringRequireUtf8(); - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; break; } // case 106 case 112: { estimatedRowCount_ = input.readInt64(); - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; break; } // case 112 + case 120: + { + estimatedTotalPhysicalFileSize_ = input.readInt64(); + bitField0_ |= 0x00000400; + break; + } // case 120 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -7128,6 +7177,77 @@ public Builder clearEstimatedTotalBytesScanned() { return this; } + private long estimatedTotalPhysicalFileSize_; + /** + * + * + *
+     * Output only. A pre-projected estimate of the total physical size (in bytes)
+     * of files this session will scan when all streams are completely consumed.
+     * This estimate does not depend on the selected columns and can be based on
+     * metadata from the table which might be incomplete or stale. Only set for
+     * BigLake tables.
+     * 
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The estimatedTotalPhysicalFileSize. + */ + @java.lang.Override + public long getEstimatedTotalPhysicalFileSize() { + return estimatedTotalPhysicalFileSize_; + } + /** + * + * + *
+     * Output only. A pre-projected estimate of the total physical size (in bytes)
+     * of files this session will scan when all streams are completely consumed.
+     * This estimate does not depend on the selected columns and can be based on
+     * metadata from the table which might be incomplete or stale. Only set for
+     * BigLake tables.
+     * 
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The estimatedTotalPhysicalFileSize to set. + * @return This builder for chaining. + */ + public Builder setEstimatedTotalPhysicalFileSize(long value) { + + estimatedTotalPhysicalFileSize_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. A pre-projected estimate of the total physical size (in bytes)
+     * of files this session will scan when all streams are completely consumed.
+     * This estimate does not depend on the selected columns and can be based on
+     * metadata from the table which might be incomplete or stale. Only set for
+     * BigLake tables.
+     * 
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearEstimatedTotalPhysicalFileSize() { + bitField0_ = (bitField0_ & ~0x00000400); + estimatedTotalPhysicalFileSize_ = 0L; + onChanged(); + return this; + } + private long estimatedRowCount_; /** * @@ -7163,7 +7283,7 @@ public long getEstimatedRowCount() { public Builder setEstimatedRowCount(long value) { estimatedRowCount_ = value; - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; onChanged(); return this; } @@ -7181,7 +7301,7 @@ public Builder setEstimatedRowCount(long value) { * @return This builder for chaining. */ public Builder clearEstimatedRowCount() { - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000800); estimatedRowCount_ = 0L; onChanged(); return this; @@ -7264,7 +7384,7 @@ public Builder setTraceId(java.lang.String value) { throw new NullPointerException(); } traceId_ = value; - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; onChanged(); return this; } @@ -7286,7 +7406,7 @@ public Builder setTraceId(java.lang.String value) { */ public Builder clearTraceId() { traceId_ = getDefaultInstance().getTraceId(); - bitField0_ = (bitField0_ & ~0x00000800); + bitField0_ = (bitField0_ & ~0x00001000); onChanged(); return this; } @@ -7313,7 +7433,7 @@ public Builder setTraceIdBytes(com.google.protobuf.ByteString value) { } checkByteStringIsUtf8(value); traceId_ = value; - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; onChanged(); return this; } diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java index f0f25f637c..85a8b45aa1 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java @@ -431,6 +431,25 @@ public interface ReadSessionOrBuilder */ long getEstimatedTotalBytesScanned(); + /** + * + * + *
+   * Output only. A pre-projected estimate of the total physical size (in bytes)
+   * of files this session will scan when all streams are completely consumed.
+   * This estimate does not depend on the selected columns and can be based on
+   * metadata from the table which might be incomplete or stale. Only set for
+   * BigLake tables.
+   * 
+ * + * + * int64 estimated_total_physical_file_size = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The estimatedTotalPhysicalFileSize. + */ + long getEstimatedTotalPhysicalFileSize(); + /** * * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java index a6d4144bed..77eb1b5ff3 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java @@ -63,7 +63,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "uery/storage/v1/arrow.proto\032+google/clou" + "d/bigquery/storage/v1/avro.proto\032,google" + "/cloud/bigquery/storage/v1/table.proto\032\037" - + "google/protobuf/timestamp.proto\"\216\n\n\013Read" + + "google/protobuf/timestamp.proto\"\300\n\n\013Read" + "Session\022\022\n\004name\030\001 \001(\tB\004\342A\001\003\0225\n\013expire_ti" + "me\030\002 \001(\0132\032.google.protobuf.TimestampB\004\342A" + "\001\003\022G\n\013data_format\030\003 \001(\0162,.google.cloud.b" @@ -80,51 +80,52 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "sion.TableReadOptionsB\004\342A\001\001\022C\n\007streams\030\n" + " \003(\0132,.google.cloud.bigquery.storage.v1." + "ReadStreamB\004\342A\001\003\022+\n\035estimated_total_byte" - + "s_scanned\030\014 \001(\003B\004\342A\001\003\022!\n\023estimated_row_c" - + "ount\030\016 \001(\003B\004\342A\001\003\022\026\n\010trace_id\030\r \001(\tB\004\342A\001\001" - + "\032C\n\016TableModifiers\0221\n\rsnapshot_time\030\001 \001(" - + "\0132\032.google.protobuf.Timestamp\032\371\002\n\020TableR" - + "eadOptions\022\027\n\017selected_fields\030\001 \003(\t\022\027\n\017r" - + "ow_restriction\030\002 \001(\t\022h\n\033arrow_serializat" - + "ion_options\030\003 \001(\0132;.google.cloud.bigquer" - + "y.storage.v1.ArrowSerializationOptionsB\004" - + "\342A\001\001H\000\022f\n\032avro_serialization_options\030\004 \001" - + "(\0132:.google.cloud.bigquery.storage.v1.Av" - + "roSerializationOptionsB\004\342A\001\001H\000\022$\n\021sample" - + "_percentage\030\005 \001(\001B\004\342A\001\001H\001\210\001\001B%\n#output_f" - + "ormat_serialization_optionsB\024\n\022_sample_p" - + "ercentage:k\352Ah\n*bigquerystorage.googleap" - + "is.com/ReadSession\022:projects/{project}/l" - + "ocations/{location}/sessions/{session}B\010" - + "\n\006schema\"\235\001\n\nReadStream\022\022\n\004name\030\001 \001(\tB\004\342" - + "A\001\003:{\352Ax\n)bigquerystorage.googleapis.com" - + "/ReadStream\022Kprojects/{project}/location" - + "s/{location}/sessions/{session}/streams/" - + "{stream}\"\202\005\n\013WriteStream\022\022\n\004name\030\001 \001(\tB\004" - + "\342A\001\003\022F\n\004type\030\002 \001(\01622.google.cloud.bigque" - + "ry.storage.v1.WriteStream.TypeB\004\342A\001\005\0225\n\013" - + "create_time\030\003 \001(\0132\032.google.protobuf.Time" - + "stampB\004\342A\001\003\0225\n\013commit_time\030\004 \001(\0132\032.googl" - + "e.protobuf.TimestampB\004\342A\001\003\022I\n\014table_sche" - + "ma\030\005 \001(\0132-.google.cloud.bigquery.storage" - + ".v1.TableSchemaB\004\342A\001\003\022Q\n\nwrite_mode\030\007 \001(" - + "\01627.google.cloud.bigquery.storage.v1.Wri" - + "teStream.WriteModeB\004\342A\001\005\022\026\n\010location\030\010 \001" - + 
"(\tB\004\342A\001\005\"F\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\r" - + "\n\tCOMMITTED\020\001\022\013\n\007PENDING\020\002\022\014\n\010BUFFERED\020\003" - + "\"3\n\tWriteMode\022\032\n\026WRITE_MODE_UNSPECIFIED\020" - + "\000\022\n\n\006INSERT\020\001:v\352As\n*bigquerystorage.goog" - + "leapis.com/WriteStream\022Eprojects/{projec" - + "t}/datasets/{dataset}/tables/{table}/str" - + "eams/{stream}*>\n\nDataFormat\022\033\n\027DATA_FORM" - + "AT_UNSPECIFIED\020\000\022\010\n\004AVRO\020\001\022\t\n\005ARROW\020\002*I\n" - + "\017WriteStreamView\022!\n\035WRITE_STREAM_VIEW_UN" - + "SPECIFIED\020\000\022\t\n\005BASIC\020\001\022\010\n\004FULL\020\002B\273\001\n$com" - + ".google.cloud.bigquery.storage.v1B\013Strea" - + "mProtoP\001Z>cloud.google.com/go/bigquery/s" - + "torage/apiv1/storagepb;storagepb\252\002 Googl" - + "e.Cloud.BigQuery.Storage.V1\312\002 Google\\Clo" - + "ud\\BigQuery\\Storage\\V1b\006proto3" + + "s_scanned\030\014 \001(\003B\004\342A\001\003\0220\n\"estimated_total" + + "_physical_file_size\030\017 \001(\003B\004\342A\001\003\022!\n\023estim" + + "ated_row_count\030\016 \001(\003B\004\342A\001\003\022\026\n\010trace_id\030\r" + + " \001(\tB\004\342A\001\001\032C\n\016TableModifiers\0221\n\rsnapshot" + + "_time\030\001 \001(\0132\032.google.protobuf.Timestamp\032" + + "\371\002\n\020TableReadOptions\022\027\n\017selected_fields\030" + + "\001 \003(\t\022\027\n\017row_restriction\030\002 \001(\t\022h\n\033arrow_" + + "serialization_options\030\003 \001(\0132;.google.clo" + + "ud.bigquery.storage.v1.ArrowSerializatio" + + "nOptionsB\004\342A\001\001H\000\022f\n\032avro_serialization_o" + + "ptions\030\004 \001(\0132:.google.cloud.bigquery.sto" + + "rage.v1.AvroSerializationOptionsB\004\342A\001\001H\000" + + "\022$\n\021sample_percentage\030\005 \001(\001B\004\342A\001\001H\001\210\001\001B%" + + "\n#output_format_serialization_optionsB\024\n" + + "\022_sample_percentage:k\352Ah\n*bigquerystorag" + + "e.googleapis.com/ReadSession\022:projects/{" + + "project}/locations/{location}/sessions/{" + + "session}B\010\n\006schema\"\235\001\n\nReadStream\022\022\n\004nam" + + "e\030\001 \001(\tB\004\342A\001\003:{\352Ax\n)bigquerystorage.goog" + + "leapis.com/ReadStream\022Kprojects/{project" + + "}/locations/{location}/sessions/{session" + + "}/streams/{stream}\"\202\005\n\013WriteStream\022\022\n\004na" + + "me\030\001 \001(\tB\004\342A\001\003\022F\n\004type\030\002 \001(\01622.google.cl" + + "oud.bigquery.storage.v1.WriteStream.Type" + + "B\004\342A\001\005\0225\n\013create_time\030\003 \001(\0132\032.google.pro" + + "tobuf.TimestampB\004\342A\001\003\0225\n\013commit_time\030\004 \001" + + "(\0132\032.google.protobuf.TimestampB\004\342A\001\003\022I\n\014" + + "table_schema\030\005 \001(\0132-.google.cloud.bigque" + + "ry.storage.v1.TableSchemaB\004\342A\001\003\022Q\n\nwrite" + + "_mode\030\007 \001(\01627.google.cloud.bigquery.stor" + + "age.v1.WriteStream.WriteModeB\004\342A\001\005\022\026\n\010lo" + + "cation\030\010 \001(\tB\004\342A\001\005\"F\n\004Type\022\024\n\020TYPE_UNSPE" + + "CIFIED\020\000\022\r\n\tCOMMITTED\020\001\022\013\n\007PENDING\020\002\022\014\n\010" + + "BUFFERED\020\003\"3\n\tWriteMode\022\032\n\026WRITE_MODE_UN" + + "SPECIFIED\020\000\022\n\n\006INSERT\020\001:v\352As\n*bigqueryst" + + "orage.googleapis.com/WriteStream\022Eprojec" + + "ts/{project}/datasets/{dataset}/tables/{" + + "table}/streams/{stream}*>\n\nDataFormat\022\033\n" + + "\027DATA_FORMAT_UNSPECIFIED\020\000\022\010\n\004AVRO\020\001\022\t\n\005" + + 
"ARROW\020\002*I\n\017WriteStreamView\022!\n\035WRITE_STRE" + + "AM_VIEW_UNSPECIFIED\020\000\022\t\n\005BASIC\020\001\022\010\n\004FULL" + + "\020\002B\273\001\n$com.google.cloud.bigquery.storage" + + ".v1B\013StreamProtoP\001Z>cloud.google.com/go/" + + "bigquery/storage/apiv1/storagepb;storage" + + "pb\252\002 Google.Cloud.BigQuery.Storage.V1\312\002 " + + "Google\\Cloud\\BigQuery\\Storage\\V1b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -153,6 +154,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ReadOptions", "Streams", "EstimatedTotalBytesScanned", + "EstimatedTotalPhysicalFileSize", "EstimatedRowCount", "TraceId", "Schema", diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto index e72932e187..0a7c7c79c0 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto @@ -131,11 +131,11 @@ message ReadSession { } // Optional. Specifies a table sampling percentage. Specifically, the query - // planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). This - // samples at the file-level. It will randomly choose for each file whether - // to include that file in the sample returned. Note, that if the table only - // has one file, then TABLESAMPLE SYSTEM will select that file and return - // all returnable rows contained within. + // planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The + // sampling percentage is applied at the data block granularity. It will + // randomly choose for each data block whether to read the rows in that data + // block. For more details, see + // https://cloud.google.com/bigquery/docs/table-sampling) optional double sample_percentage = 5 [(google.api.field_behavior) = OPTIONAL]; } @@ -194,6 +194,14 @@ message ReadSession { int64 estimated_total_bytes_scanned = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Output only. A pre-projected estimate of the total physical size (in bytes) + // of files this session will scan when all streams are completely consumed. + // This estimate does not depend on the selected columns and can be based on + // metadata from the table which might be incomplete or stale. Only set for + // BigLake tables. + int64 estimated_total_physical_file_size = 15 + [(google.api.field_behavior) = OUTPUT_ONLY]; + // Output only. An estimate on the number of rows present in this session's // streams. This estimate is based on metadata from the table which might be // incomplete or stale.