Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Adding support for databoost enabled in PartitionedRead and PartitionedQuery #2316

Merged
merged 10 commits into from
Mar 20, 2023
Original file line number Diff line number Diff line change
Expand Up @@ -595,6 +595,9 @@ ExecuteSqlRequest.Builder getExecuteSqlRequestBuilder(
builder.setTransaction(selector);
}
}
if (options.hasDataBoostEnabled()) {
builder.setDataBoostEnabled(options.dataBoostEnabled());
}
builder.setSeqno(getSeqNo());
builder.setQueryOptions(buildQueryOptions(statement.getQueryOptions()));
builder.setRequestOptions(buildRequestOptions(options));
Expand Down Expand Up @@ -773,6 +776,9 @@ ResultSet readInternalWithOptions(
if (partitionToken != null) {
builder.setPartitionToken(partitionToken);
}
if (readOptions.hasDataBoostEnabled()) {
builder.setDataBoostEnabled(readOptions.dataBoostEnabled());
}
final int prefetchChunks =
readOptions.hasPrefetchChunks() ? readOptions.prefetchChunks() : defaultPrefetchChunks;
ResumableStreamIterator stream =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

package com.google.cloud.spanner;

import com.google.api.core.BetaApi;
import com.google.common.base.Preconditions;
import com.google.spanner.v1.RequestOptions.Priority;
import java.io.Serializable;
Expand Down Expand Up @@ -146,6 +147,15 @@ public static ListOption pageSize(int pageSize) {
return new PageSizeOption(pageSize);
}

/**
* If this is for a partitioned read & query and this field is set to `true`, the request will be
* executed via Spanner independent compute resources.
*/
@BetaApi
gauravpurohit06 marked this conversation as resolved.
Show resolved Hide resolved
public static DataBoostQueryOption dataBoostEnabled(Boolean dataBoostEnabled) {
return new DataBoostQueryOption(dataBoostEnabled);
}

/**
* Specifying this will cause the list operation to start fetching the record from this onwards.
*/
Expand Down Expand Up @@ -321,6 +331,7 @@ void appendToOptions(Options options) {
private String etag;
private Boolean validateOnly;
private Boolean withOptimisticLock;
private Boolean dataBoostEnabled;

// Construction is via factory methods below.
private Options() {}
Expand Down Expand Up @@ -413,6 +424,14 @@ Boolean withOptimisticLock() {
return withOptimisticLock;
}

/** Returns {@code true} if a value has been set for the dataBoostEnabled option. */
boolean hasDataBoostEnabled() {
return dataBoostEnabled != null;
}

/** Returns the dataBoostEnabled option value, or {@code null} if it was never set. */
Boolean dataBoostEnabled() {
return dataBoostEnabled;
}

@Override
public String toString() {
StringBuilder b = new StringBuilder();
Expand Down Expand Up @@ -440,6 +459,9 @@ public String toString() {
if (tag != null) {
b.append("tag: ").append(tag).append(' ');
}
if (dataBoostEnabled != null) {
b.append("dataBoostEnabled: ").append(dataBoostEnabled).append(' ');
}
if (etag != null) {
b.append("etag: ").append(etag).append(' ');
}
Expand Down Expand Up @@ -482,6 +504,7 @@ public boolean equals(Object o) {
&& Objects.equals(priority(), that.priority())
&& Objects.equals(tag(), that.tag())
&& Objects.equals(etag(), that.etag())
&& Objects.equals(dataBoostEnabled(), that.dataBoostEnabled())
&& Objects.equals(validateOnly(), that.validateOnly())
&& Objects.equals(withOptimisticLock(), that.withOptimisticLock());
}
Expand Down Expand Up @@ -525,6 +548,9 @@ public int hashCode() {
if (withOptimisticLock != null) {
result = 31 * result + withOptimisticLock.hashCode();
}
if (dataBoostEnabled != null) {
result = 31 * result + dataBoostEnabled.hashCode();
}
return result;
}

Expand Down Expand Up @@ -605,6 +631,20 @@ void appendToOptions(Options options) {
}
}

/**
 * Internal carrier for the dataBoostEnabled flag. Usable on both partitioned reads and
 * partitioned queries; writes its value into {@link Options} when applied.
 */
static final class DataBoostQueryOption extends InternalOption implements ReadAndQueryOption {

  /** The flag to copy into {@link Options}; may be {@code null} (treated as "not set"). */
  private final Boolean dataBoostEnabled;

  DataBoostQueryOption(Boolean dataBoostEnabled) {
    this.dataBoostEnabled = dataBoostEnabled;
  }

  @Override
  void appendToOptions(Options options) {
    options.dataBoostEnabled = this.dataBoostEnabled;
  }
}

static class PageSizeOption extends InternalOption implements ListOption {
private final int pageSize;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
import com.google.cloud.spanner.IntegrationTestEnv;
import com.google.cloud.spanner.KeySet;
import com.google.cloud.spanner.Mutation;
import com.google.cloud.spanner.Options;
import com.google.cloud.spanner.ParallelIntegrationTest;
import com.google.cloud.spanner.Partition;
import com.google.cloud.spanner.PartitionOptions;
Expand Down Expand Up @@ -238,6 +239,27 @@ public void readUsingIndex() {
assertThat(numRowsRead).isEqualTo(numRows);
}

/** Verifies that a partitioned read completes successfully when Data Boost is enabled. */
@Test
public void dataBoostRead() {
  assumeFalse(
      "PostgreSQL does not support the PartitionRead RPC", dialect.dialect == Dialect.POSTGRESQL);
  assumeFalse("Emulator does not support data boost read", isUsingEmulator());

  final BitSet rowsSeen = new BitSet(numRows);
  final TimestampBound timestampBound = getRandomBound();
  final PartitionOptions partitionOptions = getRandomPartitionOptions();
  batchTxn = getBatchClient().batchReadOnlyTransaction(timestampBound);
  final List<Partition> partitions =
      batchTxn.partitionRead(
          partitionOptions,
          TABLE_NAME,
          KeySet.all(),
          Arrays.asList("Key", "Data", "Fingerprint", "Size"),
          Options.dataBoostEnabled(true));
  // Reading every partition must yield each row exactly once.
  fetchAndValidateRows(partitions, batchTxn.getBatchTransactionId(), rowsSeen);
}

@After
public void tearDown() {
if (batchTxn != null) {
Expand Down Expand Up @@ -273,6 +295,22 @@ private PartitionOptions getRandomPartitionOptions() {
return parameters;
}

/** Verifies that a partitioned query completes successfully when Data Boost is enabled. */
@Test
public void dataBoostQuery() {
  assumeFalse("Emulator does not support data boost query", isUsingEmulator());

  final BitSet rowsSeen = new BitSet(numRows);
  final TimestampBound timestampBound = getRandomBound();
  final PartitionOptions partitionOptions = getRandomPartitionOptions();
  batchTxn = getBatchClient().batchReadOnlyTransaction(timestampBound);
  final List<Partition> partitions =
      batchTxn.partitionQuery(
          partitionOptions,
          Statement.of("SELECT Key, Data, Fingerprint, Size FROM " + TABLE_NAME),
          Options.dataBoostEnabled(true));
  // Running every partition must yield each row exactly once.
  fetchAndValidateRows(partitions, batchTxn.getBatchTransactionId(), rowsSeen);
}

private TimestampBound getRandomBound() {
Date date = new Date();
switch (RANDOM.nextInt(3)) {
Expand Down