Skip to content

Commit

Permalink
HBASE-26090 Remove the deprecated methods in Scan which should be rem…
Browse files Browse the repository at this point in the history
…oved in 3.0.0 (#3493)

Signed-off-by: Yi Mei <myimeiyi@gmail.com>
Signed-off-by: Anoop Sam John <anoopsamjohn@apache.org>
  • Loading branch information
Apache9 authored Jul 16, 2021
1 parent 3666357 commit 83d1bf1
Show file tree
Hide file tree
Showing 10 changed files with 38 additions and 132 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -63,12 +63,6 @@ public Scan setTimeRange(long minStamp, long maxStamp) {
throw new UnsupportedOperationException("ImmutableScan does not allow access to setTimeRange");
}

@Deprecated
@Override
public Scan setTimeStamp(long timestamp) {
throw new UnsupportedOperationException("ImmutableScan does not allow access to setTimeStamp");
}

@Override
public Scan setTimestamp(long timestamp) {
throw new UnsupportedOperationException("ImmutableScan does not allow access to setTimestamp");
Expand Down Expand Up @@ -183,12 +177,6 @@ public Scan setRaw(boolean raw) {
throw new UnsupportedOperationException("ImmutableScan does not allow access to setRaw");
}

@Override
@Deprecated
public Scan setSmall(boolean small) {
throw new UnsupportedOperationException("ImmutableScan does not allow access to setSmall");
}

@Override
public Scan setAttribute(String name, byte[] value) {
throw new UnsupportedOperationException("ImmutableScan does not allow access to setAttribute");
Expand Down Expand Up @@ -412,12 +400,6 @@ public boolean isRaw() {
return this.delegateScan.isRaw();
}

@Override
@Deprecated
public boolean isSmall() {
return this.delegateScan.isSmall();
}

@Override
public boolean isScanMetricsEnabled() {
return this.delegateScan.isScanMetricsEnabled();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -144,17 +144,6 @@ public class Scan extends Query {
*/
public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;

/**
* Set it true for small scan to get better performance Small scan should use pread and big scan
can use seek + read. seek + read is fast but can cause two problems: (1) resource contention and (2)
too much network io. [89-fb] Using pread for non-compaction read request
* https://issues.apache.org/jira/browse/HBASE-7266 On the other hand, if setting it true, we
* would do openScanner,next,closeScanner in one RPC call. It means the better performance for
* small scan. [HBASE-9488]. Generally, if the scan range is within one data block(64KB), it could
* be considered as a small scan.
*/
private boolean small = false;

/**
* The mvcc read point to use when open a scanner. Remember to clear it after switching regions as
* the mvcc is only valid within region scope.
Expand Down Expand Up @@ -203,7 +192,6 @@ public Scan(Scan scan) throws IOException {
this.setIsolationLevel(scan.getIsolationLevel());
reversed = scan.isReversed();
asyncPrefetch = scan.isAsyncPrefetch();
small = scan.isSmall();
allowPartialResults = scan.getAllowPartialResults();
tr = scan.getTimeRange(); // TimeRange is immutable
Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
Expand Down Expand Up @@ -320,24 +308,6 @@ public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
return this;
}

/**
* Get versions of columns with the specified timestamp. Note, default maximum
* versions to return is 1. If your time range spans more than one version
* and you want all versions returned, up the number of versions beyond the
default.
* @param timestamp version timestamp
* @see #readAllVersions()
* @see #readVersions(int)
* @return this
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
* Use {@link #setTimestamp(long)} instead
*/
@Deprecated
public Scan setTimeStamp(long timestamp)
throws IOException {
return this.setTimestamp(timestamp);
}

/**
* Get versions of columns with the specified timestamp. Note, default maximum
* versions to return is 1. If your time range spans more than one version
Expand Down Expand Up @@ -874,45 +844,6 @@ public boolean isRaw() {
return attr == null ? false : Bytes.toBoolean(attr);
}

/**
* Set whether this scan is a small scan
* <p>
* Small scan should use pread, and big scan can use seek + read. seek + read is fast but can cause
* two problems: (1) resource contention and (2) too much network io. [89-fb] Using pread for
* non-compaction read request https://issues.apache.org/jira/browse/HBASE-7266 On the other hand,
* if setting it true, we would do openScanner,next,closeScanner in one RPC call. It means the
* better performance for small scan. [HBASE-9488]. Generally, if the scan range is within one
* data block(64KB), it could be considered as a small scan.
* @param small
* @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #setLimit(int)} and
* {@link #setReadType(ReadType)} instead. And for the one rpc optimization, now we will also
* fetch data when openScanner, and if the number of rows reaches the limit then we will close
* the scanner automatically which means we will fall back to one rpc.
* @see #setLimit(int)
* @see #setReadType(ReadType)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
*/
@Deprecated
public Scan setSmall(boolean small) {
this.small = small;
if (small) {
this.readType = ReadType.PREAD;
}
return this;
}

/**
* Get whether this scan is a small scan
* @return true if small scan
* @deprecated since 2.0.0 and will be removed in 3.0.0. See the comment of
* {@link #setSmall(boolean)}
* @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
*/
@Deprecated
public boolean isSmall() {
return small;
}

@Override
public Scan setAttribute(String name, byte[] value) {
return (Scan) super.setAttribute(name, value);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1026,9 +1026,6 @@ public static ClientProtos.Scan toScan(
if (scan.getMaxResultSize() > 0) {
scanBuilder.setMaxResultSize(scan.getMaxResultSize());
}
if (scan.isSmall()) {
scanBuilder.setSmall(scan.isSmall());
}
if (scan.getAllowPartialResults()) {
scanBuilder.setAllowPartialResults(scan.getAllowPartialResults());
}
Expand Down Expand Up @@ -1179,9 +1176,6 @@ public static Scan toScan(
if (proto.hasMaxResultSize()) {
scan.setMaxResultSize(proto.getMaxResultSize());
}
if (proto.hasSmall()) {
scan.setSmall(proto.getSmall());
}
if (proto.hasAllowPartialResults()) {
scan.setAllowPartialResults(proto.getAllowPartialResults());
}
Expand Down Expand Up @@ -1212,9 +1206,7 @@ public static Scan toScan(
if (proto.hasMvccReadPoint()) {
PackagePrivateFieldAccessor.setMvccReadPoint(scan, proto.getMvccReadPoint());
}
if (scan.isSmall()) {
scan.setReadType(Scan.ReadType.PREAD);
} else if (proto.hasReadType()) {
if (proto.hasReadType()) {
scan.setReadType(toReadType(proto.getReadType()));
}
if (proto.getNeedCursorResult()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@

package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
Expand All @@ -39,8 +42,6 @@
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

/**
* Small tests for ImmutableScan
Expand Down Expand Up @@ -85,7 +86,6 @@ public void testScanCopyConstructor() throws Exception {
.setRowOffsetPerColumnFamily(5)
.setRowPrefixFilter(Bytes.toBytes("row_"))
.setScanMetricsEnabled(true)
.setSmall(true)
.setReadType(Scan.ReadType.STREAM)
.withStartRow(Bytes.toBytes("row_1"))
.withStopRow(Bytes.toBytes("row_2"))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,12 @@
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Arrays;
import java.util.Set;

import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
Expand All @@ -40,6 +38,7 @@
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

Expand Down Expand Up @@ -249,7 +248,6 @@ public void testScanCopyConstructor() throws Exception {
.setRowOffsetPerColumnFamily(5)
.setRowPrefixFilter(Bytes.toBytes("row_"))
.setScanMetricsEnabled(true)
.setSmall(true)
.setReadType(ReadType.STREAM)
.withStartRow(Bytes.toBytes("row_1"))
.withStopRow(Bytes.toBytes("row_2"))
Expand Down Expand Up @@ -297,11 +295,8 @@ public void testScanCopyConstructor() throws Exception {
@Test
public void testScanReadType() throws Exception {
Scan scan = new Scan();
assertFalse(scan.isSmall());
assertEquals(ReadType.DEFAULT, scan.getReadType());
Scan copyScan = new Scan(scan);
copyScan.setSmall(scan.isSmall());
assertFalse(copyScan.isSmall());
assertEquals(ReadType.DEFAULT, copyScan.getReadType());
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,7 @@
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Scan.ReadType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
Expand Down Expand Up @@ -4234,7 +4235,7 @@ public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {

public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
Scan scan = new Scan().withStartRow(row);
scan.setSmall(true);
scan.setReadType(ReadType.PREAD);
scan.setCaching(1);
scan.setReversed(true);
scan.addFamily(family);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Scan.ReadType;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
import org.apache.hadoop.hbase.filter.BinaryComparator;
Expand Down Expand Up @@ -237,7 +238,7 @@ public void testGetClosestRowBefore() throws IOException, InterruptedException {

private Result getReverseScanResult(Table table, byte[] row) throws IOException {
Scan scan = new Scan().withStartRow(row);
scan.setSmall(true);
scan.setReadType(ReadType.PREAD);
scan.setReversed(true);
scan.setCaching(1);
scan.addFamily(HConstants.CATALOG_FAMILY);
Expand Down Expand Up @@ -1336,7 +1337,7 @@ public void testScanMetrics() throws Exception {
scan2 = new Scan();
scan2.setScanMetricsEnabled(true);
scan2.setCaching(1);
scan2.setSmall(true);
scan2.setReadType(ReadType.PREAD);
try (ResultScanner scanner = ht.getScanner(scan2)) {
int numBytes = 0;
for (Result result : scanner) {
Expand Down Expand Up @@ -1908,7 +1909,7 @@ public void testSmallScan() throws Exception {
// small scan
Scan scan = new Scan().withStartRow(HConstants.EMPTY_START_ROW)
.withStopRow(HConstants.EMPTY_END_ROW, true);
scan.setSmall(true);
scan.setReadType(ReadType.PREAD);
scan.setCaching(2);
try (ResultScanner scanner = table.getScanner(scan)) {
int count = 0;
Expand Down Expand Up @@ -2433,12 +2434,13 @@ public void testSmallReversedScanUnderMultiRegions() throws Exception {
assertEquals(12, count);
}

reverseScanTest(table, false);
reverseScanTest(table, true);
reverseScanTest(table, ReadType.STREAM);
reverseScanTest(table, ReadType.PREAD);
reverseScanTest(table, ReadType.DEFAULT);
}
}

private void reverseScanTest(Table table, boolean small) throws IOException {
private void reverseScanTest(Table table, ReadType readType) throws IOException {
// scan backward
Scan scan = new Scan();
scan.setReversed(true);
Expand All @@ -2460,7 +2462,7 @@ private void reverseScanTest(Table table, boolean small) throws IOException {
}

scan = new Scan();
scan.setSmall(small);
scan.setReadType(readType);
scan.setReversed(true);
scan.withStartRow(Bytes.toBytes("002"));
try (ResultScanner scanner = table.getScanner(scan)) {
Expand All @@ -2481,7 +2483,7 @@ private void reverseScanTest(Table table, boolean small) throws IOException {
}

scan = new Scan();
scan.setSmall(small);
scan.setReadType(readType);
scan.setReversed(true);
scan.withStartRow(Bytes.toBytes("002"));
scan.withStopRow(Bytes.toBytes("000"));
Expand All @@ -2503,7 +2505,7 @@ private void reverseScanTest(Table table, boolean small) throws IOException {
}

scan = new Scan();
scan.setSmall(small);
scan.setReadType(readType);
scan.setReversed(true);
scan.withStartRow(Bytes.toBytes("001"));
try (ResultScanner scanner = table.getScanner(scan)) {
Expand All @@ -2524,7 +2526,7 @@ private void reverseScanTest(Table table, boolean small) throws IOException {
}

scan = new Scan();
scan.setSmall(small);
scan.setReadType(readType);
scan.setReversed(true);
scan.withStartRow(Bytes.toBytes("000"));
try (ResultScanner scanner = table.getScanner(scan)) {
Expand All @@ -2545,7 +2547,7 @@ private void reverseScanTest(Table table, boolean small) throws IOException {
}

scan = new Scan();
scan.setSmall(small);
scan.setReadType(readType);
scan.setReversed(true);
scan.withStartRow(Bytes.toBytes("006"));
scan.withStopRow(Bytes.toBytes("002"));
Expand Down
Loading

0 comments on commit 83d1bf1

Please sign in to comment.