diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
index 9044cdbb36ff..e71a1d3b2744 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
@@ -76,7 +76,7 @@ public BufferedMutatorParams operationTimeout(final int operationTimeout) {
}
/**
- * @deprecated Since 2.3.0, will be removed in 4.0.0. Use {@link #operationTimeout()}
+ * @deprecated Since 2.3.0, will be removed in 4.0.0. Use {@link #operationTimeout(int)}
*/
@Deprecated
public BufferedMutatorParams opertationTimeout(final int operationTimeout) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
index 4981d62a605b..d87014428c3b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
@@ -431,9 +431,10 @@ private void waitForRegion() throws InterruptedIOException {
* regions. 3) check the total concurrent tasks. 4) check the concurrent
* tasks for server.
*
- * @param loc
- * @param heapSizeOfRow
- * @return either Include {@link ReturnCode} or Skip {@link ReturnCode}
+ * @param loc the destination of data
+ * @param heapSizeOfRow the data size
+ * @return either Include {@link RequestController.ReturnCode} or skip
+ * {@link RequestController.ReturnCode}
*/
@Override
public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java
index 4fa825e5f457..872065bb8a69 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java
@@ -41,63 +41,18 @@ public SnapshotDescription(String name) {
this(name, (TableName)null);
}
- /**
- * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName
- * instance instead.
- * @see #SnapshotDescription(String, TableName)
- * @see HBASE-16892
- */
- @Deprecated
- public SnapshotDescription(String name, String table) {
- this(name, TableName.valueOf(table));
- }
-
public SnapshotDescription(String name, TableName table) {
this(name, table, SnapshotType.DISABLED, null, -1, -1, null);
}
- /**
- * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName
- * instance instead.
- * @see #SnapshotDescription(String, TableName, SnapshotType)
- * @see HBASE-16892
- */
- @Deprecated
- public SnapshotDescription(String name, String table, SnapshotType type) {
- this(name, TableName.valueOf(table), type);
- }
-
public SnapshotDescription(String name, TableName table, SnapshotType type) {
this(name, table, type, null, -1, -1, null);
}
- /**
- * @see #SnapshotDescription(String, TableName, SnapshotType, String)
- * @see HBASE-16892
- * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName
- * instance instead.
- */
- @Deprecated
- public SnapshotDescription(String name, String table, SnapshotType type, String owner) {
- this(name, TableName.valueOf(table), type, owner);
- }
-
public SnapshotDescription(String name, TableName table, SnapshotType type, String owner) {
this(name, table, type, owner, -1, -1, null);
}
- /**
- * @see #SnapshotDescription(String, TableName, SnapshotType, String, long, int, Map)
- * @see HBASE-16892
- * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName
- * instance instead.
- */
- @Deprecated
- public SnapshotDescription(String name, String table, SnapshotType type, String owner,
- long creationTime, int version) {
- this(name, TableName.valueOf(table), type, owner, creationTime, version, null);
- }
-
/**
* SnapshotDescription Parameterized Constructor
*
@@ -141,18 +96,6 @@ public String getName() {
return this.name;
}
- /**
- * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getTableName()} or
- * {@link #getTableNameAsString()} instead.
- * @see #getTableName()
- * @see #getTableNameAsString()
- * @see HBASE-16892
- */
- @Deprecated
- public String getTable() {
- return getTableNameAsString();
- }
-
public String getTableNameAsString() {
return this.table.getNameAsString();
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index def977446267..41b0e47c0758 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -176,9 +176,9 @@ default Result get(Get get) throws IOException {
* @throws IOException if a remote or network exception occurs.
* @since 0.90.0
* @apiNote {@link #put(List)} runs pre-flight validations on the input list on client.
- * Currently {@link #get(List)} doesn't run any validations on the client-side, currently there
- * is no need, but this may change in the future. An
- * {@link IllegalArgumentException} will be thrown in this case.
+ * Currently {@link #get(List)} doesn't run any validations on the client-side; there is
+ * currently no need, but this may change in the future, in which case an
+ * {@link IllegalArgumentException} will be thrown.
*/
default Result[] get(List<Get> gets) throws IOException {
throw new NotImplementedException("Add an implementation!");
@@ -284,10 +284,10 @@ default void delete(Delete delete) throws IOException {
* that have not be successfully applied.
* @since 0.20.1
* @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
- * {@link #put(List)} runs pre-flight validations on the input list on client. Currently
- * {@link #delete(List)} doesn't run validations on the client, there is no need currently,
- * but this may change in the future. An * {@link IllegalArgumentException} will be thrown
- * in this case.
+ * {@link #put(List)} runs pre-flight validations on the input list on the client. Currently
+ * {@link #delete(List)} doesn't run validations on the client; there is no need
+ * currently, but this may change in the future, in which case an
+ * {@link IllegalArgumentException} will be thrown.
*/
default void delete(List<Delete> deletes) throws IOException {
throw new NotImplementedException("Add an implementation!");
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java
index 82b005d018c1..c3defda5b4d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java
@@ -39,7 +39,7 @@ public abstract class Batch {
* the implementations {@link Batch.Call#call(Object)} method will be invoked
* with a proxy to each region's coprocessor {@link com.google.protobuf.Service} implementation.
*
- * @see org.apache.hadoop.hbase.client.coprocessor
+ * @see org.apache.hadoop.hbase.client.coprocessor.Batch
* @see org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])
* @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[],
* org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
index 6b1e251953b9..1975297dd218 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
@@ -32,7 +32,6 @@
import org.apache.hadoop.hbase.CallDroppedException;
import org.apache.hadoop.hbase.CallQueueTooBigException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.MultiActionResultTooLarge;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.RetryImmediatelyException;
@@ -59,18 +58,23 @@ public static boolean isMetaClearingException(Throwable cur) {
if (cur == null) {
return true;
}
- return !isSpecialException(cur) || (cur instanceof RegionMovedException)
- || cur instanceof NotServingRegionException;
+ return !regionDefinitelyOnTheRegionServerException(cur);
}
- public static boolean isSpecialException(Throwable cur) {
- return (cur instanceof RegionMovedException || cur instanceof RegionOpeningException
- || cur instanceof RegionTooBusyException || cur instanceof RpcThrottlingException
- || cur instanceof MultiActionResultTooLarge || cur instanceof RetryImmediatelyException
- || cur instanceof CallQueueTooBigException || cur instanceof CallDroppedException
- || cur instanceof NotServingRegionException || cur instanceof RequestTooBigException);
+ private static boolean regionDefinitelyOnTheRegionServerException(Throwable t) {
+ return (t instanceof RegionTooBusyException || t instanceof RpcThrottlingException
+ || t instanceof RetryImmediatelyException || t instanceof CallQueueTooBigException
+ || t instanceof CallDroppedException || t instanceof NotServingRegionException
+ || t instanceof RequestTooBigException);
}
+ /**
+ * An alias of regionDefinitelyOnTheRegionServerException(), whose name would read
+ * confusingly inside findException().
+ */
+ private static boolean matchExceptionWeCare(Throwable t) {
+ return regionDefinitelyOnTheRegionServerException(t);
+ }
/**
* Look for an exception we know in the remote exception:
@@ -87,7 +91,7 @@ public static Throwable findException(Object exception) {
}
Throwable cur = (Throwable) exception;
while (cur != null) {
- if (isSpecialException(cur)) {
+ if (matchExceptionWeCare(cur)) {
return cur;
}
if (cur instanceof RemoteException) {
@@ -95,7 +99,7 @@ public static Throwable findException(Object exception) {
cur = re.unwrapRemoteException();
// unwrapRemoteException can return the exception given as a parameter when it cannot
- // unwrap it. In this case, there is no need to look further
+ // unwrap it. In this case, there is no need to look further
// noinspection ObjectEquality
if (cur == re) {
return cur;
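
For context on how the two public helpers are meant to work together after this rename, the sketch below (not part of the patch) shows the intended call pattern: a client clears its cached region location only when the failure does not prove the region is still on that region server. The LocationCache interface and clearCachedLocation method are hypothetical placeholders; the real logic lives in the client's meta cache.

    import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;

    public final class MetaCacheExample {
      /** Hypothetical stand-in for the client's region location cache. */
      interface LocationCache {
        void clearCachedLocation(byte[] regionName);
      }

      static void onRpcFailure(LocationCache cache, byte[] regionName, Throwable error) {
        // findException() unwraps RemoteException/causes and returns the first exception the
        // matcher cares about, or null when nothing recognizable is found.
        Throwable known = ClientExceptionsUtil.findException(error);
        if (ClientExceptionsUtil.isMetaClearingException(known)) {
          // Nothing proves the region is still on that server, so drop the cached location.
          cache.clearCachedLocation(regionName);
        }
        // Otherwise the region is definitely still on the region server (e.g. it is merely
        // busy or throttled), and the cached location can be kept.
      }
    }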
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
index c04c3f55aaef..8d72f7b99877 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
@@ -19,6 +19,7 @@
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
import java.util.List;
@@ -450,10 +451,39 @@ public byte[] toBytes() {
*/
public abstract int read(ReadableByteChannel channel) throws IOException;
+ /**
+ * Reads bytes from the given FileChannel into this ByteBuff, starting at the given file
+ * offset, and returns the number of bytes read.
+ */
+ public abstract int read(FileChannel channel, long offset) throws IOException;
+
+ /**
+ * Writes this ByteBuff's remaining data into the target file, starting at the given file
+ * offset, and returns the number of bytes written.
+ */
+ public abstract int write(FileChannel channel, long offset) throws IOException;
+
+ /**
+ * Functional interface abstracting a (possibly positional) read from a channel.
+ */
+ @FunctionalInterface
+ interface ChannelReader {
+ int read(ReadableByteChannel channel, ByteBuffer buf, long offset) throws IOException;
+ }
+
+ static final ChannelReader CHANNEL_READER = (channel, buf, offset) -> {
+ return channel.read(buf);
+ };
+
+ static final ChannelReader FILE_READER = (channel, buf, offset) -> {
+ return ((FileChannel)channel).read(buf, offset);
+ };
+
// static helper methods
- public static int channelRead(ReadableByteChannel channel, ByteBuffer buf) throws IOException {
+ public static int read(ReadableByteChannel channel, ByteBuffer buf, long offset,
+ ChannelReader reader) throws IOException {
if (buf.remaining() <= NIO_BUFFER_LIMIT) {
- return channel.read(buf);
+ int res = reader.read(channel, buf, offset);
+ buf.rewind();
+ return res;
}
int originalLimit = buf.limit();
int initialRemaining = buf.remaining();
@@ -463,12 +493,14 @@ public static int channelRead(ReadableByteChannel channel, ByteBuffer buf) throw
try {
int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
buf.limit(buf.position() + ioSize);
- ret = channel.read(buf);
+ offset += ret;
+ ret = reader.read(channel, buf, offset);
if (ret < ioSize) {
break;
}
} finally {
buf.limit(originalLimit);
+ buf.rewind();
}
}
int nBytes = initialRemaining - buf.remaining();
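
A brief usage sketch of the new positional API may help here (it is not part of the patch): a ByteBuff can now be written to and re-read from a FileChannel at an explicit offset, without disturbing the channel's own position. The temporary file path is arbitrary, and allocator/refcount handling is omitted.

    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;
    import org.apache.hadoop.hbase.nio.ByteBuff;
    import org.apache.hadoop.hbase.nio.SingleByteBuff;

    public final class PositionalIoExample {
      public static void main(String[] args) throws Exception {
        try (FileChannel channel = FileChannel.open(Paths.get("/tmp/bucketcache.example"),
            StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
          ByteBuff src = new SingleByteBuff(ByteBuffer.wrap("hello bucket cache".getBytes("UTF-8")));
          // Write the buffer's remaining bytes starting at file offset 128.
          int written = src.write(channel, 128L);

          // Read them back from the same offset; the channel's own position is left untouched.
          ByteBuff dst = new SingleByteBuff(ByteBuffer.allocate(written));
          int read = dst.read(channel, 128L);
          System.out.println("wrote " + written + " bytes, read back " + read + " bytes");
        }
      }
    }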
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
index 3ce170903974..7e55188d3a6f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
@@ -24,6 +24,7 @@
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.InvalidMarkException;
+import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler;
@@ -1064,21 +1065,22 @@ public byte[] toBytes(int offset, int length) {
return output;
}
- @Override
- public int read(ReadableByteChannel channel) throws IOException {
+ private int internalRead(ReadableByteChannel channel, long offset,
+ ChannelReader reader) throws IOException {
checkRefCount();
int total = 0;
while (true) {
- // Read max possible into the current BB
- int len = channelRead(channel, this.curItem);
- if (len > 0)
+ int len = read(channel, this.curItem, offset, reader);
+ if (len > 0) {
total += len;
+ offset += len;
+ }
if (this.curItem.hasRemaining()) {
- // We were not able to read enough to fill the current BB itself. Means there is no point in
- // doing more reads from Channel. Only this much there for now.
break;
} else {
- if (this.curItemIndex >= this.limitedItemIndex) break;
+ if (this.curItemIndex >= this.limitedItemIndex) {
+ break;
+ }
this.curItemIndex++;
this.curItem = this.items[this.curItemIndex];
}
@@ -1086,6 +1088,35 @@ public int read(ReadableByteChannel channel) throws IOException {
return total;
}
+ @Override
+ public int read(ReadableByteChannel channel) throws IOException {
+ return internalRead(channel, 0, CHANNEL_READER);
+ }
+
+ @Override
+ public int read(FileChannel channel, long offset) throws IOException {
+ return internalRead(channel, offset, FILE_READER);
+ }
+
+ @Override
+ public int write(FileChannel channel, long offset) throws IOException {
+ checkRefCount();
+ int total = 0;
+ while (true) {
+ while (curItem.hasRemaining()) {
+ int len = channel.write(curItem, offset);
+ total += len;
+ offset += len;
+ }
+ if (this.curItemIndex >= this.limitedItemIndex) {
+ break;
+ }
+ this.curItemIndex++;
+ this.curItem = this.items[this.curItemIndex];
+ }
+ return total;
+ }
+
@Override
public ByteBuffer[] nioByteBuffers() {
checkRefCount();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java
index 36a83a0ec212..797bfdc1fff5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java
@@ -21,6 +21,7 @@
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler;
@@ -371,7 +372,25 @@ public void get(ByteBuffer out, int sourceOffset, int length) {
@Override
public int read(ReadableByteChannel channel) throws IOException {
checkRefCount();
- return channelRead(channel, buf);
+ return read(channel, buf, 0, CHANNEL_READER);
+ }
+
+ @Override
+ public int read(FileChannel channel, long offset) throws IOException {
+ checkRefCount();
+ return read(channel, buf, offset, FILE_READER);
+ }
+
+ @Override
+ public int write(FileChannel channel, long offset) throws IOException {
+ checkRefCount();
+ int total = 0;
+ while(buf.hasRemaining()) {
+ int len = channel.write(buf, offset);
+ total += len;
+ offset += len;
+ }
+ return total;
}
@Override
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
index fcf6f552b7ac..a55784729c0b 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
@@ -53,21 +53,6 @@ protected static class TableRecordWriter implements RecordWriter<ImmutableBytesWritable, Put> {
- * @see HBASE-16774
- */
- @Deprecated
- public TableRecordWriter(final BufferedMutator mutator) throws IOException {
- this.m_mutator = mutator;
- this.conn = null;
- }
-
/**
* Instantiate a TableRecordWriter with a BufferedMutator for batch writing.
*/
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 1ae6a2a6e2e3..c963c6c18ae2 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -48,7 +48,6 @@
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
@@ -117,19 +116,6 @@ public TableInfo(TableDescriptor tableDesctiptor, RegionLocator regionLocator) {
this.regionLocator = regionLocator;
}
- /**
- * The modification for the returned HTD doesn't affect the inner TD.
- * @return A clone of inner table descriptor
- * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getTableDescriptor()}
- * instead.
- * @see #getTableDescriptor()
- * @see HBASE-18241
- */
- @Deprecated
- public HTableDescriptor getHTableDescriptor() {
- return new HTableDescriptor(tableDesctiptor);
- }
-
public TableDescriptor getTableDescriptor() {
return tableDesctiptor;
}
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index 6907a8817cc5..4993feea223e 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -267,9 +267,10 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException
return new Path(outputArchive, path);
}
+ @SuppressWarnings("checkstyle:linelength")
/**
* Used by TestExportSnapshot to test for retries when failures happen.
- * Failure is injected in {@link #copyFile(Context, SnapshotFileInfo, Path)}.
+ * Failure is injected in {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}.
*/
private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo)
throws IOException {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 40f1a08f32da..60127a665243 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -787,7 +787,7 @@ private void runIncrementalPELoad(Configuration conf, List
$(document).ready(function()
{
- $("#baseStatsTable").tablesorter();
- $("#memoryStatsTable").tablesorter();
- $("#requestStatsTable").tablesorter();
- $("#storeStatsTable").tablesorter();
- $("#compactionStatsTable").tablesorter();
+ $.tablesorter.addParser(
+ {
+ id: 'filesize',
+ is: function(s) {
+ return s.match(new RegExp( /([\.0-9]+)\ (B|KB|MB|GB|TB)/ ));
+ },
+ format: function(s) {
+ var suf = s.match(new RegExp( /(KB|B|GB|MB|TB)$/ ))[1];
+ var num = parseFloat(s.match( new RegExp( /([\.0-9]+)\ (B|KB|MB|GB|TB)/ ))[0]);
+ switch(suf) {
+ case 'B':
+ return num;
+ case 'KB':
+ return num * 1024;
+ case 'MB':
+ return num * 1024 * 1024;
+ case 'GB':
+ return num * 1024 * 1024 * 1024;
+ case 'TB':
+ return num * 1024 * 1024 * 1024 * 1024;
+ }
+ },
+ type: 'numeric'
+ });
+ $.tablesorter.addParser(
+ {
+ id: "separator",
+ is: function (s) {
+ return /^[0-9]?[0-9,]*$/.test(s);
+ }, format: function (s) {
+ return $.tablesorter.formatFloat( s.replace(/,/g,'') );
+ }, type: "numeric"
+ });
+ $("#baseStatsTable").tablesorter({
+ headers: {
+ 4: {sorter: 'separator'},
+ 5: {sorter: 'separator'}
+ }
+ });
+ $("#memoryStatsTable").tablesorter({
+ headers: {
+ 1: {sorter: 'filesize'},
+ 2: {sorter: 'filesize'},
+ 3: {sorter: 'filesize'}
+ }
+ });
+ $("#requestStatsTable").tablesorter({
+ headers: {
+ 1: {sorter: 'separator'},
+ 2: {sorter: 'separator'},
+ 3: {sorter: 'separator'},
+ 4: {sorter: 'separator'}
+ }
+ });
+ $("#storeStatsTable").tablesorter({
+ headers: {
+ 1: {sorter: 'separator'},
+ 2: {sorter: 'separator'},
+ 3: {sorter: 'filesize'},
+ 4: {sorter: 'filesize'},
+ 5: {sorter: 'filesize'},
+ 6: {sorter: 'filesize'}
+ }
+ });
+ $("#compactionStatsTable").tablesorter({
+ headers: {
+ 1: {sorter: 'separator'},
+ 2: {sorter: 'separator'},
+ 3: {sorter: 'separator'}
+ }
+ });
$("#userTables").tablesorter();
}
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
index 0afc705c9ed2..1346ce81a775 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
@@ -125,8 +125,8 @@ Arrays.sort(serverNames);
</%if>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index e517405d4c20..8761d6b1d9dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -1104,4 +1104,16 @@ default DeleteTracker postInstantiateDeleteTracker(
throws IOException {
return delTracker;
}
+
+ /**
+ * Called just before the WAL Entry is appended to the WAL. Implementing this hook allows
+ * coprocessors to add extended attributes to the WALKey that then get persisted to the
+ * WAL, and are available to replication endpoints to use in processing WAL Entries.
+ * @param ctx the environment provided by the region server
+ * @param key the WALKey associated with a particular append to a WAL
+ */
+ default void preWALAppend(ObserverContext ctx, WALKey key,
+ WALEdit edit)
+ throws IOException {
+ }
}
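
To illustrate how the new hook is intended to be consumed, here is a hedged sketch of a coprocessor (not part of the patch) that tags every WAL entry just before it is appended. It assumes the companion WALKey#addExtendedAttribute(String, byte[]) API is available to observers; the attribute name and value are arbitrary examples.

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.wal.WALEdit;
    import org.apache.hadoop.hbase.wal.WALKey;

    public class WALAnnotationObserver implements RegionCoprocessor, RegionObserver {
      @Override
      public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
      }

      @Override
      public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> ctx, WALKey key,
          WALEdit edit) throws IOException {
        // The attribute is persisted with the WAL entry and is visible to replication
        // endpoints that process this entry downstream.
        key.addExtendedAttribute("sourceCluster", Bytes.toBytes("cluster-a"));
      }
    }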
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 82767e963c74..1928845774ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -502,8 +502,10 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
// block will use the refCnt of bucketEntry, which means if two HFileBlock mapping to
// the same BucketEntry, then all of the three will share the same refCnt.
Cacheable cachedBlock = ioEngine.read(bucketEntry);
- // RPC start to reference, so retain here.
- cachedBlock.retain();
+ if (ioEngine.usesSharedMemory()) {
+ // RPC start to reference, so retain here.
+ cachedBlock.retain();
+ }
// Update the cache statistics.
if (updateCacheMetrics) {
cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
@@ -689,7 +691,7 @@ private void freeEntireBuckets(int completelyFreeBucketsNeeded) {
// this set is small around O(Handler Count) unless something else is wrong
Set<Integer> inUseBuckets = new HashSet<>();
backingMap.forEach((k, be) -> {
- if (be.isRpcRef()) {
+ if (ioEngine.usesSharedMemory() && be.isRpcRef()) {
inUseBuckets.add(bucketAllocator.getBucketIndex(be.offset()));
}
});
@@ -1529,11 +1531,11 @@ static class RAMCache {
/**
* Defined the map as {@link ConcurrentHashMap} explicitly here, because in
* {@link RAMCache#get(BlockCacheKey)} and
- * {@link RAMCache#putIfAbsent(BlockCacheKey, RAMQueueEntry)} , we need to guarantee the
- * atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). Besides, the
- * func method can execute exactly once only when the key is present(or absent) and under the
- * lock context. Otherwise, the reference count of block will be messed up. Notice that the
- * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that.
+ * {@link RAMCache#putIfAbsent(BlockCacheKey, BucketCache.RAMQueueEntry)} , we need to
+ * guarantee the atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func).
+ * Besides, the func method can execute exactly once only when the key is present (or absent)
+ * and under the lock context. Otherwise, the reference count of block will be messed up.
+ * Notice that the {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that.
*/
final ConcurrentHashMap<BlockCacheKey, RAMQueueEntry> delegate = new ConcurrentHashMap<>();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java
index ca41ecafb9d1..2dd77756e585 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java
@@ -80,7 +80,7 @@ class BucketEntry implements HBaseReferenceCounted {
*/
private final RefCnt refCnt;
final AtomicBoolean markedAsEvicted;
- private final ByteBuffAllocator allocator;
+ final ByteBuffAllocator allocator;
/**
* Time this block was cached. Presumes we are created just before we are added to the cache.
@@ -194,7 +194,10 @@ boolean isRpcRef() {
}
Cacheable wrapAsCacheable(ByteBuffer[] buffers) throws IOException {
- ByteBuff buf = ByteBuff.wrap(buffers, this.refCnt);
+ return wrapAsCacheable(ByteBuff.wrap(buffers, this.refCnt));
+ }
+
+ Cacheable wrapAsCacheable(ByteBuff buf) throws IOException {
return this.deserializerReference().deserialize(buf, allocator);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
index 3d7f2b1f3bdb..3169a66539aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hbase.io.hfile.bucket;
import java.io.IOException;
-import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.nio.ByteBuff;
@@ -35,9 +34,9 @@ public ExclusiveMemoryMmapIOEngine(String filePath, long capacity) throws IOExce
@Override
public Cacheable read(BucketEntry be) throws IOException {
- ByteBuff dst = ByteBuff.wrap(ByteBuffer.allocate(be.getLength()));
+ ByteBuff dst = be.allocator.allocate(be.getLength());
bufferArray.read(be.offset(), dst);
dst.position(0).limit(be.getLength());
- return be.wrapAsCacheable(dst.nioByteBuffers());
+ return be.wrapAsCacheable(dst);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index b3afe482a02a..cef4aa0cdfdd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -129,7 +129,7 @@ public Cacheable read(BucketEntry be) throws IOException {
long offset = be.offset();
int length = be.getLength();
Preconditions.checkArgument(length >= 0, "Length of read can not be less than 0.");
- ByteBuffer dstBuffer = ByteBuffer.allocate(length);
+ ByteBuff dstBuffer = be.allocator.allocate(length);
if (length != 0) {
accessFile(readAccessor, dstBuffer, offset);
// The buffer created out of the fileChannel is formed by copying the data from the file
@@ -142,7 +142,7 @@ public Cacheable read(BucketEntry be) throws IOException {
}
}
dstBuffer.rewind();
- return be.wrapAsCacheable(new ByteBuffer[] { dstBuffer });
+ return be.wrapAsCacheable(dstBuffer);
}
@VisibleForTesting
@@ -164,10 +164,7 @@ void closeFileChannels() {
*/
@Override
public void write(ByteBuffer srcBuffer, long offset) throws IOException {
- if (!srcBuffer.hasRemaining()) {
- return;
- }
- accessFile(writeAccessor, srcBuffer, offset);
+ write(ByteBuff.wrap(srcBuffer), offset);
}
/**
@@ -209,11 +206,13 @@ public void shutdown() {
@Override
public void write(ByteBuff srcBuffer, long offset) throws IOException {
- ByteBuffer dup = srcBuffer.asSubByteBuffer(srcBuffer.remaining()).duplicate();
- write(dup, offset);
+ if (!srcBuffer.hasRemaining()) {
+ return;
+ }
+ accessFile(writeAccessor, srcBuffer, offset);
}
- private void accessFile(FileAccessor accessor, ByteBuffer buffer,
+ private void accessFile(FileAccessor accessor, ByteBuff buffer,
long globalOffset) throws IOException {
int startFileNum = getFileNum(globalOffset);
int remainingAccessDataLen = buffer.remaining();
@@ -304,23 +303,23 @@ void refreshFileConnection(int accessFileNum, IOException ioe) throws IOExceptio
}
private interface FileAccessor {
- int access(FileChannel fileChannel, ByteBuffer byteBuffer, long accessOffset)
+ int access(FileChannel fileChannel, ByteBuff byteBuffer, long accessOffset)
throws IOException;
}
private static class FileReadAccessor implements FileAccessor {
@Override
- public int access(FileChannel fileChannel, ByteBuffer byteBuffer,
+ public int access(FileChannel fileChannel, ByteBuff byteBuffer,
long accessOffset) throws IOException {
- return fileChannel.read(byteBuffer, accessOffset);
+ return byteBuffer.read(fileChannel, accessOffset);
}
}
private static class FileWriteAccessor implements FileAccessor {
@Override
- public int access(FileChannel fileChannel, ByteBuffer byteBuffer,
+ public int access(FileChannel fileChannel, ByteBuff byteBuffer,
long accessOffset) throws IOException {
- return fileChannel.write(byteBuffer, accessOffset);
+ return byteBuffer.write(fileChannel, accessOffset);
}
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 25b7191d387e..81e582576faa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -672,6 +672,10 @@ private void checkServer(byte [] metaTableRow, RegionLocations locations) {
if (sn == null) {
continue;
}
+ // Skip offline regions that belong to a disabled table.
+ if (isTableDisabled(location.getRegion())) {
+ continue;
+ }
ServerManager.ServerLiveState state = this.services.getServerManager().
isServerKnownAndOnline(sn);
switch (state) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java
index 69a8d536dcf2..5bb4e9524fc4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HbckRegionInfo;
@@ -60,6 +61,8 @@ public class HbckChore extends ScheduledChore {
*/
private final Map<String, HbckRegionInfo> regionInfoMap = new HashMap<>();
+ private final Set<String> disabledTableRegions = new HashSet<>();
+
/**
* The regions only opened on RegionServers, but no region info in meta.
*/
@@ -67,7 +70,7 @@ public class HbckChore extends ScheduledChore {
/**
* The regions have directory on FileSystem, but no region info in meta.
*/
- private final Set<String> orphanRegionsOnFS = new HashSet<>();
+ private final Map<String, Path> orphanRegionsOnFS = new HashMap<>();
/**
* The inconsistent regions. There are three case:
* case 1. Master thought this region opened, but no regionserver reported it.
@@ -81,7 +84,7 @@ public class HbckChore extends ScheduledChore {
* The "snapshot" is used to save the last round's HBCK checking report.
*/
private final Map<String, ServerName> orphanRegionsOnRSSnapshot = new HashMap<>();
- private final Set<String> orphanRegionsOnFSSnapshot = new HashSet<>();
+ private final Map<String, Path> orphanRegionsOnFSSnapshot = new HashMap<>();
private final Map<String, Pair<ServerName, List<ServerName>>> inconsistentRegionsSnapshot =
new HashMap<>();
@@ -98,16 +101,29 @@ public class HbckChore extends ScheduledChore {
private volatile long checkingStartTimestamp = 0;
private volatile long checkingEndTimestamp = 0;
+ private boolean disabled = false;
+
public HbckChore(MasterServices master) {
super("HbckChore-", master,
master.getConfiguration().getInt(HBCK_CHORE_INTERVAL, DEFAULT_HBCK_CHORE_INTERVAL));
this.master = master;
+ int interval =
+ master.getConfiguration().getInt(HBCK_CHORE_INTERVAL, DEFAULT_HBCK_CHORE_INTERVAL);
+ if (interval <= 0) {
+ LOG.warn(HBCK_CHORE_INTERVAL + " is <=0 hence disabling hbck chore");
+ disableChore();
+ }
}
@Override
protected synchronized void chore() {
+ if (isDisabled() || isRunning()) {
+ LOG.warn("hbckChore is either disabled or is already running. Can't run the chore");
+ return;
+ }
running = true;
regionInfoMap.clear();
+ disabledTableRegions.clear();
orphanRegionsOnRS.clear();
orphanRegionsOnFS.clear();
inconsistentRegions.clear();
@@ -123,6 +139,29 @@ protected synchronized void chore() {
running = false;
}
+ // This method performs sanity checks so the chore is not run while it is disabled or
+ // already running. It returns whether the chore actually ran.
+ protected boolean runChore() {
+ if (isDisabled() || isRunning()) {
+ if (isDisabled()) {
+ LOG.warn("hbck chore is disabled! Set " + HBCK_CHORE_INTERVAL + " > 0 to enable it.");
+ } else {
+ LOG.warn("hbck chore already running. Can't run till it finishes.");
+ }
+ return false;
+ }
+ chore();
+ return true;
+ }
+
+ private void disableChore() {
+ this.disabled = true;
+ }
+
+ public boolean isDisabled() {
+ return this.disabled;
+ }
+
private void saveCheckResultToSnapshot() {
// Need synchronized here, as this "snapshot" may be access by web ui.
rwLock.writeLock().lock();
@@ -131,7 +170,8 @@ private void saveCheckResultToSnapshot() {
orphanRegionsOnRS.entrySet()
.forEach(e -> orphanRegionsOnRSSnapshot.put(e.getKey(), e.getValue()));
orphanRegionsOnFSSnapshot.clear();
- orphanRegionsOnFSSnapshot.addAll(orphanRegionsOnFS);
+ orphanRegionsOnFS.entrySet()
+ .forEach(e -> orphanRegionsOnFSSnapshot.put(e.getKey(), e.getValue()));
inconsistentRegionsSnapshot.clear();
inconsistentRegions.entrySet()
.forEach(e -> inconsistentRegionsSnapshot.put(e.getKey(), e.getValue()));
@@ -146,6 +186,10 @@ private void loadRegionsFromInMemoryState() {
master.getAssignmentManager().getRegionStates().getRegionStates();
for (RegionState regionState : regionStates) {
RegionInfo regionInfo = regionState.getRegion();
+ if (master.getTableStateManager()
+ .isTableState(regionInfo.getTable(), TableState.State.DISABLED)) {
+ disabledTableRegions.add(regionInfo.getEncodedName());
+ }
HbckRegionInfo.MetaEntry metaEntry =
new HbckRegionInfo.MetaEntry(regionInfo, regionState.getServerName(),
regionState.getStamp());
@@ -178,6 +222,11 @@ private void loadRegionsFromRSReport() {
HbckRegionInfo hri = entry.getValue();
ServerName locationInMeta = hri.getMetaEntry().getRegionServer();
if (hri.getDeployedOn().size() == 0) {
+ // Because the set of inconsistent regions is only best-effort, skip just the offline
+ // regions that belong to a disabled table.
+ if (disabledTableRegions.contains(encodedRegionName)) {
+ continue;
+ }
// Master thought this region opened, but no regionserver reported it.
inconsistentRegions.put(encodedRegionName, new Pair<>(locationInMeta, new LinkedList<>()));
} else if (hri.getDeployedOn().size() > 1) {
@@ -202,7 +251,7 @@ private void loadRegionsFromFS() throws IOException {
String encodedRegionName = regionDir.getName();
HbckRegionInfo hri = regionInfoMap.get(encodedRegionName);
if (hri == null) {
- orphanRegionsOnFS.add(encodedRegionName);
+ orphanRegionsOnFS.put(encodedRegionName, regionDir);
continue;
}
HbckRegionInfo.HdfsEntry hdfsEntry = new HbckRegionInfo.HdfsEntry(regionDir);
@@ -237,7 +286,7 @@ public Map<String, ServerName> getOrphanRegionsOnRS() {
/**
* @return the regions have directory on FileSystem, but no region info in meta.
*/
- public Set<String> getOrphanRegionsOnFS() {
+ public Map<String, Path> getOrphanRegionsOnFS() {
// Need synchronized here, as this "snapshot" may be changed after checking.
rwLock.readLock().lock();
try {
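
The disable logic above keys off the chore interval. The sketch below (not part of the patch) shows the resulting behaviour from test code placed in the same package, which is needed because runChore() is protected; it assumes the HBCK_CHORE_INTERVAL constant is declared on HbckChore, as the constructor suggests, and "master" stands for whatever MasterServices instance a test already has.

    package org.apache.hadoop.hbase.master;

    import org.apache.hadoop.conf.Configuration;

    public final class DisabledHbckChoreExample {
      static void demonstrate(MasterServices master) {
        Configuration conf = master.getConfiguration();
        conf.setInt(HbckChore.HBCK_CHORE_INTERVAL, 0); // an interval <= 0 disables the chore
        HbckChore chore = new HbckChore(master);
        assert chore.isDisabled();
        assert !chore.runChore(); // refuses to run: logs a warning and returns false
      }
    }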
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index e55a39dc784a..c8caea76d871 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -2376,11 +2376,7 @@ public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest re
rpcPreCheck("runHbckChore");
LOG.info("{} request HBCK chore to run", master.getClientIdAuditPrefix());
HbckChore hbckChore = master.getHbckChore();
- boolean ran = false;
- if (!hbckChore.isRunning()) {
- hbckChore.chore();
- ran = true;
- }
+ boolean ran = hbckChore.runChore();
return RunHbckChoreResponse.newBuilder().setRan(ran).build();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java
index 7dbdbee42ac5..12eb0a0fe4ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java
@@ -99,7 +99,7 @@ public AssignmentProcedureEvent(final RegionInfo regionInfo) {
/**
* Updated whenever a call to {@link #setRegionLocation(ServerName)} or
- * {@link #setState(State, State...)}.
+ * {@link #setState(RegionState.State, RegionState.State...)}.
*/
private volatile long lastUpdate = 0;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 94d7785c9083..b3340fa73835 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -71,6 +71,8 @@
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
+import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclCleaner;
+import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@@ -1123,6 +1125,10 @@ private void checkSnapshotSupport(final Configuration conf, final MasterFileSyst
// Inject snapshot cleaners, if snapshot.enable is true
hfileCleaners.add(SnapshotHFileCleaner.class.getName());
hfileCleaners.add(HFileLinkCleaner.class.getName());
+ // If the sync-acl-to-HDFS feature is enabled, inject the cleaner
+ if (SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf)) {
+ hfileCleaners.add(SnapshotScannerHDFSAclCleaner.class.getName());
+ }
// Set cleaners conf
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9f31522d7644..92a6145d9cd1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -4047,7 +4047,7 @@ public OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqI
* a batch are stored with highest durability specified of for all operations in a batch,
* except for {@link Durability#SKIP_WAL}.
*
- * <p>This function is called from {@link #batchReplay(MutationReplay[], long)} with
+ * <p>This function is called from {@link #batchReplay(WALSplitUtil.MutationReplay[], long)} with
* {@link ReplayBatchOperation} instance and {@link #batchMutate(Mutation[], long, long)} with
* {@link MutationBatchOperation} instance as an argument. As the processing of replay batch
* and mutation batch is very similar, lot of code is shared by providing generic methods in
@@ -4058,7 +4058,7 @@ public OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqI
* @param batchOp contains the list of mutations
* @return an array of OperationStatus which internally contains the
* OperationStatusCode and the exceptionMessage if any.
- * @throws IOException
+ * @throws IOException if an IO problem is encountered
*/
OperationStatus[] batchMutate(BatchOperation<?> batchOp) throws IOException {
boolean initialized = false;
@@ -7964,6 +7964,11 @@ private WriteEntry doWALAppend(WALEdit walEdit, Durability durability, List> call(RegionObserver observer) throws IOException {
});
}
+ public void preWALAppend(WALKey key, WALEdit edit) throws IOException {
+ if (this.coprocEnvironments.isEmpty()){
+ return;
+ }
+ execOperation(new RegionObserverOperationWithoutResult() {
+ @Override
+ public void call(RegionObserver observer) throws IOException {
+ observer.preWALAppend(this, key, edit);
+ }
+ });
+ }
+
public Message preEndpointInvocation(final Service service, final String methodName,
Message request) throws IOException {
if (coprocEnvironments.isEmpty()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 0d2f461a3ea6..a7ac45a377c8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -95,15 +95,6 @@ public interface StoreFile {
*/
long getMaxSequenceId();
- /**
- * Get the modification time of this store file. Usually will access the file system so throws
- * IOException.
- * @deprecated Since 2.0.0. Will be removed in 3.0.0.
- * @see #getModificationTimestamp()
- */
- @Deprecated
- long getModificationTimeStamp() throws IOException;
-
/**
* Get the modification time of this store file. Usually will access the file system so throws
* IOException.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 93e79c5fd11c..44363fa70749 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -1089,7 +1089,8 @@ public void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
throws IOException {
User user = getActiveUser(ctx);
if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)
- && hTableDescriptor.getTableName().getNameAsString().equals(snapshot.getTable())) {
+ && hTableDescriptor.getTableName().getNameAsString()
+ .equals(snapshot.getTableNameAsString())) {
// Snapshot owner is allowed to create a table with the same name as the snapshot he took
AuthResult result = AuthResult.allow("cloneSnapshot " + snapshot.getName(),
"Snapshot owner check allowed", user, null, hTableDescriptor.getTableName(), null);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java
index 86c663dfdb32..6bf4c1fdee3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java
@@ -27,7 +27,6 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.apache.yetus.audience.InterfaceAudience;
@@ -59,7 +58,7 @@ public void init(Map params) {
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
- userScanSnapshotEnabled = isUserScanSnapshotEnabled(conf);
+ userScanSnapshotEnabled = SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf);
}
@Override
@@ -82,13 +81,6 @@ public boolean isEmptyDirDeletable(Path dir) {
return true;
}
- private boolean isUserScanSnapshotEnabled(Configuration conf) {
- String masterCoprocessors = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
- return conf.getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)
- && masterCoprocessors.contains(SnapshotScannerHDFSAclController.class.getName())
- && masterCoprocessors.contains(AccessController.class.getName());
- }
-
private boolean isEmptyArchiveDirDeletable(Path dir) {
try {
if (isArchiveDataDir(dir)) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java
index f6d5b767e492..82e3430b9c74 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java
@@ -119,7 +119,7 @@ public Optional getMasterObserver() {
public void preMasterInitialization(ObserverContext c)
throws IOException {
if (c.getEnvironment().getConfiguration()
- .getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)) {
+ .getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)) {
MasterCoprocessorEnvironment mEnv = c.getEnvironment();
if (!(mEnv instanceof HasMasterServices)) {
throw new IOException("Does not implement HMasterServices");
@@ -133,7 +133,7 @@ public void preMasterInitialization(ObserverContext c,
TableName tableName) throws IOException {
if (needHandleTableHdfsAcl(tableName, "truncateTable " + tableName)) {
- // Since the table directories is recreated, so add HDFS acls again
+ // 1. create tmp table directories
+ hdfsAclHelper.createTableDirectories(tableName);
+ // 2. Since the table directories are recreated, add HDFS acls again
Set users = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false);
hdfsAclHelper.addTableAcl(tableName, users, "truncate");
}
@@ -233,9 +235,11 @@ public void postDeleteTable(ObserverContext ctx,
try (Table aclTable =
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
Set users = SnapshotScannerHDFSAclStorage.getTableUsers(aclTable, tableName);
- // 1. Delete table owner permission is synced to HDFS in acl table
+ // 1. Remove table archive directory default ACLs
+ hdfsAclHelper.removeTableDefaultAcl(tableName, users);
+ // 2. Delete table owner permission is synced to HDFS in acl table
SnapshotScannerHDFSAclStorage.deleteTableHdfsAcl(aclTable, tableName);
- // 2. Remove namespace access acls
+ // 3. Remove namespace access acls
Set removeUsers = filterUsersToRemoveNsAccessAcl(aclTable, tableName, users);
if (removeUsers.size() > 0) {
hdfsAclHelper.removeNamespaceAccessAcl(tableName, removeUsers, "delete");
@@ -251,7 +255,7 @@ public void postModifyTable(ObserverContext ctx,
try (Table aclTable =
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
if (needHandleTableHdfsAcl(currentDescriptor, "modifyTable " + tableName)
- && !hdfsAclHelper.isTableUserScanSnapshotEnabled(oldDescriptor)) {
+ && !hdfsAclHelper.isAclSyncToHdfsEnabled(oldDescriptor)) {
// 1. Create table directories used for acl inherited
hdfsAclHelper.createTableDirectories(tableName);
// 2. Add table users HDFS acls
@@ -264,7 +268,7 @@ public void postModifyTable(ObserverContext ctx,
SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(ctx.getEnvironment().getConnection(),
tableUsers, tableName);
} else if (needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName)
- && !hdfsAclHelper.isTableUserScanSnapshotEnabled(currentDescriptor)) {
+ && !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor)) {
// 1. Remove empty table directories
List tableRootPaths = hdfsAclHelper.getTableRootPaths(tableName, false);
for (Path path : tableRootPaths) {
@@ -290,17 +294,24 @@ public void postModifyTable(ObserverContext ctx,
public void postDeleteNamespace(ObserverContext ctx,
String namespace) throws IOException {
if (checkInitialized("deleteNamespace " + namespace)) {
- // 1. Record namespace user acl is not synced to HDFS
- SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(ctx.getEnvironment().getConnection(),
- namespace);
- // 2. Delete tmp namespace directory
- /**
- * Delete namespace tmp directory because it's created by this coprocessor when namespace is
- * created to make namespace default acl can be inherited by tables. The namespace data
- * directory is deleted by DeleteNamespaceProcedure, the namespace archive directory is
- * deleted by HFileCleaner.
- */
- hdfsAclHelper.deleteEmptyDir(pathHelper.getTmpNsDir(namespace));
+ try (Table aclTable =
+ ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
+ // 1. Delete namespace archive dir default ACLs
+ Set<String> users = SnapshotScannerHDFSAclStorage.getEntryUsers(aclTable,
+ PermissionStorage.toNamespaceEntry(Bytes.toBytes(namespace)));
+ hdfsAclHelper.removeNamespaceDefaultAcl(namespace, users);
+ // 2. Record namespace user acl is not synced to HDFS
+ SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(ctx.getEnvironment().getConnection(),
+ namespace);
+ // 3. Delete tmp namespace directory
+ /**
+ * Delete the namespace tmp directory, because it was created by this coprocessor when the
+ * namespace was created so that the namespace default acl can be inherited by tables. The
+ * namespace data directory is deleted by DeleteNamespaceProcedure, and the namespace
+ * archive directory is deleted by HFileCleaner.
+ */
+ hdfsAclHelper.deleteEmptyDir(pathHelper.getTmpNsDir(namespace));
+ }
}
}
@@ -364,7 +375,9 @@ public void postGrant(ObserverContext c,
UserPermission tPerm = getUserTablePermission(conf, userName, tableName);
if (tPerm != null && hdfsAclHelper.containReadAction(tPerm)) {
if (!isHdfsAclSet(aclTable, userName, tableName)) {
- // 1. Add HDFS acl
+ // 1. create table dirs
+ hdfsAclHelper.createTableDirectories(tableName);
+ // 2. Add HDFS acl
hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), new HashSet<>(0));
}
// 2. Record table acl is synced to HDFS
@@ -547,13 +560,13 @@ private boolean needHandleTableHdfsAcl(TablePermission tablePermission) throws I
private boolean needHandleTableHdfsAcl(TableName tableName, String operation) throws IOException {
return !tableName.isSystemTable() && checkInitialized(operation) && hdfsAclHelper
- .isTableUserScanSnapshotEnabled(masterServices.getTableDescriptors().get(tableName));
+ .isAclSyncToHdfsEnabled(masterServices.getTableDescriptors().get(tableName));
}
private boolean needHandleTableHdfsAcl(TableDescriptor tableDescriptor, String operation) {
TableName tableName = tableDescriptor.getTableName();
return !tableName.isSystemTable() && checkInitialized(operation)
- && hdfsAclHelper.isTableUserScanSnapshotEnabled(tableDescriptor);
+ && hdfsAclHelper.isAclSyncToHdfsEnabled(tableDescriptor);
}
private User getActiveUser(ObserverContext> ctx) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
index 60d91558ed51..6cf1916efaec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
@@ -28,6 +28,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -53,6 +54,7 @@
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -71,23 +73,23 @@
public class SnapshotScannerHDFSAclHelper implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);
- public static final String USER_SCAN_SNAPSHOT_ENABLE = "hbase.user.scan.snapshot.enable";
- public static final String USER_SCAN_SNAPSHOT_THREAD_NUMBER =
- "hbase.user.scan.snapshot.thread.number";
+ public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable";
+ public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER =
+ "hbase.acl.sync.to.hdfs.thread.number";
// The tmp directory to restore snapshot, it can not be a sub directory of HBase root dir
public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
"/hbase/.tmpdir-to-restore-snapshot";
// The default permission of the common directories if the feature is enabled.
public static final String COMMON_DIRECTORY_PERMISSION =
- "hbase.user.scan.snapshot.common.directory.permission";
+ "hbase.acl.sync.to.hdfs.common.directory.permission";
// The secure HBase permission is 700, 751 means all others have execute access and the mask is
// set to read-execute to make the extended access ACL entries can work. Be cautious to set
// this value.
public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
// The default permission of the snapshot restore directories if the feature is enabled.
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
- "hbase.user.scan.snapshot.restore.directory.permission";
+ "hbase.acl.sync.to.hdfs.restore.directory.permission";
// 753 means all others have write-execute access.
public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";
@@ -102,7 +104,7 @@ public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection conn
this.conf = configuration;
this.pathHelper = new PathHelper(conf);
this.fs = pathHelper.getFileSystem();
- this.pool = Executors.newFixedThreadPool(conf.getInt(USER_SCAN_SNAPSHOT_THREAD_NUMBER, 10),
+ this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10),
new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
this.admin = connection.getAdmin();
}
@@ -230,6 +232,50 @@ public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeUsers,
}
}
+ /**
+ * Remove the default acl from the namespace archive dir when deleting a namespace
+ * @param namespace the namespace
+ * @param removeUsers the users whose default acl will be removed
+ * @return false if an error occurred, otherwise true
+ */
+ public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
+ try {
+ long start = System.currentTimeMillis();
+ Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
+ HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
+ HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
+ operation.handleAcl();
+ LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
+ System.currentTimeMillis() - start);
+ return true;
+ } catch (Exception e) {
+ LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
+ return false;
+ }
+ }
+
+ /**
+ * Remove the default acl from the table archive dir when deleting a table
+ * @param tableName the table name
+ * @param removeUsers the users whose default acl will be removed
+ * @return false if an error occurred, otherwise true
+ */
+ public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
+ try {
+ long start = System.currentTimeMillis();
+ Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
+ HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
+ HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
+ operation.handleAcl();
+ LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
+ System.currentTimeMillis() - start);
+ return true;
+ } catch (Exception e) {
+ LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
+ return false;
+ }
+ }
+
/**
* Add table user acls
* @param tableName the table
@@ -349,7 +395,7 @@ private void handleNamespaceAcl(Set<String> namespaces, Set<String> users,
Set<TableName> tables = new HashSet<>();
for (String namespace : namespaces) {
tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
- .filter(this::isTableUserScanSnapshotEnabled).map(TableDescriptor::getTableName)
+ .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName)
.collect(Collectors.toSet()));
}
handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
@@ -403,7 +449,7 @@ void createTableDirectories(TableName tableName) throws IOException {
* return the paths that users with global permission will visit
* @return the path list
*/
- private List<Path> getGlobalRootPaths() {
+ List<Path> getGlobalRootPaths() {
return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
}
@@ -511,9 +557,20 @@ boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) {
return !tablePermission.hasFamily() && !tablePermission.hasQualifier();
}
- boolean isTableUserScanSnapshotEnabled(TableDescriptor tableDescriptor) {
+ public static boolean isAclSyncToHdfsEnabled(Configuration conf) {
+ String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
+ Set<String> masterCoprocessorSet = new HashSet<>();
+ if (masterCoprocessors != null) {
+ Collections.addAll(masterCoprocessorSet, masterCoprocessors);
+ }
+ return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)
+ && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName())
+ && masterCoprocessorSet.contains(AccessController.class.getName());
+ }
+
+ boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) {
return tableDescriptor == null ? false
- : Boolean.valueOf(tableDescriptor.getValue(USER_SCAN_SNAPSHOT_ENABLE));
+ : Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE));
}
PathHelper getPathHelper() {
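
For orientation, here is a minimal sketch (not part of the patch) of how the renamed switch interacts with the new static check above. The coprocessor registration shown is an assumption about the deployment; the class and config names come from this file.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
    import org.apache.hadoop.hbase.security.access.AccessController;
    import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController;
    import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper;

    public class AclSyncCheckSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Enable the renamed flag and register both master coprocessors that the
        // new static check requires; this registration is illustrative only.
        conf.setBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, true);
        conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
          SnapshotScannerHDFSAclController.class.getName() + ","
            + AccessController.class.getName());
        // True only when the flag is set and both coprocessors are present.
        System.out.println(SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf));
      }
    }
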
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
index abf1fd831f9d..46a0669e5c52 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
@@ -453,7 +453,8 @@ private boolean checkHFilesCountPerRegionPerFamily(
}
/**
- * @param table the table to load into
+ * @param conn the HBase cluster connection
+ * @param tableName the table name of the table to load into
* @param pool the ExecutorService
* @param queue the queue for LoadQueueItem
* @param startEndKeys start and end keys
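
The @param fix above belongs to an internal grouping step of the bulk load tool. As a hedged illustration of the public entry point it feeds, here is a sketch; the table name and HFile directory are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

    public class BulkLoadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder table and directory of prepared HFiles; bulkLoad() groups the
        // files against region boundaries internally via the method documented above.
        BulkLoadHFiles.create(conf)
          .bulkLoad(TableName.valueOf("my_table"), new Path("/tmp/hfiles-to-load"));
      }
    }
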
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
index c541cc0a80d6..fdbacbda2779 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
@@ -34,7 +34,6 @@
/**
* Key for WAL Entry.
- * Read-only. No Setters. For limited audience such as Coprocessors.
*/
@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.REPLICATION,
HBaseInterfaceAudience.COPROC})
@@ -86,6 +85,13 @@ default long getNonce() {
*/
long getOrigLogSeqNum();
+ /**
+ * Add a named String value to this WALKey to be persisted into the WAL
+ * @param attributeKey Name of the attribute
+ * @param attributeValue Value of the attribute
+ */
+ void addExtendedAttribute(String attributeKey, byte[] attributeValue);
+
/**
* Return a named String value injected into the WALKey during processing, such as by a
* coprocessor
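
A short sketch of how a coprocessor holding a WALKey might pair the new addExtendedAttribute with the existing getter; the attribute name and value are invented for illustration.

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.wal.WALKey;

    public class WALKeyAttributeSketch {
      // Hypothetical helper: tags a system-generated WALKey so the annotation is
      // persisted with the WAL entry, then reads it back through the existing getter.
      static byte[] tagAndRead(WALKey walKey) {
        walKey.addExtendedAttribute("source-cluster", Bytes.toBytes("cluster-a"));
        return walKey.getExtendedAttribute("source-cluster");
      }
    }
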
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
index fc84d8e24526..33e034342d7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
@@ -195,6 +195,37 @@ public WALKeyImpl(final byte[] encodedRegionName,
mvcc, null, null);
}
+ /**
+ * Copy constructor that takes in an existing WALKeyImpl plus some extended attributes.
+ * Intended for coprocessors to add annotations to a system-generated WALKey
+ * for persistence to the WAL.
+ * @param key Key to be copied into this new key
+ * @param extendedAttributes Extra attributes to copy into the new key
+ */
+ public WALKeyImpl(WALKeyImpl key,
+ Map<String, byte[]> extendedAttributes){
+ init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(),
+ key.getWriteTime(), key.getClusterIds(), key.getNonceGroup(), key.getNonce(),
+ key.getMvcc(), key.getReplicationScopes(), extendedAttributes);
+
+ }
+
+ /**
+ * Copy constructor that takes in an existing WALKey, the extra WALKeyImpl fields that the
+ * parent interface is missing, plus some extended attributes. Intended
+ * for coprocessors to add annotations to a system-generated WALKey for
+ * persistence to the WAL.
+ */
+ public WALKeyImpl(WALKey key,
+ List<UUID> clusterIds,
+ MultiVersionConcurrencyControl mvcc,
+ final NavigableMap<byte[], Integer> replicationScopes,
+ Map<String, byte[]> extendedAttributes){
+ init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(),
+ key.getWriteTime(), clusterIds, key.getNonceGroup(), key.getNonce(),
+ mvcc, replicationScopes, extendedAttributes);
+
+ }
/**
* Create the log key for writing to somewhere.
* We maintain the tablename mainly for debugging purposes.
@@ -464,6 +495,14 @@ public UUID getOriginatingClusterId(){
return clusterIds.isEmpty()? HConstants.DEFAULT_CLUSTER_ID: clusterIds.get(0);
}
+ @Override
+ public void addExtendedAttribute(String attributeKey, byte[] attributeValue){
+ if (extendedAttributes == null){
+ extendedAttributes = new HashMap<String, byte[]>();
+ }
+ extendedAttributes.put(attributeKey, attributeValue);
+ }
+
@Override
public byte[] getExtendedAttribute(String attributeKey){
return extendedAttributes != null ? extendedAttributes.get(attributeKey) : null;
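
A hedged sketch of the new copy constructor in use; the original key and the attribute contents are assumptions made for the example.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.wal.WALKeyImpl;

    public class WALKeyCopySketch {
      // Hypothetical helper: copies a system-generated key and attaches extended
      // attributes so they are written out with the WAL entry.
      static WALKeyImpl annotate(WALKeyImpl original) {
        Map<String, byte[]> attrs = new HashMap<>();
        attrs.put("origin", Bytes.toBytes("coproc-x"));
        return new WALKeyImpl(original, attrs);
      }
    }
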
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp
index 183740bb4e20..a003e5fe096f 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp
@@ -23,11 +23,11 @@
import="java.util.Date"
import="java.util.List"
import="java.util.Map"
- import="java.util.Set"
import="java.util.stream.Collectors"
import="java.time.ZonedDateTime"
import="java.time.format.DateTimeFormatter"
%>
+<%@ page import="org.apache.hadoop.fs.Path" %>
<%@ page import="org.apache.hadoop.hbase.client.RegionInfo" %>
<%@ page import="org.apache.hadoop.hbase.master.HbckChore" %>
<%@ page import="org.apache.hadoop.hbase.master.HMaster" %>
@@ -42,7 +42,7 @@
HbckChore hbckChore = master.getHbckChore();
Map<String, Pair<ServerName, List<ServerName>>> inconsistentRegions = null;
Map<String, ServerName> orphanRegionsOnRS = null;
- Set<String> orphanRegionsOnFS = null;
+ Map<String, Path> orphanRegionsOnFS = null;
long startTimestamp = 0;
long endTimestamp = 0;
if (hbckChore != null) {
@@ -80,7 +80,11 @@
HBCK Chore Report
+ <% if (hbckChore.isDisabled()) { %>
+ HBCK chore is currently disabled. Set hbase.master.hbck.chore.interval > 0 in the config & do a rolling-restart to enable it.
+ <% } else { %>
Checking started at <%= iso8601start %> and generated report at <%= iso8601end %>. Execute 'hbck_chore_run' in hbase shell to generate a new sub-report.
+ <% } %>
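
The banner above points operators at a single knob; here is a sketch of setting it programmatically, assuming the interval is expressed in milliseconds (the one-hour value is only an example).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HbckChoreIntervalSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per the new message, a value <= 0 leaves the HBCK chore disabled; any positive
        // interval (assumed milliseconds) enables it after a rolling restart of the master.
        conf.setInt("hbase.master.hbck.chore.interval", 60 * 60 * 1000);
      }
    }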