diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java index 9044cdbb36ff..e71a1d3b2744 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java @@ -76,7 +76,7 @@ public BufferedMutatorParams operationTimeout(final int operationTimeout) { } /** - * @deprecated Since 2.3.0, will be removed in 4.0.0. Use {@link #operationTimeout()} + * @deprecated Since 2.3.0, will be removed in 4.0.0. Use {@link #operationTimeout(int)} */ @Deprecated public BufferedMutatorParams opertationTimeout(final int operationTimeout) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java index 4981d62a605b..d87014428c3b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java @@ -431,9 +431,10 @@ private void waitForRegion() throws InterruptedIOException { * regions. 3) check the total concurrent tasks. 4) check the concurrent * tasks for server. * - * @param loc - * @param heapSizeOfRow - * @return either Include {@link ReturnCode} or Skip {@link ReturnCode} + * @param loc the destination of data + * @param heapSizeOfRow the data size + * @return either Include {@link RequestController.ReturnCode} or skip + * {@link RequestController.ReturnCode} */ @Override public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java index 4fa825e5f457..872065bb8a69 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java @@ -41,63 +41,18 @@ public SnapshotDescription(String name) { this(name, (TableName)null); } - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName - * instance instead. - * @see #SnapshotDescription(String, TableName) - * @see HBASE-16892 - */ - @Deprecated - public SnapshotDescription(String name, String table) { - this(name, TableName.valueOf(table)); - } - public SnapshotDescription(String name, TableName table) { this(name, table, SnapshotType.DISABLED, null, -1, -1, null); } - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName - * instance instead. - * @see #SnapshotDescription(String, TableName, SnapshotType) - * @see HBASE-16892 - */ - @Deprecated - public SnapshotDescription(String name, String table, SnapshotType type) { - this(name, TableName.valueOf(table), type); - } - public SnapshotDescription(String name, TableName table, SnapshotType type) { this(name, table, type, null, -1, -1, null); } - /** - * @see #SnapshotDescription(String, TableName, SnapshotType, String) - * @see HBASE-16892 - * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName - * instance instead. 
- */ - @Deprecated - public SnapshotDescription(String name, String table, SnapshotType type, String owner) { - this(name, TableName.valueOf(table), type, owner); - } - public SnapshotDescription(String name, TableName table, SnapshotType type, String owner) { this(name, table, type, owner, -1, -1, null); } - /** - * @see #SnapshotDescription(String, TableName, SnapshotType, String, long, int, Map) - * @see HBASE-16892 - * @deprecated since 2.0.0 and will be removed in 3.0.0. Use the version with the TableName - * instance instead. - */ - @Deprecated - public SnapshotDescription(String name, String table, SnapshotType type, String owner, - long creationTime, int version) { - this(name, TableName.valueOf(table), type, owner, creationTime, version, null); - } - /** * SnapshotDescription Parameterized Constructor * @@ -141,18 +96,6 @@ public String getName() { return this.name; } - /** - * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getTableName()} or - * {@link #getTableNameAsString()} instead. - * @see #getTableName() - * @see #getTableNameAsString() - * @see HBASE-16892 - */ - @Deprecated - public String getTable() { - return getTableNameAsString(); - } - public String getTableNameAsString() { return this.table.getNameAsString(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java index def977446267..41b0e47c0758 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java @@ -176,9 +176,9 @@ default Result get(Get get) throws IOException { * @throws IOException if a remote or network exception occurs. * @since 0.90.0 * @apiNote {@link #put(List)} runs pre-flight validations on the input list on client. - * Currently {@link #get(List)} doesn't run any validations on the client-side, currently there - * is no need, but this may change in the future. An - * {@link IllegalArgumentException} will be thrown in this case. + * Currently {@link #get(List)} doesn't run any validations on the client-side, + * currently there is no need, but this may change in the future. An + * {@link IllegalArgumentException} will be thrown in this case. */ default Result[] get(List gets) throws IOException { throw new NotImplementedException("Add an implementation!"); @@ -284,10 +284,10 @@ default void delete(Delete delete) throws IOException { * that have not be successfully applied. * @since 0.20.1 * @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also, - * {@link #put(List)} runs pre-flight validations on the input list on client. Currently - * {@link #delete(List)} doesn't run validations on the client, there is no need currently, - * but this may change in the future. An * {@link IllegalArgumentException} will be thrown - * in this case. + * {@link #put(List)} runs pre-flight validations on the input list on client. Currently + * {@link #delete(List)} doesn't run validations on the client, there is no need + * currently, but this may change in the future. An * {@link IllegalArgumentException} + * will be thrown in this case. 
*/ default void delete(List deletes) throws IOException { throw new NotImplementedException("Add an implementation!"); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java index 82b005d018c1..c3defda5b4d3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java @@ -39,7 +39,7 @@ public abstract class Batch { * the implementations {@link Batch.Call#call(Object)} method will be invoked * with a proxy to each region's coprocessor {@link com.google.protobuf.Service} implementation. *
</p>
- * @see org.apache.hadoop.hbase.client.coprocessor + * @see org.apache.hadoop.hbase.client.coprocessor.Batch * @see org.apache.hadoop.hbase.client.Table#coprocessorService(byte[]) * @see org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], * org.apache.hadoop.hbase.client.coprocessor.Batch.Call) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java index 6b1e251953b9..1975297dd218 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CallQueueTooBigException; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.RetryImmediatelyException; @@ -59,18 +58,23 @@ public static boolean isMetaClearingException(Throwable cur) { if (cur == null) { return true; } - return !isSpecialException(cur) || (cur instanceof RegionMovedException) - || cur instanceof NotServingRegionException; + return !regionDefinitelyOnTheRegionServerException(cur); } - public static boolean isSpecialException(Throwable cur) { - return (cur instanceof RegionMovedException || cur instanceof RegionOpeningException - || cur instanceof RegionTooBusyException || cur instanceof RpcThrottlingException - || cur instanceof MultiActionResultTooLarge || cur instanceof RetryImmediatelyException - || cur instanceof CallQueueTooBigException || cur instanceof CallDroppedException - || cur instanceof NotServingRegionException || cur instanceof RequestTooBigException); + private static boolean regionDefinitelyOnTheRegionServerException(Throwable t) { + return (t instanceof RegionTooBusyException || t instanceof RpcThrottlingException + || t instanceof RetryImmediatelyException || t instanceof CallQueueTooBigException + || t instanceof CallDroppedException || t instanceof NotServingRegionException + || t instanceof RequestTooBigException); } + /** + * This function is the alias of regionDefinitelyOnTheRegionServerException, + * whose name is confusing in the function findException(). + */ + private static boolean matchExceptionWeCare(Throwable t) { + return regionDefinitelyOnTheRegionServerException(t); + } /** * Look for an exception we know in the remote exception: @@ -87,7 +91,7 @@ public static Throwable findException(Object exception) { } Throwable cur = (Throwable) exception; while (cur != null) { - if (isSpecialException(cur)) { + if (matchExceptionWeCare(cur)) { return cur; } if (cur instanceof RemoteException) { @@ -95,7 +99,7 @@ public static Throwable findException(Object exception) { cur = re.unwrapRemoteException(); // unwrapRemoteException can return the exception given as a parameter when it cannot - // unwrap it. In this case, there is no need to look further + // unwrap it. 
In this case, there is no need to look further // noinspection ObjectEquality if (cur == re) { return cur; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java index c04c3f55aaef..8d72f7b99877 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; import java.nio.channels.ReadableByteChannel; import java.util.List; @@ -450,10 +451,39 @@ public byte[] toBytes() { */ public abstract int read(ReadableByteChannel channel) throws IOException; + /** + * Reads bytes from FileChannel into this ByteBuff + */ + public abstract int read(FileChannel channel, long offset) throws IOException; + + /** + * Write this ByteBuff's data into target file + */ + public abstract int write(FileChannel channel, long offset) throws IOException; + + /** + * function interface for Channel read + */ + @FunctionalInterface + interface ChannelReader { + int read(ReadableByteChannel channel, ByteBuffer buf, long offset) throws IOException; + } + + static final ChannelReader CHANNEL_READER = (channel, buf, offset) -> { + return channel.read(buf); + }; + + static final ChannelReader FILE_READER = (channel, buf, offset) -> { + return ((FileChannel)channel).read(buf, offset); + }; + // static helper methods - public static int channelRead(ReadableByteChannel channel, ByteBuffer buf) throws IOException { + public static int read(ReadableByteChannel channel, ByteBuffer buf, long offset, + ChannelReader reader) throws IOException { if (buf.remaining() <= NIO_BUFFER_LIMIT) { - return channel.read(buf); + int res = reader.read(channel, buf, offset); + buf.rewind(); + return res; } int originalLimit = buf.limit(); int initialRemaining = buf.remaining(); @@ -463,12 +493,14 @@ public static int channelRead(ReadableByteChannel channel, ByteBuffer buf) throw try { int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT); buf.limit(buf.position() + ioSize); - ret = channel.read(buf); + offset += ret; + ret = reader.read(channel, buf, offset); if (ret < ioSize) { break; } } finally { buf.limit(originalLimit); + buf.rewind(); } } int nBytes = initialRemaining - buf.remaining(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java index 3ce170903974..7e55188d3a6f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java @@ -24,6 +24,7 @@ import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; import java.nio.InvalidMarkException; +import java.nio.channels.FileChannel; import java.nio.channels.ReadableByteChannel; import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler; @@ -1064,21 +1065,22 @@ public byte[] toBytes(int offset, int length) { return output; } - @Override - public int read(ReadableByteChannel channel) throws IOException { + private int internalRead(ReadableByteChannel channel, long offset, + ChannelReader reader) throws IOException { checkRefCount(); int total = 0; while (true) { - // Read max possible into the current BB - int len = channelRead(channel, this.curItem); - if (len > 0) + int len = read(channel, this.curItem, offset, reader); + if (len > 0) { total += len; + offset += 
len; + } if (this.curItem.hasRemaining()) { - // We were not able to read enough to fill the current BB itself. Means there is no point in - // doing more reads from Channel. Only this much there for now. break; } else { - if (this.curItemIndex >= this.limitedItemIndex) break; + if (this.curItemIndex >= this.limitedItemIndex) { + break; + } this.curItemIndex++; this.curItem = this.items[this.curItemIndex]; } @@ -1086,6 +1088,35 @@ public int read(ReadableByteChannel channel) throws IOException { return total; } + @Override + public int read(ReadableByteChannel channel) throws IOException { + return internalRead(channel, 0, CHANNEL_READER); + } + + @Override + public int read(FileChannel channel, long offset) throws IOException { + return internalRead(channel, offset, FILE_READER); + } + + @Override + public int write(FileChannel channel, long offset) throws IOException { + checkRefCount(); + int total = 0; + while (true) { + while (curItem.hasRemaining()) { + int len = channel.write(curItem, offset); + total += len; + offset += len; + } + if (this.curItemIndex >= this.limitedItemIndex) { + break; + } + this.curItemIndex++; + this.curItem = this.items[this.curItemIndex]; + } + return total; + } + @Override public ByteBuffer[] nioByteBuffers() { checkRefCount(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java index 36a83a0ec212..797bfdc1fff5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; import java.nio.channels.ReadableByteChannel; import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler; @@ -371,7 +372,25 @@ public void get(ByteBuffer out, int sourceOffset, int length) { @Override public int read(ReadableByteChannel channel) throws IOException { checkRefCount(); - return channelRead(channel, buf); + return read(channel, buf, 0, CHANNEL_READER); + } + + @Override + public int read(FileChannel channel, long offset) throws IOException { + checkRefCount(); + return read(channel, buf, offset, FILE_READER); + } + + @Override + public int write(FileChannel channel, long offset) throws IOException { + checkRefCount(); + int total = 0; + while(buf.hasRemaining()) { + int len = channel.write(buf, offset); + total += len; + offset += len; + } + return total; } @Override diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java index fcf6f552b7ac..a55784729c0b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java @@ -53,21 +53,6 @@ protected static class TableRecordWriter implements RecordWriterHBASE-16774 - */ - @Deprecated - public TableRecordWriter(final BufferedMutator mutator) throws IOException { - this.m_mutator = mutator; - this.conn = null; - } - /** * Instantiate a TableRecordWriter with a BufferedMutator for batch writing. 
*/ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 1ae6a2a6e2e3..c963c6c18ae2 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; @@ -117,19 +116,6 @@ public TableInfo(TableDescriptor tableDesctiptor, RegionLocator regionLocator) { this.regionLocator = regionLocator; } - /** - * The modification for the returned HTD doesn't affect the inner TD. - * @return A clone of inner table descriptor - * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getTableDescriptor()} - * instead. - * @see #getTableDescriptor() - * @see HBASE-18241 - */ - @Deprecated - public HTableDescriptor getHTableDescriptor() { - return new HTableDescriptor(tableDesctiptor); - } - public TableDescriptor getTableDescriptor() { return tableDesctiptor; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index 6907a8817cc5..4993feea223e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -267,9 +267,10 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException return new Path(outputArchive, path); } + @SuppressWarnings("checkstyle:linelength") /** * Used by TestExportSnapshot to test for retries when failures happen. - * Failure is injected in {@link #copyFile(Context, SnapshotFileInfo, Path)}. + * Failure is injected in {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}. 
*/ private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo) throws IOException { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 40f1a08f32da..60127a665243 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -787,7 +787,7 @@ private void runIncrementalPELoad(Configuration conf, List $(document).ready(function() { - $("#baseStatsTable").tablesorter(); - $("#memoryStatsTable").tablesorter(); - $("#requestStatsTable").tablesorter(); - $("#storeStatsTable").tablesorter(); - $("#compactionStatsTable").tablesorter(); + $.tablesorter.addParser( + { + id: 'filesize', + is: function(s) { + return s.match(new RegExp( /([\.0-9]+)\ (B|KB|MB|GB|TB)/ )); + }, + format: function(s) { + var suf = s.match(new RegExp( /(KB|B|GB|MB|TB)$/ ))[1]; + var num = parseFloat(s.match( new RegExp( /([\.0-9]+)\ (B|KB|MB|GB|TB)/ ))[0]); + switch(suf) { + case 'B': + return num; + case 'KB': + return num * 1024; + case 'MB': + return num * 1024 * 1024; + case 'GB': + return num * 1024 * 1024 * 1024; + case 'TB': + return num * 1024 * 1024 * 1024 * 1024; + } + }, + type: 'numeric' + }); + $.tablesorter.addParser( + { + id: "separator", + is: function (s) { + return /^[0-9]?[0-9,]*$/.test(s); + }, format: function (s) { + return $.tablesorter.formatFloat( s.replace(/,/g,'') ); + }, type: "numeric" + }); + $("#baseStatsTable").tablesorter({ + headers: { + 4: {sorter: 'separator'}, + 5: {sorter: 'separator'} + } + }); + $("#memoryStatsTable").tablesorter({ + headers: { + 1: {sorter: 'filesize'}, + 2: {sorter: 'filesize'}, + 3: {sorter: 'filesize'} + } + }); + $("#requestStatsTable").tablesorter({ + headers: { + 1: {sorter: 'separator'}, + 2: {sorter: 'separator'}, + 3: {sorter: 'separator'}, + 4: {sorter: 'separator'} + } + }); + $("#storeStatsTable").tablesorter({ + headers: { + 1: {sorter: 'separator'}, + 2: {sorter: 'separator'}, + 3: {sorter: 'filesize'}, + 4: {sorter: 'filesize'}, + 5: {sorter: 'filesize'}, + 6: {sorter: 'filesize'} + } + }); + $("#compactionStatsTable").tablesorter({ + headers: { + 1: {sorter: 'separator'}, + 2: {sorter: 'separator'}, + 3: {sorter: 'separator'} + } + }); $("#userTables").tablesorter(); } diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon index 0afc705c9ed2..1346ce81a775 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon @@ -125,8 +125,8 @@ Arrays.sort(serverNames); <% new Date(startcode) %> <% TraditionalBinaryPrefix.long2String(lastContact, "s", 1) %> <% version %> - <% String.format("%.0f", requestsPerSecond) %> - <% numRegionsOnline %> + <% String.format("%,.0f", requestsPerSecond) %> + <% String.format("%,d", numRegionsOnline) %> <%java> } @@ -225,10 +225,10 @@ if (sl != null) { <& serverNameLink; serverName=serverName; &> -<% sl.getRequestCountPerSecond() %> -<% readRequestCount %> -<% filteredReadRequestCount %> -<% writeRequestCount %> +<% String.format("%,d", sl.getRequestCountPerSecond()) %> +<% String.format("%,d", readRequestCount) %> +<% 
String.format("%,d", filteredReadRequestCount) %> +<% String.format("%,d", writeRequestCount) %> <%java> } else { @@ -282,8 +282,8 @@ if (sl != null) { <& serverNameLink; serverName=serverName; &> -<% storeCount %> -<% storeFileCount %> +<% String.format("%,d", storeCount) %> +<% String.format("%,d", storeFileCount) %> <% TraditionalBinaryPrefix.long2String( storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> <% TraditionalBinaryPrefix.long2String(storeFileSizeMB @@ -339,9 +339,9 @@ if (totalCompactingCells > 0) { <& serverNameLink; serverName=serverName; &> -<% totalCompactingCells %> -<% totalCompactedCells %> -<% totalCompactingCells - totalCompactedCells %> +<% String.format("%,d", totalCompactingCells) %> +<% String.format("%,d", totalCompactedCells) %> +<% String.format("%,d", totalCompactingCells - totalCompactedCells) %> <% percentDone %> <%java> diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon index a4ef63adb789..2b07523844d7 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon @@ -270,11 +270,47 @@ $(document).ready(function() }, type: 'numeric' }); + $.tablesorter.addParser( + { + id: "separator", + is: function (s) { + return /^[0-9]?[0-9,]*$/.test(s); + }, + format: function (s) { + return $.tablesorter.formatFloat( s.replace(/,/g,'') ); + }, + type: "numeric" + }); + $("#baseStatsTable").tablesorter(); - $("#requestStatsTable").tablesorter(); - $("#storeStatsTable").tablesorter(); - $("#compactionStatsTable").tablesorter(); - $("#memstoreStatsTable").tablesorter(); + $("#requestStatsTable").tablesorter({ + headers: { + 1: {sorter: 'separator'}, + 2: {sorter: 'separator'}, + 3: {sorter: 'separator'} + } + }); + $("#storeStatsTable").tablesorter({ + headers: { + 1: {sorter: 'separator'}, + 2: {sorter: 'separator'}, + 3: {sorter: 'filesize'}, + 4: {sorter: 'filesize'}, + 5: {sorter: 'filesize'}, + 6: {sorter: 'filesize'} + } + }); + $("#compactionStatsTable").tablesorter({ + headers: { + 1: {sorter: 'separator'}, + 2: {sorter: 'separator'} + } + }); + $("#memstoreStatsTable").tablesorter({ + headers: { + 1: {sorter: 'filesize'} + } + }); } ); diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon index 7e983f565133..066fe53b4c53 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon @@ -142,9 +142,9 @@ <% displayName %> <%if load != null %> - <% load.getReadRequestsCount() %> - <% load.getFilteredReadRequestsCount() %> - <% load.getWriteRequestsCount() %> + <% String.format("%,1d", load.getReadRequestsCount()) %> + <% String.format("%,1d", load.getFilteredReadRequestsCount()) %> + <% String.format("%,1d", load.getWriteRequestsCount()) %> @@ -182,8 +182,8 @@ <% displayName %> <%if load != null %> - <% load.getStores() %> - <% load.getStorefiles() %> + <% String.format("%,1d", load.getStores()) %> + <% String.format("%,1d", load.getStorefiles()) %> <% TraditionalBinaryPrefix.long2String(load.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1)%> <% 
TraditionalBinaryPrefix.long2String(load.getStorefileSizeMB() @@ -239,8 +239,8 @@ <% displayName %> <%if load != null %> - <% load.getTotalCompactingKVs() %> - <% load.getCurrentCompactedKVs() %> + <% String.format("%,1d", load.getTotalCompactingKVs()) %> + <% String.format("%,1d", load.getCurrentCompactedKVs()) %> <% percentDone %> <% compactTime %> diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index e517405d4c20..8761d6b1d9dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -1104,4 +1104,16 @@ default DeleteTracker postInstantiateDeleteTracker( throws IOException { return delTracker; } + + /** + * Called just before the WAL Entry is appended to the WAL. Implementing this hook allows + * coprocessors to add extended attributes to the WALKey that then get persisted to the + * WAL, and are available to replication endpoints to use in processing WAL Entries. + * @param ctx the environment provided by the region server + * @param key the WALKey associated with a particular append to a WAL + */ + default void preWALAppend(ObserverContext ctx, WALKey key, + WALEdit edit) + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 82767e963c74..1928845774ff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -502,8 +502,10 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, // block will use the refCnt of bucketEntry, which means if two HFileBlock mapping to // the same BucketEntry, then all of the three will share the same refCnt. Cacheable cachedBlock = ioEngine.read(bucketEntry); - // RPC start to reference, so retain here. - cachedBlock.retain(); + if (ioEngine.usesSharedMemory()) { + // RPC start to reference, so retain here. + cachedBlock.retain(); + } // Update the cache statistics. if (updateCacheMetrics) { cacheStats.hit(caching, key.isPrimary(), key.getBlockType()); @@ -689,7 +691,7 @@ private void freeEntireBuckets(int completelyFreeBucketsNeeded) { // this set is small around O(Handler Count) unless something else is wrong Set inUseBuckets = new HashSet<>(); backingMap.forEach((k, be) -> { - if (be.isRpcRef()) { + if (ioEngine.usesSharedMemory() && be.isRpcRef()) { inUseBuckets.add(bucketAllocator.getBucketIndex(be.offset())); } }); @@ -1529,11 +1531,11 @@ static class RAMCache { /** * Defined the map as {@link ConcurrentHashMap} explicitly here, because in * {@link RAMCache#get(BlockCacheKey)} and - * {@link RAMCache#putIfAbsent(BlockCacheKey, RAMQueueEntry)} , we need to guarantee the - * atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). Besides, the - * func method can execute exactly once only when the key is present(or absent) and under the - * lock context. Otherwise, the reference count of block will be messed up. Notice that the - * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. 
+ * {@link RAMCache#putIfAbsent(BlockCacheKey, BucketCache.RAMQueueEntry)} , we need to + * guarantee the atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). + * Besides, the func method can execute exactly once only when the key is present(or absent) + * and under the lock context. Otherwise, the reference count of block will be messed up. + * Notice that the {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. */ final ConcurrentHashMap delegate = new ConcurrentHashMap<>(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java index ca41ecafb9d1..2dd77756e585 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java @@ -80,7 +80,7 @@ class BucketEntry implements HBaseReferenceCounted { */ private final RefCnt refCnt; final AtomicBoolean markedAsEvicted; - private final ByteBuffAllocator allocator; + final ByteBuffAllocator allocator; /** * Time this block was cached. Presumes we are created just before we are added to the cache. @@ -194,7 +194,10 @@ boolean isRpcRef() { } Cacheable wrapAsCacheable(ByteBuffer[] buffers) throws IOException { - ByteBuff buf = ByteBuff.wrap(buffers, this.refCnt); + return wrapAsCacheable(ByteBuff.wrap(buffers, this.refCnt)); + } + + Cacheable wrapAsCacheable(ByteBuff buf) throws IOException { return this.deserializerReference().deserialize(buf, allocator); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java index 3d7f2b1f3bdb..3169a66539aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; -import java.nio.ByteBuffer; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -35,9 +34,9 @@ public ExclusiveMemoryMmapIOEngine(String filePath, long capacity) throws IOExce @Override public Cacheable read(BucketEntry be) throws IOException { - ByteBuff dst = ByteBuff.wrap(ByteBuffer.allocate(be.getLength())); + ByteBuff dst = be.allocator.allocate(be.getLength()); bufferArray.read(be.offset(), dst); dst.position(0).limit(be.getLength()); - return be.wrapAsCacheable(dst.nioByteBuffers()); + return be.wrapAsCacheable(dst); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java index b3afe482a02a..cef4aa0cdfdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java @@ -129,7 +129,7 @@ public Cacheable read(BucketEntry be) throws IOException { long offset = be.offset(); int length = be.getLength(); Preconditions.checkArgument(length >= 0, "Length of read can not be less than 0."); - ByteBuffer dstBuffer = ByteBuffer.allocate(length); + ByteBuff dstBuffer = be.allocator.allocate(length); if (length != 0) { accessFile(readAccessor, dstBuffer, 
offset); // The buffer created out of the fileChannel is formed by copying the data from the file @@ -142,7 +142,7 @@ public Cacheable read(BucketEntry be) throws IOException { } } dstBuffer.rewind(); - return be.wrapAsCacheable(new ByteBuffer[] { dstBuffer }); + return be.wrapAsCacheable(dstBuffer); } @VisibleForTesting @@ -164,10 +164,7 @@ void closeFileChannels() { */ @Override public void write(ByteBuffer srcBuffer, long offset) throws IOException { - if (!srcBuffer.hasRemaining()) { - return; - } - accessFile(writeAccessor, srcBuffer, offset); + write(ByteBuff.wrap(srcBuffer), offset); } /** @@ -209,11 +206,13 @@ public void shutdown() { @Override public void write(ByteBuff srcBuffer, long offset) throws IOException { - ByteBuffer dup = srcBuffer.asSubByteBuffer(srcBuffer.remaining()).duplicate(); - write(dup, offset); + if (!srcBuffer.hasRemaining()) { + return; + } + accessFile(writeAccessor, srcBuffer, offset); } - private void accessFile(FileAccessor accessor, ByteBuffer buffer, + private void accessFile(FileAccessor accessor, ByteBuff buffer, long globalOffset) throws IOException { int startFileNum = getFileNum(globalOffset); int remainingAccessDataLen = buffer.remaining(); @@ -304,23 +303,23 @@ void refreshFileConnection(int accessFileNum, IOException ioe) throws IOExceptio } private interface FileAccessor { - int access(FileChannel fileChannel, ByteBuffer byteBuffer, long accessOffset) + int access(FileChannel fileChannel, ByteBuff byteBuffer, long accessOffset) throws IOException; } private static class FileReadAccessor implements FileAccessor { @Override - public int access(FileChannel fileChannel, ByteBuffer byteBuffer, + public int access(FileChannel fileChannel, ByteBuff byteBuffer, long accessOffset) throws IOException { - return fileChannel.read(byteBuffer, accessOffset); + return byteBuffer.read(fileChannel, accessOffset); } } private static class FileWriteAccessor implements FileAccessor { @Override - public int access(FileChannel fileChannel, ByteBuffer byteBuffer, + public int access(FileChannel fileChannel, ByteBuff byteBuffer, long accessOffset) throws IOException { - return fileChannel.write(byteBuffer, accessOffset); + return byteBuffer.write(fileChannel, accessOffset); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index 25b7191d387e..81e582576faa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -672,6 +672,10 @@ private void checkServer(byte [] metaTableRow, RegionLocations locations) { if (sn == null) { continue; } + // skip the offline regions which belong to disabled table. + if (isTableDisabled(location.getRegion())) { + continue; + } ServerManager.ServerLiveState state = this.services.getServerManager(). 
isServerKnownAndOnline(sn); switch (state) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java index 69a8d536dcf2..5bb4e9524fc4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HbckChore.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HbckRegionInfo; @@ -60,6 +61,8 @@ public class HbckChore extends ScheduledChore { */ private final Map regionInfoMap = new HashMap<>(); + private final Set disabledTableRegions = new HashSet<>(); + /** * The regions only opened on RegionServers, but no region info in meta. */ @@ -67,7 +70,7 @@ public class HbckChore extends ScheduledChore { /** * The regions have directory on FileSystem, but no region info in meta. */ - private final Set orphanRegionsOnFS = new HashSet<>(); + private final Map orphanRegionsOnFS = new HashMap<>(); /** * The inconsistent regions. There are three case: * case 1. Master thought this region opened, but no regionserver reported it. @@ -81,7 +84,7 @@ public class HbckChore extends ScheduledChore { * The "snapshot" is used to save the last round's HBCK checking report. */ private final Map orphanRegionsOnRSSnapshot = new HashMap<>(); - private final Set orphanRegionsOnFSSnapshot = new HashSet<>(); + private final Map orphanRegionsOnFSSnapshot = new HashMap<>(); private final Map>> inconsistentRegionsSnapshot = new HashMap<>(); @@ -98,16 +101,29 @@ public class HbckChore extends ScheduledChore { private volatile long checkingStartTimestamp = 0; private volatile long checkingEndTimestamp = 0; + private boolean disabled = false; + public HbckChore(MasterServices master) { super("HbckChore-", master, master.getConfiguration().getInt(HBCK_CHORE_INTERVAL, DEFAULT_HBCK_CHORE_INTERVAL)); this.master = master; + int interval = + master.getConfiguration().getInt(HBCK_CHORE_INTERVAL, DEFAULT_HBCK_CHORE_INTERVAL); + if (interval <= 0) { + LOG.warn(HBCK_CHORE_INTERVAL + " is <=0 hence disabling hbck chore"); + disableChore(); + } } @Override protected synchronized void chore() { + if (isDisabled() || isRunning()) { + LOG.warn("hbckChore is either disabled or is already running. Can't run the chore"); + return; + } running = true; regionInfoMap.clear(); + disabledTableRegions.clear(); orphanRegionsOnRS.clear(); orphanRegionsOnFS.clear(); inconsistentRegions.clear(); @@ -123,6 +139,29 @@ protected synchronized void chore() { running = false; } + // This function does the sanity checks of making sure the chore is not run when it is + // disabled or when it's already running. It returns whether the chore was actually run or not. + protected boolean runChore() { + if (isDisabled() || isRunning()) { + if (isDisabled()) { + LOG.warn("hbck chore is disabled! Set " + HBCK_CHORE_INTERVAL + " > 0 to enable it."); + } else { + LOG.warn("hbck chore already running. 
Can't run till it finishes."); + } + return false; + } + chore(); + return true; + } + + private void disableChore() { + this.disabled = true; + } + + public boolean isDisabled() { + return this.disabled; + } + private void saveCheckResultToSnapshot() { // Need synchronized here, as this "snapshot" may be access by web ui. rwLock.writeLock().lock(); @@ -131,7 +170,8 @@ private void saveCheckResultToSnapshot() { orphanRegionsOnRS.entrySet() .forEach(e -> orphanRegionsOnRSSnapshot.put(e.getKey(), e.getValue())); orphanRegionsOnFSSnapshot.clear(); - orphanRegionsOnFSSnapshot.addAll(orphanRegionsOnFS); + orphanRegionsOnFS.entrySet() + .forEach(e -> orphanRegionsOnFSSnapshot.put(e.getKey(), e.getValue())); inconsistentRegionsSnapshot.clear(); inconsistentRegions.entrySet() .forEach(e -> inconsistentRegionsSnapshot.put(e.getKey(), e.getValue())); @@ -146,6 +186,10 @@ private void loadRegionsFromInMemoryState() { master.getAssignmentManager().getRegionStates().getRegionStates(); for (RegionState regionState : regionStates) { RegionInfo regionInfo = regionState.getRegion(); + if (master.getTableStateManager() + .isTableState(regionInfo.getTable(), TableState.State.DISABLED)) { + disabledTableRegions.add(regionInfo.getEncodedName()); + } HbckRegionInfo.MetaEntry metaEntry = new HbckRegionInfo.MetaEntry(regionInfo, regionState.getServerName(), regionState.getStamp()); @@ -178,6 +222,11 @@ private void loadRegionsFromRSReport() { HbckRegionInfo hri = entry.getValue(); ServerName locationInMeta = hri.getMetaEntry().getRegionServer(); if (hri.getDeployedOn().size() == 0) { + // Because the inconsistent regions are not absolutely right, only skip the offline regions + // which belong to disabled table. + if (disabledTableRegions.contains(encodedRegionName)) { + continue; + } // Master thought this region opened, but no regionserver reported it. inconsistentRegions.put(encodedRegionName, new Pair<>(locationInMeta, new LinkedList<>())); } else if (hri.getDeployedOn().size() > 1) { @@ -202,7 +251,7 @@ private void loadRegionsFromFS() throws IOException { String encodedRegionName = regionDir.getName(); HbckRegionInfo hri = regionInfoMap.get(encodedRegionName); if (hri == null) { - orphanRegionsOnFS.add(encodedRegionName); + orphanRegionsOnFS.put(encodedRegionName, regionDir); continue; } HbckRegionInfo.HdfsEntry hdfsEntry = new HbckRegionInfo.HdfsEntry(regionDir); @@ -237,7 +286,7 @@ public Map getOrphanRegionsOnRS() { /** * @return the regions have directory on FileSystem, but no region info in meta. */ - public Set getOrphanRegionsOnFS() { + public Map getOrphanRegionsOnFS() { // Need synchronized here, as this "snapshot" may be changed after checking. 
rwLock.readLock().lock(); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index e55a39dc784a..c8caea76d871 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -2376,11 +2376,7 @@ public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest re rpcPreCheck("runHbckChore"); LOG.info("{} request HBCK chore to run", master.getClientIdAuditPrefix()); HbckChore hbckChore = master.getHbckChore(); - boolean ran = false; - if (!hbckChore.isRunning()) { - hbckChore.chore(); - ran = true; - } + boolean ran = hbckChore.runChore(); return RunHbckChoreResponse.newBuilder().setRan(ran).build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java index 7dbdbee42ac5..12eb0a0fe4ff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateNode.java @@ -99,7 +99,7 @@ public AssignmentProcedureEvent(final RegionInfo regionInfo) { /** * Updated whenever a call to {@link #setRegionLocation(ServerName)} or - * {@link #setState(State, State...)}. + * {@link #setState(RegionState.State, RegionState.State...)}. */ private volatile long lastUpdate = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 94d7785c9083..b3340fa73835 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -71,6 +71,8 @@ import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessChecker; +import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclCleaner; +import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; @@ -1123,6 +1125,10 @@ private void checkSnapshotSupport(final Configuration conf, final MasterFileSyst // Inject snapshot cleaners, if snapshot.enable is true hfileCleaners.add(SnapshotHFileCleaner.class.getName()); hfileCleaners.add(HFileLinkCleaner.class.getName()); + // If sync acl to HDFS feature is enabled, then inject the cleaner + if (SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf)) { + hfileCleaners.add(SnapshotScannerHDFSAclCleaner.class.getName()); + } // Set cleaners conf conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 9f31522d7644..92a6145d9cd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -4047,7 +4047,7 @@ public OperationStatus[] 
batchReplay(MutationReplay[] mutations, long replaySeqI * a batch are stored with highest durability specified of for all operations in a batch, * except for {@link Durability#SKIP_WAL}. * - *

<p>This function is called from {@link #batchReplay(MutationReplay[], long)} with
+ * <p>
This function is called from {@link #batchReplay(WALSplitUtil.MutationReplay[], long)} with * {@link ReplayBatchOperation} instance and {@link #batchMutate(Mutation[], long, long)} with * {@link MutationBatchOperation} instance as an argument. As the processing of replay batch * and mutation batch is very similar, lot of code is shared by providing generic methods in @@ -4058,7 +4058,7 @@ public OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqI * @param batchOp contains the list of mutations * @return an array of OperationStatus which internally contains the * OperationStatusCode and the exceptionMessage if any. - * @throws IOException + * @throws IOException if an IO problem is encountered */ OperationStatus[] batchMutate(BatchOperation batchOp) throws IOException { boolean initialized = false; @@ -7964,6 +7964,11 @@ private WriteEntry doWALAppend(WALEdit walEdit, Durability durability, List> call(RegionObserver observer) throws IOException { }); } + public void preWALAppend(WALKey key, WALEdit edit) throws IOException { + if (this.coprocEnvironments.isEmpty()){ + return; + } + execOperation(new RegionObserverOperationWithoutResult() { + @Override + public void call(RegionObserver observer) throws IOException { + observer.preWALAppend(this, key, edit); + } + }); + } + public Message preEndpointInvocation(final Service service, final String methodName, Message request) throws IOException { if (coprocEnvironments.isEmpty()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 0d2f461a3ea6..a7ac45a377c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -95,15 +95,6 @@ public interface StoreFile { */ long getMaxSequenceId(); - /** - * Get the modification time of this store file. Usually will access the file system so throws - * IOException. - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * @see #getModificationTimestamp() - */ - @Deprecated - long getModificationTimeStamp() throws IOException; - /** * Get the modification time of this store file. Usually will access the file system so throws * IOException. 
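For reference, a minimal sketch of how a region coprocessor might use the new preWALAppend hook added by this patch. It is illustrative only: the class name and attribute key are invented, and it assumes WALKeyImpl#addExtendedAttribute(String, byte[]) is the mechanism for attaching extended attributes that replication endpoints can later read back from the WAL entry.

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALKeyImpl;

/** Illustrative sketch: tags every WAL entry with its region's encoded name before append. */
public class WALAttributeTaggingObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> ctx, WALKey key,
      WALEdit edit) throws IOException {
    // addExtendedAttribute is assumed to live on WALKeyImpl; the WALKey interface itself
    // only exposes read access to extended attributes.
    if (key instanceof WALKeyImpl) {
      ((WALKeyImpl) key).addExtendedAttribute("origin-region",
        Bytes.toBytes(ctx.getEnvironment().getRegionInfo().getEncodedName()));
    }
  }
}

Such an observer would be registered like any other region coprocessor, for example through hbase.coprocessor.region.classes, and the attribute would then be persisted with the WALKey and be visible to replication endpoints processing the entry.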
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 93e79c5fd11c..44363fa70749 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1089,7 +1089,8 @@ public void preCloneSnapshot(final ObserverContext throws IOException { User user = getActiveUser(ctx); if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user) - && hTableDescriptor.getTableName().getNameAsString().equals(snapshot.getTable())) { + && hTableDescriptor.getTableName().getNameAsString() + .equals(snapshot.getTableNameAsString())) { // Snapshot owner is allowed to create a table with the same name as the snapshot he took AuthResult result = AuthResult.allow("cloneSnapshot " + snapshot.getName(), "Snapshot owner check allowed", user, null, hTableDescriptor.getTableName(), null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java index 86c663dfdb32..6bf4c1fdee3d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; import org.apache.yetus.audience.InterfaceAudience; @@ -59,7 +58,7 @@ public void init(Map params) { @Override public void setConf(Configuration conf) { super.setConf(conf); - userScanSnapshotEnabled = isUserScanSnapshotEnabled(conf); + userScanSnapshotEnabled = SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf); } @Override @@ -82,13 +81,6 @@ public boolean isEmptyDirDeletable(Path dir) { return true; } - private boolean isUserScanSnapshotEnabled(Configuration conf) { - String masterCoprocessors = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); - return conf.getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false) - && masterCoprocessors.contains(SnapshotScannerHDFSAclController.class.getName()) - && masterCoprocessors.contains(AccessController.class.getName()); - } - private boolean isEmptyArchiveDirDeletable(Path dir) { try { if (isArchiveDataDir(dir)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java index f6d5b767e492..82e3430b9c74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java @@ -119,7 +119,7 @@ public Optional getMasterObserver() { public void preMasterInitialization(ObserverContext c) throws IOException { if (c.getEnvironment().getConfiguration() - .getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, false)) { + 
.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)) { MasterCoprocessorEnvironment mEnv = c.getEnvironment(); if (!(mEnv instanceof HasMasterServices)) { throw new IOException("Does not implement HMasterServices"); @@ -133,7 +133,7 @@ public void preMasterInitialization(ObserverContext c, TableName tableName) throws IOException { if (needHandleTableHdfsAcl(tableName, "truncateTable " + tableName)) { - // Since the table directories is recreated, so add HDFS acls again + // 1. create tmp table directories + hdfsAclHelper.createTableDirectories(tableName); + // 2. Since the table directories is recreated, so add HDFS acls again Set users = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false); hdfsAclHelper.addTableAcl(tableName, users, "truncate"); } @@ -233,9 +235,11 @@ public void postDeleteTable(ObserverContext ctx, try (Table aclTable = ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { Set users = SnapshotScannerHDFSAclStorage.getTableUsers(aclTable, tableName); - // 1. Delete table owner permission is synced to HDFS in acl table + // 1. Remove table archive directory default ACLs + hdfsAclHelper.removeTableDefaultAcl(tableName, users); + // 2. Delete table owner permission is synced to HDFS in acl table SnapshotScannerHDFSAclStorage.deleteTableHdfsAcl(aclTable, tableName); - // 2. Remove namespace access acls + // 3. Remove namespace access acls Set removeUsers = filterUsersToRemoveNsAccessAcl(aclTable, tableName, users); if (removeUsers.size() > 0) { hdfsAclHelper.removeNamespaceAccessAcl(tableName, removeUsers, "delete"); @@ -251,7 +255,7 @@ public void postModifyTable(ObserverContext ctx, try (Table aclTable = ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { if (needHandleTableHdfsAcl(currentDescriptor, "modifyTable " + tableName) - && !hdfsAclHelper.isTableUserScanSnapshotEnabled(oldDescriptor)) { + && !hdfsAclHelper.isAclSyncToHdfsEnabled(oldDescriptor)) { // 1. Create table directories used for acl inherited hdfsAclHelper.createTableDirectories(tableName); // 2. Add table users HDFS acls @@ -264,7 +268,7 @@ public void postModifyTable(ObserverContext ctx, SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(ctx.getEnvironment().getConnection(), tableUsers, tableName); } else if (needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName) - && !hdfsAclHelper.isTableUserScanSnapshotEnabled(currentDescriptor)) { + && !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor)) { // 1. Remove empty table directories List tableRootPaths = hdfsAclHelper.getTableRootPaths(tableName, false); for (Path path : tableRootPaths) { @@ -290,17 +294,24 @@ public void postModifyTable(ObserverContext ctx, public void postDeleteNamespace(ObserverContext ctx, String namespace) throws IOException { if (checkInitialized("deleteNamespace " + namespace)) { - // 1. Record namespace user acl is not synced to HDFS - SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(ctx.getEnvironment().getConnection(), - namespace); - // 2. Delete tmp namespace directory - /** - * Delete namespace tmp directory because it's created by this coprocessor when namespace is - * created to make namespace default acl can be inherited by tables. The namespace data - * directory is deleted by DeleteNamespaceProcedure, the namespace archive directory is - * deleted by HFileCleaner. 
- */ - hdfsAclHelper.deleteEmptyDir(pathHelper.getTmpNsDir(namespace)); + try (Table aclTable = + ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + // 1. Delete namespace archive dir default ACLs + Set users = SnapshotScannerHDFSAclStorage.getEntryUsers(aclTable, + PermissionStorage.toNamespaceEntry(Bytes.toBytes(namespace))); + hdfsAclHelper.removeNamespaceDefaultAcl(namespace, users); + // 2. Record namespace user acl is not synced to HDFS + SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(ctx.getEnvironment().getConnection(), + namespace); + // 3. Delete tmp namespace directory + /** + * Delete namespace tmp directory because it's created by this coprocessor when namespace is + * created to make namespace default acl can be inherited by tables. The namespace data + * directory is deleted by DeleteNamespaceProcedure, the namespace archive directory is + * deleted by HFileCleaner. + */ + hdfsAclHelper.deleteEmptyDir(pathHelper.getTmpNsDir(namespace)); + } } } @@ -364,7 +375,9 @@ public void postGrant(ObserverContext c, UserPermission tPerm = getUserTablePermission(conf, userName, tableName); if (tPerm != null && hdfsAclHelper.containReadAction(tPerm)) { if (!isHdfsAclSet(aclTable, userName, tableName)) { - // 1. Add HDFS acl + // 1. create table dirs + hdfsAclHelper.createTableDirectories(tableName); + // 2. Add HDFS acl hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), new HashSet<>(0)); } // 2. Record table acl is synced to HDFS @@ -547,13 +560,13 @@ private boolean needHandleTableHdfsAcl(TablePermission tablePermission) throws I private boolean needHandleTableHdfsAcl(TableName tableName, String operation) throws IOException { return !tableName.isSystemTable() && checkInitialized(operation) && hdfsAclHelper - .isTableUserScanSnapshotEnabled(masterServices.getTableDescriptors().get(tableName)); + .isAclSyncToHdfsEnabled(masterServices.getTableDescriptors().get(tableName)); } private boolean needHandleTableHdfsAcl(TableDescriptor tableDescriptor, String operation) { TableName tableName = tableDescriptor.getTableName(); return !tableName.isSystemTable() && checkInitialized(operation) - && hdfsAclHelper.isTableUserScanSnapshotEnabled(tableDescriptor); + && hdfsAclHelper.isAclSyncToHdfsEnabled(tableDescriptor); } private User getActiveUser(ObserverContext ctx) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java index 60d91558ed51..6cf1916efaec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java @@ -28,6 +28,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -53,6 +54,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -71,23 +73,23 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { private static final Logger LOG = 
LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class); - public static final String USER_SCAN_SNAPSHOT_ENABLE = "hbase.user.scan.snapshot.enable"; - public static final String USER_SCAN_SNAPSHOT_THREAD_NUMBER = - "hbase.user.scan.snapshot.thread.number"; + public static final String ACL_SYNC_TO_HDFS_ENABLE = "hbase.acl.sync.to.hdfs.enable"; + public static final String ACL_SYNC_TO_HDFS_THREAD_NUMBER = + "hbase.acl.sync.to.hdfs.thread.number"; // The tmp directory to restore snapshot, it can not be a sub directory of HBase root dir public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir"; public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT = "/hbase/.tmpdir-to-restore-snapshot"; // The default permission of the common directories if the feature is enabled. public static final String COMMON_DIRECTORY_PERMISSION = - "hbase.user.scan.snapshot.common.directory.permission"; + "hbase.acl.sync.to.hdfs.common.directory.permission"; // The secure HBase permission is 700, 751 means all others have execute access and the mask is // set to read-execute to make the extended access ACL entries can work. Be cautious to set // this value. public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751"; // The default permission of the snapshot restore directories if the feature is enabled. public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION = - "hbase.user.scan.snapshot.restore.directory.permission"; + "hbase.acl.sync.to.hdfs.restore.directory.permission"; // 753 means all others have write-execute access. public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753"; @@ -102,7 +104,7 @@ public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection conn this.conf = configuration; this.pathHelper = new PathHelper(conf); this.fs = pathHelper.getFileSystem(); - this.pool = Executors.newFixedThreadPool(conf.getInt(USER_SCAN_SNAPSHOT_THREAD_NUMBER, 10), + this.pool = Executors.newFixedThreadPool(conf.getInt(ACL_SYNC_TO_HDFS_THREAD_NUMBER, 10), new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build()); this.admin = connection.getAdmin(); } @@ -230,6 +232,50 @@ public boolean removeNamespaceAccessAcl(TableName tableName, Set removeU } } + /** + * Remove default acl from namespace archive dir when delete namespace + * @param namespace the namespace + * @param removeUsers the users whose default acl will be removed + * @return false if an error occurred, otherwise true + */ + public boolean removeNamespaceDefaultAcl(String namespace, Set removeUsers) { + try { + long start = System.currentTimeMillis(); + Path archiveNsDir = pathHelper.getArchiveNsDir(namespace); + HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers, + HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); + operation.handleAcl(); + LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace, + System.currentTimeMillis() - start); + return true; + } catch (Exception e) { + LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e); + return false; + } + } + + /** + * Remove default acl from table archive dir when delete table + * @param tableName the table name + * @param removeUsers the users whose default acl will be removed + * @return false if an error occurred, otherwise true + */ + public boolean removeTableDefaultAcl(TableName tableName, Set removeUsers) { + try { + long start = System.currentTimeMillis(); + Path archiveTableDir = 
pathHelper.getArchiveTableDir(tableName); + HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers, + HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); + operation.handleAcl(); + LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName, + System.currentTimeMillis() - start); + return true; + } catch (Exception e) { + LOG.error("Remove HDFS acl error when delete table {}", tableName, e); + return false; + } + } + /** * Add table user acls * @param tableName the table @@ -349,7 +395,7 @@ private void handleNamespaceAcl(Set namespaces, Set users, Set tables = new HashSet<>(); for (String namespace : namespaces) { tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream() - .filter(this::isTableUserScanSnapshotEnabled).map(TableDescriptor::getTableName) + .filter(this::isAclSyncToHdfsEnabled).map(TableDescriptor::getTableName) .collect(Collectors.toSet())); } handleTableAcl(tables, users, skipNamespaces, skipTables, operationType); @@ -403,7 +449,7 @@ void createTableDirectories(TableName tableName) throws IOException { * return paths that user will global permission will visit * @return the path list */ - private List getGlobalRootPaths() { + List getGlobalRootPaths() { return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(), pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir()); } @@ -511,9 +557,20 @@ boolean isNotFamilyOrQualifierPermission(TablePermission tablePermission) { return !tablePermission.hasFamily() && !tablePermission.hasQualifier(); } - boolean isTableUserScanSnapshotEnabled(TableDescriptor tableDescriptor) { + public static boolean isAclSyncToHdfsEnabled(Configuration conf) { + String[] masterCoprocessors = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); + Set masterCoprocessorSet = new HashSet<>(); + if (masterCoprocessors != null) { + Collections.addAll(masterCoprocessorSet, masterCoprocessors); + } + return conf.getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false) + && masterCoprocessorSet.contains(SnapshotScannerHDFSAclController.class.getName()) + && masterCoprocessorSet.contains(AccessController.class.getName()); + } + + boolean isAclSyncToHdfsEnabled(TableDescriptor tableDescriptor) { return tableDescriptor == null ? 
false - : Boolean.valueOf(tableDescriptor.getValue(USER_SCAN_SNAPSHOT_ENABLE)); + : Boolean.valueOf(tableDescriptor.getValue(ACL_SYNC_TO_HDFS_ENABLE)); } PathHelper getPathHelper() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index abf1fd831f9d..46a0669e5c52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -453,7 +453,8 @@ private boolean checkHFilesCountPerRegionPerFamily( } /** - * @param table the table to load into + * @param conn the HBase cluster connection + * @param tableName the table name of the table to load into * @param pool the ExecutorService * @param queue the queue for LoadQueueItem * @param startEndKeys start and end keys diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java index c541cc0a80d6..fdbacbda2779 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java @@ -34,7 +34,6 @@ /** * Key for WAL Entry. - * Read-only. No Setters. For limited audience such as Coprocessors. */ @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.REPLICATION, HBaseInterfaceAudience.COPROC}) @@ -86,6 +85,13 @@ default long getNonce() { */ long getOrigLogSeqNum(); + /** + * Add a named String value to this WALKey to be persisted into the WAL + * @param attributeKey Name of the attribute + * @param attributeValue Value of the attribute + */ + void addExtendedAttribute(String attributeKey, byte[] attributeValue); + /** * Return a named String value injected into the WALKey during processing, such as by a * coprocessor diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java index fc84d8e24526..33e034342d7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java @@ -195,6 +195,37 @@ public WALKeyImpl(final byte[] encodedRegionName, mvcc, null, null); } + /** + * Copy constructor that takes in an existing WALKeyImpl plus some extended attributes. + * Intended for coprocessors to add annotations to a system-generated WALKey + * for persistence to the WAL. + * @param key Key to be copied into this new key + * @param extendedAttributes Extra attributes to copy into the new key + */ + public WALKeyImpl(WALKeyImpl key, + Map extendedAttributes){ + init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), + key.getWriteTime(), key.getClusterIds(), key.getNonceGroup(), key.getNonce(), + key.getMvcc(), key.getReplicationScopes(), extendedAttributes); + + } + + /** + * Copy constructor that takes in an existing WALKey, the extra WALKeyImpl fields that the + * parent interface is missing, plus some extended attributes. Intended + * for coprocessors to add annotations to a system-generated WALKey for + * persistence to the WAL. 
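As a rough illustration of the new preWALAppend hook and WALKey#addExtendedAttribute (mirroring what SimpleRegionObserver does in the tests further down), a region observer can stamp every user write with an extended attribute before it is persisted to the WAL. The class, attribute name and value below are invented for the example.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;

public class WalAttributeObserver implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> ctx, WALKey key,
      WALEdit edit) throws IOException {
    // The attribute travels with the WAL entry and can be read back later
    // through WALKey#getExtendedAttribute, for example by a replication endpoint.
    key.addExtendedAttribute("origin", Bytes.toBytes("cluster-a"));
  }
}

A downstream consumer holding the same WALKey could then recover the value with key.getExtendedAttribute("origin").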
+ */ + public WALKeyImpl(WALKey key, + List clusterIds, + MultiVersionConcurrencyControl mvcc, + final NavigableMap replicationScopes, + Map extendedAttributes){ + init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), + key.getWriteTime(), clusterIds, key.getNonceGroup(), key.getNonce(), + mvcc, replicationScopes, extendedAttributes); + + } /** * Create the log key for writing to somewhere. * We maintain the tablename mainly for debugging purposes. @@ -464,6 +495,14 @@ public UUID getOriginatingClusterId(){ return clusterIds.isEmpty()? HConstants.DEFAULT_CLUSTER_ID: clusterIds.get(0); } + @Override + public void addExtendedAttribute(String attributeKey, byte[] attributeValue){ + if (extendedAttributes == null){ + extendedAttributes = new HashMap(); + } + extendedAttributes.put(attributeKey, attributeValue); + } + @Override public byte[] getExtendedAttribute(String attributeKey){ return extendedAttributes != null ? extendedAttributes.get(attributeKey) : null; diff --git a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp index 183740bb4e20..a003e5fe096f 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/hbck.jsp @@ -23,11 +23,11 @@ import="java.util.Date" import="java.util.List" import="java.util.Map" - import="java.util.Set" import="java.util.stream.Collectors" import="java.time.ZonedDateTime" import="java.time.format.DateTimeFormatter" %> +<%@ page import="org.apache.hadoop.fs.Path" %> <%@ page import="org.apache.hadoop.hbase.client.RegionInfo" %> <%@ page import="org.apache.hadoop.hbase.master.HbckChore" %> <%@ page import="org.apache.hadoop.hbase.master.HMaster" %> @@ -42,7 +42,7 @@ HbckChore hbckChore = master.getHbckChore(); Map>> inconsistentRegions = null; Map orphanRegionsOnRS = null; - Set orphanRegionsOnFS = null; + Map orphanRegionsOnFS = null; long startTimestamp = 0; long endTimestamp = 0; if (hbckChore != null) { @@ -80,7 +80,11 @@

@@ -106,7 +110,7 @@ - + @@ -132,7 +136,7 @@ <% if (orphanRegionsOnRS != null && orphanRegionsOnRS.size() > 0) { %>
-          <th>Region</th>
+          <th>Region Encoded Name</th>
           <th>Location in META</th>
           <th>Reported Online RegionServers</th>
- + <% for (Map.Entry entry : orphanRegionsOnRS.entrySet()) { %> @@ -155,11 +159,13 @@ <% if (orphanRegionsOnFS != null && orphanRegionsOnFS.size() > 0) { %>
-          <th>Region</th>
+          <th>Region Encoded Name</th>
           <th>Reported Online RegionServer</th>
- + + - <% for (String region : orphanRegionsOnFS) { %> + <% for (Map.Entry entry : orphanRegionsOnFS.entrySet()) { %> - + + <% } %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 6b4427ebb201..fb1d513651de 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -726,9 +726,62 @@ Actions: diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 09f93e9edaa5..973cfb0a85e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -2039,16 +2039,16 @@ public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal } /** - * @param tableName - * @param startKey - * @param stopKey - * @param callingMethod - * @param conf - * @param isReadOnly - * @param families - * @throws IOException - * @return A region on which you must call - {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. + * @param tableName the name of the table + * @param startKey the start key of the region + * @param stopKey the stop key of the region + * @param callingMethod the name of the calling method probably a test method + * @param conf the configuration to use + * @param isReadOnly {@code true} if the table is read only, {@code false} otherwise + * @param families the column families to use + * @throws IOException if an IO problem is encountered + * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} + * when done. * @deprecated since 2.0.0 and will be removed in 3.0.0. Use * {@link #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)} * instead. @@ -3292,10 +3292,9 @@ public boolean visit(Result r) throws IOException { * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the * regions have been all assigned. Will timeout after default period (30 seconds) * Tolerates nonexistent table. - * @param table Table to wait on. - * @param table - * @throws InterruptedException - * @throws IOException + * @param table the table to wait on. 
+ * @throws InterruptedException if interrupted while waiting + * @throws IOException if an IO problem is encountered */ public void waitTableEnabled(TableName table) throws InterruptedException, IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index 62623b00e5c2..caf0abb03714 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -124,7 +125,11 @@ public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver { final AtomicInteger ctPostStartRegionOperation = new AtomicInteger(0); final AtomicInteger ctPostCloseRegionOperation = new AtomicInteger(0); final AtomicBoolean throwOnPostFlush = new AtomicBoolean(false); + final AtomicInteger ctPreWALAppend = new AtomicInteger(0); + static final String TABLE_SKIPPED = "SKIPPED_BY_PREWALRESTORE"; + Map extendedAttributes = new HashMap(); + static final byte[] WAL_EXTENDED_ATTRIBUTE_BYTES = Bytes.toBytes("foo"); public void setThrowOnPostFlush(Boolean val){ throwOnPostFlush.set(val); @@ -631,6 +636,15 @@ public StoreFileReader postStoreFileReaderOpen(ObserverContext ctx, + WALKey key, WALEdit edit) throws IOException { + ctPreWALAppend.incrementAndGet(); + + key.addExtendedAttribute(Integer.toString(ctPreWALAppend.get()), + Bytes.toBytes("foo")); + } + public boolean hadPreGet() { return ctPreGet.get() > 0; } @@ -864,6 +878,10 @@ public int getCtPostWALRestore() { return ctPostWALRestore.get(); } + public int getCtPreWALAppend() { + return ctPreWALAppend.get(); + } + public boolean wasStoreFileReaderOpenCalled() { return ctPreStoreFileReaderOpen.get() > 0 && ctPostStoreFileReaderOpen.get() > 0; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index ad702e0876ca..0b8aa3a692ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.lang.reflect.Method; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Optional; import org.apache.hadoop.conf.Configuration; @@ -43,6 +44,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -55,6 +57,8 @@ import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.filter.FilterAllFilter; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; @@ -70,6 
+74,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; +import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.tool.BulkLoadHFiles; @@ -77,13 +82,18 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -99,6 +109,7 @@ public class TestRegionObserverInterface { private static final Logger LOG = LoggerFactory.getLogger(TestRegionObserverInterface.class); public static final TableName TEST_TABLE = TableName.valueOf("TestTable"); + public static final byte[] FAMILY = Bytes.toBytes("f"); public final static byte[] A = Bytes.toBytes("a"); public final static byte[] B = Bytes.toBytes("b"); public final static byte[] C = Bytes.toBytes("c"); @@ -663,6 +674,97 @@ public void testPreWALRestoreSkip() throws Exception { table.close(); } + //called from testPreWALAppendIsWrittenToWAL + private void testPreWALAppendHook(Table table, TableName tableName) throws IOException { + int expectedCalls = 0; + String [] methodArray = new String[1]; + methodArray[0] = "getCtPreWALAppend"; + Object[] resultArray = new Object[1]; + + Put p = new Put(ROW); + p.addColumn(A, A, A); + table.put(p); + resultArray[0] = ++expectedCalls; + verifyMethodResult(SimpleRegionObserver.class, methodArray, tableName, resultArray); + + Append a = new Append(ROW); + a.addColumn(B, B, B); + table.append(a); + resultArray[0] = ++expectedCalls; + verifyMethodResult(SimpleRegionObserver.class, methodArray, tableName, resultArray); + + Increment i = new Increment(ROW); + i.addColumn(C, C, 1); + table.increment(i); + resultArray[0] = ++expectedCalls; + verifyMethodResult(SimpleRegionObserver.class, methodArray, tableName, resultArray); + + Delete d = new Delete(ROW); + table.delete(d); + resultArray[0] = ++expectedCalls; + verifyMethodResult(SimpleRegionObserver.class, methodArray, tableName, resultArray); + } + + @Test + public void testPreWALAppend() throws Exception { + SimpleRegionObserver sro = new SimpleRegionObserver(); + ObserverContext ctx = Mockito.mock(ObserverContext.class); + WALKey key = new WALKeyImpl(Bytes.toBytes("region"), TEST_TABLE, + EnvironmentEdgeManager.currentTime()); + WALEdit edit = new WALEdit(); + sro.preWALAppend(ctx, key, edit); + Assert.assertEquals(1, key.getExtendedAttributes().size()); + Assert.assertArrayEquals(SimpleRegionObserver.WAL_EXTENDED_ATTRIBUTE_BYTES, + key.getExtendedAttribute(Integer.toString(sro.getCtPreWALAppend()))); + } + + @Test + public void testPreWALAppendIsWrittenToWAL() throws Exception { + final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + + "." 
+ name.getMethodName()); + Table table = util.createTable(tableName, new byte[][] { A, B, C }); + + PreWALAppendWALActionsListener listener = new PreWALAppendWALActionsListener(); + List regions = util.getHBaseCluster().getRegions(tableName); + //should be only one region + HRegion region = regions.get(0); + region.getWAL().registerWALActionsListener(listener); + testPreWALAppendHook(table, tableName); + boolean[] expectedResults = {true, true, true, true}; + Assert.assertArrayEquals(expectedResults, listener.getWalKeysCorrectArray()); + + } + + @Test + public void testPreWALAppendNotCalledOnMetaEdit() throws Exception { + final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + + "." + name.getMethodName()); + TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName); + ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY); + tdBuilder.setColumnFamily(cfBuilder.build()); + tdBuilder.setCoprocessor(SimpleRegionObserver.class.getName()); + TableDescriptor td = tdBuilder.build(); + Table table = util.createTable(td, new byte[][] { A, B, C }); + + PreWALAppendWALActionsListener listener = new PreWALAppendWALActionsListener(); + List regions = util.getHBaseCluster().getRegions(tableName); + //should be only one region + HRegion region = regions.get(0); + + region.getWAL().registerWALActionsListener(listener); + //flushing should write to the WAL + region.flush(true); + //so should compaction + region.compact(false); + //and so should closing the region + region.close(); + + //but we still shouldn't have triggered preWALAppend because no user data was written + String[] methods = new String[] {"getCtPreWALAppend"}; + Object[] expectedResult = new Integer[]{0}; + verifyMethodResult(SimpleRegionObserver.class, methods, tableName, expectedResult); + } + // check each region whether the coprocessor upcalls are called or not. 
private void verifyMethodResult(Class coprocessor, String methodName[], TableName tableName, Object value[]) throws IOException { @@ -711,4 +813,23 @@ private static void createHFile(Configuration conf, FileSystem fs, Path path, by writer.close(); } } + + private static class PreWALAppendWALActionsListener implements WALActionsListener { + boolean[] walKeysCorrect = {false, false, false, false}; + + @Override + public void postAppend(long entryLen, long elapsedTimeMillis, + WALKey logKey, WALEdit logEdit) throws IOException { + for (int k = 0; k < 4; k++) { + if (!walKeysCorrect[k]) { + walKeysCorrect[k] = Arrays.equals(SimpleRegionObserver.WAL_EXTENDED_ATTRIBUTE_BYTES, + logKey.getExtendedAttribute(Integer.toString(k + 1))); + } + } + } + + boolean[] getWalKeysCorrectArray() { + return walKeysCorrect; + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java index dd9a1c80bedb..1f31b72a903e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java @@ -29,6 +29,8 @@ import static org.junit.Assert.assertEquals; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import java.util.Random; import org.apache.hadoop.conf.Configuration; @@ -58,9 +60,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +@RunWith(Parameterized.class) @Category({ IOTests.class, SmallTests.class }) public class TestHFileScannerImplReferenceCount { @@ -71,6 +78,15 @@ public class TestHFileScannerImplReferenceCount { @Rule public TestName CASE = new TestName(); + @Parameters(name = "{index}: ioengine={0}") + public static Collection data() { + return Arrays.asList(new Object[] { "file" }, new Object[] { "offheap" }, + new Object[] { "mmap" }, new Object[] { "pmem" }); + } + + @Parameter + public String ioengine; + private static final Logger LOG = LoggerFactory.getLogger(TestHFileScannerImplReferenceCount.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -113,12 +129,16 @@ public static void setUpBeforeClass() { @Before public void setUp() throws IOException { + String caseName = CASE.getMethodName().replaceAll("[^a-zA-Z0-9]", "_"); + this.workDir = UTIL.getDataTestDir(caseName); + if (!"offheap".equals(ioengine)) { + ioengine = ioengine + ":" + workDir.toString() + "/cachedata"; + } + UTIL.getConfiguration().set(BUCKET_CACHE_IOENGINE_KEY, ioengine); this.firstCell = null; this.secondCell = null; this.allocator = ByteBuffAllocator.create(UTIL.getConfiguration(), true); this.conf = new Configuration(UTIL.getConfiguration()); - String caseName = CASE.getMethodName(); - this.workDir = UTIL.getDataTestDir(caseName); this.fs = this.workDir.getFileSystem(conf); this.hfilePath = new Path(this.workDir, caseName + System.currentTimeMillis()); LOG.info("Start to write {} cells into hfile: {}, case:{}", CELL_COUNT, hfilePath, caseName); @@ -202,34 +222,34 @@ private void testReleaseBlock(Algorithm compression, DataBlockEncoding encoding) 
scanner.seekTo(firstCell); curBlock = scanner.curBlock; - Assert.assertEquals(curBlock.refCnt(), 2); + this.assertRefCnt(curBlock, 2); // Seek to the block again, the curBlock won't change and won't read from BlockCache. so // refCnt should be unchanged. scanner.seekTo(firstCell); Assert.assertTrue(curBlock == scanner.curBlock); - Assert.assertEquals(curBlock.refCnt(), 2); + this.assertRefCnt(curBlock, 2); prevBlock = curBlock; scanner.seekTo(secondCell); curBlock = scanner.curBlock; - Assert.assertEquals(prevBlock.refCnt(), 2); - Assert.assertEquals(curBlock.refCnt(), 2); + this.assertRefCnt(prevBlock, 2); + this.assertRefCnt(curBlock, 2); // After shipped, the prevBlock will be release, but curBlock is still referenced by the // curBlock. scanner.shipped(); - Assert.assertEquals(prevBlock.refCnt(), 1); - Assert.assertEquals(curBlock.refCnt(), 2); + this.assertRefCnt(prevBlock, 1); + this.assertRefCnt(curBlock, 2); // Try to ship again, though with nothing to client. scanner.shipped(); - Assert.assertEquals(prevBlock.refCnt(), 1); - Assert.assertEquals(curBlock.refCnt(), 2); + this.assertRefCnt(prevBlock, 1); + this.assertRefCnt(curBlock, 2); // The curBlock will also be released. scanner.close(); - Assert.assertEquals(curBlock.refCnt(), 1); + this.assertRefCnt(curBlock, 1); // Finish the block & block2 RPC path Assert.assertTrue(block1.release()); @@ -287,7 +307,7 @@ public void testSeekBefore() throws Exception { curBlock = scanner.curBlock; Assert.assertFalse(curBlock == block2); Assert.assertEquals(1, block2.refCnt()); - Assert.assertEquals(2, curBlock.refCnt()); + this.assertRefCnt(curBlock, 2); prevBlock = scanner.curBlock; // Release the block1, no other reference. @@ -305,22 +325,22 @@ public void testSeekBefore() throws Exception { // the curBlock is read from IOEngine, so a different block. Assert.assertFalse(curBlock == block1); // Two reference for curBlock: 1. scanner; 2. blockCache. - Assert.assertEquals(2, curBlock.refCnt()); + this.assertRefCnt(curBlock, 2); // Reference count of prevBlock must be unchanged because we haven't shipped. - Assert.assertEquals(2, prevBlock.refCnt()); + this.assertRefCnt(prevBlock, 2); // Do the shipped scanner.shipped(); Assert.assertEquals(scanner.prevBlocks.size(), 0); Assert.assertNotNull(scanner.curBlock); - Assert.assertEquals(2, curBlock.refCnt()); - Assert.assertEquals(1, prevBlock.refCnt()); + this.assertRefCnt(curBlock, 2); + this.assertRefCnt(prevBlock, 1); // Do the close scanner.close(); Assert.assertNull(scanner.curBlock); - Assert.assertEquals(1, curBlock.refCnt()); - Assert.assertEquals(1, prevBlock.refCnt()); + this.assertRefCnt(curBlock, 1); + this.assertRefCnt(prevBlock, 1); Assert.assertTrue(defaultBC.evictBlocksByHfileName(hfilePath.getName()) >= 2); Assert.assertEquals(0, curBlock.refCnt()); @@ -340,18 +360,27 @@ public void testSeekBefore() throws Exception { Assert.assertTrue(scanner.seekTo()); curBlock = scanner.curBlock; Assert.assertFalse(curBlock == block1); - Assert.assertEquals(2, curBlock.refCnt()); + this.assertRefCnt(curBlock, 2); // Return false because firstCell <= c[0] Assert.assertFalse(scanner.seekBefore(firstCell)); // The block1 shouldn't be released because we still don't do the shipped or close. 
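The parameterized ioengine values set up above map onto hbase.bucketcache.ioengine settings, and the assertRefCnt helper below lowers the expected count for engines that do not serve blocks from shared memory. A hedged sketch of the configuration forms involved; the path and cache size are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class BucketCacheIoEngineSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "offheap" needs no path; the "file:", "mmap:" and "pmem:" engines take a local
    // path, which is why the test appends ":<workDir>/cachedata" for those cases.
    conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:/tmp/bucketcache.data");
    // Values greater than 1.0 are read as the cache capacity in megabytes.
    conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 1024f);
  }
}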
- Assert.assertEquals(2, curBlock.refCnt()); + this.assertRefCnt(curBlock, 2); scanner.close(); - Assert.assertEquals(1, curBlock.refCnt()); + this.assertRefCnt(curBlock, 1); Assert.assertTrue(defaultBC.evictBlocksByHfileName(hfilePath.getName()) >= 1); Assert.assertEquals(0, curBlock.refCnt()); } + private void assertRefCnt(HFileBlock block, int value) { + if (ioengine.startsWith("offheap") || ioengine.startsWith("pmem")) { + Assert.assertEquals(value, block.refCnt()); + } else { + // there is no RPC ref count if ioengine don't uses shared memory (See HBASE-22802) + Assert.assertEquals(value - 1, block.refCnt()); + } + } + @Test public void testDefault() throws Exception { testReleaseBlock(Algorithm.NONE, DataBlockEncoding.NONE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 211efc0183cc..be0ece469549 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -153,8 +153,8 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface, private final Random random = new Random(); /** - * Map of regions to map of rows and {@link Result}. Used as data source when - * {@link #get(RpcController, GetRequest)} is called. Because we have a byte + * Map of regions to map of rows and {@link Result}. Used as data source when + * {@link #get(RpcController, ClientProtos.GetRequest)} is called. Because we have a byte * key, need to use TreeMap and provide a Comparator. Use * {@link #setGetResult(byte[], byte[], Result)} filling this map. */ @@ -205,10 +205,11 @@ int getThenIncrement() { } /** - * Use this method filling the backing data source used by {@link #get(RpcController, GetRequest)} - * @param regionName - * @param row - * @param r + * Use this method filling the backing data source used by + * {@link #get(RpcController, ClientProtos.GetRequest)} + * @param regionName the region name to assign + * @param row the row key + * @param r the single row result */ void setGetResult(final byte [] regionName, final byte [] row, final Result r) { Map value = this.gets.get(regionName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java index 683b596402cf..a367318dad50 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java @@ -33,7 +33,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.HbckChore; +import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -43,6 +45,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -139,13 +142,44 @@ public void testForUserTable() throws Exception { assertTrue(reportedRegionServers.contains(locationInMeta)); 
assertTrue(reportedRegionServers.contains(anotherServer)); - // Reported right region location. Then not in problematic regions. + // Reported right region location, then not in inconsistent regions. am.reportOnlineRegions(anotherServer, Collections.EMPTY_SET); hbckChore.choreForTesting(); inconsistentRegions = hbckChore.getInconsistentRegions(); assertFalse(inconsistentRegions.containsKey(regionName)); } + @Test + public void testForDisabledTable() throws Exception { + TableName tableName = TableName.valueOf("testForDisabledTable"); + RegionInfo hri = createRegionInfo(tableName, 1); + String regionName = hri.getEncodedName(); + rsDispatcher.setMockRsExecutor(new GoodRsExecutor()); + Future future = submitProcedure(createAssignProcedure(hri)); + waitOnFuture(future); + + List serverNames = master.getServerManager().getOnlineServersList(); + assertEquals(NSERVERS, serverNames.size()); + + hbckChore.choreForTesting(); + Map>> inconsistentRegions = + hbckChore.getInconsistentRegions(); + assertTrue(inconsistentRegions.containsKey(regionName)); + Pair> pair = inconsistentRegions.get(regionName); + ServerName locationInMeta = pair.getFirst(); + List reportedRegionServers = pair.getSecond(); + assertTrue(serverNames.contains(locationInMeta)); + assertEquals(0, reportedRegionServers.size()); + + // Set table state to disabled, then not in inconsistent regions. + TableStateManager tableStateManager = master.getTableStateManager(); + Mockito.when(tableStateManager.isTableState(tableName, TableState.State.DISABLED)). + thenReturn(true); + hbckChore.choreForTesting(); + inconsistentRegions = hbckChore.getInconsistentRegions(); + assertFalse(inconsistentRegions.containsKey(regionName)); + } + @Test public void testOrphanRegionsOnFS() throws Exception { TableName tableName = TableName.valueOf("testOrphanRegionsOnFS"); @@ -158,10 +192,28 @@ public void testOrphanRegionsOnFS() throws Exception { HRegion.createRegionDir(conf, regionInfo, FSUtils.getRootDir(conf)); hbckChore.choreForTesting(); assertEquals(1, hbckChore.getOrphanRegionsOnFS().size()); - assertTrue(hbckChore.getOrphanRegionsOnFS().contains(regionInfo.getEncodedName())); + assertTrue(hbckChore.getOrphanRegionsOnFS().containsKey(regionInfo.getEncodedName())); FSUtils.deleteRegionDir(conf, new HRegionInfo(regionInfo)); hbckChore.choreForTesting(); assertEquals(0, hbckChore.getOrphanRegionsOnFS().size()); } + + @Test + public void testChoreDisable() { + // The way to disable to chore is to set hbase.master.hbck.chore.interval <= 0 + // When the interval is > 0, the chore should run. 
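A tiny sketch of the disable knob exercised by this test; the property name is taken from the comment above, and the assumption here is that any value of zero or below keeps the chore from being scheduled.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class DisableHbckChoreSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per the test below, an interval <= 0 means the HbckChore never runs.
    conf.setInt("hbase.master.hbck.chore.interval", 0);
  }
}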
+ long lastRunTime = hbckChore.getCheckingEndTimestamp(); + hbckChore.choreForTesting(); + boolean ran = lastRunTime != hbckChore.getCheckingEndTimestamp(); + assertTrue(ran); + + // When the interval <= 0, the chore shouldn't run + master.getConfiguration().setInt("hbase.master.hbck.chore.interval", 0); + HbckChore hbckChoreWithChangedConf = new HbckChore(master); + lastRunTime = hbckChoreWithChangedConf.getCheckingEndTimestamp(); + hbckChoreWithChangedConf.choreForTesting(); + ran = lastRunTime != hbckChoreWithChangedConf.getCheckingEndTimestamp(); + assertFalse(ran); + } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java index 4e8abfbdab16..6631a5258bf2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java @@ -133,11 +133,6 @@ public boolean isCompactedAway() { return compactedAway; } - @Override - public long getModificationTimeStamp() { - return getModificationTimestamp(); - } - @Override public long getModificationTimestamp() { return modificationTime; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 0d4915593495..eb3cc7353827 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -104,6 +104,7 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -165,7 +166,6 @@ import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.wal.WALProvider.Writer; import org.apache.hadoop.hbase.wal.WALSplitUtil; -import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.metrics2.MetricsExecutor; import org.junit.After; import org.junit.Assert; @@ -401,6 +401,7 @@ public void testMemstoreSizeAccountingWithFailedPostBatchMutate() throws IOExcep String testName = "testMemstoreSizeAccountingWithFailedPostBatchMutate"; FileSystem fs = FileSystem.get(CONF); Path rootDir = new Path(dir + testName); + ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null); FSHLog hLog = new FSHLog(fs, rootDir, testName, CONF); hLog.init(); region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, @@ -2427,7 +2428,16 @@ public Void answer(InvocationOnMock invocation) throws Throwable { return null; } }).when(mockedCPHost).preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class)); + ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder. 
+ newBuilder(COLUMN_FAMILY_BYTES); + ScanInfo info = new ScanInfo(CONF, builder.build(), Long.MAX_VALUE, + Long.MAX_VALUE, region.getCellComparator()); + Mockito.when(mockedCPHost.preFlushScannerOpen(Mockito.any(HStore.class), + Mockito.any())).thenReturn(info); + Mockito.when(mockedCPHost.preFlush(Mockito.any(), Mockito.any(StoreScanner.class), + Mockito.any())).thenAnswer(i -> i.getArgument(1)); region.setCoprocessorHost(mockedCPHost); + region.put(originalPut); region.setCoprocessorHost(normalCPHost); final long finalSize = region.getDataInMemoryWithoutWAL(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java index 69a48341980a..321b1d34ceb5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java @@ -20,6 +20,9 @@ import static org.apache.hadoop.hbase.security.access.Permission.Action.READ; import static org.apache.hadoop.hbase.security.access.Permission.Action.WRITE; +import static org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController.SnapshotScannerHDFSAclStorage.hasUserGlobalHdfsAcl; +import static org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController.SnapshotScannerHDFSAclStorage.hasUserNamespaceHdfsAcl; +import static org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController.SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -57,7 +60,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; @@ -85,6 +87,7 @@ public class TestSnapshotScannerHDFSAclController { private static Path rootDir = null; private static User unGrantUser = null; private static SnapshotScannerHDFSAclHelper helper; + private static Table aclTable; @BeforeClass public static void setupBeforeClass() throws Exception { @@ -92,7 +95,7 @@ public static void setupBeforeClass() throws Exception { conf.setBoolean("dfs.namenode.acls.enabled", true); conf.set("fs.permissions.umask-mode", "027"); // enable hbase hdfs acl feature - conf.setBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, true); + conf.setBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, true); // enable secure conf.set(User.HBASE_SECURITY_CONF_KEY, "simple"); conf.set(SnapshotScannerHDFSAclHelper.SNAPSHOT_RESTORE_TMP_DIR, @@ -102,9 +105,6 @@ public static void setupBeforeClass() throws Exception { conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) + "," + SnapshotScannerHDFSAclController.class.getName()); - // set hfile cleaner plugin - conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, - SnapshotScannerHDFSAclCleaner.class.getName()); TEST_UTIL.startMiniCluster(); admin = TEST_UTIL.getAdmin(); @@ -140,6 +140,7 @@ public static void setupBeforeClass() throws Exception { SnapshotScannerHDFSAclController coprocessor = TEST_UTIL.getHBaseCluster().getMaster() .getMasterCoprocessorHost().findCoprocessor(SnapshotScannerHDFSAclController.class); 
TEST_UTIL.waitFor(1200000, () -> coprocessor.checkInitialized("check initialized")); + aclTable = admin.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME); } @AfterClass @@ -148,112 +149,185 @@ public static void tearDownAfterClass() throws Exception { } @Test - public void testGrantGlobal() throws Exception { + public void testGrantGlobal1() throws Exception { final String grantUserName = name.getMethodName(); User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {}); + String namespace = name.getMethodName(); + TableName table = TableName.valueOf(namespace, "t1"); + String snapshot1 = namespace + "s1"; + String snapshot2 = namespace + "s2"; - String namespace1 = name.getMethodName(); - String namespace2 = namespace1 + "2"; - String namespace3 = namespace1 + "3"; - TableName table1 = TableName.valueOf(namespace1, "t1"); - TableName table12 = TableName.valueOf(namespace1, "t2"); - TableName table21 = TableName.valueOf(namespace2, "t21"); - TableName table3 = TableName.valueOf(namespace3, "t3"); - TableName table31 = TableName.valueOf(namespace3, "t31"); - String snapshot1 = namespace1 + "t1"; - String snapshot12 = namespace1 + "t12"; - String snapshot2 = namespace1 + "t2"; - String snapshot21 = namespace2 + "t21"; - String snapshot3 = namespace1 + "t3"; - String snapshot31 = namespace1 + "t31"; - - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); - admin.snapshot(snapshot1, table1); - - // case 1: grant G(R) -> grant G(W) -> grant G(R) + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table); + admin.snapshot(snapshot1, table); + // grant G(R) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); + assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName)); + // grant G(W) with merging existing permissions admin.grant( new UserPermission(grantUserName, Permission.newBuilder().withActions(WRITE).build()), true); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); + assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName)); + // grant G(W) without merging SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1); + assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName)); + // grant G(R) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); - admin.snapshot(snapshot12, table1); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot12, 6); + // take a snapshot and ACLs are inherited automatically + admin.snapshot(snapshot2, table); + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 6); + assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName)); + } + + @Test + public void testGrantGlobal2() throws Exception { + final String grantUserName = name.getMethodName(); + User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {}); + String namespace1 = name.getMethodName(); + TableName table1 = TableName.valueOf(namespace1, "t1"); + String namespace2 = namespace1 + "2"; + TableName table2 = TableName.valueOf(namespace2, "t2"); + String snapshot1 = namespace1 + "s1"; + String snapshot2 = namespace2 + "s2"; - // case 2: grant G(R),N(R) -> G(W) + // grant G(R), grant namespace1(R) + SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); + // create table in namespace1 and snapshot + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); + 
admin.snapshot(snapshot1, table1); admin.grant(new UserPermission(grantUserName, Permission.newBuilder(namespace1).withActions(READ).build()), false); + // grant G(W) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE); - // table in ns1 - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table12); - admin.snapshot(snapshot2, table12); - // table in ns2 - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table21); - admin.snapshot(snapshot21, table21); + // create table in namespace2 and snapshot + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); + admin.snapshot(snapshot2, table2); + // check scan snapshot TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 6); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot21, -1); + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1); + assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName)); + assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace1)); + assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace2)); + checkUserAclEntry(helper.getGlobalRootPaths(), grantUserName, false, false); + checkUserAclEntry(helper.getNamespaceRootPaths(namespace1), grantUserName, true, true); + checkUserAclEntry(helper.getNamespaceRootPaths(namespace2), grantUserName, false, false); + } - // case 3: grant G(R),T(R) -> G(W) + @Test + public void testGrantGlobal3() throws Exception { + final String grantUserName = name.getMethodName(); + User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {}); + String namespace = name.getMethodName(); + TableName table1 = TableName.valueOf(namespace, "t1"); + TableName table2 = TableName.valueOf(namespace, "t2"); + String snapshot1 = namespace + "s1"; + String snapshot2 = namespace + "s2"; + // grant G(R) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table3); - admin.snapshot(snapshot3, table3); - TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table3, READ); + // grant table1(R) + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); + admin.snapshot(snapshot1, table1); + TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ); + // grant G(W) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, WRITE); - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table31); - admin.snapshot(snapshot31, table31); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot3, 6); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot31, -1); + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); + admin.snapshot(snapshot2, table2); + // check scan snapshot + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1); + assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName)); + assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace)); + assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1)); + assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table2)); + checkUserAclEntry(helper.getGlobalRootPaths(), grantUserName, false, false); + checkUserAclEntry(helper.getTableRootPaths(table2, false), grantUserName, false, false); + checkUserAclEntry(helper.getTableRootPaths(table1, false), grantUserName, true, true); } @Test - public void testGrantNamespace() throws Exception { + public void testGrantNamespace1() 
throws Exception { final String grantUserName = name.getMethodName(); User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {}); - String namespace = name.getMethodName(); - TableName table = TableName.valueOf(namespace, "t1"); + TableName table1 = TableName.valueOf(namespace, "t1"); TableName table2 = TableName.valueOf(namespace, "t2"); - TableName table3 = TableName.valueOf(namespace, "t3"); - String snapshot = namespace + "t1"; - String snapshot2 = namespace + "t2"; - String snapshot3 = namespace + "t3"; - - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table); - admin.snapshot(snapshot, table); + String snapshot1 = namespace + "s1"; + String snapshot2 = namespace + "s2"; - // case 1: grant N(R) -> grant N(W) -> grant N(R) + // create table1 and snapshot + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); + admin.snapshot(snapshot1, table1); + // grant N(R) SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ); - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table3); - admin.snapshot(snapshot3, table3); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot3, 6); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, unGrantUser, snapshot, -1); + // create table2 and snapshot, ACLs can be inherited automatically + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); + admin.snapshot(snapshot2, table2); + // check scan snapshot + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 6); + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, unGrantUser, snapshot1, -1); + assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace)); + assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table1)); + checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, true, true); + // grant N(W) SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, WRITE); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, -1); - SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ); + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1); + assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace)); + checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, false, false); + } + + @Test + public void testGrantNamespace2() throws Exception { + final String grantUserName = name.getMethodName(); + User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {}); + String namespace = name.getMethodName(); + TableName table1 = TableName.valueOf(namespace, "t1"); + String snapshot1 = namespace + "s1"; + + // create table1 and snapshot + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); + admin.snapshot(snapshot1, table1); - // case 2: grant T(R) -> N(W) + // grant N(R) SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ); - TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ); + // grant table1(R) + TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ); + // grant N(W) SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, WRITE); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6); - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); - admin.snapshot(snapshot2, table2); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, 
snapshot2, -1); - TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, WRITE); + // check scan snapshot + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); + assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace)); + checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, true, false); + assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1)); + checkUserAclEntry(helper.getTableRootPaths(table1, false), grantUserName, true, true); + } + + @Test + public void testGrantNamespace3() throws Exception { + final String grantUserName = name.getMethodName(); + User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {}); + String namespace = name.getMethodName(); + TableName table = TableName.valueOf(namespace, "t1"); + String snapshot = namespace + "t1"; - // case 3: grant G(R) -> N(W) + // create table1 and snapshot + TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table); + admin.snapshot(snapshot, table); + // grant namespace(R) SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ); + // grant global(R) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); + // grant namespace(W) SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, WRITE); + // check scan snapshot TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 6); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot3, 6); + assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace)); + checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, true, true); + assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName)); + checkUserAclEntry(helper.getGlobalRootPaths(), grantUserName, true, true); } @Test @@ -262,171 +336,244 @@ public void testGrantTable() throws Exception { User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {}); String namespace = name.getMethodName(); - TableName table = TableName.valueOf(namespace, "t1"); - TableName table2 = TableName.valueOf(namespace, "t2"); - String snapshot = namespace + "t1"; - String snapshot2 = namespace + "t1-2"; - String snapshot3 = namespace + "t2"; + TableName table1 = TableName.valueOf(namespace, "t1"); + String snapshot1 = namespace + "s1"; + String snapshot2 = namespace + "s2"; - try (Table t = TestHDFSAclHelper.createTable(TEST_UTIL, table)) { + try (Table t = TestHDFSAclHelper.createTable(TEST_UTIL, table1)) { TestHDFSAclHelper.put(t); - admin.snapshot(snapshot, table); + admin.snapshot(snapshot1, table1); // table owner can scan table snapshot TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, - User.createUserForTesting(conf, "owner", new String[] {}), snapshot, 6); - // case 1: grant table family(R) - SecureTestUtil.grantOnTable(TEST_UTIL, grantUserName, table, TestHDFSAclHelper.COLUMN1, null, + User.createUserForTesting(conf, "owner", new String[] {}), snapshot1, 6); + // grant table1 family(R) + SecureTestUtil.grantOnTable(TEST_UTIL, grantUserName, table1, TestHDFSAclHelper.COLUMN1, null, READ); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, -1); - // case 2: grant T(R) - TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ); + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1); + + // grant table1(R) + TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ); TestHDFSAclHelper.put2(t); - 
admin.snapshot(snapshot2, table); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6); + admin.snapshot(snapshot2, table1); + // check scan snapshot + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 10); + assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1)); + checkUserAclEntry(helper.getTableRootPaths(table1, false), grantUserName, true, true); } - // create t2 and snapshot - TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2); - admin.snapshot(snapshot3, table2); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot3, -1); - // case 3: grant T(R) -> grant T(W) with merging existing permissions - TEST_UTIL.getAdmin().grant( - new UserPermission(grantUserName, Permission.newBuilder(table).withActions(WRITE).build()), + // grant table1(W) with merging existing permissions + admin.grant( + new UserPermission(grantUserName, Permission.newBuilder(table1).withActions(WRITE).build()), true); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6); + assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1)); + checkUserAclEntry(helper.getTableRootPaths(table1, false), grantUserName, true, true); - // case 4: grant T(R) -> grant T(W) without merging existing permissions - TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, WRITE); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, -1); - TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1); + // grant table1(W) without merging existing permissions + TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, WRITE); + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1); + assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table1)); + checkUserAclEntry(helper.getTableRootPaths(table1, false), grantUserName, false, false); } @Test - public void testRevokeGlobal() throws Exception { + public void testGrantMobTable() throws Exception { final String grantUserName = name.getMethodName(); User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {}); + String namespace = name.getMethodName(); + TableName table = TableName.valueOf(namespace, "t1"); + String snapshot = namespace + "s1"; + + try (Table t = TestHDFSAclHelper.createMobTable(TEST_UTIL, table)) { + TestHDFSAclHelper.put(t); + admin.snapshot(snapshot, table); + TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ); + TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6); + assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table)); + checkUserAclEntry(helper.getTableRootPaths(table, false), grantUserName, true, true); + } + } + @Test + public void testRevokeGlobal1() throws Exception { + final String grantUserName = name.getMethodName(); + User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {}); String namespace = name.getMethodName(); TableName table1 = TableName.valueOf(namespace, "t1"); - TableName table2 = TableName.valueOf(namespace, "t2"); - TableName table3 = TableName.valueOf(namespace, "t3"); String snapshot1 = namespace + "t1"; - String snapshot2 = namespace + "t2"; - String snapshot3 = namespace + "t3"; TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1); admin.snapshot(snapshot1, table1); - // case 1: grant G(R) -> revoke G(R) SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ); SecureTestUtil.revokeGlobal(TEST_UTIL, 
     SecureTestUtil.revokeGlobal(TEST_UTIL, grantUserName, READ);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1);
+    assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
+    checkUserAclEntry(helper.getGlobalRootPaths(), grantUserName, false, false);
+  }
 
-    // case 2: grant G(R), grant N(R), grant T(R) -> revoke G(R)
+  @Test
+  public void testRevokeGlobal2() throws Exception {
+    final String grantUserName = name.getMethodName();
+    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
+
+    String namespace = name.getMethodName();
+    TableName table1 = TableName.valueOf(namespace, "t1");
+    String snapshot1 = namespace + "s1";
+    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
+    admin.snapshot(snapshot1, table1);
+
+    // grant G(R), grant N(R), grant T(R) -> revoke G(R)
     SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
     SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
     TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ);
     SecureTestUtil.revokeGlobal(TEST_UTIL, grantUserName, READ);
+    // check scan snapshot
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
-    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table2);
-    admin.snapshot(snapshot2, table2);
-    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 6);
-    SecureTestUtil.revokeFromNamespace(TEST_UTIL, grantUserName, namespace, READ);
-    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1);
+    assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
+    checkUserAclEntry(helper.getGlobalRootPaths(), grantUserName, false, false);
+    assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
+    checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, true, true);
+  }
+
+  @Test
+  public void testRevokeGlobal3() throws Exception {
+    final String grantUserName = name.getMethodName();
+    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
+
+    String namespace = name.getMethodName();
+    TableName table1 = TableName.valueOf(namespace, "t1");
+    String snapshot1 = namespace + "t1";
+    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
+    admin.snapshot(snapshot1, table1);
 
-    // case 3: grant G(R), grant T(R) -> revoke G(R)
+    // grant G(R), grant T(R) -> revoke G(R)
     SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
+    TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ);
     SecureTestUtil.revokeGlobal(TEST_UTIL, grantUserName, READ);
+    // check scan snapshot
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
-    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1);
-    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table3);
-    admin.snapshot(snapshot3, table3);
-    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot3, -1);
+    assertFalse(hasUserGlobalHdfsAcl(aclTable, grantUserName));
+    checkUserAclEntry(helper.getGlobalRootPaths(), grantUserName, false, false);
+    assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
+    checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, true, false);
+    assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table1));
+    checkUserAclEntry(helper.getTableRootPaths(table1, false), grantUserName, true, true);
   }
 
   @Test
-  public void testRevokeNamespace() throws Exception {
+  public void testRevokeNamespace1() throws Exception {
     String grantUserName = name.getMethodName();
     User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
-
     String namespace = name.getMethodName();
     TableName table1 = TableName.valueOf(namespace, "t1");
-    TableName table2 = TableName.valueOf(namespace, "t2");
-    TableName table3 = TableName.valueOf(namespace, "t3");
-    TableName table4 = TableName.valueOf(namespace, "t4");
-    String snapshot1 = namespace + "t1";
-    String snapshot2 = namespace + "t2";
-    String snapshot3 = namespace + "t3";
-    String snapshot4 = namespace + "t4";
-
+    String snapshot1 = namespace + "s1";
     TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table1);
     admin.snapshot(snapshot1, table1);
-    // case 1: grant N(R) -> revoke N(R)
+    // revoke N(R)
     SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
     admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(namespace).build()));
-    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table3);
-    admin.snapshot(snapshot3, table3);
+    // check scan snapshot
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, -1);
-    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot3, -1);
+    assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
+    checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, false, false);
 
-    // case 2: grant N(R), grant G(R) -> revoke N(R)
+    // grant N(R), grant G(R) -> revoke N(R)
     SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
     SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
     admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(namespace).build()));
-    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table4);
-    admin.snapshot(snapshot4, table4);
+    // check scan snapshot
    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
-    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot4, 6);
-    SecureTestUtil.revokeGlobal(TEST_UTIL, grantUserName, READ);
+    assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
+    checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, true, true);
+  }
 
-    // case 3: grant N(R), grant T(R) -> revoke N(R)
+  @Test
+  public void testRevokeNamespace2() throws Exception {
+    String grantUserName = name.getMethodName();
+    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
+    String namespace = name.getMethodName();
+    TableName table = TableName.valueOf(namespace, "t1");
+    String snapshot = namespace + "s1";
+    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
+    admin.snapshot(snapshot, table);
+
+    // grant N(R), grant T(R) -> revoke N(R)
     SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
-    TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table1, READ);
+    TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
     SecureTestUtil.revokeFromNamespace(TEST_UTIL, grantUserName, namespace, READ);
-    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot1, 6);
-    TestHDFSAclHelper.createTable(TEST_UTIL, table2);
-    admin.snapshot(snapshot2, table2);
-    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1);
+    // check scan snapshot
+    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
+    assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
+    checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, true, false);
+    assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table));
+    checkUserAclEntry(helper.getTableRootPaths(table, false), grantUserName, true, true);
   }
 
   @Test
-  public void testRevokeTable() throws Exception {
+  public void testRevokeTable1() throws Exception {
     final String grantUserName = name.getMethodName();
     User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
-
     String namespace = name.getMethodName();
     TableName table = TableName.valueOf(namespace, "t1");
     String snapshot = namespace + "t1";
-
     TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
     admin.snapshot(snapshot, table);
-    // case 1: grant T(R) -> revoke table family
+    // grant T(R) -> revoke table family
     TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
     SecureTestUtil.revokeFromTable(TEST_UTIL, grantUserName, table, TestHDFSAclHelper.COLUMN1, null,
       READ);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
 
-    // case 2: grant T(R) -> revoke T(R)
+    // grant T(R) -> revoke T(R)
     TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
     admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(table).build()));
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, -1);
+    assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table));
+    checkUserAclEntry(helper.getTableRootPaths(table, false), grantUserName, false, false);
+  }
+
+  @Test
+  public void testRevokeTable2() throws Exception {
+    final String grantUserName = name.getMethodName();
+    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
+    String namespace = name.getMethodName();
+    TableName table = TableName.valueOf(namespace, "t1");
+    String snapshot = namespace + "t1";
+    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
+    admin.snapshot(snapshot, table);
 
-    // case 3: grant T(R), grant N(R) -> revoke T(R)
+    // grant T(R), grant N(R) -> revoke T(R)
     TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
     SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
     admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(table).build()));
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
-    SecureTestUtil.revokeFromNamespace(TEST_UTIL, grantUserName, namespace, READ);
+    assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table));
+    checkUserAclEntry(helper.getTableRootPaths(table, false), grantUserName, true, true);
+    assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
+    checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, true, true);
+  }
+
+  @Test
+  public void testRevokeTable3() throws Exception {
+    final String grantUserName = name.getMethodName();
+    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
+    String namespace = name.getMethodName();
+    TableName table = TableName.valueOf(namespace, "t1");
+    String snapshot = namespace + "t1";
+    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
+    admin.snapshot(snapshot, table);
 
-    // case 4: grant T(R), grant G(R) -> revoke T(R)
+    // grant T(R), grant G(R) -> revoke T(R)
     TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
     SecureTestUtil.grantGlobal(TEST_UTIL, grantUserName, READ);
     admin.revoke(new UserPermission(grantUserName, Permission.newBuilder(table).build()));
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
-    SecureTestUtil.revokeGlobal(TEST_UTIL, grantUserName, READ);
-    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, -1);
+    assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table));
+    checkUserAclEntry(helper.getTableRootPaths(table, false), grantUserName, true, true);
+    assertTrue(hasUserGlobalHdfsAcl(aclTable, grantUserName));
+    checkUserAclEntry(helper.getGlobalRootPaths(), grantUserName, true, true);
   }
 
   @Test
@@ -438,8 +585,8 @@ public void testTruncateTable() throws Exception {
 
     String namespace = name.getMethodName();
     TableName tableName = TableName.valueOf(namespace, "t1");
-    String snapshot = namespace + "t1";
-    String snapshot2 = namespace + "t1-2";
+    String snapshot = namespace + "s1";
+    String snapshot2 = namespace + "s2";
     try (Table t = TestHDFSAclHelper.createTable(TEST_UTIL, tableName)) {
       TestHDFSAclHelper.put(t);
       // snapshot
@@ -454,10 +601,16 @@ public void testTruncateTable() throws Exception {
       TestHDFSAclHelper.put2(t);
       // snapshot
       admin.snapshot(snapshot2, tableName);
+      // check scan snapshot
       TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
       TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser2, snapshot, 6);
       TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 9);
       TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser2, snapshot2, 9);
+      assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName2, namespace));
+      checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName2, true, true);
+      assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, tableName));
+      checkUserAclEntry(helper.getTableRootPaths(tableName, false), grantUserName, true, true);
+      checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName, true, false);
     }
   }
 
@@ -467,9 +620,9 @@ public void testRestoreSnapshot() throws Exception {
     User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
     String namespace = name.getMethodName();
     TableName table = TableName.valueOf(namespace, "t1");
-    String snapshot = namespace + "t1";
-    String snapshot2 = namespace + "t1-2";
-    String snapshot3 = namespace + "t1-3";
+    String snapshot = namespace + "s1";
+    String snapshot2 = namespace + "s2";
+    String snapshot3 = namespace + "s3";
 
     try (Table t = TestHDFSAclHelper.createTable(TEST_UTIL, table)) {
       TestHDFSAclHelper.put(t);
@@ -488,6 +641,8 @@ public void testRestoreSnapshot() throws Exception {
       admin.snapshot(snapshot2, table);
       TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
       TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 10);
+      assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table));
+      checkUserAclEntry(helper.getTableRootPaths(table, false), grantUserName, true, true);
 
       // delete
       admin.disableTable(table);
@@ -499,6 +654,10 @@ public void testRestoreSnapshot() throws Exception {
       TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, -1);
       TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1);
       TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot3, -1);
+      assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table));
+      checkUserAclEntry(helper.getPathHelper().getDataTableDir(table), grantUserName, false, false);
+      checkUserAclEntry(helper.getPathHelper().getArchiveTableDir(table), grantUserName, true,
+        false);
     }
   }
 
@@ -507,57 +666,62 @@ public void testDeleteTable() throws Exception {
     String namespace = name.getMethodName();
     String grantUserName1 = namespace + "1";
     String grantUserName2 = namespace + "2";
-    String grantUserName3 = namespace + "3";
     User grantUser1 = User.createUserForTesting(conf, grantUserName1, new String[] {});
     User grantUser2 = User.createUserForTesting(conf, grantUserName2, new String[] {});
-    User grantUser3 = User.createUserForTesting(conf, grantUserName3, new String[] {});
-
-    TableName tableName1 = TableName.valueOf(namespace, "t1");
-    TableName tableName2 = TableName.valueOf(namespace, "t2");
+    TableName table = TableName.valueOf(namespace, "t1");
     String snapshot1 = namespace + "t1";
-    String snapshot2 = namespace + "t2";
 
-    try (Table t = TestHDFSAclHelper.createTable(TEST_UTIL, tableName1);
-        Table t2 = TestHDFSAclHelper.createTable(TEST_UTIL, tableName2)) {
-      TestHDFSAclHelper.put(t);
-      TestHDFSAclHelper.put(t2);
-      // snapshot
-      admin.snapshot(snapshot1, tableName1);
-      admin.snapshot(snapshot2, tableName2);
-      // grant user table permission
-      TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName1, tableName1, READ);
-      TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName2, tableName2, READ);
-      SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName3, namespace, READ);
-      // delete table
-      admin.disableTable(tableName1);
-      admin.deleteTable(tableName1);
-      // grantUser2 and grantUser3 should have data/ns acl
-      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser1, snapshot1, -1);
-      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser2, snapshot2, 6);
-      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser3, snapshot2, 6);
-    }
+
+    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
+    // snapshot
+    admin.snapshot(snapshot1, table);
+    // grant user1 table permission and user2 namespace permission
+    TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName1, table, READ);
+    SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName2, namespace, READ);
+    // delete table
+    admin.disableTable(table);
+    admin.deleteTable(table);
+    // grantUser2 should still have the namespace acl
+    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser1, snapshot1, -1);
+    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser2, snapshot1, 6);
+    assertTrue(hasUserNamespaceHdfsAcl(aclTable, grantUserName2, namespace));
+    checkUserAclEntry(helper.getNamespaceRootPaths(namespace), grantUserName2, true, true);
+    assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName1, table));
+    checkUserAclEntry(helper.getPathHelper().getDataTableDir(table), grantUserName1, false, false);
+    checkUserAclEntry(helper.getPathHelper().getMobTableDir(table), grantUserName1, false, false);
+    checkUserAclEntry(helper.getPathHelper().getArchiveTableDir(table), grantUserName1, true,
+      false);
+
+    // check tmp table directory does not exist
+    Path tmpTableDir = helper.getPathHelper().getTmpTableDir(table);
+    assertFalse(fs.exists(tmpTableDir));
   }
 
   @Test
   public void testDeleteNamespace() throws Exception {
     String grantUserName = name.getMethodName();
     User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
-
     String namespace = name.getMethodName();
-    TableName tableName = TableName.valueOf(namespace, "t1");
+    TableName table = TableName.valueOf(namespace, "t1");
     String snapshot = namespace + "t1";
-    try (Table t = TestHDFSAclHelper.createTable(TEST_UTIL, tableName)) {
-      TestHDFSAclHelper.put(t);
-      // snapshot
-      admin.snapshot(snapshot, tableName);
-      // grant user2 namespace permission
-      SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
-      // truncate table
-      admin.disableTable(tableName);
-      admin.deleteTable(tableName);
-      // snapshot
-      admin.deleteNamespace(namespace);
-      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
-    }
+    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
+    // snapshot
+    admin.snapshot(snapshot, table);
+    // grant namespace permission
+    SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
+    // delete table
+    admin.disableTable(table);
+    admin.deleteTable(table);
+    // delete namespace
+    admin.deleteNamespace(namespace);
+    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
+    assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace));
+    checkUserAclEntry(helper.getPathHelper().getArchiveNsDir(namespace), grantUserName, true,
+      false);
+
+    // check that the tmp and data namespace dirs no longer exist
+    assertFalse(fs.exists(helper.getPathHelper().getTmpNsDir(namespace)));
+    assertFalse(fs.exists(helper.getPathHelper().getDataNsDir(namespace)));
+    // assertFalse(fs.exists(helper.getPathHelper().getMobDataNsDir(namespace)));
   }
 
   @Test
@@ -578,6 +742,7 @@ public void testCleanArchiveTableDir() throws Exception {
     cleaner.choreForTesting();
     Path archiveTableDir = HFileArchiveUtil.getTableArchivePath(rootDir, table);
     assertTrue(fs.exists(archiveTableDir));
+    checkUserAclEntry(helper.getTableRootPaths(table, false), grantUserName, true, true);
 
     // Check SnapshotScannerHDFSAclCleaner method
     assertTrue(SnapshotScannerHDFSAclCleaner.isArchiveTableDir(archiveTableDir));
@@ -589,29 +754,11 @@
   }
 
   @Test
-  public void testGrantMobTable() throws Exception {
-    final String grantUserName = name.getMethodName();
-    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
-
+  public void testModifyTable1() throws Exception {
     String namespace = name.getMethodName();
     TableName table = TableName.valueOf(namespace, "t1");
     String snapshot = namespace + "t1";
-    try (Table t = TestHDFSAclHelper.createMobTable(TEST_UTIL, table)) {
-      TestHDFSAclHelper.put(t);
-      admin.snapshot(snapshot, table);
-      TestHDFSAclHelper.grantOnTable(TEST_UTIL, grantUserName, table, READ);
-      TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6);
-    }
-  }
-
-  @Test
-  public void testModifyTable() throws Exception {
-    String namespace = name.getMethodName();
-    TableName table = TableName.valueOf(namespace, "t1");
-    String snapshot = namespace + "t1";
-    TableName table2 = TableName.valueOf(namespace, "t2");
-
     String tableUserName = name.getMethodName();
     User tableUser = User.createUserForTesting(conf, tableUserName, new String[] {});
     String tableUserName2 = tableUserName + "2";
@@ -647,42 +794,121 @@ public void testModifyTable() throws Exception {
 
     // enable user scan snapshot
     admin.modifyTable(TableDescriptorBuilder.newBuilder(td)
-      .setValue(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, "true").build());
+      .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true").build());
+    // check scan snapshot
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser, snapshot, 6);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser2, snapshot, -1);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser3, snapshot, -1);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, nsUser, snapshot, 6);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, globalUser, snapshot, 6);
-    // disable user scan snapshot
+    // check acl table storage and ACLs in dirs
+    assertTrue(hasUserGlobalHdfsAcl(aclTable, globalUserName));
+    checkUserAclEntry(helper.getGlobalRootPaths(), globalUserName, true, true);
+    assertTrue(hasUserNamespaceHdfsAcl(aclTable, nsUserName, namespace));
+    checkUserAclEntry(helper.getNamespaceRootPaths(namespace), nsUserName, true, true);
+    assertTrue(hasUserTableHdfsAcl(aclTable, tableUserName, table));
+    checkUserAclEntry(helper.getTableRootPaths(table, false), tableUserName, true, true);
+    for (String user : new String[] { tableUserName2, tableUserName3 }) {
+      assertFalse(hasUserTableHdfsAcl(aclTable, user, table));
+      checkUserAclEntry(helper.getTableRootPaths(table, false), user, false, false);
+    }
+  }
+
+  @Test
+  public void testModifyTable2() throws Exception {
+    String namespace = name.getMethodName();
+    TableName table = TableName.valueOf(namespace, "t1");
+    String snapshot = namespace + "t1";
+    TableName table2 = TableName.valueOf(namespace, "t2");
+
+    String tableUserName = name.getMethodName();
+    User tableUser = User.createUserForTesting(conf, tableUserName, new String[] {});
+    String tableUserName2 = tableUserName + "2";
+    User tableUser2 = User.createUserForTesting(conf, tableUserName2, new String[] {});
+    String tableUserName3 = tableUserName + "3";
+    User tableUser3 = User.createUserForTesting(conf, tableUserName3, new String[] {});
+    String nsUserName = tableUserName + "-ns";
+    User nsUser = User.createUserForTesting(conf, nsUserName, new String[] {});
+    String globalUserName = tableUserName + "-global";
+    User globalUser = User.createUserForTesting(conf, globalUserName, new String[] {});
+    String globalUserName2 = tableUserName + "-global-2";
+    User globalUser2 = User.createUserForTesting(conf, globalUserName2, new String[] {});
+
+    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
+    SecureTestUtil.grantGlobal(TEST_UTIL, globalUserName, READ);
+    SecureTestUtil.grantGlobal(TEST_UTIL, globalUserName2, READ);
+    SecureTestUtil.grantOnNamespace(TEST_UTIL, nsUserName, namespace, READ);
+    TestHDFSAclHelper.grantOnTable(TEST_UTIL, tableUserName, table, READ);
+    SecureTestUtil.grantOnTable(TEST_UTIL, tableUserName2, table, TestHDFSAclHelper.COLUMN1, null,
+      READ);
+    TestHDFSAclHelper.grantOnTable(TEST_UTIL, tableUserName3, table, WRITE);
+    SecureTestUtil.grantOnNamespace(TEST_UTIL, tableUserName2, namespace, READ);
     TestHDFSAclHelper.createTable(TEST_UTIL, table2);
     TestHDFSAclHelper.grantOnTable(TEST_UTIL, tableUserName3, table2, READ);
-    admin.modifyTable(TableDescriptorBuilder.newBuilder(td)
-      .setValue(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, "false").build());
+    // disable user scan snapshot
+    admin.modifyTable(TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
+      .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "false").build());
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser, snapshot, -1);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser2, snapshot, -1);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, tableUser3, snapshot, -1);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, nsUser, snapshot, -1);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, globalUser, snapshot, -1);
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, globalUser2, snapshot, -1);
-    List<Path> namespaceRootPaths = helper.getNamespaceRootPaths(namespace);
-    List<Path> tableRootPaths = helper.getTableRootPaths(table, false);
 
     // check access
-    for (Path path : tableRootPaths) {
-      checkUserAclEntry(path, tableUserName, false, false);
-      checkUserAclEntry(path, tableUserName2, false, false);
-      checkUserAclEntry(path, tableUserName3, false, false);
-      checkUserAclEntry(path, nsUserName, false, false);
-      checkUserAclEntry(path, globalUserName, false, false);
-      checkUserAclEntry(path, globalUserName2, false, false);
+    String[] users = new String[] { globalUserName, globalUserName2, nsUserName, tableUserName,
+      tableUserName2, tableUserName3 };
+    for (Path path : helper.getTableRootPaths(table, false)) {
+      for (String user : users) {
+        checkUserAclEntry(path, user, false, false);
+      }
     }
-    for (Path path : namespaceRootPaths) {
+    String[] nsUsers = new String[] { globalUserName, globalUserName2, nsUserName };
+    for (Path path : helper.getNamespaceRootPaths(namespace)) {
       checkUserAclEntry(path, tableUserName, false, false);
       checkUserAclEntry(path, tableUserName2, true, true);
       checkUserAclEntry(path, tableUserName3, true, false);
-      checkUserAclEntry(path, nsUserName, true, true);
-      checkUserAclEntry(path, globalUserName, true, true);
-      checkUserAclEntry(path, globalUserName2, true, true);
+      for (String user : nsUsers) {
+        checkUserAclEntry(path, user, true, true);
+      }
+    }
+    assertTrue(hasUserNamespaceHdfsAcl(aclTable, nsUserName, namespace));
+    assertTrue(hasUserNamespaceHdfsAcl(aclTable, tableUserName2, namespace));
+    assertFalse(hasUserTableHdfsAcl(aclTable, tableUserName, table));
+  }
+
+  @Test
+  public void testRestartMaster() throws Exception {
+    final String grantUserName = name.getMethodName();
+    User grantUser = User.createUserForTesting(conf, grantUserName, new String[] {});
+    String namespace = name.getMethodName();
+    TableName table = TableName.valueOf(namespace, "t1");
+    String snapshot = namespace + "t1";
+    admin.createNamespace(NamespaceDescriptor.create(namespace).build());
+
+    // grant N(R)
+    SecureTestUtil.grantOnNamespace(TEST_UTIL, grantUserName, namespace, READ);
+    // restart cluster and tmp directory will not be deleted
+    TEST_UTIL.getMiniHBaseCluster().shutdown();
+    TEST_UTIL.restartHBaseCluster(1);
+    TEST_UTIL.waitUntilNoRegionsInTransition();
+
+    Path tmpNsDir = helper.getPathHelper().getTmpNsDir(namespace);
+    assertFalse(fs.exists(tmpNsDir));
+
+    // create table and snapshot
+    TestHDFSAclHelper.createTableAndPut(TEST_UTIL, table);
+    admin = TEST_UTIL.getAdmin();
+    aclTable = TEST_UTIL.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME);
+    admin.snapshot(snapshot, table);
+    // TODO fix it in another patch
+    TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, -1);
+  }
+
+  private void checkUserAclEntry(List<Path> paths, String user, boolean requireAccessAcl,
+      boolean requireDefaultAcl) throws Exception {
+    for (Path path : paths) {
+      checkUserAclEntry(path, user, requireAccessAcl, requireDefaultAcl);
     }
   }
 
@@ -703,8 +929,8 @@ private void checkUserAclEntry(Path path, String userName, boolean requireAccess
       }
     }
     String message = "require user: " + userName + ", path: " + path.toString() + " acl";
-    Assert.assertEquals(message, requireAccessAcl, accessAclEntry);
-    Assert.assertEquals(message, requireDefaultAcl, defaultAclEntry);
+    assertEquals(message, requireAccessAcl, accessAclEntry);
+    assertEquals(message, requireDefaultAcl, defaultAclEntry);
   }
 }
@@ -730,7 +956,7 @@ static void createNamespace(HBaseTestingUtility util, String namespace) throws I
   static Table createTable(HBaseTestingUtility util, TableName tableName) throws IOException {
     createNamespace(util, tableName.getNamespaceAsString());
     TableDescriptor td = getTableDescriptorBuilder(util, tableName)
-      .setValue(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, "true").build();
+      .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true").build();
     byte[][] splits = new byte[][] { Bytes.toBytes("2"), Bytes.toBytes("4") };
     return util.createTable(td, splits);
   }
@@ -743,7 +969,7 @@ static Table createMobTable(HBaseTestingUtility util, TableName tableName) throw
       .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN2).setMobEnabled(true)
         .setMobThreshold(0).build())
       .setOwner(User.createUserForTesting(util.getConfiguration(), "owner", new String[] {}))
-      .setValue(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, "true").build();
+      .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true").build();
     byte[][] splits = new byte[][] { Bytes.toBytes("2"), Bytes.toBytes("4") };
     return util.createTable(td, splits);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
index 1d81dd757d6c..df03313f4461 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
@@ -284,7 +284,7 @@ public void testSnapshotFailsOnNonExistantTable() throws Exception {
    * complete on the server by repeatedly checking the master.
    * @param master the master running the snapshot
    * @param snapshot the snapshot to check
-   * @param sleep amount to sleep between checks to see if the snapshot is done
+   * @param timeoutNanos the timeout in nanoseconds between checks to see if the snapshot is done
    */
   private static void waitForSnapshotToComplete(HMaster master,
       SnapshotProtos.SnapshotDescription snapshot, long timeoutNanos) throws Exception {
diff --git a/pom.xml b/pom.xml
index b00f9bbfc734..7652b4ffb23b 100755
--- a/pom.xml
+++ b/pom.xml
@@ -580,6 +580,12 @@
       <email>virag@yahoo-inc.com</email>
       <timezone>-8</timezone>
     </developer>
+    <developer>
+      <id>water</id>
+      <name>Xiang Li</name>
+      <email>xiangli@apache.org</email>
+      <timezone>+8</timezone>
+    </developer>
     <developer>
       <id>wchevreuil</id>
       <name>Wellington Chevreuil</name>
@@ -3761,6 +3767,15 @@
         <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-javadoc-plugin</artifactId>
+          <configuration>
+            <tags>
+              <tag>
+                <name>apiNote</name>
+                <placement>a</placement>
+                <head>API Note:</head>
+              </tag>
+            </tags>
+          </configuration>
<tr><th>Region</th><th>Region Encoded Name</th><th>FileSystem Path</th></tr>
<tr><td><%= region %></td><td><%= entry.getKey() %></td><td><%= entry.getValue() %></td></tr>
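
Note on usage (not part of the patch): the tests above enable or disable HDFS ACL sync for snapshot scanning purely through the table descriptor property SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE (the renamed USER_SCAN_SNAPSHOT_ENABLE). The following is a minimal client-side sketch of opting a table in at creation time, loosely mirroring TestHDFSAclHelper.createTable; the namespace, table name, and column family are illustrative assumptions, not values taken from this patch.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper;

public class CreateAclSyncedTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // hypothetical namespace and table used only for illustration
      admin.createNamespace(NamespaceDescriptor.create("ns").build());
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("ns", "t1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
          // opt this table in to snapshot-scanner HDFS ACL sync, as the tests above do
          .setValue(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, "true")
          .build();
      admin.createTable(td);
    }
  }
}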