HBASE-19761: Fix Checkstyle errors in hbase-zookeeper #72

Closed · wants to merge 19 commits · Changes from all commits
bin/draining_servers.rb (6 changes: 3 additions & 3 deletions)

@@ -86,7 +86,7 @@ def addServers(_options, hostOrServers)
   servers = getServerNames(hostOrServers, config)
 
   zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
-  parentZnode = zkw.znodePaths.drainingZNode
+  parentZnode = zkw.getZNodePaths().drainingZNode
 
   begin
     for server in servers
@@ -103,7 +103,7 @@ def removeServers(_options, hostOrServers)
   servers = getServerNames(hostOrServers, config)
 
   zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
-  parentZnode = zkw.znodePaths.drainingZNode
+  parentZnode = zkw.getZNodePaths().drainingZNode
 
   begin
     for server in servers
@@ -120,7 +120,7 @@ def listServers(_options)
   config = HBaseConfiguration.create
 
   zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
-  parentZnode = zkw.znodePaths.drainingZNode
+  parentZnode = zkw.getZNodePaths().drainingZNode
 
   servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
   servers.each { |server| puts server }
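All three hunks make the same change: callers stop reaching into the znodePaths field and go through the getZNodePaths() accessor instead. For context, a minimal Java sketch of the equivalent call sequence, assuming the HBase 2.x ZKWatcher/ZKUtil/ZNodePaths APIs visible in this diff (the class name and null handling are illustrative, not part of the patch):

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class ListDrainingServers {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (ZKWatcher zkw = new ZKWatcher(conf, "draining_servers", null)) {
      // Accessor instead of direct field access, mirroring the hunks above.
      String parentZnode = zkw.getZNodePaths().drainingZNode;
      List<String> servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode);
      if (servers != null) {            // null when the parent znode is absent
        servers.forEach(System.out::println);
      }
    }
  }
}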
dev-support/hbase-personality.sh (92 changes: 92 additions & 0 deletions)

@@ -180,6 +180,26 @@ function personality_modules
   done
 }
 
+## @description  places where we override the built in assumptions about what tests to run
+## @audience     private
+## @stability    evolving
+## @param        filename of changed file
+function personality_file_tests
+{
+  local filename=$1
+  # If the change is to the refguide, then we don't need any builtin yetus tests
+  # the refguide test (below) will suffice for coverage.
+  if [[ ${filename} =~ src/main/asciidoc ]] ||
+     [[ ${filename} =~ src/main/xslt ]]; then
+    yetus_debug "Skipping builtin yetus checks for ${filename}. refguide test should pick it up."
+  # fallback to checking which tests based on what yetus would do by default
+  elif declare -f "${BUILDTOOL}_builtin_personality_file_tests" >/dev/null; then
+    "${BUILDTOOL}_builtin_personality_file_tests" "${filename}"
+  elif declare -f builtin_personality_file_tests >/dev/null; then
+    builtin_personality_file_tests "${filename}"
+  fi
+}
+
 ## @description  Uses relevant include/exclude env variable to fetch list of included/excluded
 #                tests and sets given variable to arguments to be passes to maven command.
 ## @audience     private
@@ -225,6 +245,74 @@ function get_include_exclude_tests_arg
 
 ###################################################
 
+add_test_type refguide
+
+function refguide_initialize
+{
+  maven_add_install refguide
+}
+
+function refguide_filefilter
+{
+  local filename=$1
+
+  if [[ ${filename} =~ src/main/asciidoc ]] ||
+     [[ ${filename} =~ src/main/xslt ]] ||
+     [[ ${filename} =~ hbase-common/src/main/resources/hbase-default.xml ]]; then
+    add_test refguide
+  fi
+}
+
+function refguide_rebuild
+{
+  local repostatus=$1
+  local logfile="${PATCH_DIR}/${repostatus}-refguide.log"
+  declare -i count
+
+  if ! verify_needed_test refguide; then
+    return 0
+  fi
+
+  big_console_header "Checking we can create the ref guide on ${repostatus}"
+
+  start_clock
+
+  echo_and_redirect "${logfile}" \
+    "${MAVEN}" "${MAVEN_ARGS[@]}" clean site --batch-mode \
+      -pl . \
+      -Dtest=NoUnitTests -DHBasePatchProcess -Prelease \
+      -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dfindbugs.skip=true
+
+  count=$(${GREP} -c '\[ERROR\]' "${logfile}")
+  if [[ ${count} -gt 0 ]]; then
+    add_vote_table -1 refguide "${repostatus} has ${count} errors when building the reference guide."
+    add_footer_table refguide "@@BASE@@/${repostatus}-refguide.log"
+    return 1
+  fi
+
+  if ! mv target/site "${PATCH_DIR}/${repostatus}-site"; then
+    add_vote_table -1 refguide "${repostatus} failed to produce a site directory."
+    add_footer_table refguide "@@BASE@@/${repostatus}-refguide.log"
+    return 1
+  fi
+
+  if [[ ! -f "${PATCH_DIR}/${repostatus}-site/book.html" ]]; then
+    add_vote_table -1 refguide "${repostatus} failed to produce the html version of the reference guide."
+    add_footer_table refguide "@@BASE@@/${repostatus}-refguide.log"
+    return 1
+  fi
+
+  if [[ ! -f "${PATCH_DIR}/${repostatus}-site/apache_hbase_reference_guide.pdf" ]]; then
+    add_vote_table -1 refguide "${repostatus} failed to produce the pdf version of the reference guide."
+    add_footer_table refguide "@@BASE@@/${repostatus}-refguide.log"
+    return 1
+  fi
+
+  add_vote_table 0 refguide "${repostatus} has no errors when building the reference guide. See footer for rendered docs, which you should manually inspect."
+  add_footer_table refguide "@@BASE@@/${repostatus}-site/book.html"
+  return 0
+}
+
 add_test_type shadedjars
 
 
@@ -262,6 +350,8 @@ function shadedjars_rebuild
 
   big_console_header "Checking shaded client builds on ${repostatus}"
 
+  start_clock
+
   echo_and_redirect "${logfile}" \
     "${MAVEN}" "${MAVEN_ARGS[@]}" clean verify -fae --batch-mode \
       -pl hbase-shaded/hbase-shaded-check-invariants -am \
@@ -343,6 +433,8 @@ function hadoopcheck_rebuild
 
   big_console_header "Compiling against various Hadoop versions"
 
+  start_clock
+
   # All supported Hadoop versions that we want to test the compilation with
   # See the Hadoop section on prereqs in the HBase Reference Guide
   hbase_common_hadoop2_versions="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4"
(file header not captured in this view)

@@ -1,5 +1,4 @@
 /**
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -1415,15 +1414,25 @@ Future<Void> modifyTableAsync(TableDescriptor td)
       throws IOException;
 
   /**
+   * <p>
    * Shuts down the HBase cluster.
-   *
+   * </p>
+   * <p>
+   * Notice that, a success shutdown call may ends with an error since the remote server has already
+   * been shutdown.
+   * </p>
    * @throws IOException if a remote or network exception occurs
    */
  void shutdown() throws IOException;
 
   /**
+   * <p>
    * Shuts down the current HBase master only. Does not shutdown the cluster.
-   *
+   * </p>
+   * <p>
+   * Notice that, a success stopMaster call may ends with an error since the remote server has
+   * already been shutdown.
+   * </p>
    * @throws IOException if a remote or network exception occurs
    * @see #shutdown()
    */
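The added javadoc changes how callers should treat errors from shutdown() and stopMaster(): the RPC may fail precisely because the target went down as requested. A hedged sketch of a tolerant caller, using the standard HBase client entry points (the wrapper class and method name are illustrative only):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStopper {
  /** Stop the cluster, tolerating the error a successful shutdown may report. */
  static void stopCluster(Configuration conf) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      try {
        admin.shutdown();
      } catch (IOException e) {
        // Per the javadoc above: the remote server may already be gone, so an
        // error here does not necessarily mean the shutdown failed.
        System.err.println("shutdown() reported: " + e.getMessage());
      }
    }
  }
}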
(file header not captured in this view)

@@ -81,7 +81,7 @@ public static void teardown() throws Exception {
   }
 
   private static void waitUntilZnodeAvailable(int replicaId) throws Exception {
-    String znode = util.getZooKeeperWatcher().znodePaths.getZNodeForReplica(replicaId);
+    String znode = util.getZooKeeperWatcher().getZNodePaths().getZNodeForReplica(replicaId);
     int i = 0;
     while (i < 1000) {
       if (ZKUtil.checkExists(util.getZooKeeperWatcher(), znode) == -1) {
(file header not captured in this view)

@@ -144,7 +144,7 @@ private void testZNodeACLs() throws IOException, KeeperException, InterruptedExc
     ZKWatcher watcher = new ZKWatcher(conf, "IntegrationTestZnodeACLs", null);
     RecoverableZooKeeper zk = ZKUtil.connect(this.conf, watcher);
 
-    String baseZNode = watcher.znodePaths.baseZNode;
+    String baseZNode = watcher.getZNodePaths().baseZNode;
 
     LOG.info("");
     LOG.info("***********************************************************************************");
@@ -160,7 +160,7 @@ private void testZNodeACLs() throws IOException, KeeperException, InterruptedExc
   private void checkZnodePermsRecursive(ZKWatcher watcher,
       RecoverableZooKeeper zk, String znode) throws KeeperException, InterruptedException {
 
-    boolean expectedWorldReadable = watcher.znodePaths.isClientReadable(znode);
+    boolean expectedWorldReadable = watcher.getZNodePaths().isClientReadable(znode);
 
     assertZnodePerms(zk, znode, expectedWorldReadable);
 
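Same accessor migration as the earlier files, here covering baseZNode and isClientReadable(). A small usage sketch, assuming the 2.x ZNodePaths surface where baseZNode is still a public field (the probe class itself is invented for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class ZnodeReadabilityProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (ZKWatcher watcher = new ZKWatcher(conf, "ZnodeReadabilityProbe", null)) {
      String baseZNode = watcher.getZNodePaths().baseZNode;
      // On a secured cluster only znodes flagged client-readable keep
      // world-readable ACLs; the integration test above asserts exactly this.
      boolean worldReadable = watcher.getZNodePaths().isClientReadable(baseZNode);
      System.out.println(baseZNode + " expected world-readable: " + worldReadable);
    }
  }
}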
(file header not captured in this view)

@@ -53,6 +53,7 @@
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
 /**
  * A base for {@link TableInputFormat}s. Receives a {@link Connection}, a {@link TableName},
@@ -291,7 +292,7 @@ public List<InputSplit> getSplits(JobContext context) throws IOException {
    */
   private List<InputSplit> oneInputSplitPerRegion() throws IOException {
     RegionSizeCalculator sizeCalculator =
-        new RegionSizeCalculator(getRegionLocator(), getAdmin());
+        createRegionSizeCalculator(getRegionLocator(), getAdmin());
 
     TableName tableName = getTable().getName();
 
@@ -478,7 +479,8 @@ public List<InputSplit> calculateAutoBalancedSplits(List<InputSplit> splits, lon
       while (j < splits.size()) {
         TableSplit nextRegion = (TableSplit) splits.get(j);
         long nextRegionSize = nextRegion.getLength();
-        if (totalSize + nextRegionSize <= averageRegionSize) {
+        if (totalSize + nextRegionSize <= averageRegionSize
+            && Bytes.equals(splitEndKey, nextRegion.getStartRow())) {
           totalSize = totalSize + nextRegionSize;
           splitEndKey = nextRegion.getEndRow();
           j++;
@@ -586,6 +588,12 @@ protected void initializeTable(Connection connection, TableName tableName) throw
     this.connection = connection;
   }
 
+  @VisibleForTesting
+  protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, Admin admin)
+      throws IOException {
+    return new RegionSizeCalculator(locator, admin);
+  }
+
   /**
    * Gets the scan defining the actual details like columns etc.
    *
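The new protected createRegionSizeCalculator() is a test seam: a test can subclass the input format and return a canned calculator, so getSplits() no longer needs a live cluster to size regions. A hypothetical sketch using Mockito (the subclass name and stubbed size are invented; the RegionSizeCalculator package is assumed from the 2.x source layout, since its import is not shown in this hunk):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
import org.apache.hadoop.hbase.util.RegionSizeCalculator;
import org.mockito.Mockito;

// Hypothetical test double: every region reports a fixed size, so split
// calculation is deterministic and makes no RegionServer RPCs.
public class FixedSizeTableInputFormat extends TableInputFormatBase {
  @Override
  protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, Admin admin)
      throws IOException {
    RegionSizeCalculator mock = Mockito.mock(RegionSizeCalculator.class);
    Mockito.when(mock.getRegionSize(Mockito.any(byte[].class)))
        .thenReturn(1024L * 1024L);  // pretend each region holds 1 MB
    return mock;
  }
}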