HBASE-23312 HBase Thrift SPNEGO configs (HBASE-19852) should be backw… #15

Merged · 13 commits · Dec 2, 2019
Changes from all commits
123 changes: 33 additions & 90 deletions dev-support/create-release/release-build.sh
@@ -55,7 +55,13 @@ EOF
set -e

function cleanup {
rm ${tmp_settings} &> /dev/null || true
echo "Cleaning up temp settings file." >&2
rm "${tmp_settings}" &> /dev/null || true
# If REPO was set, then leave things be. Otherwise if we defined a repo clean it out.
if [[ -z "${REPO}" ]] && [[ -n "${tmp_repo}" ]]; then
echo "Cleaning up temp repo in '${tmp_repo}'. set REPO to reuse downloads." >&2
rm -rf "${tmp_repo}" &> /dev/null || true
fi
}

if [ $# -eq 0 ]; then
@@ -95,12 +101,7 @@ export LANG=C.UTF-8

# Commit ref to checkout when building
GIT_REF=${GIT_REF:-master}

RELEASE_STAGING_LOCATION="https://dist.apache.org/repos/dist/dev/hbase"

GPG="gpg --pinentry-mode loopback -u $GPG_KEY --no-tty --batch"
NEXUS_ROOT=https://repository.apache.org/service/local/staging
NEXUS_PROFILE=8e226b97c0c82 # Profile for project staging uploads via INFRA-17900 Need nexus "staging profile id" for the hbase project
BASE_DIR=$(pwd)

init_java
@@ -146,15 +147,14 @@ git clean -d -f -x
cd ..

tmp_repo="${REPO:-`pwd`/$(mktemp -d hbase-repo-XXXXX)}"
# Reexamine. Not sure this working. Pass as arg? That don't seem to work either!
tmp_settings="/${tmp_repo}/tmp-settings.xml"
echo "<settings><servers>" > $tmp_settings
echo "<server><id>apache.snapshots.https</id><username>$ASF_USERNAME</username>" >> $tmp_settings
echo "<password>$ASF_PASSWORD</password></server>" >> $tmp_settings
echo "<server><id>apache-release</id><username>$ASF_USERNAME</username>" >> $tmp_settings
echo "<password>$ASF_PASSWORD</password></server>" >> $tmp_settings
echo "</servers>" >> $tmp_settings
echo "</settings>" >> $tmp_settings
echo "<settings><servers>" > "$tmp_settings"
echo "<server><id>apache.snapshots.https</id><username>$ASF_USERNAME</username>" >> "$tmp_settings"
echo "<password>$ASF_PASSWORD</password></server>" >> "$tmp_settings"
echo "<server><id>apache.releases.https</id><username>$ASF_USERNAME</username>" >> "$tmp_settings"
echo "<password>$ASF_PASSWORD</password></server>" >> "$tmp_settings"
echo "</servers>" >> "$tmp_settings"
echo "</settings>" >> "$tmp_settings"
export tmp_settings

if [[ "$1" == "build" ]]; then
@@ -216,95 +216,38 @@ if [[ "$1" == "publish-snapshot" ]]; then
fi

if [[ "$1" == "publish-release" ]]; then
(
cd "${PROJECT}"
# Get list of modules from parent pom but filter out 'assembly' modules.
# Used below in a few places.
modules=`sed -n 's/<module>\(.*\)<.*$/\1/p' pom.xml | grep -v '-assembly' | tr '\n' ' '`
# Need to add the 'parent' module too. Its the SECOND artifactId instance in pom
artifactid=`sed -n 's/<artifactId>\(.*\)<.*$/\1/p' pom.xml | tr '\n' ' '| awk '{print $2}'`
modules="${artifactid} ${modules}"
# Get the second groupId in the pom. This is the groupId for these artifacts.
groupid=`sed -n 's/<groupId>\(.*\)<.*$/\1/p' pom.xml | tr '\n' ' '| awk '{print $2}'`
# Convert groupid to a dir path for use below reaching into repo for jars.
groupid_as_dir=`echo $groupid | sed -n 's/\./\//gp'`
echo "pwd=`pwd`, groupid_as_dir=${groupid_as_dir}"
# Publish ${PROJECT} to Maven release repo
echo "Publishing ${PROJECT} checkout at '$GIT_REF' ($git_hash)"
echo "Publish version is $VERSION"
# Coerce the requested version
$MVN versions:set -DnewVersion=$VERSION
MAVEN_OPTS="${MAVEN_OPTS}" ${MVN} --settings $tmp_settings \
clean install -DskipTests \
-Dcheckstyle.skip=true "${PUBLISH_PROFILES}" \
-Dmaven.repo.local="${tmp_repo}"
pushd "${tmp_repo}/${groupid_as_dir}"
# Remove any extra files generated during install
# Remove extraneous files from module subdirs
find $modules -type f | grep -v \.jar | grep -v \.pom | xargs rm -rf

# Using Nexus API documented here:
# https://support.sonatype.com/entries/39720203-Uploading-to-a-Staging-Repository-via-REST-API
declare -a mvn_goals=(clean install)
declare staged_repo_id="dryrun-no-repo"
if ! is_dry_run; then
echo "Creating Nexus staging repository"
repo_request="<promoteRequest><data><description>Apache ${PROJECT} $VERSION (commit $git_hash)</description></data></promoteRequest>"
out=$(curl -X POST -d "$repo_request" -u $ASF_USERNAME:$ASF_PASSWORD \
-H "Content-Type:application/xml" -v \
$NEXUS_ROOT/profiles/$NEXUS_PROFILE/start)
staged_repo_id=$(echo $out | sed -e "s/.*\(orgapachehbase-[0-9]\{4\}\).*/\1/")
echo "Created Nexus staging repository: $staged_repo_id"
mvn_goals=("${mvn_goals[@]}" deploy)
fi
echo "Staging release in nexus"
if ! MAVEN_OPTS="${MAVEN_OPTS}" ${MVN} --settings "$tmp_settings" \
-DskipTests -Dcheckstyle.skip=true "${PUBLISH_PROFILES}" \
-Dmaven.repo.local="${tmp_repo}" \
"${mvn_goals[@]}" > "${BASE_DIR}/mvn_deploy.log"; then
echo "Staging build failed, see 'mvn_deploy.log' for details." >&2
exit 1
fi

# this must have .asc, and .sha1 - it really doesn't like anything else there
for file in $(find $modules -type f)
do
if [[ "$file" == *.asc ]]; then
continue
fi
if [ ! -f $file.asc ]; then
echo "$GPG_PASSPHRASE" | $GPG --passphrase-fd 0 --output "$file.asc" \
--detach-sig --armour $file;
fi
if [ $(command -v md5) ]; then
# Available on OS X; -q to keep only hash
md5 -q "$file" > "$file.md5"
else
# Available on Linux; cut to keep only hash
md5sum "$file" | cut -f1 -d' ' > "$file.md5"
fi
if [ $(command -v sha1sum) ]; then
sha1sum "$file" | cut -f1 -d' ' > "$file.sha1"
else
shasum "$file" | cut -f1 -d' ' > "$file.sha1"
fi
done

if ! is_dry_run; then
nexus_upload=$NEXUS_ROOT/deployByRepositoryId/$staged_repo_id
echo "Uploading files to $nexus_upload"
for file in $(find ${modules} -type f)
do
# strip leading ./
file_short=$(echo $file | sed -e "s/\.\///")
dest_url="$nexus_upload/$groupid_as_dir/$file_short"
echo " Uploading $file to $dest_url"
curl -u "$ASF_USERNAME:$ASF_PASSWORD" --upload-file "${file_short}" "${dest_url}"
done

echo "Closing nexus staging repository"
repo_request="<promoteRequest><data><stagedRepositoryId>$staged_repo_id</stagedRepositoryId><description>Apache ${PROJECT} $VERSION (commit $git_hash)</description></data></promoteRequest>"
out=$(curl -X POST -d "$repo_request" -u $ASF_USERNAME:$ASF_PASSWORD \
-H "Content-Type:application/xml" -v \
$NEXUS_ROOT/profiles/$NEXUS_PROFILE/finish)
echo "Closed Nexus staging repository: $staged_repo_id"
staged_repo_id=$(grep -o "Closing staging repository with ID .*" "${BASE_DIR}/mvn_deploy.log" \
| sed -e 's/Closing staging repository with ID "\([^"]*\)"./\1/')
echo "Artifacts successfully staged to repo ${staged_repo_id}"
else
echo "Artifacts successfully built. not staged due to dry run."
fi

popd
rm -rf "$tmp_repo"
cd ..
# Dump out email to send. Where we find vote.tmpl depends
# on where this script is run from
export PROJECT_TEXT=$(echo "${PROJECT}" | sed "s/-/ /g")
eval "echo \"$(< ${SELF}/vote.tmpl)\"" |tee vote.txt
eval "echo \"$(< ${SELF}/vote.tmpl)\"" |tee "${BASE_DIR}/vote.txt"
)
exit 0
fi
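The publish-release flow above now recovers the staged repository id by grepping mvn_deploy.log rather than parsing a Nexus REST response. Below is a minimal Java sketch of that same extraction, assuming only the log file name and message format used by the script; the class and variable names are illustrative:

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class StagedRepoIdExtractor {
  public static void main(String[] args) throws Exception {
    // Same message the script greps for, e.g.:
    //   Closing staging repository with ID "orgapachehbase-1234".
    Pattern pattern = Pattern.compile("Closing staging repository with ID \"([^\"]*)\"");
    for (String line : Files.readAllLines(Paths.get("mvn_deploy.log"))) {
      Matcher matcher = pattern.matcher(line);
      if (matcher.find()) {
        System.out.println("Artifacts successfully staged to repo " + matcher.group(1));
        return;
      }
    }
    System.err.println("No staging repository id found in mvn_deploy.log");
  }
}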

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java
@@ -18,22 +18,19 @@
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BypassProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BypassProcedureResponse;
@@ -45,6 +42,13 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ScheduleServerCrashProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignsResponse;

import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

import org.apache.yetus.audience.InterfaceAudience;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Use {@link Connection#getHbck()} to obtain an instance of {@link Hbck} instead of
* constructing an HBaseHbck directly.
@@ -98,15 +102,34 @@ public boolean isAborted() {
public TableState setTableStateInMeta(TableState state) throws IOException {
try {
GetTableStateResponse response = hbck.setTableStateInMeta(
rpcControllerFactory.newController(),
RequestConverter.buildSetTableStateInMetaRequest(state));
rpcControllerFactory.newController(),
RequestConverter.buildSetTableStateInMetaRequest(state));
return TableState.convert(state.getTableName(), response.getTableState());
} catch (ServiceException se) {
LOG.debug("table={}, state={}", state.getTableName(), state.getState(), se);
throw new IOException(se);
}
}

@Override
public List<RegionState> setRegionStateInMeta(List<RegionState> states) throws IOException {
try {
if (LOG.isDebugEnabled()) {
states.forEach(s ->
LOG.debug("region={}, state={}", s.getRegion().getRegionName(), s.getState())
);
}
MasterProtos.GetRegionStateInMetaResponse response = hbck.setRegionStateInMeta(
rpcControllerFactory.newController(),
RequestConverter.buildSetRegionStateInMetaRequest(states));
final List<RegionState> result = new ArrayList<>();
response.getStatesList().forEach(s -> result.add(RegionState.convert(s)));
return result;
} catch (ServiceException se) {
throw new IOException(se);
}
}

@Override
public List<Long> assigns(List<String> encodedRegionNames, boolean override)
throws IOException {
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
Expand Down Expand Up @@ -54,6 +55,14 @@ public interface Hbck extends Abortable, Closeable {
*/
TableState setTableStateInMeta(TableState state) throws IOException;

/**
 * Update region state in Meta only. No procedures are submitted to manipulate the given region
 * or any other region from the same table.
 * @param states list of all region states to be updated in meta
 * @return the previous states of the regions in Meta
*/
List<RegionState> setRegionStateInMeta(List<RegionState> states) throws IOException;

/**
* Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time
* -- good if many Regions to online -- and it will schedule the assigns even in the case where
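A minimal usage sketch for the new interface method; per the HBaseHbck javadoc, the Hbck instance comes from Connection#getHbck(). The table name is invented and the RegionState constructor signature is an assumption, not taken from this patch:

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Hbck;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.master.RegionState;

public class SetRegionStateInMetaExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Hbck hbck = conn.getHbck()) {
      // Hypothetical region whose hbase:meta state needs correcting.
      RegionInfo region =
          RegionInfoBuilder.newBuilder(TableName.valueOf("example_table")).build();
      List<RegionState> requested =
          Collections.singletonList(new RegionState(region, RegionState.State.CLOSED, null));
      // The previous states come back, so callers can record what was overwritten.
      List<RegionState> previous = hbck.setRegionStateInMeta(requested);
      previous.forEach(s -> System.out.println("previous state: " + s));
    }
  }
}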
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -116,6 +116,11 @@ public RegionInfoBuilder setOffline(boolean offLine) {
return this;
}

public RegionInfoBuilder setEncodedName(String encodedName) {
this.encodedName = encodedName;
return this;
}

public RegionInfo build() {
return new MutableRegionInfo(tableName, startKey, endKey, split,
regionId, replicaId, offLine, regionName, encodedName);
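The new setter lets a rebuilt RegionInfo carry over an encoded name that already exists in meta instead of recomputing one. A hypothetical illustration; the table name, region id, and encoded-name value are all invented:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class EncodedNameExample {
  public static void main(String[] args) {
    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("example_table"))
        .setRegionId(1L)
        .setEncodedName("49bd2fd9f837a380cb30a1aec9d87e52") // invented pre-existing name
        .build();
    System.out.println(ri.getEncodedName());
  }
}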
hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -2226,6 +2226,14 @@ public static HBaseProtos.TableName toProtoTableName(TableName tableName) {
.setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build();
}

public static HBaseProtos.RegionInfo toProtoRegionInfo(
org.apache.hadoop.hbase.client.RegionInfo regionInfo) {
return HBaseProtos.RegionInfo.newBuilder()
.setRegionId(regionInfo.getRegionId())
.setRegionEncodedName(regionInfo.getEncodedName())
.setTableName(toProtoTableName(regionInfo.getTable())).build();
}

public static List<TableName> toTableNameList(List<HBaseProtos.TableName> tableNamesList) {
if (tableNamesList == null) {
return new ArrayList<>();
@@ -3145,6 +3153,7 @@ public static HBaseProtos.RegionInfo toRegionInfo(final org.apache.hadoop.hbase.
builder.setOffline(info.isOffline());
builder.setSplit(info.isSplit());
builder.setReplicaId(info.getReplicaId());
builder.setRegionEncodedName(info.getEncodedName());
return builder.build();
}

@@ -3184,6 +3193,9 @@ public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBase
if (proto.hasOffline()) {
rib.setOffline(proto.getOffline());
}
if (proto.hasRegionEncodedName()) {
rib.setEncodedName(proto.getRegionEncodedName());
}
return rib.build();
}

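Together with the builder change above, the added setRegionEncodedName/hasRegionEncodedName handling means the encoded name now survives a proto round trip. A sketch of that property, assuming the patched shaded protos are on the classpath:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

public class RegionInfoRoundTrip {
  public static void main(String[] args) {
    RegionInfo original =
        RegionInfoBuilder.newBuilder(TableName.valueOf("example_table")).build();
    HBaseProtos.RegionInfo proto = ProtobufUtil.toRegionInfo(original);
    RegionInfo restored = ProtobufUtil.toRegionInfo(proto);
    // With this patch the restored encoded name matches the original
    // instead of being recomputed from the region name.
    System.out.println(original.getEncodedName().equals(restored.getEncodedName()));
  }
}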
hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -55,6 +55,7 @@
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.util.Bytes;
@@ -132,6 +133,7 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
@@ -1258,6 +1260,18 @@ public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final T
.setTableName(ProtobufUtil.toProtoTableName(state.getTableName())).build();
}

/**
* Creates a protocol buffer SetRegionStateInMetaRequest
 * @param states list of region states to update in Meta
* @return a SetRegionStateInMetaRequest
*/
public static SetRegionStateInMetaRequest buildSetRegionStateInMetaRequest(
final List<RegionState> states) {
final SetRegionStateInMetaRequest.Builder builder = SetRegionStateInMetaRequest.newBuilder();
states.forEach(s -> builder.addStates(s.convert()));
return builder.build();
}

/**
* Creates a protocol buffer GetTableDescriptorsRequest for a single table
*
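A small sketch of the converter in isolation; the empty list is illustrative, and a real caller (HBaseHbck above) passes the states it wants persisted:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest;

public class BuildSetRegionStateRequestExample {
  public static void main(String[] args) {
    List<RegionState> states = new ArrayList<>(); // populate with states to persist
    SetRegionStateInMetaRequest request =
        RequestConverter.buildSetRegionStateInMetaRequest(states);
    // One proto entry is added per RegionState via RegionState#convert().
    System.out.println(request.getStatesCount());
  }
}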
hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -955,6 +955,10 @@ public enum OperationStatusCode {

public static final int REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT = 256 * 1024 * 1024;

/** Configuration key for ReplicationSource shipEdits timeout */
public static final String REPLICATION_SOURCE_SHIPEDITS_TIMEOUT =
"replication.source.shipedits.timeout";
public static final int REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT = 60000;

/**
* Directory where the source cluster file system client configuration are placed which is used by
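A hedged sketch of how replication code might consume the new key; only the two constants come from this patch, and the call site is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class ShipEditsTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Falls back to the 60-second default when the key is unset.
    int timeoutMs = conf.getInt(
        HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT,
        HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT);
    System.out.println("shipEdits timeout (ms): " + timeoutMs);
  }
}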
@@ -210,7 +210,9 @@ public static int getCompressedSize(Algorithm algo, Compressor compressor,
} finally {
nullOutputStream.close();
compressedStream.close();
compressingStream.close();
if (compressingStream != null) {
compressingStream.close();
}
}
}

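The guarded close fixes a potential NullPointerException: if creating the compression stream throws, compressingStream is still null when the finally block runs. The same pattern in isolation, with a plain ByteArrayOutputStream standing in for the compression stream:

import java.io.ByteArrayOutputStream;
import java.io.OutputStream;

public class GuardedCloseExample {
  public static void main(String[] args) throws Exception {
    OutputStream compressingStream = null;
    try {
      // If construction throws here, compressingStream stays null.
      compressingStream = new ByteArrayOutputStream();
      compressingStream.write(new byte[] { 1, 2, 3 });
    } finally {
      if (compressingStream != null) {
        compressingStream.close();
      }
    }
  }
}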