HBASE-22783 shell with removal of status and load classes
Signed-off-by: stack <stack@apache.org>
Signed-off-by: Andrew Purtell <apurtell@apache.org>
Signed-off-by: Jan Hentschel <jan.hentschel@ultratendency.com>
virajjasani authored and HorizonNet committed Aug 6, 2019
1 parent 9d82716 commit f02f741
Showing 6 changed files with 69 additions and 54 deletions.
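The pattern repeated across these files is the move from the deprecated ClusterStatus/ServerLoad wrappers to ClusterMetrics/ServerMetrics. A minimal JRuby sketch of the mapping, using only calls that appear in this patch (the `admin` handle is assumed to be an org.apache.hadoop.hbase.client.Admin already obtained by the shell):

    # Before: wrap the metrics in a ClusterStatus and ask it for the live servers
    # servers = org.apache.hadoop.hbase.ClusterStatus.new(admin.getClusterMetrics).getServers

    # After: use ClusterMetrics directly; the live servers are the keys of a
    # Map<ServerName, ServerMetrics>
    cluster_metrics = admin.getClusterMetrics
    cluster_metrics.getLiveServerMetrics.keySet.each do |server_name|
      puts server_name.getServerName
    end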
bin/draining_servers.rb (2 changes: 1 addition & 1 deletion)
@@ -49,7 +49,7 @@
# Return array of servernames where servername is hostname+port+startcode
# comma-delimited
def getServers(admin)
- serverInfos = admin.getClusterStatus.getServers
+ serverInfos = admin.getClusterMetrics.getLiveServerMetrics.keySet
servers = []
serverInfos.each do |server|
servers << server.getServerName
bin/region_status.rb (2 changes: 1 addition & 1 deletion)
@@ -132,7 +132,7 @@
$TableName = TableName.valueOf($tablename.to_java_bytes) unless $tablename.nil?
loop do
if $tablename.nil?
- server_count = admin.getClusterStatus.getRegionsCount
+ server_count = admin.getClusterMetrics.getRegionCount
else
connection = ConnectionFactory.createConnection(config)
server_count = MetaTableAccessor.allTableRegions(connection, $TableName).size
hbase-shell/src/main/ruby/hbase/admin.rb (86 changes: 50 additions & 36 deletions)
@@ -802,43 +802,45 @@ def alter(table_name_str, wait = true, *args)
end

def status(format, type)
- status = org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics)
+ cluster_metrics = @admin.getClusterMetrics
if format == 'detailed'
- puts(format('version %s', status.getHBaseVersion))
+ puts(format('version %s', cluster_metrics.getHBaseVersion))
# Put regions in transition first because usually empty
- puts(format('%d regionsInTransition', status.getRegionStatesInTransition.size))
- for v in status.getRegionStatesInTransition
+ puts(format('%d regionsInTransition', cluster_metrics.getRegionStatesInTransition.size))
+ for v in cluster_metrics.getRegionStatesInTransition
puts(format(' %s', v))
end
- master = status.getMaster
- puts(format('active master: %s:%d %d', master.getHostname, master.getPort, master.getStartcode))
- puts(format('%d backup masters', status.getBackupMastersSize))
- for server in status.getBackupMasters
+ master = cluster_metrics.getMasterName
+ puts(format('active master: %s:%d %d', master.getHostname, master.getPort,
+             master.getStartcode))
+ puts(format('%d backup masters', cluster_metrics.getBackupMasterNames.size))
+ for server in cluster_metrics.getBackupMasterNames
puts(format(' %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
end

master_coprocs = @admin.getMasterCoprocessorNames.toString
unless master_coprocs.nil?
puts(format('master coprocessors: %s', master_coprocs))
end
- puts(format('%d live servers', status.getServersSize))
- for server in status.getServers
+ puts(format('%d live servers', cluster_metrics.getLiveServerMetrics.size))
+ for server in cluster_metrics.getLiveServerMetrics.keySet
puts(format(' %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
- puts(format(' %s', status.getLoad(server).toString))
- for name, region in status.getLoad(server).getRegionsLoad
+ puts(format(' %s', cluster_metrics.getLiveServerMetrics.get(server).toString))
+ for name, region in cluster_metrics.getLiveServerMetrics.get(server).getRegionMetrics
puts(format(' %s', region.getNameAsString.dump))
puts(format(' %s', region.toString))
end
end
- puts(format('%d dead servers', status.getDeadServersSize))
- for server in status.getDeadServerNames
+ puts(format('%d dead servers', cluster_metrics.getDeadServerNames.size))
+ for server in cluster_metrics.getDeadServerNames
puts(format(' %s', server))
end
elsif format == 'replication'
- puts(format('version %<version>s', version: status.getHBaseVersion))
- puts(format('%<servers>d live servers', servers: status.getServersSize))
- status.getServers.each do |server_status|
- sl = status.getLoad(server_status)
+ puts(format('version %<version>s', version: cluster_metrics.getHBaseVersion))
+ puts(format('%<servers>d live servers',
+             servers: cluster_metrics.getLiveServerMetrics.size))
+ cluster_metrics.getLiveServerMetrics.keySet.each do |server_name|
+ sl = cluster_metrics.getLiveServerMetrics.get(server_name)
r_sink_string = ' SINK:'
r_source_string = ' SOURCE:'
r_load_sink = sl.getReplicationLoadSink
@@ -851,7 +853,7 @@ def status(format, type)
.getTimestampsOfLastAppliedOp).toString
r_load_source_map = sl.getReplicationLoadSourceMap
build_source_string(r_load_source_map, r_source_string)
- puts(format(' %<host>s:', host: server_status.getHostname))
+ puts(format(' %<host>s:', host: server_name.getHostname))
if type.casecmp('SOURCE').zero?
puts(format('%<source>s', source: r_source_string))
elsif type.casecmp('SINK').zero?
@@ -864,26 +866,30 @@ def status(format, type)
elsif format == 'simple'
load = 0
regions = 0
- master = status.getMaster
- puts(format('active master: %s:%d %d', master.getHostname, master.getPort, master.getStartcode))
- puts(format('%d backup masters', status.getBackupMastersSize))
- for server in status.getBackupMasters
+ master = cluster_metrics.getMasterName
+ puts(format('active master: %s:%d %d', master.getHostname, master.getPort,
+             master.getStartcode))
+ puts(format('%d backup masters', cluster_metrics.getBackupMasterNames.size))
+ for server in cluster_metrics.getBackupMasterNames
puts(format(' %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
end
- puts(format('%d live servers', status.getServersSize))
- for server in status.getServers
+ puts(format('%d live servers', cluster_metrics.getLiveServerMetrics.size))
+ for server in cluster_metrics.getLiveServerMetrics.keySet
puts(format(' %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
- puts(format(' %s', status.getLoad(server).toString))
- load += status.getLoad(server).getNumberOfRequests
- regions += status.getLoad(server).getNumberOfRegions
+ puts(format(' %s', cluster_metrics.getLiveServerMetrics.get(server).toString))
+ load += cluster_metrics.getLiveServerMetrics.get(server).getRequestCountPerSecond
+ regions += cluster_metrics.getLiveServerMetrics.get(server).getRegionMetrics.size
end
- puts(format('%d dead servers', status.getDeadServers))
- for server in status.getDeadServerNames
+ puts(format('%d dead servers', cluster_metrics.getDeadServerNames.size))
+ for server in cluster_metrics.getDeadServerNames
puts(format(' %s', server))
end
puts(format('Aggregate load: %d, regions: %d', load, regions))
else
puts "1 active master, #{status.getBackupMastersSize} backup masters, #{status.getServersSize} servers, #{status.getDeadServers} dead, #{format('%.4f', status.getAverageLoad)} average load"
puts "1 active master, #{cluster_metrics.getBackupMasterNames.size} backup masters,
#{cluster_metrics.getLiveServerMetrics.size} servers,
#{cluster_metrics.getDeadServerNames.size} dead,
#{format('%.4f', cluster_metrics.getAverageLoad)} average load"
end
end

@@ -1176,15 +1182,23 @@ def list_table_snapshots(tableNameRegex, snapshotNameRegex = '.*')
end

#----------------------------------------------------------------------------------------------
- # Returns the ClusterStatus of the cluster
- def getClusterStatus
-   org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics)
+ # Returns the whole ClusterMetrics containing details:
+ #
+ # hbase version
+ # cluster id
+ # primary/backup master(s)
+ # master's coprocessors
+ # live/dead regionservers
+ # balancer
+ # regions in transition
+ def getClusterMetrics
+   @admin.getClusterMetrics
end

#----------------------------------------------------------------------------------------------
# Returns a list of regionservers
def getRegionServers
- org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics).getServers.map { |serverName| serverName }
+ @admin.getClusterMetrics.getLiveServerMetrics.keySet.map { |server_name| server_name }
end

#----------------------------------------------------------------------------------------------
@@ -1447,7 +1461,7 @@ def clear_deadservers(dead_servers)
#----------------------------------------------------------------------------------------------
# List live region servers
def list_liveservers
- org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics).getServers.to_a
+ @admin.getClusterMetrics.getLiveServerMetrics.keySet.to_a
end

#---------------------------------------------------------------------------
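For the per-server figures in status(), the old status.getLoad(server) call becomes a lookup in the live-server map, and ServerLoad's counters map onto ServerMetrics. A rough sketch of that lookup, using only accessors that appear in this patch:

    cluster_metrics = @admin.getClusterMetrics
    cluster_metrics.getLiveServerMetrics.keySet.each do |server_name|
      server_metrics = cluster_metrics.getLiveServerMetrics.get(server_name)
      # ServerMetrics exposes requests per second and a RegionMetrics map where
      # ServerLoad had a raw request count and RegionLoad entries
      puts format('%s: %d req/s, %d regions', server_name.getServerName,
                  server_metrics.getRequestCountPerSecond,
                  server_metrics.getRegionMetrics.size)
    end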
hbase-shell/src/main/ruby/hbase/taskmonitor.rb (2 changes: 1 addition & 1 deletion)
@@ -79,7 +79,7 @@ def tasksOnHost(filter, host)
java_import 'java.io.InputStreamReader'
java_import 'org.apache.hbase.thirdparty.com.google.gson.JsonParser'

- infoport = org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics).getLoad(host).getInfoServerPort.to_s
+ infoport = @admin.getClusterMetrics.getLiveServerMetrics.get(host).getInfoServerPort.to_s

# Note: This condition use constants from hbase-server
# if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.http.ServerConfigurationKeys::HBASE_SSL_ENABLED_KEY,
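Same idea here: the info-server port now comes from the ServerMetrics entry keyed by the server name rather than from getLoad. A minimal sketch, assuming `host` is the ServerName the task monitor already holds:

    server_metrics = @admin.getClusterMetrics.getLiveServerMetrics.get(host)
    infoport = server_metrics.getInfoServerPort.to_s unless server_metrics.nil?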
hbase-shell/src/main/ruby/shell/commands/list_regions.rb (29 changes: 15 additions & 14 deletions)
@@ -81,7 +81,7 @@ def command(table_name, options = nil, cols = nil)

admin_instance = admin.instance_variable_get('@admin')
conn_instance = admin_instance.getConnection
- cluster_status = org.apache.hadoop.hbase.ClusterStatus.new(admin_instance.getClusterMetrics)
+ cluster_metrics = admin_instance.getClusterMetrics
hregion_locator_instance = conn_instance.getRegionLocator(TableName.valueOf(table_name))
hregion_locator_list = hregion_locator_instance.getAllRegionLocations.to_a
results = []
@@ -104,22 +104,23 @@ def command(table_name, options = nil, cols = nil)
regions.each do |hregion|
hregion_info = hregion.getRegion
server_name = hregion.getServerName
- server_load = cluster_status.getLoad(server_name)
- if server_load.nil?
-   region_load_map = java.util.HashMap.new
+ server_metrics_map = cluster_metrics.getLiveServerMetrics
+ server_metrics = server_metrics_map.get(server_name)
+ if server_metrics.nil?
+   region_metrics_map = java.util.HashMap.new
else
- region_load_map = server_load.getRegionsLoad
+ region_metrics_map = server_metrics.getRegionMetrics
end
region_name = hregion_info.getRegionNameAsString
- region_load = region_load_map.get(hregion_info.getRegionName)
+ region_metrics = region_metrics_map.get(hregion_info.getRegionName)

- if region_load.nil?
+ if region_metrics.nil?
puts "Can not find all details for region: " \
"#{region_name.strip} ," \
" it may be disabled or in transition\n"
else
# Ignore regions which exceed our locality threshold
- next unless accept_region_for_locality? region_load.getDataLocality,
+ next unless accept_region_for_locality? region_metrics.getDataLocality,
locality_threshold
end
result_hash = {}
@@ -147,30 +148,30 @@ def command(table_name, options = nil, cols = nil)
end

if size_hash.key?('SIZE')
- if region_load.nil?
+ if region_metrics.nil?
region_store_file_size = ''
else
- region_store_file_size = region_load.getStorefileSizeMB.to_s.strip
+ region_store_file_size = region_metrics.getStoreFileSize.to_s.strip
end
result_hash.store('SIZE', region_store_file_size)
size_hash['SIZE'] = [size_hash['SIZE'], region_store_file_size.length].max
end

if size_hash.key?('REQ')
- if region_load.nil?
+ if region_metrics.nil?
region_requests = ''
else
- region_requests = region_load.getRequestsCount.to_s.strip
+ region_requests = region_metrics.getRequestCount.to_s.strip
end
result_hash.store('REQ', region_requests)
size_hash['REQ'] = [size_hash['REQ'], region_requests.length].max
end

if size_hash.key?('LOCALITY')
- if region_load.nil?
+ if region_metrics.nil?
locality = ''
else
- locality = region_load.getDataLocality.to_s.strip
+ locality = region_metrics.getDataLocality.to_s.strip
end
result_hash.store('LOCALITY', locality)
size_hash['LOCALITY'] = [size_hash['LOCALITY'], locality.length].max
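Per-region figures now come from RegionMetrics instead of RegionLoad, looked up by the region name bytes and guarded against regions that are disabled or in transition. A hedged sketch of that lookup, reusing the variables from the command above (note that getStoreFileSize appears to return a Size object rather than a plain MB integer, hence the .to_s):

    server_metrics = cluster_metrics.getLiveServerMetrics.get(server_name)
    region_metrics_map = server_metrics.nil? ? java.util.HashMap.new : server_metrics.getRegionMetrics
    region_metrics = region_metrics_map.get(hregion_info.getRegionName)
    unless region_metrics.nil?
      puts region_metrics.getRequestCount        # replaces RegionLoad#getRequestsCount
      puts region_metrics.getDataLocality        # same accessor name as before
      puts region_metrics.getStoreFileSize.to_s  # replaces getStorefileSizeMB
    end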
hbase-shell/src/main/ruby/shell/commands/rit.rb (2 changes: 1 addition & 1 deletion)
@@ -29,7 +29,7 @@ def help
end

def command
- rit = admin.getClusterStatus.getRegionStatesInTransition
+ rit = admin.getClusterMetrics.getRegionStatesInTransition
rit.each do |v|
formatter.row([v.toDescriptiveString])
end
