package org.apache.hadoop.hdfs.server.diskbalancer.command;

+import org.apache.commons.text.TextStringBuilder;
import org.apache.hadoop.util.Preconditions;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.net.NetUtils;

+import java.io.PrintStream;
+import java.util.Collections;
+import java.util.Set;
+import java.util.TreeSet;
+
/**
 * Gets the current status of disk balancer command.
 */
@@ -41,9 +47,13 @@ public class QueryCommand extends Command {
   * @param conf - Configuration.
   */
  public QueryCommand(Configuration conf) {
-    super(conf);
+    this(conf, System.out);
+  }
+
+  public QueryCommand(Configuration conf, final PrintStream ps) {
+    super(conf, ps);
    addValidCommandParameters(DiskBalancerCLI.QUERY,
-        "Queries the status of disk plan running on a given datanode.");
+        "Queries the status of disk plan running on given datanode(s).");
    addValidCommandParameters(DiskBalancerCLI.VERBOSE,
        "Prints verbose results.");
  }
@@ -56,52 +66,72 @@ public QueryCommand(Configuration conf) {
  @Override
  public void execute(CommandLine cmd) throws Exception {
    LOG.info("Executing \"query plan\" command.");
+    TextStringBuilder result = new TextStringBuilder();
    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
    verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
-    String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
-    Preconditions.checkNotNull(nodeName);
-    nodeName = nodeName.trim();
-    String nodeAddress = nodeName;
-
-    // if the string is not name:port format use the default port.
-    if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
-      int defaultIPC = NetUtils.createSocketAddr(
-          getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
-              DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
-      nodeAddress = nodeName + ":" + defaultIPC;
-      LOG.debug("Using default data node port : {}", nodeAddress);
+    String nodeVal = cmd.getOptionValue(DiskBalancerCLI.QUERY);
+    Preconditions.checkNotNull(nodeVal);
+    nodeVal = nodeVal.trim();
+    Set<String> resultSet = new TreeSet<>();
+    String[] nodes = nodeVal.split(",");
+    if (nodes.length == 0) {
+      String warnMsg = "The number of input nodes is 0. "
+          + "Please input the valid nodes.";
+      throw new DiskBalancerException(warnMsg,
+          DiskBalancerException.Result.INVALID_NODE);
    }

-    ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
-    try {
-      DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
-      System.out.printf("Plan File: %s%nPlan ID: %s%nResult: %s%n",
-          workStatus.getPlanFile(),
-          workStatus.getPlanID(),
-          workStatus.getResult().toString());
+    Collections.addAll(resultSet, nodes);
+    String outputLine = String.format(
+        "Get current status of the diskbalancer for DataNode(s). "
+            + "These DataNode(s) are parsed from '%s'.", nodeVal);
+    recordOutput(result, outputLine);
+    for (String nodeName : resultSet) {
+      // if the string is not name:port format use the default port.
+      String nodeAddress = nodeName;
+      if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
+        int defaultIPC = NetUtils.createSocketAddr(
+            getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
+                DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
+        nodeAddress = nodeName + ":" + defaultIPC;
+        LOG.debug("Using default data node port : {}", nodeAddress);
+      }

-      if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
-        System.out.printf("%s", workStatus.currentStateString());
+      ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
+      try {
+        DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
+        outputLine = String.format("DataNode: %s%nPlan File: %s%nPlan ID: %s%nResult: %s%n",
+            nodeAddress,
+            workStatus.getPlanFile(),
+            workStatus.getPlanID(),
+            workStatus.getResult().toString());
+        result.append(outputLine);
+        if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
+          outputLine = String.format("%s", workStatus.currentStateString());
+          result.append(outputLine);
+        }
+        result.append(System.lineSeparator());
+      } catch (DiskBalancerException ex) {
+        LOG.error("Query plan failed by {}", nodeAddress, ex);
+        throw ex;
      }
-    } catch (DiskBalancerException ex) {
-      LOG.error("Query plan failed.", ex);
-      throw ex;
-    }
    }
+    getPrintStream().println(result.toString());
  }

  /**
   * Gets extended help for this command.
   */
  @Override
  public void printHelp() {
-    String header = "Query Plan queries a given data node about the " +
+    String header = "Query Plan queries given datanode(s) about the " +
        "current state of disk balancer execution.\n\n";

    String footer = "\nQuery command retrieves the plan ID and the current " +
        "running state. ";
-
    HelpFormatter helpFormatter = new HelpFormatter();
-    helpFormatter.printHelp("hdfs diskbalancer -query <hostname> [options]",
+    helpFormatter.printHelp("hdfs diskbalancer -query <hostname,hostname,...> " +
+        " [options]",
        header, DiskBalancerCLI.getQueryOptions(), footer);
  }
}
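For context on what the new `QueryCommand(Configuration, PrintStream)` constructor enables: a caller (most likely a test) can pass a capturing stream instead of `System.out` and read back the per-DataNode report that `execute()` now assembles in the `TextStringBuilder`. A minimal sketch, not part of this patch; the class name, hostnames, port, and the omitted `CommandLine` wiring are placeholders:

```java
// Illustrative only -- not part of this change. Shows how the new
// QueryCommand(Configuration, PrintStream) constructor lets a caller
// capture the report instead of writing straight to System.out.
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.diskbalancer.command.QueryCommand;

public class QueryCommandOutputSketch {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(captured, true, "UTF-8");

    Configuration conf = new Configuration(); // assumes HDFS settings on the classpath
    QueryCommand query = new QueryCommand(conf, out);

    // query.execute(cmd) is omitted here: it needs a parsed CommandLine carrying
    // the -query option (e.g. from
    //   hdfs diskbalancer -query dn1.example.com,dn2.example.com:9867 -v
    // where the hostnames/port are placeholders) and live DataNodes to answer
    // the queryDiskBalancerPlan() RPC.

    out.flush();
    System.out.println(captured.toString("UTF-8"));
  }
}
```

Routing the stream through `super(conf, ps)` keeps the parent `Command` in charge of output, which is why `execute()` builds the whole multi-node report first and prints it once with `getPrintStream().println(result.toString())`.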