
Commit 827b881

Merge branch 'apache:trunk' into HADOOP-18851
2 parents: 913d690 + 2831c7c

76 files changed (+1250, -192 lines)

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java

Lines changed: 1 addition & 1 deletion
@@ -141,7 +141,7 @@ public static RpcMetrics create(Server server, Configuration conf) {
   MutableCounterLong rpcAuthorizationSuccesses;
   @Metric("Number of client backoff requests")
   MutableCounterLong rpcClientBackoff;
-  @Metric("Number of Slow RPC calls")
+  @Metric("Number of slow RPC calls")
   MutableCounterLong rpcSlowCalls;
   @Metric("Number of requeue calls")
   MutableCounterLong rpcRequeueCalls;

hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md

Lines changed: 18 additions & 1 deletion
@@ -74,16 +74,21 @@ The default timeunit used for RPC metrics is milliseconds (as per the below desc
 | `SentBytes` | Total number of sent bytes |
 | `RpcQueueTimeNumOps` | Total number of RPC calls |
 | `RpcQueueTimeAvgTime` | Average queue time in milliseconds |
-| `RpcLockWaitTimeNumOps` | Total number of RPC call (same as RpcQueueTimeNumOps) |
+| `RpcLockWaitTimeNumOps` | Total number of RPC calls (same as RpcQueueTimeNumOps) |
 | `RpcLockWaitTimeAvgTime` | Average time waiting for lock acquisition in milliseconds |
 | `RpcProcessingTimeNumOps` | Total number of RPC calls (same to RpcQueueTimeNumOps) |
 | `RpcProcessingAvgTime` | Average Processing time in milliseconds |
+| `DeferredRpcProcessingTimeNumOps` | Total number of Deferred RPC calls |
+| `DeferredRpcProcessingAvgTime` | Average Deferred Processing time in milliseconds |
+| `RpcResponseTimeNumOps` | Total number of RPC calls (same to RpcQueueTimeNumOps) |
+| `RpcResponseAvgTime` | Average Response time in milliseconds |
 | `RpcAuthenticationFailures` | Total number of authentication failures |
 | `RpcAuthenticationSuccesses` | Total number of authentication successes |
 | `RpcAuthorizationFailures` | Total number of authorization failures |
 | `RpcAuthorizationSuccesses` | Total number of authorization successes |
 | `RpcClientBackoff` | Total number of client backoff requests |
 | `RpcSlowCalls` | Total number of slow RPC calls |
+| `RpcRequeueCalls` | Total number of requeue RPC calls |
 | `RpcCallsSuccesses` | Total number of RPC calls that are successfully processed |
 | `NumOpenConnections` | Current number of open connections |
 | `NumInProcessHandler` | Current number of handlers on working |
@@ -107,6 +112,18 @@ The default timeunit used for RPC metrics is milliseconds (as per the below desc
 | `rpcLockWaitTime`*num*`s90thPercentileLatency` | Shows the 90th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
 | `rpcLockWaitTime`*num*`s95thPercentileLatency` | Shows the 95th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
 | `rpcLockWaitTime`*num*`s99thPercentileLatency` | Shows the 99th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `rpcResponseTime`*num*`sNumOps` | Shows total number of RPC calls (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `rpcResponseTime`*num*`s50thPercentileLatency` | Shows the 50th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `rpcResponseTime`*num*`s75thPercentileLatency` | Shows the 75th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `rpcResponseTime`*num*`s90thPercentileLatency` | Shows the 90th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `rpcResponseTime`*num*`s95thPercentileLatency` | Shows the 95th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `rpcResponseTime`*num*`s99thPercentileLatency` | Shows the 99th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `deferredRpcProcessingTime`*num*`sNumOps` | Shows total number of Deferred RPC calls (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `deferredRpcProcessingTime`*num*`s50thPercentileLatency` | Shows the 50th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `deferredRpcProcessingTime`*num*`s75thPercentileLatency` | Shows the 75th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `deferredRpcProcessingTime`*num*`s90thPercentileLatency` | Shows the 90th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `deferredRpcProcessingTime`*num*`s95thPercentileLatency` | Shows the 95th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
+| `deferredRpcProcessingTime`*num*`s99thPercentileLatency` | Shows the 99th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
 | `TotalRequests` | Total num of requests served by the RPC server. |
 | `TotalRequestsPerSeconds` | Total num of requests per second served by the RPC server. |
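
The new rpcResponseTime* and deferredRpcProcessingTime* quantile metrics documented above only appear when quantile collection is enabled. A minimal sketch of enabling it programmatically, assuming a stock Hadoop Configuration object (the property names come from the table above; the interval values are illustrative):

import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration();
// Enable quantile (percentile) collection for RPC metrics.
conf.setBoolean("rpc.metrics.quantile.enable", true);
// Publish quantiles over rolling 60s and 300s windows (illustrative values);
// this yields metric names such as rpcResponseTime60s99thPercentileLatency.
conf.set("rpc.metrics.percentiles.intervals", "60,300");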

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java

Lines changed: 1 addition & 0 deletions
@@ -75,6 +75,7 @@ public enum OpType {
   GET_STORAGE_POLICIES("op_get_storage_policies"),
   GET_STORAGE_POLICY("op_get_storage_policy"),
   GET_TRASH_ROOT("op_get_trash_root"),
+  GET_TRASH_ROOTS("op_get_trash_roots"),
   GET_XATTR("op_get_xattr"),
   LIST_CACHE_DIRECTIVE("op_list_cache_directive"),
   LIST_CACHE_POOL("op_list_cache_pool"),

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

Lines changed: 2 additions & 0 deletions
@@ -3612,6 +3612,8 @@ public Path getTrashRoot(Path path) {
    */
   @Override
   public Collection<FileStatus> getTrashRoots(boolean allUsers) {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOTS);
     Set<FileStatus> ret = new HashSet<>();
     // Get normal trash roots
     ret.addAll(super.getTrashRoots(allUsers));

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java

Lines changed: 35 additions & 0 deletions
@@ -22,6 +22,7 @@

 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
 import org.apache.hadoop.fs.ContentSummary;
@@ -877,6 +878,40 @@ public static Map<String, String> getErasureCodeCodecs(Map<?, ?> json) {
     return map;
   }

+  public static Collection<FileStatus> getTrashRoots(Map<?, ?> json) {
+    List<?> objs = (List<?>) json.get("Paths");
+    if (objs != null) {
+      FileStatus[] trashRoots = new FileStatus[objs.size()];
+      for (int i = 0; i < objs.size(); i++) {
+        Map<?, ?> m = (Map<?, ?>) objs.get(i);
+        trashRoots[i] = toFileStatus(m);
+      }
+      return Arrays.asList(trashRoots);
+    }
+    return new ArrayList<FileStatus>(0);
+  }
+
+  public static FileStatus toFileStatus(Map<?, ?> json) {
+    Path path = new Path(getString(json, "path", ""));
+    long length = getLong(json, "length", 0);
+    boolean isdir = getBoolean(json, "isdir", false);
+    short replication = (short) getInt(json, "block_replication", -1);
+    long blockSize = getLong(json, "blocksize", 256);
+    long modificationTime = getLong(json, "modification_time", 0);
+    long accessTime = getLong(json, "access_time", 0);
+    String permString = getString(json, "permission", null);
+    FsPermission permission = toFsPermission(permString);
+    String owner = getString(json, "owner", null);
+    String group = getString(json, "group", null);
+    if (json.get("symlink") != null) {
+      Path symlink = new Path((String) json.get("symlink"));
+      return new FileStatus(length, isdir, replication, blockSize, modificationTime,
+          accessTime, permission, owner, group, symlink, path);
+    }
+    return new FileStatus(length, isdir, replication, blockSize, modificationTime,
+        accessTime, permission, owner, group, path);
+  }
+
   private static List<SnapshotDiffReport.DiffReportEntry> toDiffList(
       List<?> objs) {
     if (objs == null) {
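
To make the new decoding path concrete, here is a minimal sketch that feeds a GETTRASHROOTS-style payload through JsonUtilClient.getTrashRoots. The "Paths" key and the per-entry field names mirror toFileStatus in the diff above; the sample literal and the use of Jackson's ObjectMapper for parsing are illustrative assumptions, not part of this commit:

import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.web.JsonUtilClient;

static Collection<FileStatus> decode(String payload) throws IOException {
  // Parse the raw JSON into the Map shape getTrashRoots() expects, e.g.
  // {"Paths":[{"path":"/user/alice/.Trash","isdir":true,"length":0,
  //   "block_replication":0,"blocksize":0,"modification_time":0,
  //   "access_time":0,"permission":"700","owner":"alice","group":"supergroup"}]}
  Map<?, ?> json = new ObjectMapper().readValue(payload, Map.class);
  // Each element is a FileStatus for one trash root, e.g. /user/alice/.Trash.
  return JsonUtilClient.getTrashRoots(json);
}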

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

Lines changed: 20 additions & 0 deletions
@@ -1956,6 +1956,26 @@ String decodeResponse(Map<?, ?> json) throws IOException {
     }
   }

+  public Collection<FileStatus> getTrashRoots(boolean allUsers) {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOTS);
+
+    final HttpOpParam.Op op = GetOpParam.Op.GETTRASHROOTS;
+    try {
+      Collection<FileStatus> trashRoots =
+          new FsPathResponseRunner<Collection<FileStatus>>(op, null,
+              new AllUsersParam(allUsers)) {
+            @Override
+            Collection<FileStatus> decodeResponse(Map<?, ?> json) throws IOException {
+              return JsonUtilClient.getTrashRoots(json);
+            }
+          }.run();
+      return trashRoots;
+    } catch (IOException e) {
+      return super.getTrashRoots(allUsers);
+    }
+  }
+
   @Override
   public void access(final Path path, final FsAction mode) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS;
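
A short usage sketch for the new client-side call, assuming a WebHDFS URI ("namenode:9870" is a placeholder for the NameNode HTTP address) and a surrounding method that throws IOException:

import java.net.URI;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:9870"), conf);
// true => trash roots of all users; false => only the current user's.
Collection<FileStatus> roots = fs.getTrashRoots(true);
for (FileStatus root : roots) {
  System.out.println(root.getPath());
}

Note that the implementation in the diff above deliberately falls back to the generic FileSystem.getTrashRoots when the REST call throws IOException, so clients talking to older servers that lack GETTRASHROOTS still get an answer.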

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AllUsersParam.java

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** AllUsers parameter. */
+public class AllUsersParam extends BooleanParam {
+  /** Parameter name. */
+  public static final String NAME = "allusers";
+  /** Default parameter value. */
+  public static final String DEFAULT = FALSE;
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public AllUsersParam(final Boolean value) {
+    super(DOMAIN, value);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public AllUsersParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
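
On the wire, this parameter surfaces as an allusers query argument alongside the new operation. A sketch of constructing it directly (the request URL in the comment is illustrative; host and port are placeholders):

import org.apache.hadoop.hdfs.web.resources.AllUsersParam;

// A GETTRASHROOTS request would carry a query string like:
//   http://namenode:9870/webhdfs/v1/?op=GETTRASHROOTS&allusers=true
AllUsersParam allUsers = new AllUsersParam(true);
System.out.println(allUsers.getName()); // prints "allusers"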

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java

Lines changed: 1 addition & 0 deletions
@@ -69,6 +69,7 @@ public enum Op implements HttpOpParam.Op {
     GETSTATUS(false, HttpURLConnection.HTTP_OK),
     GETECPOLICIES(false, HttpURLConnection.HTTP_OK),
     GETECCODECS(false, HttpURLConnection.HTTP_OK),
+    GETTRASHROOTS(false, HttpURLConnection.HTTP_OK),
     GETSNAPSHOTLIST(false, HttpURLConnection.HTTP_OK);

     final boolean redirect;

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java

Lines changed: 4 additions & 2 deletions
@@ -43,6 +43,7 @@
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;
+import org.apache.hadoop.hdfs.web.resources.AllUsersParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
 import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
@@ -344,7 +345,8 @@ protected Response get(
       final TokenKindParam tokenKind,
       final TokenServiceParam tokenService,
       final NoRedirectParam noredirectParam,
-      final StartAfterParam startAfter
+      final StartAfterParam startAfter,
+      final AllUsersParam allUsers
       ) throws IOException, URISyntaxException {
     try {
       final Router router = getRouter();
@@ -393,7 +395,7 @@ protected Response get(
           offset, length, renewer, bufferSize, xattrNames, xattrEncoding,
           excludeDatanodes, fsAction, snapshotName, oldSnapshotName,
           snapshotDiffStartPath, snapshotDiffIndex,
-          tokenKind, tokenService, noredirectParam, startAfter);
+          tokenKind, tokenService, noredirectParam, startAfter, allUsers);
       }
       default:
         throw new UnsupportedOperationException(op + " is not supported");

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

Lines changed: 25 additions & 6 deletions
@@ -30,6 +30,7 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.Base64;
 import java.util.Base64.Encoder;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
@@ -56,6 +57,7 @@
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;

+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
@@ -1067,14 +1069,16 @@ public Response getRoot(
       @QueryParam(NoRedirectParam.NAME) @DefaultValue(NoRedirectParam.DEFAULT)
           final NoRedirectParam noredirect,
       @QueryParam(StartAfterParam.NAME) @DefaultValue(StartAfterParam.DEFAULT)
-          final StartAfterParam startAfter
+          final StartAfterParam startAfter,
+      @QueryParam(AllUsersParam.NAME) @DefaultValue(AllUsersParam.DEFAULT)
+          final AllUsersParam allUsers
       ) throws IOException, InterruptedException {
     return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
         renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes,
         fsAction, snapshotName, oldSnapshotName,
         snapshotDiffStartPath, snapshotDiffIndex,
         tokenKind, tokenService,
-        noredirect, startAfter);
+        noredirect, startAfter, allUsers);
   }

   /** Handle HTTP GET request. */
@@ -1124,12 +1128,14 @@ public Response get(
       @QueryParam(NoRedirectParam.NAME) @DefaultValue(NoRedirectParam.DEFAULT)
           final NoRedirectParam noredirect,
       @QueryParam(StartAfterParam.NAME) @DefaultValue(StartAfterParam.DEFAULT)
-          final StartAfterParam startAfter
+          final StartAfterParam startAfter,
+      @QueryParam(AllUsersParam.NAME) @DefaultValue(AllUsersParam.DEFAULT)
+          final AllUsersParam allUsers
       ) throws IOException, InterruptedException {

     init(ugi, delegation, username, doAsUser, path, op, offset, length,
         renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction,
-        snapshotName, oldSnapshotName, tokenKind, tokenService, startAfter);
+        snapshotName, oldSnapshotName, tokenKind, tokenService, startAfter, allUsers);

     return doAs(ugi, new PrivilegedExceptionAction<Response>() {
       @Override
@@ -1138,7 +1144,7 @@ public Response run() throws IOException, URISyntaxException {
             op, offset, length, renewer, bufferSize, xattrNames, xattrEncoding,
             excludeDatanodes, fsAction, snapshotName, oldSnapshotName,
             snapshotDiffStartPath, snapshotDiffIndex,
-            tokenKind, tokenService, noredirect, startAfter);
+            tokenKind, tokenService, noredirect, startAfter, allUsers);
       }
     });
   }
@@ -1172,7 +1178,8 @@ protected Response get(
       final TokenKindParam tokenKind,
       final TokenServiceParam tokenService,
       final NoRedirectParam noredirectParam,
-      final StartAfterParam startAfter
+      final StartAfterParam startAfter,
+      final AllUsersParam allUsers
       ) throws IOException, URISyntaxException {
     final Configuration conf = (Configuration) context
         .getAttribute(JspHelper.CURRENT_CONF);
@@ -1322,6 +1329,12 @@ protected Response get(
       final String jsonStr = JsonUtil.toJsonString("Path", trashPath);
       return Response.ok(jsonStr).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETTRASHROOTS: {
+      Boolean value = allUsers.getValue();
+      final Collection<FileStatus> trashPaths = getTrashRoots(conf, value);
+      final String js = JsonUtil.toJsonString(trashPaths);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     case LISTSTATUS_BATCH:
     {
       byte[] start = HdfsFileStatus.EMPTY_NAME;
@@ -1558,6 +1571,12 @@ public Void run() throws IOException {
     };
   }

+  private Collection<FileStatus> getTrashRoots(Configuration conf, boolean allUsers)
+      throws IOException {
+    FileSystem fs = FileSystem.get(conf != null ? conf : new Configuration());
+    return fs.getTrashRoots(allUsers);
+  }
+

   /** Handle HTTP DELETE request for the root. */
   @DELETE
