Skip to content

Commit e9cdd09

Browse files
bshashikant authored and Mukul Kumar Singh committed
HDFS-15488. Add a command to list all snapshots for a snapshottable root with snapshot Ids. (apache#2166)
Conflicts: hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Change-Id: I0f61f0cede6e0570e821f318dbc47f86d18efcf7
1 parent 4d0198d commit e9cdd09

File tree

26 files changed

+779
-4
lines changed

26 files changed

+779
-4
lines changed

hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
<Class name="org.apache.hadoop.hdfs.util.StripedBlockUtil$ChunkByteArray"/>
2323
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing$DiffReportListingEntry"/>
2424
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing"/>
25+
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotStatus"/>
2526
</Or>
2627
<Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
2728
</Match>

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,7 @@
147147
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
148148
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
149149
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
150+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
150151
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
151152
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
152153
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2148,6 +2149,24 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
21482149
}
21492150
}
21502151

2152+
/**
2153+
* Get listing of all the snapshots for a snapshottable directory.
2154+
*
2155+
* @return Information about all the snapshots for a snapshottable directory
2156+
* @throws IOException If an I/O error occurred
2157+
* @see ClientProtocol#getSnapshotListing(String)
2158+
*/
2159+
public SnapshotStatus[] getSnapshotListing(String snapshotRoot)
2160+
throws IOException {
2161+
checkOpen();
2162+
try (TraceScope ignored = tracer.newScope("getSnapshotListing")) {
2163+
return namenode.getSnapshotListing(snapshotRoot);
2164+
} catch (RemoteException re) {
2165+
throw re.unwrapRemoteException();
2166+
}
2167+
}
2168+
2169+
21512170
/**
21522171
* Allow snapshot on a directory.
21532172
*

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,7 @@ public enum OpType {
110110
SET_XATTR("op_set_xattr"),
111111
GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
112112
GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
113+
GET_SNAPSHOT_LIST("op_get_snapshot_list"),
113114
TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
114115
UNSET_EC_POLICY("op_unset_ec_policy"),
115116
UNSET_STORAGE_POLICY("op_unset_storage_policy");

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,7 @@
105105
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing.DiffReportListingEntry;
106106
import org.apache.hadoop.hdfs.client.impl.SnapshotDiffReportGenerator;
107107
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
108+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
108109
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
109110
import org.apache.hadoop.io.Text;
110111
import org.apache.hadoop.net.NetUtils;
@@ -2132,6 +2133,19 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
21322133
return dfs.getSnapshottableDirListing();
21332134
}
21342135

2136+
/**
2137+
* @return all the snapshots for a snapshottable directory
2138+
* @throws IOException
2139+
*/
2140+
public SnapshotStatus[] getSnapshotListing(Path snapshotRoot)
2141+
throws IOException {
2142+
Path absF = fixRelativePart(snapshotRoot);
2143+
statistics.incrementReadOps(1);
2144+
storageStatistics
2145+
.incrementOpCounter(OpType.GET_SNAPSHOT_LIST);
2146+
return dfs.getSnapshotListing(getPathName(absF));
2147+
}
2148+
21352149
@Override
21362150
public void deleteSnapshot(final Path snapshotDir, final String snapshotName)
21372151
throws IOException {

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -725,6 +725,18 @@ BatchedDirectoryListing getBatchedListing(
725725
SnapshottableDirectoryStatus[] getSnapshottableDirListing()
726726
throws IOException;
727727

728+
/**
729+
* Get listing of all the snapshots for a snapshottable directory.
730+
*
731+
* @return Information about all the snapshots for a snapshottable directory
732+
* @throws IOException If an I/O error occurred
733+
*/
734+
@Idempotent
735+
@ReadOnly(isCoordinated = true)
736+
SnapshotStatus[] getSnapshotListing(String snapshotRoot)
737+
throws IOException;
738+
739+
728740
///////////////////////////////////////
729741
// System issues and management
730742
///////////////////////////////////////
Lines changed: 226 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,226 @@
1+
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.PrintStream;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.EnumSet;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtilClient;

/**
 * Metadata about a single snapshot of a snapshottable directory.
 */
public class SnapshotStatus {
  /**
   * Basic information of the snapshot directory.
   */
  private final HdfsFileStatus dirStatus;

  /**
   * Snapshot ID for the snapshot.
   */
  private final int snapshotID;

  /**
   * Full path of the parent (the snapshottable root), in the NameNode's
   * raw byte encoding of the path; may be null or empty, meaning "/".
   */
  private byte[] parentFullPath;

  public SnapshotStatus(long modificationTime, long accessTime,
                        FsPermission permission,
                        EnumSet<HdfsFileStatus.Flags> flags,
                        String owner, String group, byte[] localName,
                        long inodeId, int childrenNum, int snapshotID,
                        byte[] parentFullPath) {
    this.dirStatus = new HdfsFileStatus.Builder()
        .isdir(true)
        .mtime(modificationTime)
        .atime(accessTime)
        .perm(permission)
        .flags(flags)
        .owner(owner)
        .group(group)
        .path(localName)
        .fileId(inodeId)
        .children(childrenNum)
        .build();
    this.snapshotID = snapshotID;
    this.parentFullPath = parentFullPath;
  }

  public SnapshotStatus(HdfsFileStatus dirStatus,
                        int snapshotNumber, byte[] parentFullPath) {
    this.dirStatus = dirStatus;
    this.snapshotID = snapshotNumber;
    this.parentFullPath = parentFullPath;
  }

  /**
   * Sets the parent path name.
   * @param path parent path
   */
  public void setParentFullPath(byte[] path) {
    parentFullPath = path;
  }

  /**
   * @return snapshot id for the snapshot
   */
  public int getSnapshotID() {
    return snapshotID;
  }

  /**
   * @return The basic information of the directory
   */
  public HdfsFileStatus getDirStatus() {
    return dirStatus;
  }

  /**
   * @return Full path of the parent, in raw byte form; may be null
   */
  public byte[] getParentFullPath() {
    return parentFullPath;
  }

  /**
   * @return Full path of the snapshot
   */
  public Path getFullPath() {
    // A null or empty parent path means the snapshottable root is "/".
    String parentFullPathStr =
        (parentFullPath == null || parentFullPath.length == 0) ?
            "/" : DFSUtilClient.bytes2String(parentFullPath);
    return new Path(getSnapshotPath(parentFullPathStr,
        dirStatus.getLocalName()));
  }

  /**
   * Print a list of {@link SnapshotStatus} out to a given stream.
   *
   * @param stats The list of {@link SnapshotStatus}
   * @param out The given stream for printing.
   */
  public static void print(SnapshotStatus[] stats,
                           PrintStream out) {
    if (stats == null || stats.length == 0) {
      out.println();
      return;
    }
    // First pass: compute column widths so the listing lines up.
    int maxRepl = 0, maxLen = 0, maxOwner = 0, maxGroup = 0;
    int maxSnapshotID = 0;
    for (SnapshotStatus status : stats) {
      maxRepl = maxLength(maxRepl, status.dirStatus.getReplication());
      maxLen = maxLength(maxLen, status.dirStatus.getLen());
      maxOwner = maxLength(maxOwner, status.dirStatus.getOwner());
      maxGroup = maxLength(maxGroup, status.dirStatus.getGroup());
      maxSnapshotID = maxLength(maxSnapshotID, status.snapshotID);
    }

    String lineFormat = "%s%s " // permission string
        + "%" + maxRepl + "s "
        + (maxOwner > 0 ? "%-" + maxOwner + "s " : "%s")
        + (maxGroup > 0 ? "%-" + maxGroup + "s " : "%s")
        + "%" + maxLen + "s "
        + "%s " // mod time
        + "%" + maxSnapshotID + "s "
        + "%s"; // path
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");

    for (SnapshotStatus status : stats) {
      // Use getFullPath() so a null/empty parentFullPath falls back to "/"
      // instead of failing in DFSUtilClient.bytes2String; it builds the
      // same <root>/.snapshot/<name> string as getSnapshotPath otherwise.
      String line = String.format(lineFormat, "d",
          status.dirStatus.getPermission(),
          status.dirStatus.getReplication(),
          status.dirStatus.getOwner(),
          status.dirStatus.getGroup(),
          String.valueOf(status.dirStatus.getLen()),
          dateFormat.format(new Date(status.dirStatus.getModificationTime())),
          status.snapshotID,
          status.getFullPath()
      );
      out.println(line);
    }
  }

  /** @return the larger of {@code n} and the printed width of {@code value}. */
  private static int maxLength(int n, Object value) {
    return Math.max(n, String.valueOf(value).length());
  }

  /**
   * JMX bean view of a snapshot's metadata.
   */
  public static class Bean {
    private final String path;
    private final int snapshotID;
    private final long modificationTime;
    private final short permission;
    private final String owner;
    private final String group;

    public Bean(String path, int snapshotID, long
        modificationTime, short permission, String owner, String group) {
      this.path = path;
      this.snapshotID = snapshotID;
      this.modificationTime = modificationTime;
      this.permission = permission;
      this.owner = owner;
      this.group = group;
    }

    public String getPath() {
      return path;
    }

    public int getSnapshotID() {
      return snapshotID;
    }

    public long getModificationTime() {
      return modificationTime;
    }

    public short getPermission() {
      return permission;
    }

    public String getOwner() {
      return owner;
    }

    public String getGroup() {
      return group;
    }
  }

  /**
   * Builds the full snapshot path:
   * {@code <snapshottableDir>/.snapshot/<snapshotRelativePath>}.
   *
   * @param snapshottableDir the snapshottable root; null/empty means "/"
   * @param snapshotRelativePath the snapshot name under .snapshot
   * @return the assembled snapshot path string
   */
  static String getSnapshotPath(String snapshottableDir,
                                String snapshotRelativePath) {
    String parentFullPathStr =
        snapshottableDir == null || snapshottableDir.isEmpty() ?
            "/" : snapshottableDir;
    final StringBuilder b = new StringBuilder(parentFullPathStr);
    // Avoid a double separator when the root already ends with "/".
    if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
      b.append(Path.SEPARATOR);
    }
    return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
        .append(Path.SEPARATOR)
        .append(snapshotRelativePath)
        .toString();
  }
}

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,7 @@
8888
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
8989
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
9090
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
91+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
9192
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
9293
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
9394
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
@@ -149,6 +150,8 @@
149150
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto;
150151
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
151152
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
153+
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingRequestProto;
154+
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingResponseProto;
152155
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
153156
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
154157
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto;
@@ -1288,6 +1291,25 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
12881291
}
12891292
}
12901293

1294+
@Override
1295+
public SnapshotStatus[] getSnapshotListing(String path)
1296+
throws IOException {
1297+
GetSnapshotListingRequestProto req =
1298+
GetSnapshotListingRequestProto.newBuilder()
1299+
.setSnapshotRoot(path).build();
1300+
try {
1301+
GetSnapshotListingResponseProto result = rpcProxy
1302+
.getSnapshotListing(null, req);
1303+
1304+
if (result.hasSnapshotList()) {
1305+
return PBHelperClient.convert(result.getSnapshotList());
1306+
}
1307+
return null;
1308+
} catch (ServiceException e) {
1309+
throw ProtobufHelper.getRemoteException(e);
1310+
}
1311+
}
1312+
12911313
@Override
12921314
public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
12931315
String fromSnapshot, String toSnapshot) throws IOException {

0 commit comments

Comments
 (0)