Skip to content

Commit 6828737

Browse files
authored
HDFS-15488. Add a command to list all snapshots for a snapshottable root with snapshot Ids. (#2166)
1 parent 3eaf627 commit 6828737

File tree

28 files changed

+833
-6
lines changed

28 files changed

+833
-6
lines changed

hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
<Class name="org.apache.hadoop.hdfs.util.StripedBlockUtil$ChunkByteArray"/>
2323
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing$DiffReportListingEntry"/>
2424
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing"/>
25+
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotStatus"/>
2526
</Or>
2627
<Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
2728
</Match>

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,7 @@
150150
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
151151
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
152152
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
153+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
153154
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
154155
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
155156
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2190,6 +2191,24 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
21902191
}
21912192
}
21922193

2194+
/**
2195+
* Get listing of all the snapshots for a snapshottable directory.
2196+
*
2197+
* @return Information about all the snapshots for a snapshottable directory
2198+
* @throws IOException If an I/O error occurred
2199+
* @see ClientProtocol#getSnapshotListing(String)
2200+
*/
2201+
public SnapshotStatus[] getSnapshotListing(String snapshotRoot)
2202+
throws IOException {
2203+
checkOpen();
2204+
try (TraceScope ignored = tracer.newScope("getSnapshotListing")) {
2205+
return namenode.getSnapshotListing(snapshotRoot);
2206+
} catch (RemoteException re) {
2207+
throw re.unwrapRemoteException();
2208+
}
2209+
}
2210+
2211+
21932212
/**
21942213
* Allow snapshot on a directory.
21952214
*

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@ public enum OpType {
111111
SET_XATTR("op_set_xattr"),
112112
GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
113113
GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
114+
GET_SNAPSHOT_LIST("op_get_snapshot_list"),
114115
TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
115116
UNSET_EC_POLICY("op_unset_ec_policy"),
116117
UNSET_STORAGE_POLICY("op_unset_storage_policy");

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,7 @@
109109
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing.DiffReportListingEntry;
110110
import org.apache.hadoop.hdfs.client.impl.SnapshotDiffReportGenerator;
111111
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
112+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
112113
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
113114
import org.apache.hadoop.io.Text;
114115
import org.apache.hadoop.net.NetUtils;
@@ -2148,6 +2149,19 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
21482149
return dfs.getSnapshottableDirListing();
21492150
}
21502151

2152+
/**
2153+
* @return all the snapshots for a snapshottable directory
2154+
* @throws IOException
2155+
*/
2156+
public SnapshotStatus[] getSnapshotListing(Path snapshotRoot)
2157+
throws IOException {
2158+
Path absF = fixRelativePart(snapshotRoot);
2159+
statistics.incrementReadOps(1);
2160+
storageStatistics
2161+
.incrementOpCounter(OpType.GET_SNAPSHOT_LIST);
2162+
return dfs.getSnapshotListing(getPathName(absF));
2163+
}
2164+
21512165
@Override
21522166
public void deleteSnapshot(final Path snapshotDir, final String snapshotName)
21532167
throws IOException {

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -727,6 +727,18 @@ BatchedDirectoryListing getBatchedListing(
727727
SnapshottableDirectoryStatus[] getSnapshottableDirListing()
728728
throws IOException;
729729

730+
/**
731+
* Get listing of all the snapshots for a snapshottable directory.
732+
*
733+
* @return Information about all the snapshots for a snapshottable directory
734+
* @throws IOException If an I/O error occurred
735+
*/
736+
@Idempotent
737+
@ReadOnly(isCoordinated = true)
738+
SnapshotStatus[] getSnapshotListing(String snapshotRoot)
739+
throws IOException;
740+
741+
730742
///////////////////////////////////////
731743
// System issues and management
732744
///////////////////////////////////////
Lines changed: 226 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,226 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
* <p>
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
* <p>
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hdfs.protocol;
19+
20+
import java.io.PrintStream;
21+
import java.text.SimpleDateFormat;
22+
import java.util.Date;
23+
import java.util.EnumSet;
24+
25+
import org.apache.hadoop.fs.Path;
26+
import org.apache.hadoop.fs.permission.FsPermission;
27+
import org.apache.hadoop.hdfs.DFSUtilClient;
28+
29+
/**
30+
* Metadata about a snapshottable directory.
31+
*/
32+
public class SnapshotStatus {
33+
/**
34+
* Basic information of the snapshot directory.
35+
*/
36+
private final HdfsFileStatus dirStatus;
37+
38+
/**
39+
* Snapshot ID for the snapshot.
40+
*/
41+
private final int snapshotID;
42+
43+
/**
44+
* Full path of the parent.
45+
*/
46+
private byte[] parentFullPath;
47+
48+
public SnapshotStatus(long modificationTime, long accessTime,
49+
FsPermission permission,
50+
EnumSet<HdfsFileStatus.Flags> flags,
51+
String owner, String group, byte[] localName,
52+
long inodeId, int childrenNum, int snapshotID,
53+
byte[] parentFullPath) {
54+
this.dirStatus = new HdfsFileStatus.Builder()
55+
.isdir(true)
56+
.mtime(modificationTime)
57+
.atime(accessTime)
58+
.perm(permission)
59+
.flags(flags)
60+
.owner(owner)
61+
.group(group)
62+
.path(localName)
63+
.fileId(inodeId)
64+
.children(childrenNum)
65+
.build();
66+
this.snapshotID = snapshotID;
67+
this.parentFullPath = parentFullPath;
68+
}
69+
70+
public SnapshotStatus(HdfsFileStatus dirStatus,
71+
int snapshotNumber, byte[] parentFullPath) {
72+
this.dirStatus = dirStatus;
73+
this.snapshotID = snapshotNumber;
74+
this.parentFullPath = parentFullPath;
75+
}
76+
77+
/**
78+
* sets the prent path name.
79+
* @param path parent path
80+
*/
81+
public void setParentFullPath(byte[] path) {
82+
parentFullPath = path;
83+
}
84+
85+
/**
86+
* @return snapshot id for the snapshot
87+
*/
88+
public int getSnapshotID() {
89+
return snapshotID;
90+
}
91+
92+
/**
93+
* @return The basic information of the directory
94+
*/
95+
public HdfsFileStatus getDirStatus() {
96+
return dirStatus;
97+
}
98+
99+
/**
100+
* @return Full path of the file
101+
*/
102+
public byte[] getParentFullPath() {
103+
return parentFullPath;
104+
}
105+
106+
/**
107+
* @return Full path of the snapshot
108+
*/
109+
public Path getFullPath() {
110+
String parentFullPathStr =
111+
(parentFullPath == null || parentFullPath.length == 0) ?
112+
"/" : DFSUtilClient.bytes2String(parentFullPath);
113+
return new Path(getSnapshotPath(parentFullPathStr,
114+
dirStatus.getLocalName()));
115+
}
116+
117+
/**
118+
* Print a list of {@link SnapshotStatus} out to a given stream.
119+
*
120+
* @param stats The list of {@link SnapshotStatus}
121+
* @param out The given stream for printing.
122+
*/
123+
public static void print(SnapshotStatus[] stats,
124+
PrintStream out) {
125+
if (stats == null || stats.length == 0) {
126+
out.println();
127+
return;
128+
}
129+
int maxRepl = 0, maxLen = 0, maxOwner = 0, maxGroup = 0;
130+
int maxSnapshotID = 0;
131+
for (SnapshotStatus status : stats) {
132+
maxRepl = maxLength(maxRepl, status.dirStatus.getReplication());
133+
maxLen = maxLength(maxLen, status.dirStatus.getLen());
134+
maxOwner = maxLength(maxOwner, status.dirStatus.getOwner());
135+
maxGroup = maxLength(maxGroup, status.dirStatus.getGroup());
136+
maxSnapshotID = maxLength(maxSnapshotID, status.snapshotID);
137+
}
138+
139+
String lineFormat = "%s%s " // permission string
140+
+ "%" + maxRepl + "s "
141+
+ (maxOwner > 0 ? "%-" + maxOwner + "s " : "%s")
142+
+ (maxGroup > 0 ? "%-" + maxGroup + "s " : "%s")
143+
+ "%" + maxLen + "s "
144+
+ "%s " // mod time
145+
+ "%" + maxSnapshotID + "s "
146+
+ "%s"; // path
147+
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
148+
149+
for (SnapshotStatus status : stats) {
150+
String line = String.format(lineFormat, "d",
151+
status.dirStatus.getPermission(),
152+
status.dirStatus.getReplication(),
153+
status.dirStatus.getOwner(),
154+
status.dirStatus.getGroup(),
155+
String.valueOf(status.dirStatus.getLen()),
156+
dateFormat.format(new Date(status.dirStatus.getModificationTime())),
157+
status.snapshotID,
158+
getSnapshotPath(DFSUtilClient.bytes2String(status.parentFullPath),
159+
status.dirStatus.getLocalName())
160+
);
161+
out.println(line);
162+
}
163+
}
164+
165+
private static int maxLength(int n, Object value) {
166+
return Math.max(n, String.valueOf(value).length());
167+
}
168+
169+
public static class Bean {
170+
private final String path;
171+
private final int snapshotID;
172+
private final long modificationTime;
173+
private final short permission;
174+
private final String owner;
175+
private final String group;
176+
177+
public Bean(String path, int snapshotID, long
178+
modificationTime, short permission, String owner, String group) {
179+
this.path = path;
180+
this.snapshotID = snapshotID;
181+
this.modificationTime = modificationTime;
182+
this.permission = permission;
183+
this.owner = owner;
184+
this.group = group;
185+
}
186+
187+
public String getPath() {
188+
return path;
189+
}
190+
191+
public int getSnapshotID() {
192+
return snapshotID;
193+
}
194+
195+
public long getModificationTime() {
196+
return modificationTime;
197+
}
198+
199+
public short getPermission() {
200+
return permission;
201+
}
202+
203+
public String getOwner() {
204+
return owner;
205+
}
206+
207+
public String getGroup() {
208+
return group;
209+
}
210+
}
211+
212+
static String getSnapshotPath(String snapshottableDir,
213+
String snapshotRelativePath) {
214+
String parentFullPathStr =
215+
snapshottableDir == null || snapshottableDir.isEmpty() ?
216+
"/" : snapshottableDir;
217+
final StringBuilder b = new StringBuilder(parentFullPathStr);
218+
if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
219+
b.append(Path.SEPARATOR);
220+
}
221+
return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
222+
.append(Path.SEPARATOR)
223+
.append(snapshotRelativePath)
224+
.toString();
225+
}
226+
}

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@
8989
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
9090
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
9191
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
92+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
9293
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
9394
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
9495
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
@@ -150,6 +151,8 @@
150151
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto;
151152
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
152153
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
154+
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingRequestProto;
155+
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingResponseProto;
153156
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
154157
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
155158
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto;
@@ -1299,6 +1302,25 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
12991302
}
13001303
}
13011304

1305+
@Override
1306+
public SnapshotStatus[] getSnapshotListing(String path)
1307+
throws IOException {
1308+
GetSnapshotListingRequestProto req =
1309+
GetSnapshotListingRequestProto.newBuilder()
1310+
.setSnapshotRoot(path).build();
1311+
try {
1312+
GetSnapshotListingResponseProto result = rpcProxy
1313+
.getSnapshotListing(null, req);
1314+
1315+
if (result.hasSnapshotList()) {
1316+
return PBHelperClient.convert(result.getSnapshotList());
1317+
}
1318+
return null;
1319+
} catch (ServiceException e) {
1320+
throw ProtobufHelper.getRemoteException(e);
1321+
}
1322+
}
1323+
13021324
@Override
13031325
public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
13041326
String fromSnapshot, String toSnapshot) throws IOException {

0 commit comments

Comments
 (0)