Skip to content

Commit 989158a

Browse files
committed
HDFS-15488. Add a command to list all snapshots for a snapshottable root with snapshot Ids.
1 parent d02be17 commit 989158a

File tree

21 files changed

+760
-3
lines changed

21 files changed

+760
-3
lines changed

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,7 @@
150150
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
151151
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
152152
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
153+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
153154
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
154155
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
155156
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2190,6 +2191,24 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
21902191
}
21912192
}
21922193

2194+
/**
2195+
* Get listing of all the snapshots for a snapshottable directory
2196+
*
2197+
* @return Information about all the snapshots for a snapshottable directory
2198+
* @throws IOException If an I/O error occurred
2199+
* @see ClientProtocol#getSnapshotListing()
2200+
*/
2201+
public SnapshotStatus[] getSnapshotListing(String snapshotRoot)
2202+
throws IOException {
2203+
checkOpen();
2204+
try (TraceScope ignored = tracer.newScope("getSnapshottableDirListing")) {
2205+
return namenode.getSnapshotListing(snapshotRoot);
2206+
} catch (RemoteException re) {
2207+
throw re.unwrapRemoteException();
2208+
}
2209+
}
2210+
2211+
21932212
/**
21942213
* Allow snapshot on a directory.
21952214
*

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,7 @@
109109
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing.DiffReportListingEntry;
110110
import org.apache.hadoop.hdfs.client.impl.SnapshotDiffReportGenerator;
111111
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
112+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
112113
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
113114
import org.apache.hadoop.io.Text;
114115
import org.apache.hadoop.net.NetUtils;
@@ -2148,6 +2149,15 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
21482149
return dfs.getSnapshottableDirListing();
21492150
}
21502151

2152+
/**
2153+
* @return all the snapshots for a snapshottable directory
2154+
* @throws IOException
2155+
*/
2156+
public SnapshotStatus[] getSnapshotListing(Path snapshotRoot)
2157+
throws IOException {
2158+
return dfs.getSnapshotListing(getPathName(snapshotRoot));
2159+
}
2160+
21512161
@Override
21522162
public void deleteSnapshot(final Path snapshotDir, final String snapshotName)
21532163
throws IOException {

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -727,6 +727,18 @@ BatchedDirectoryListing getBatchedListing(
727727
SnapshottableDirectoryStatus[] getSnapshottableDirListing()
728728
throws IOException;
729729

730+
/**
731+
* Get listing of all the snapshots for a snapshottable directory
732+
*
733+
* @return Information about all the snapshots for a snapshottable directory
734+
* @throws IOException If an I/O error occurred
735+
*/
736+
@Idempotent
737+
@ReadOnly(isCoordinated = true)
738+
SnapshotStatus[] getSnapshotListing(String snapshotRoot)
739+
throws IOException;
740+
741+
730742
///////////////////////////////////////
731743
// System issues and management
732744
///////////////////////////////////////
Lines changed: 218 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,218 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
* <p>
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
* <p>
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hdfs.protocol;
19+
20+
import java.io.PrintStream;
21+
import java.text.SimpleDateFormat;
22+
import java.util.Date;
23+
import java.util.EnumSet;
24+
25+
import org.apache.hadoop.fs.Path;
26+
import org.apache.hadoop.fs.permission.FsPermission;
27+
import org.apache.hadoop.hdfs.DFSUtilClient;
28+
29+
/**
30+
* Metadata about a snapshottable directory
31+
*/
32+
public class SnapshotStatus {
33+
/**
34+
* Basic information of the snapshot directory
35+
*/
36+
private final HdfsFileStatus dirStatus;
37+
38+
/**
39+
* Snapshot ID for the snapshot
40+
*/
41+
private final int snapshotID;
42+
43+
/**
44+
* Full path of the parent.
45+
*/
46+
private final byte[] parentFullPath;
47+
48+
public SnapshotStatus(long modification_time, long access_time,
49+
FsPermission permission,
50+
EnumSet<HdfsFileStatus.Flags> flags,
51+
String owner, String group, byte[] localName,
52+
long inodeId, int childrenNum, int snapshotID,
53+
byte[] parentFullPath) {
54+
this.dirStatus = new HdfsFileStatus.Builder()
55+
.isdir(true)
56+
.mtime(modification_time)
57+
.atime(access_time)
58+
.perm(permission)
59+
.flags(flags)
60+
.owner(owner)
61+
.group(group)
62+
.path(localName)
63+
.fileId(inodeId)
64+
.children(childrenNum)
65+
.build();
66+
this.snapshotID = snapshotID;
67+
this.parentFullPath = parentFullPath;
68+
}
69+
70+
public SnapshotStatus(HdfsFileStatus dirStatus,
71+
int snapshotNumber, byte[] parentFullPath) {
72+
this.dirStatus = dirStatus;
73+
this.snapshotID = snapshotNumber;
74+
this.parentFullPath = parentFullPath;
75+
}
76+
77+
/**
78+
* @return snapshot id for the snapshot
79+
*/
80+
public int getSnapshotID() {
81+
return snapshotID;
82+
}
83+
84+
/**
85+
* @return The basic information of the directory
86+
*/
87+
public HdfsFileStatus getDirStatus() {
88+
return dirStatus;
89+
}
90+
91+
/**
92+
* @return Full path of the file
93+
*/
94+
public byte[] getParentFullPath() {
95+
return parentFullPath;
96+
}
97+
98+
/**
99+
* @return Full path of the snapshot
100+
*/
101+
public Path getFullPath() {
102+
String parentFullPathStr =
103+
(parentFullPath == null || parentFullPath.length == 0) ?
104+
"/" : DFSUtilClient.bytes2String(parentFullPath);
105+
return new Path(getSnapshotPath(parentFullPathStr,
106+
dirStatus.getLocalName()));
107+
}
108+
109+
/**
110+
* Print a list of {@link SnapshotStatus} out to a given stream.
111+
*
112+
* @param stats The list of {@link SnapshotStatus}
113+
* @param out The given stream for printing.
114+
*/
115+
public static void print(SnapshotStatus[] stats,
116+
PrintStream out) {
117+
if (stats == null || stats.length == 0) {
118+
out.println();
119+
return;
120+
}
121+
int maxRepl = 0, maxLen = 0, maxOwner = 0, maxGroup = 0;
122+
int maxSnapshotID = 0;
123+
for (SnapshotStatus status : stats) {
124+
maxRepl = maxLength(maxRepl, status.dirStatus.getReplication());
125+
maxLen = maxLength(maxLen, status.dirStatus.getLen());
126+
maxOwner = maxLength(maxOwner, status.dirStatus.getOwner());
127+
maxGroup = maxLength(maxGroup, status.dirStatus.getGroup());
128+
maxSnapshotID = maxLength(maxSnapshotID, status.snapshotID);
129+
}
130+
131+
String lineFormat = "%s%s " // permission string
132+
+ "%" + maxRepl + "s "
133+
+ (maxOwner > 0 ? "%-" + maxOwner + "s " : "%s")
134+
+ (maxGroup > 0 ? "%-" + maxGroup + "s " : "%s")
135+
+ "%" + maxLen + "s "
136+
+ "%s " // mod time
137+
+ "%" + maxSnapshotID + "s "
138+
+ "%s"; // path
139+
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
140+
141+
for (SnapshotStatus status : stats) {
142+
String line = String.format(lineFormat, "d",
143+
status.dirStatus.getPermission(),
144+
status.dirStatus.getReplication(),
145+
status.dirStatus.getOwner(),
146+
status.dirStatus.getGroup(),
147+
String.valueOf(status.dirStatus.getLen()),
148+
dateFormat.format(new Date(status.dirStatus.getModificationTime())),
149+
status.snapshotID,
150+
getSnapshotPath(DFSUtilClient.bytes2String(status.parentFullPath),
151+
status.dirStatus.getLocalName())
152+
);
153+
out.println(line);
154+
}
155+
}
156+
157+
private static int maxLength(int n, Object value) {
158+
return Math.max(n, String.valueOf(value).length());
159+
}
160+
161+
public static class Bean {
162+
private final String path;
163+
private final int snapshotID;
164+
private final long modificationTime;
165+
private final short permission;
166+
private final String owner;
167+
private final String group;
168+
169+
public Bean(String path, int snapshotID, long
170+
modificationTime, short permission, String owner, String group) {
171+
this.path = path;
172+
this.snapshotID = snapshotID;
173+
this.modificationTime = modificationTime;
174+
this.permission = permission;
175+
this.owner = owner;
176+
this.group = group;
177+
}
178+
179+
public String getPath() {
180+
return path;
181+
}
182+
183+
public int getSnapshotID() {
184+
return snapshotID;
185+
}
186+
187+
public long getModificationTime() {
188+
return modificationTime;
189+
}
190+
191+
public short getPermission() {
192+
return permission;
193+
}
194+
195+
public String getOwner() {
196+
return owner;
197+
}
198+
199+
public String getGroup() {
200+
return group;
201+
}
202+
}
203+
204+
static String getSnapshotPath(String snapshottableDir,
205+
String snapshotRelativePath) {
206+
String parentFullPathStr =
207+
snapshottableDir == null || snapshottableDir.isEmpty() ?
208+
"/" : snapshottableDir;
209+
final StringBuilder b = new StringBuilder(parentFullPathStr);
210+
if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
211+
b.append(Path.SEPARATOR);
212+
}
213+
return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
214+
.append(Path.SEPARATOR)
215+
.append(snapshotRelativePath)
216+
.toString();
217+
}
218+
}

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@
8989
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
9090
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
9191
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
92+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
9293
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
9394
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
9495
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
@@ -150,6 +151,8 @@
150151
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto;
151152
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
152153
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
154+
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingRequestProto;
155+
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingResponseProto;
153156
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
154157
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
155158
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto;
@@ -1299,6 +1302,25 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
12991302
}
13001303
}
13011304

1305+
@Override
1306+
public SnapshotStatus[] getSnapshotListing(String path)
1307+
throws IOException {
1308+
GetSnapshotListingRequestProto req =
1309+
GetSnapshotListingRequestProto.newBuilder()
1310+
.setSnapshotRoot(path).build();
1311+
try {
1312+
GetSnapshotListingResponseProto result = rpcProxy
1313+
.getSnapshotListing(null, req);
1314+
1315+
if (result.hasSnapshotList()) {
1316+
return PBHelperClient.convert(result.getSnapshotList());
1317+
}
1318+
return null;
1319+
} catch (ServiceException e) {
1320+
throw ProtobufHelper.getRemoteException(e);
1321+
}
1322+
}
1323+
13021324
@Override
13031325
public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
13041326
String fromSnapshot, String toSnapshot) throws IOException {

0 commit comments

Comments
 (0)