Commit d57462f

HDFS-15479. Ordered snapshot deletion: make it a configurable feature (#2156)
1 parent d9441f9 commit d57462f

5 files changed: +164 -7 lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

Lines changed: 6 additions & 1 deletion
@@ -500,8 +500,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String DFS_NAMENODE_SNAPSHOT_MAX_LIMIT =
       "dfs.namenode.snapshot.max.limit";
-
   public static final int DFS_NAMENODE_SNAPSHOT_MAX_LIMIT_DEFAULT = 65536;
+
+  public static final String DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED =
+      "dfs.namenode.snapshot.deletion.ordered";
+  public static final boolean DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED_DEFAULT
+      = false;
+
   public static final String DFS_NAMENODE_SNAPSHOT_SKIPLIST_SKIP_INTERVAL =
       "dfs.namenode.snapshot.skiplist.interval";
   public static final int DFS_NAMENODE_SNAPSHOT_SKIPLIST_SKIP_INTERVAL_DEFAULT =
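
The new key defaults to false, so ordered snapshot deletion stays off unless an operator opts in. A minimal sketch of enabling it programmatically, as the new test in this commit does (the class and method names below are illustrative, not part of this commit); on a real cluster the property would normally be set in hdfs-site.xml before the NameNode starts:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

/** Illustrative helper, not part of this commit. */
public class EnableOrderedSnapshotDeletion {
  /** Build a NameNode configuration that opts in to ordered snapshot deletion. */
  public static Configuration newConf() {
    final Configuration conf = new Configuration();
    // dfs.namenode.snapshot.deletion.ordered defaults to false (see the diff above).
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
    return conf;
  }
}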

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java

Lines changed: 32 additions & 5 deletions
@@ -21,6 +21,7 @@
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -249,12 +250,41 @@ static INode.BlocksMapUpdateInfo deleteSnapshot(
       fsd.checkOwner(pc, iip);
     }
 
+    // time of snapshot deletion
+    final long now = Time.now();
+    if (fsd.isSnapshotDeletionOrdered()) {
+      final INodeDirectory srcRoot = snapshotManager.getSnapshottableRoot(iip);
+      final DirectorySnapshottableFeature snapshottable
+          = srcRoot.getDirectorySnapshottableFeature();
+      final Snapshot snapshot = snapshottable.getSnapshotByName(
+          srcRoot, snapshotName);
+
+      // Diffs must be not empty since a snapshot exists in the list
+      final int earliest = snapshottable.getDiffs().iterator().next()
+          .getSnapshotId();
+      if (snapshot.getId() != earliest) {
+        throw new SnapshotException("Failed to delete snapshot " + snapshotName
+            + " from directory " + srcRoot.getFullPathName()
+            + ": " + snapshot + " is not the earliest snapshot id=" + earliest
+            + " (" + DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED
+            + " is " + fsd.isSnapshotDeletionOrdered() + ")");
+      }
+    }
+
+    final INode.BlocksMapUpdateInfo collectedBlocks = deleteSnapshot(
+        fsd, snapshotManager, iip, snapshotName, now);
+    fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
+        logRetryCache, now);
+    return collectedBlocks;
+  }
+
+  static INode.BlocksMapUpdateInfo deleteSnapshot(
+      FSDirectory fsd, SnapshotManager snapshotManager, INodesInPath iip,
+      String snapshotName, long now) throws IOException {
     INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
     ChunkedArrayList<INode> removedINodes = new ChunkedArrayList<>();
     INode.ReclaimContext context = new INode.ReclaimContext(
         fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, null);
-    // time of snapshot deletion
-    final long now = Time.now();
     fsd.writeLock();
     try {
       snapshotManager.deleteSnapshot(iip, snapshotName, context, now);
@@ -266,9 +296,6 @@ static INode.BlocksMapUpdateInfo deleteSnapshot(
       fsd.writeUnlock();
     }
     removedINodes.clear();
-    fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
-        logRetryCache, now);
-
     return collectedBlocks;
   }
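
The effect of the check above, stripped of NameNode internals: when ordered deletion is enabled, a snapshot may be removed only if it is the earliest one remaining under its snapshottable directory; otherwise the NameNode throws a SnapshotException. A self-contained sketch of that rule (the class name and data structure are illustrative, not this commit's code):

import java.util.ArrayDeque;
import java.util.Deque;

/** Illustrative sketch of the ordering rule, not NameNode code. */
public class OrderedDeletionRule {
  // Snapshot names in creation order; the head is the earliest one remaining.
  private final Deque<String> snapshots = new ArrayDeque<>();

  public void create(String name) {
    snapshots.addLast(name);
  }

  public void delete(String name) {
    final String earliest = snapshots.peekFirst();
    if (!name.equals(earliest)) {
      // Mirrors the SnapshotException thrown by FSDirSnapshotOp above.
      throw new IllegalStateException("Failed to delete snapshot " + name
          + ": it is not the earliest remaining snapshot (" + earliest + ")");
    }
    snapshots.removeFirst();
  }
}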

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

Lines changed: 20 additions & 0 deletions
@@ -187,6 +187,7 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) {
   private boolean posixAclInheritanceEnabled;
   private final boolean xattrsEnabled;
   private final int xattrMaxSize;
+  private final boolean snapshotDeletionOrdered;
 
   // precision of access times.
   private final long accessTimePrecision;
@@ -353,6 +354,20 @@ public enum DirOp {
         + " hard limit " + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT
         + ": (%s).", DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY);
 
+    this.snapshotDeletionOrdered =
+        conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED,
+            DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED_DEFAULT);
+    LOG.info("{} = {}", DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED,
+        snapshotDeletionOrdered);
+    if (snapshotDeletionOrdered && !xattrsEnabled) {
+      throw new HadoopIllegalArgumentException("" +
+          "XAttrs is required by snapshotDeletionOrdered:"
+          + DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED
+          + " is true but "
+          + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY
+          + " is false.");
+    }
+
     this.accessTimePrecision = conf.getLong(
         DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT);
@@ -610,6 +625,11 @@ boolean isXattrsEnabled() {
     return xattrsEnabled;
   }
   int getXattrMaxSize() { return xattrMaxSize; }
+
+  boolean isSnapshotDeletionOrdered() {
+    return snapshotDeletionOrdered;
+  }
+
   boolean isAccessTimeSupported() {
     return accessTimePrecision > 0;
   }
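
Note the startup guard above: if dfs.namenode.snapshot.deletion.ordered is true while xattrs are disabled, FSDirectory construction fails with a HadoopIllegalArgumentException. A minimal configuration sketch that satisfies the check (dfs.namenode.xattrs.enabled already defaults to true, so the explicit set only documents the dependency; the class name is illustrative, not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

/** Illustrative sketch, not part of this commit. */
public class OrderedDeletionNameNodeConf {
  public static Configuration create() {
    final Configuration conf = new Configuration();
    // Ordered snapshot deletion requires xattrs to be enabled on the NameNode.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
    return conf;
  }
}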

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java

Lines changed: 1 addition & 1 deletion
@@ -358,7 +358,7 @@ SnapshotDiffListingInfo computeDiff(final INodeDirectory snapshotRootDir,
    * @throws SnapshotException If snapshotName is not null or empty, but there
    *         is no snapshot matching the name.
    */
-  private Snapshot getSnapshotByName(INodeDirectory snapshotRoot,
+  public Snapshot getSnapshotByName(INodeDirectory snapshotRoot,
       String snapshotName) throws SnapshotException {
     Snapshot s = null;
     if (snapshotName != null && !snapshotName.isEmpty()) {
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestOrderedSnapshotDeletion.java

Lines changed: 105 additions & 0 deletions
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
+
+/** Test ordered snapshot deletion. */
+public class TestOrderedSnapshotDeletion {
+  static final Logger LOG = LoggerFactory.getLogger(FSDirectory.class);
+
+  {
+    SnapshotTestHelper.disableLogs();
+    GenericTestUtils.setLogLevel(INode.LOG, Level.TRACE);
+  }
+
+  private final Path snapshottableDir
+      = new Path("/" + getClass().getSimpleName());
+
+  private MiniDFSCluster cluster;
+
+  @Before
+  public void setUp() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster.waitActive();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test (timeout=60000)
+  public void testConf() throws Exception {
+    DistributedFileSystem hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.allowSnapshot(snapshottableDir);
+
+    final Path sub0 = new Path(snapshottableDir, "sub0");
+    hdfs.mkdirs(sub0);
+    hdfs.createSnapshot(snapshottableDir, "s0");
+
+    final Path sub1 = new Path(snapshottableDir, "sub1");
+    hdfs.mkdirs(sub1);
+    hdfs.createSnapshot(snapshottableDir, "s1");
+
+    final Path sub2 = new Path(snapshottableDir, "sub2");
+    hdfs.mkdirs(sub2);
+    hdfs.createSnapshot(snapshottableDir, "s2");
+
+    assertDeletionDenied(snapshottableDir, "s1", hdfs);
+    assertDeletionDenied(snapshottableDir, "s2", hdfs);
+    hdfs.deleteSnapshot(snapshottableDir, "s0");
+    assertDeletionDenied(snapshottableDir, "s2", hdfs);
+    hdfs.deleteSnapshot(snapshottableDir, "s1");
+    hdfs.deleteSnapshot(snapshottableDir, "s2");
+  }
+
+  static void assertDeletionDenied(Path snapshottableDir, String snapshot,
+      DistributedFileSystem hdfs) throws IOException {
+    try {
+      hdfs.deleteSnapshot(snapshottableDir, snapshot);
+      Assert.fail("deleting " + snapshot + " should fail");
+    } catch (SnapshotException se) {
+      LOG.info("Good, it is expected to have " + se);
+    }
+  }
+}
