* Snapshots are considered unique based on the name of the snapshot. Attempts to take a
- * snapshot with the same name (even a different type or with different parameters) will fail with
- * a {@link SnapshotCreationException} indicating the duplicate naming.
+ * snapshot with the same name (even a different type or with different parameters) will fail
+ * with a {@link SnapshotCreationException} indicating the duplicate naming.
* <p>
* Snapshot names follow the same naming constraints as tables in HBase.
* @param snapshotName name of the snapshot to be created
@@ -3627,6 +3627,30 @@ public void snapshot(final byte[] snapshotName, final byte[] tableName,
IOException, SnapshotCreationException, IllegalArgumentException {
snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName), flushType);
}
+
+ /**
+ * Create a snapshot for the given table with the given flush type.
+ * <p>
+ * Snapshots are considered unique based on the name of the snapshot. Attempts to take a
+ * snapshot with the same name (even a different type or with different parameters) will fail
+ * with a {@link SnapshotCreationException} indicating the duplicate naming.
+ * <p>
+ * Snapshot names follow the same naming constraints as tables in HBase.
+ * @param snapshotName name of the snapshot to be created
+ * @param tableName name of the table for which the snapshot is created
+ * @param flushType if the snapshot should be taken without flushing the memstore first
+ * @param snapshotProps snapshot parameters
+ * @throws IOException if a remote or network exception occurs
+ * @throws SnapshotCreationException if snapshot creation failed
+ * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
+ */
+ public void snapshot(final byte[] snapshotName, final byte[] tableName,
+ final SnapshotDescription.Type flushType, Map<String, Object> snapshotProps)
* optional .hbase.pb.UsersAndPermissions users_and_permissions = 7;
*/
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder getUsersAndPermissionsOrBuilder();
+
+ // optional int64 ttl = 8 [default = 0];
+ /**
+ * optional int64 ttl = 8 [default = 0];
+ */
+ boolean hasTtl();
+ /**
+ * optional int64 ttl = 8 [default = 0];
+ */
+ long getTtl();
}
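A hedged usage sketch (illustration only, not part of the patch) of the new admin snapshot overload shown at the top of this excerpt; the "TTL" property key, its unit of seconds, and the admin handle are assumptions inferred from the javadoc and the cleaner chore later in this change:

    // Take a FLUSH-type snapshot that the new cleaner chore may later expire.
    // "TTL" as the snapshotProps key is an assumption; only the Map<String, Object>
    // parameter itself is defined by the new signature.
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      Map<String, Object> snapshotProps = new HashMap<String, Object>();
      snapshotProps.put("TTL", 86400L); // seconds; 0 or absent is treated as "never expire"
      admin.snapshot(Bytes.toBytes("dailySnapshot"), Bytes.toBytes("myTable"),
          SnapshotDescription.Type.FLUSH, snapshotProps);
    } finally {
      admin.close();
    }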
/**
* Protobuf type {@code hbase.pb.SnapshotDescription}
@@ -17311,6 +17321,11 @@ private SnapshotDescription(
bitField0_ |= 0x00000040;
break;
}
+ case 64: {
+ bitField0_ |= 0x00000080;
+ ttl_ = input.readInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -17653,6 +17668,22 @@ public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPe
return usersAndPermissions_;
}
+ // optional int64 ttl = 8 [default = 0];
+ public static final int TTL_FIELD_NUMBER = 8;
+ private long ttl_;
+ /**
+ * optional int64 ttl = 8 [default = 0];
+ */
+ public boolean hasTtl() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional int64 ttl = 8 [default = 0];
+ */
+ public long getTtl() {
+ return ttl_;
+ }
+
private void initFields() {
name_ = "";
table_ = "";
@@ -17661,6 +17692,7 @@ private void initFields() {
version_ = 0;
owner_ = "";
usersAndPermissions_ = org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance();
+ ttl_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -17705,6 +17737,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(7, usersAndPermissions_);
}
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeInt64(8, ttl_);
+ }
getUnknownFields().writeTo(output);
}
@@ -17742,6 +17777,10 @@ public int getSerializedSize() {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, usersAndPermissions_);
}
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(8, ttl_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -17800,6 +17839,11 @@ public boolean equals(final java.lang.Object obj) {
result = result && getUsersAndPermissions()
.equals(other.getUsersAndPermissions());
}
+ result = result && (hasTtl() == other.hasTtl());
+ if (hasTtl()) {
+ result = result && (getTtl()
+ == other.getTtl());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -17841,6 +17885,10 @@ public int hashCode() {
hash = (37 * hash) + USERS_AND_PERMISSIONS_FIELD_NUMBER;
hash = (53 * hash) + getUsersAndPermissions().hashCode();
}
+ if (hasTtl()) {
+ hash = (37 * hash) + TTL_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getTtl());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -17974,6 +18022,8 @@ public Builder clear() {
usersAndPermissionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
+ ttl_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
@@ -18034,6 +18084,10 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio
} else {
result.usersAndPermissions_ = usersAndPermissionsBuilder_.build();
}
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ result.ttl_ = ttl_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -18077,6 +18131,9 @@ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.
if (other.hasUsersAndPermissions()) {
mergeUsersAndPermissions(other.getUsersAndPermissions());
}
+ if (other.hasTtl()) {
+ setTtl(other.getTtl());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -18579,6 +18636,39 @@ public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPe
return usersAndPermissionsBuilder_;
}
+ // optional int64 ttl = 8 [default = 0];
+ private long ttl_ ;
+ /**
+ * optional int64 ttl = 8 [default = 0];
+ */
+ public boolean hasTtl() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional int64 ttl = 8 [default = 0];
+ */
+ public long getTtl() {
+ return ttl_;
+ }
+ /**
+ * optional int64 ttl = 8 [default = 0];
+ */
+ public Builder setTtl(long value) {
+ bitField0_ |= 0x00000080;
+ ttl_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int64 ttl = 8 [default = 0];
+ */
+ public Builder clearTtl() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ ttl_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotDescription)
}
@@ -18760,21 +18850,21 @@ public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPe
"rsion_major\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r" +
"\"Q\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+" +
"\n\014version_info\030\002 \001(\0132\025.hbase.pb.VersionI" +
- "nfo\"\223\002\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" +
+ "nfo\"\243\002\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" +
"\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" +
"\0010\0227\n\004type\030\004 \001(\0162\".hbase.pb.SnapshotDesc" +
"ription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005",
"owner\030\006 \001(\t\022<\n\025users_and_permissions\030\007 \001" +
- "(\0132\035.hbase.pb.UsersAndPermissions\".\n\004Typ" +
- "e\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH\020" +
- "\002*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQ" +
- "UAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREAT" +
- "ER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n" +
- "\010TimeUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECON" +
- "DS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007M" +
- "INUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apa" +
- "che.hadoop.hbase.protobuf.generatedB\013HBa",
- "seProtosH\001\240\001\001"
+ "(\0132\035.hbase.pb.UsersAndPermissions\022\016\n\003ttl" +
+ "\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH" +
+ "\020\001\022\r\n\tSKIPFLUSH\020\002*r\n\013CompareType\022\010\n\004LESS" +
+ "\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_" +
+ "EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER" +
+ "\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS" +
+ "\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013" +
+ "\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004D" +
+ "AYS\020\007B>\n*org.apache.hadoop.hbase.protobu",
+ "f.generatedB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -18918,7 +19008,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SnapshotDescription_descriptor,
- new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "UsersAndPermissions", });
+ new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "UsersAndPermissions", "Ttl", });
return null;
}
};
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index 67645d4c9f40..44b722de8536 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -232,4 +232,5 @@ message SnapshotDescription {
optional int32 version = 5;
optional string owner = 6;
optional UsersAndPermissions users_and_permissions = 7;
+ optional int64 ttl = 8 [default = 0];
}
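For illustration, a minimal sketch (not part of the patch) of the new field in the generated API: the builder and accessors added above carry ttl end to end, and an unset ttl reads back as the default 0, which the UI changes below render as FOREVER:

    // Build a SnapshotDescription carrying the new ttl field (seconds).
    HBaseProtos.SnapshotDescription desc = HBaseProtos.SnapshotDescription.newBuilder()
        .setName("dailySnapshot")
        .setTable("myTable")
        .setCreationTime(System.currentTimeMillis())
        .setType(HBaseProtos.SnapshotDescription.Type.FLUSH)
        .setTtl(86400L) // one day
        .build();
    // Presence is tracked like any other optional field.
    long ttlSeconds = desc.hasTtl() ? desc.getTtl() : 0L;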
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ab0865d8f253..243f448860b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -104,6 +104,7 @@
import org.apache.hadoop.hbase.master.cleaner.ReplicationZKLockCleanerChore;
import org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleaner;
import org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleanerChore;
+import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
@@ -329,6 +330,7 @@ public void run() {
private ClusterStatusChore clusterStatusChore;
private ClusterStatusPublisher clusterStatusPublisherChore = null;
private PeriodicDoMetrics periodicDoMetricsChore = null;
+ private SnapshotCleanerChore snapshotCleanerChore = null;
CatalogJanitor catalogJanitorChore;
private ReplicationZKLockCleanerChore replicationZKLockCleanerChore;
@@ -1248,6 +1250,18 @@ this, conf, getMasterFileSystem().getOldLogDir().getFileSystem(conf),
this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
.getFileSystem(), archiveDir, params);
getChoreService().scheduleChore(hfileCleaner);
+
+ final boolean isSnapshotChoreDisabled = conf.getBoolean(HConstants.SNAPSHOT_CLEANER_DISABLE,
+ false);
+ if (isSnapshotChoreDisabled) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore..");
+ }
+ } else {
+ this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
+ getChoreService().scheduleChore(this.snapshotCleanerChore);
+ }
+
serviceStarted = true;
if (LOG.isTraceEnabled()) {
LOG.trace("Started service threads");
@@ -1351,6 +1365,7 @@ private void stopChores() {
choreService.cancelChore(this.hfileCleaner);
choreService.cancelChore(this.replicationZKLockCleanerChore);
choreService.cancelChore(this.replicationZKNodeCleanerChore);
+ choreService.cancelChore(this.snapshotCleanerChore);
}
}
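A hedged configuration sketch (not part of the patch) for the wiring above; the string key behind HConstants.SNAPSHOT_CLEANER_DISABLE is defined elsewhere in the patch and not visible here, while the interval key comes from the chore class below:

    Configuration conf = HBaseConfiguration.create();
    // Have the master skip scheduling the snapshot cleaner chore entirely.
    conf.setBoolean(HConstants.SNAPSHOT_CLEANER_DISABLE, true);
    // Or keep it enabled and shorten the run interval (milliseconds; the chore defaults to 30 minutes).
    conf.setInt("hbase.master.cleaner.snapshot.interval", 5 * 60 * 1000);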
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/SnapshotCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/SnapshotCleanerChore.java
new file mode 100644
index 000000000000..214ab620df5e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/SnapshotCleanerChore.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.cleaner;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * This chore, every time it runs, will try to delete snapshots that are expired based on TTL in
+ * seconds configured for each Snapshot
+ */
+@InterfaceAudience.Private
+public class SnapshotCleanerChore extends ScheduledChore {
+
+ private static final Log LOG = LogFactory.getLog(SnapshotCleanerChore.class);
+ private static final String SNAPSHOT_CLEANER_CHORE_NAME = "SnapshotCleaner";
+ private static final String SNAPSHOT_CLEANER_INTERVAL = "hbase.master.cleaner.snapshot.interval";
+ private static final int SNAPSHOT_CLEANER_DEFAULT_INTERVAL = 1800 * 1000; // Default 30 min
+ private static final String DELETE_SNAPSHOT_EVENT =
+ "Eligible Snapshot for cleanup due to expired TTL.";
+
+ private final SnapshotManager snapshotManager;
+
+ /**
+ * Construct Snapshot Cleaner Chore with parameterized constructor
+ *
+ * @param stopper When {@link Stoppable#isStopped()} is true, this chore will cancel and cleanup
+ * @param configuration The configuration to set
+ * @param snapshotManager SnapshotManager instance to manage lifecycle of snapshot
+ */
+ public SnapshotCleanerChore(Stoppable stopper, Configuration configuration,
+ SnapshotManager snapshotManager) {
+ super(SNAPSHOT_CLEANER_CHORE_NAME, stopper, configuration.getInt(SNAPSHOT_CLEANER_INTERVAL,
+ SNAPSHOT_CLEANER_DEFAULT_INTERVAL));
+ this.snapshotManager = snapshotManager;
+ }
+
+ @Override
+ protected void chore() {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Snapshot Cleaner Chore is starting up...");
+ }
+ try {
+ List<SnapshotDescription> completedSnapshotsList = this.snapshotManager.getCompletedSnapshots();
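The rest of chore() is truncated in this excerpt; what follows is a hedged sketch of the expiry check it most plausibly performs, reconstructed from the class javadoc (TTL in seconds), the DELETE_SNAPSHOT_EVENT constant, and the SnapshotManager calls exercised by the test further down. Variable names and log wording are assumptions, not the patch's exact code:

    // Inside the try block above: walk completed snapshots and delete those whose TTL has elapsed.
    for (SnapshotDescription snapshot : completedSnapshotsList) {
      long ttlSeconds = snapshot.getTtl();
      // ttl <= 0 means the snapshot never expires (rendered as FOREVER in the master web UI).
      if (ttlSeconds > 0) {
        long expiryMillis = snapshot.getCreationTime() + TimeUnit.SECONDS.toMillis(ttlSeconds);
        if (EnvironmentEdgeManager.currentTime() > expiryMillis) {
          LOG.info(DELETE_SNAPSHOT_EVENT + " Deleting snapshot: " + snapshot.getName());
          this.snapshotManager.deleteSnapshot(snapshot);
        }
      }
    }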
@@ -134,6 +135,13 @@
<% } %>
<th>Table</th>
<th>Creation Time</th>
+ <th>Time To Live(Sec)</th>
<th>Type</th>
<th>Format Version</th>
<th>State</th>
@@ -143,6 +146,13 @@
<% } %>
<td><%= new Date(snapshot.getCreationTime()) %></td>
+ <td>
+ <% if (snapshotTtl == 0) { %>
+ FOREVER
+ <% } else { %>
+ <%= snapshotTtl %>
+ <% } %>
+ </td>
<td><%= snapshot.getType() %></td>
<td><%= snapshot.getVersion() %></td>
<% if (stats.isSnapshotCorrupted()) { %>
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
index 7df4708f1420..6259fb5623d9 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
@@ -110,6 +110,7 @@
<th>Snapshot Name</th>
<th>Table</th>
<th>Creation Time</th>
+ <th>TTL(Sec)</th>
<th>Shared Storefile Size</th>
<th>Archived Storefile Size</th>
<td><%= new Date(snapshotDesc.getCreationTime()) %></td>
+ <td>
+ <% if (snapshotDesc.getTtl() == 0) { %>
+ FOREVER
+ <% } else { %>
+ <%= snapshotDesc.getTtl() %>
+ <% } %>
+ </td>
<td><%= StringUtils.humanReadableInt(stats.getSharedStoreFilesSize()) %></td>
<td><%= StringUtils.humanReadableInt(stats.getArchivedStoreFileSize()) %>
(<%= StringUtils.humanReadableInt(stats.getNonSharedArchivedStoreFilesSize()) %>)</td>
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java
new file mode 100644
index 000000000000..720387ee7b88
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.cleaner;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Tests for SnapshotCleanerChore
+ */
+@Category({MasterTests.class, SmallTests.class})
+public class TestSnapshotCleanerChore {
+
+ private static final Log LOG = LogFactory.getLog(TestSnapshotCleanerChore.class);
+
+ private static final HBaseTestingUtility HBASE_TESTING_UTILITY = new HBaseTestingUtility();
+
+ private SnapshotManager snapshotManager;
+
+ private Configuration getSnapshotCleanerConf() {
+ Configuration conf = HBASE_TESTING_UTILITY.getConfiguration();
+ conf.setInt("hbase.master.cleaner.snapshot.interval", 100);
+ return conf;
+ }
+
+
+ @Test
+ public void testSnapshotCleanerWithoutAnyCompletedSnapshot() throws IOException {
+ snapshotManager = Mockito.mock(SnapshotManager.class);
+ Stoppable stopper = new StoppableImplementation();
+ Configuration conf = getSnapshotCleanerConf();
+ SnapshotCleanerChore snapshotCleanerChore =
+ new SnapshotCleanerChore(stopper, conf, snapshotManager);
+ try {
+ snapshotCleanerChore.chore();
+ } finally {
+ stopper.stop("Stopping Test Stopper");
+ }
+ Mockito.verify(snapshotManager, Mockito.times(0))
+ .deleteSnapshot(Mockito.any(SnapshotDescription.class));
+ }
+
+ @Test
+ public void testSnapshotCleanerWithNoTtlExpired() throws IOException {
+ snapshotManager = Mockito.mock(SnapshotManager.class);
+ Stoppable stopper = new StoppableImplementation();
+ Configuration conf = getSnapshotCleanerConf();
+ SnapshotCleanerChore snapshotCleanerChore =
+ new SnapshotCleanerChore(stopper, conf, snapshotManager);
+ List<SnapshotDescription> snapshotDescriptionList = new ArrayList<>();
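The test body is cut off at this point; a hedged sketch (not verbatim from the patch) of how this no-expiry case plausibly finishes, reusing only calls already visible in this file. Snapshot names and the TTL value are illustrative:

    // Feed the mocked manager a snapshot whose TTL has not elapsed, run the chore once,
    // and verify nothing gets deleted.
    snapshotDescriptionList.add(SnapshotDescription.newBuilder()
        .setName("snapshot01")
        .setTable("table01")
        .setCreationTime(EnvironmentEdgeManager.currentTime())
        .setTtl(1800L) // 30 minutes, far from expired
        .build());
    Mockito.when(snapshotManager.getCompletedSnapshots()).thenReturn(snapshotDescriptionList);
    try {
      snapshotCleanerChore.chore();
    } finally {
      stopper.stop("Stopping Test Stopper");
    }
    Mockito.verify(snapshotManager, Mockito.times(0))
        .deleteSnapshot(Mockito.any(SnapshotDescription.class));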