diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 1485981e4070..07db5b7dfbcf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1183,17 +1183,38 @@ void snapshot(final String snapshotName, IllegalArgumentException; /** - * Take a snapshot and wait for the server to complete that snapshot (blocking). Only a single - * snapshot should be taken at a time for an instance of HBase, or results may be undefined (you - * can tell multiple HBase clusters to snapshot at the same time, but only one at a time for a - * single cluster). Snapshots are considered unique based on the name of the snapshot. - * Attempts to take a snapshot with the same name (even a different type or with different - * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. - * Snapshot names follow the same naming constraints as tables in HBase. See {@link - * org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should probably - * use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} or - * {@link #snapshot(byte[], org.apache.hadoop.hbase.TableName)} unless you are sure about the type - * of snapshot that you want to take. + * Create typed snapshot of the table. Snapshots are considered unique based on the name of the + * snapshot. Snapshots are taken sequentially even when requested concurrently, across + * all tables. Attempts to take a snapshot with the same name (even a different type or with + * different parameters) will fail with a {@link SnapshotCreationException} indicating the + * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. + * Snapshot can live with ttl seconds. 
+ * + * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all + * other snapshots stored on the cluster + * @param tableName name of the table to snapshot + * @param type type of snapshot to take + * @param snapshotProps snapshot additional properties e.g. TTL + * @throws IOException we fail to reach the master + * @throws SnapshotCreationException if snapshot creation failed + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + */ + void snapshot(String snapshotName, TableName tableName, SnapshotDescription.Type type, + Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException; + + /** + * Take a snapshot and wait for the server to complete that snapshot (blocking). Snapshots are + * considered unique based on the name of the snapshot. Snapshots are taken sequentially + * even when requested concurrently, across all tables. Attempts to take a snapshot with the + * same name (even a different type or with different parameters) will fail with a + * {@link SnapshotCreationException} indicating the duplicate naming. Snapshot names follow the + * same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should + * probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} or + * {@link #snapshot(byte[], org.apache.hadoop.hbase.TableName)} unless you are sure about the + * type of snapshot that you want to take. * * @param snapshot snapshot to take * @throws IOException or we lose contact with the master. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 499a8dc21d97..1b237f33d4de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -38,7 +38,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; - +import org.apache.commons.collections.MapUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -3611,8 +3611,8 @@ public void snapshot(final String snapshotName, * Create snapshot for the given table of given flush type. *

* Snapshots are considered unique based on the name of the snapshot. Attempts to take a - * snapshot with the same name (even a different type or with different parameters) will fail with - * a {@link SnapshotCreationException} indicating the duplicate naming. + * snapshot with the same name (even a different type or with different parameters) will fail + * with a {@link SnapshotCreationException} indicating the duplicate naming. *

* Snapshot names follow the same naming constraints as tables in HBase. * @param snapshotName name of the snapshot to be created @@ -3627,6 +3627,30 @@ public void snapshot(final byte[] snapshotName, final byte[] tableName, IOException, SnapshotCreationException, IllegalArgumentException { snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName), flushType); } + + /** + * Create snapshot for the given table of given flush type. + *

+ * Snapshots are considered unique based on the name of the snapshot. Attempts to take a + * snapshot with the same name (even a different type or with different parameters) will fail + * with a {@link SnapshotCreationException} indicating the duplicate naming. + *

+ * Snapshot names follow the same naming constraints as tables in HBase. + * @param snapshotName name of the snapshot to be created + * @param tableName name of the table for which snapshot is created + * @param flushType if the snapshot should be taken without flush memstore first + * @param snapshotProps snapshot parameters + * @throws IOException if a remote or network exception occurs + * @throws SnapshotCreationException if snapshot creation failed + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + */ + public void snapshot(final byte[] snapshotName, final byte[] tableName, + final SnapshotDescription.Type flushType, Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { + snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName), flushType, + snapshotProps); + } + /** public void snapshot(final String snapshotName, * Create a timestamp consistent snapshot for the given table. @@ -3671,34 +3695,46 @@ public void snapshot(final byte[] snapshotName, * snapshots stored on the cluster * @param tableName name of the table to snapshot * @param type type of snapshot to take + * @param snapshotProps snapshot parameters * @throws IOException we fail to reach the master * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ @Override - public void snapshot(final String snapshotName, - final TableName tableName, - SnapshotDescription.Type type) throws IOException, SnapshotCreationException, - IllegalArgumentException { + public void snapshot(final String snapshotName, final TableName tableName, + SnapshotDescription.Type type, Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { SnapshotDescription.Builder builder = SnapshotDescription.newBuilder(); builder.setTable(tableName.getNameAsString()); builder.setName(snapshotName); builder.setType(type); 
+ builder.setTtl(getTtlFromSnapshotProps(snapshotProps)); snapshot(builder.build()); } + private long getTtlFromSnapshotProps(Map snapshotProps) { + return MapUtils.getLongValue(snapshotProps, "TTL", -1); + } + + public void snapshot(final String snapshotName, + final TableName tableName, + SnapshotDescription.Type type) throws IOException, SnapshotCreationException, + IllegalArgumentException { + snapshot(snapshotName, tableName, type, null); + } + public void snapshot(final String snapshotName, final String tableName, SnapshotDescription.Type type) throws IOException, SnapshotCreationException, IllegalArgumentException { - snapshot(snapshotName, TableName.valueOf(tableName), type); + snapshot(snapshotName, TableName.valueOf(tableName), type, null); } public void snapshot(final String snapshotName, final byte[] tableName, SnapshotDescription.Type type) throws IOException, SnapshotCreationException, IllegalArgumentException { - snapshot(snapshotName, TableName.valueOf(tableName), type); + snapshot(snapshotName, TableName.valueOf(tableName), type, null); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java index bba166b8358b..0b2d435ff93c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java @@ -61,8 +61,15 @@ public static String toString(SnapshotDescription ssd) { if (ssd == null) { return null; } - return "{ ss=" + ssd.getName() + - " table=" + (ssd.hasTable()?TableName.valueOf(ssd.getTable()):"") + - " type=" + ssd.getType() + " }"; + return new StringBuilder("{ ss=") + .append(ssd.getName()) + .append(" table=") + .append(ssd.hasTable() ? 
TableName.valueOf(ssd.getTable()) : "") + .append(" type=") + .append(ssd.getType()) + .append(" ttl=") + .append(ssd.getTtl()) + .append(" }") + .toString(); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 99c2cc0dcff9..5b7ae8acc483 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1331,6 +1331,15 @@ public static enum Modify { + System.getProperty("user.name") + "/hbase-staging"; public static final String DEFAULT_LOSSY_COUNTING_ERROR_RATE = "hbase.util.default.lossycounting.errorrate"; + + // Default TTL - FOREVER + public static final long DEFAULT_SNAPSHOT_TTL = 0; + + // User defined Default TTL config key + public static final String DEFAULT_SNAPSHOT_TTL_CONFIG_KEY = "hbase.master.snapshot.ttl"; + + public static final String SNAPSHOT_CLEANER_DISABLE = "hbase.master.cleaner.snapshot.disable"; + private HConstants() { // Can't be instantiated with this ctor. } diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 6835d841b232..3f409d7c60ca 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -1624,4 +1624,23 @@ possible configurations would overwhelm and obscure the important. Number of rows in a batch operation above which a warning will be logged. + + hbase.master.cleaner.snapshot.interval + 1800000 + + Snapshot Cleanup chore interval in milliseconds. + The cleanup thread keeps running at this interval + to find all snapshots that are expired based on TTL + and delete them. + + + + hbase.master.snapshot.ttl + 0 + + Default Snapshot TTL to be considered when the user does not specify TTL while + creating snapshot. 
Default value 0 indicates FOREVER - snapshot should not be + automatically deleted until it is manually deleted + + diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index b4c6c04c6096..d5a7150efa55 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -17205,6 +17205,16 @@ public interface SnapshotDescriptionOrBuilder * optional .hbase.pb.UsersAndPermissions users_and_permissions = 7; */ org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder getUsersAndPermissionsOrBuilder(); + + // optional int64 ttl = 8 [default = 0]; + /** + * optional int64 ttl = 8 [default = 0]; + */ + boolean hasTtl(); + /** + * optional int64 ttl = 8 [default = 0]; + */ + long getTtl(); } /** * Protobuf type {@code hbase.pb.SnapshotDescription} @@ -17311,6 +17321,11 @@ private SnapshotDescription( bitField0_ |= 0x00000040; break; } + case 64: { + bitField0_ |= 0x00000080; + ttl_ = input.readInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -17653,6 +17668,22 @@ public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPe return usersAndPermissions_; } + // optional int64 ttl = 8 [default = 0]; + public static final int TTL_FIELD_NUMBER = 8; + private long ttl_; + /** + * optional int64 ttl = 8 [default = 0]; + */ + public boolean hasTtl() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional int64 ttl = 8 [default = 0]; + */ + public long getTtl() { + return ttl_; + } + private void initFields() { name_ = ""; table_ = ""; @@ -17661,6 +17692,7 @@ private void initFields() { version_ = 0; owner_ = ""; usersAndPermissions_ = 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions.getDefaultInstance(); + ttl_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -17705,6 +17737,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeMessage(7, usersAndPermissions_); } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeInt64(8, ttl_); + } getUnknownFields().writeTo(output); } @@ -17742,6 +17777,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(7, usersAndPermissions_); } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(8, ttl_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -17800,6 +17839,11 @@ public boolean equals(final java.lang.Object obj) { result = result && getUsersAndPermissions() .equals(other.getUsersAndPermissions()); } + result = result && (hasTtl() == other.hasTtl()); + if (hasTtl()) { + result = result && (getTtl() + == other.getTtl()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -17841,6 +17885,10 @@ public int hashCode() { hash = (37 * hash) + USERS_AND_PERMISSIONS_FIELD_NUMBER; hash = (53 * hash) + getUsersAndPermissions().hashCode(); } + if (hasTtl()) { + hash = (37 * hash) + TTL_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTtl()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -17974,6 +18022,8 @@ public Builder clear() { usersAndPermissionsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000040); + ttl_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); return this; } @@ -18034,6 +18084,10 @@ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio } else { result.usersAndPermissions_ = 
usersAndPermissionsBuilder_.build(); } + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000080; + } + result.ttl_ = ttl_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -18077,6 +18131,9 @@ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos. if (other.hasUsersAndPermissions()) { mergeUsersAndPermissions(other.getUsersAndPermissions()); } + if (other.hasTtl()) { + setTtl(other.getTtl()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -18579,6 +18636,39 @@ public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPe return usersAndPermissionsBuilder_; } + // optional int64 ttl = 8 [default = 0]; + private long ttl_ ; + /** + * optional int64 ttl = 8 [default = 0]; + */ + public boolean hasTtl() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional int64 ttl = 8 [default = 0]; + */ + public long getTtl() { + return ttl_; + } + /** + * optional int64 ttl = 8 [default = 0]; + */ + public Builder setTtl(long value) { + bitField0_ |= 0x00000080; + ttl_ = value; + onChanged(); + return this; + } + /** + * optional int64 ttl = 8 [default = 0]; + */ + public Builder clearTtl() { + bitField0_ = (bitField0_ & ~0x00000080); + ttl_ = 0L; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotDescription) } @@ -18760,21 +18850,21 @@ public org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPe "rsion_major\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r" + "\"Q\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+" + "\n\014version_info\030\002 \001(\0132\025.hbase.pb.VersionI" + - "nfo\"\223\002\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" + + "nfo\"\243\002\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" + "\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" + "\0010\0227\n\004type\030\004 
\001(\0162\".hbase.pb.SnapshotDesc" + "ription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005", "owner\030\006 \001(\t\022<\n\025users_and_permissions\030\007 \001" + - "(\0132\035.hbase.pb.UsersAndPermissions\".\n\004Typ" + - "e\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n\tSKIPFLUSH\020" + - "\002*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQ" + - "UAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREAT" + - "ER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n" + - "\010TimeUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECON" + - "DS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007M" + - "INUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apa" + - "che.hadoop.hbase.protobuf.generatedB\013HBa", - "seProtosH\001\240\001\001" + "(\0132\035.hbase.pb.UsersAndPermissions\022\016\n\003ttl" + + "\030\010 \001(\003:\0010\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH" + + "\020\001\022\r\n\tSKIPFLUSH\020\002*r\n\013CompareType\022\010\n\004LESS" + + "\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_" + + "EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER" + + "\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS" + + "\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013" + + "\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004D" + + "AYS\020\007B>\n*org.apache.hadoop.hbase.protobu", + "f.generatedB\013HBaseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -18918,7 +19008,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( 
internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SnapshotDescription_descriptor, - new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "UsersAndPermissions", }); + new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "UsersAndPermissions", "Ttl", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto index 67645d4c9f40..44b722de8536 100644 --- a/hbase-protocol/src/main/protobuf/HBase.proto +++ b/hbase-protocol/src/main/protobuf/HBase.proto @@ -232,4 +232,5 @@ message SnapshotDescription { optional int32 version = 5; optional string owner = 6; optional UsersAndPermissions users_and_permissions = 7; + optional int64 ttl = 8 [default = 0]; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index ab0865d8f253..243f448860b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -104,6 +104,7 @@ import org.apache.hadoop.hbase.master.cleaner.ReplicationZKLockCleanerChore; import org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleaner; import org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleanerChore; +import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore; import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore; @@ -329,6 +330,7 @@ public void run() { private ClusterStatusChore clusterStatusChore; private ClusterStatusPublisher clusterStatusPublisherChore = null; private PeriodicDoMetrics periodicDoMetricsChore = null; + private 
SnapshotCleanerChore snapshotCleanerChore = null; CatalogJanitor catalogJanitorChore; private ReplicationZKLockCleanerChore replicationZKLockCleanerChore; @@ -1248,6 +1250,18 @@ this, conf, getMasterFileSystem().getOldLogDir().getFileSystem(conf), this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem() .getFileSystem(), archiveDir, params); getChoreService().scheduleChore(hfileCleaner); + + final boolean isSnapshotChoreDisabled = conf.getBoolean(HConstants.SNAPSHOT_CLEANER_DISABLE, + false); + if (isSnapshotChoreDisabled) { + if (LOG.isTraceEnabled()) { + LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore.."); + } + } else { + this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager()); + getChoreService().scheduleChore(this.snapshotCleanerChore); + } + serviceStarted = true; if (LOG.isTraceEnabled()) { LOG.trace("Started service threads"); @@ -1351,6 +1365,7 @@ private void stopChores() { choreService.cancelChore(this.hfileCleaner); choreService.cancelChore(this.replicationZKLockCleanerChore); choreService.cancelChore(this.replicationZKNodeCleanerChore); + choreService.cancelChore(this.snapshotCleanerChore); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/SnapshotCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/SnapshotCleanerChore.java new file mode 100644 index 000000000000..214ab620df5e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/SnapshotCleanerChore.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.cleaner; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +/** + * This chore, every time it runs, will try to delete snapshots that are expired based on TTL in + * seconds configured for each Snapshot + */ +@InterfaceAudience.Private +public class SnapshotCleanerChore extends ScheduledChore { + + private static final Log LOG = LogFactory.getLog(SnapshotCleanerChore.class); + private static final String SNAPSHOT_CLEANER_CHORE_NAME = "SnapshotCleaner"; + private static final String SNAPSHOT_CLEANER_INTERVAL = "hbase.master.cleaner.snapshot.interval"; + private static final int SNAPSHOT_CLEANER_DEFAULT_INTERVAL = 1800 * 1000; // Default 30 min + private static final String DELETE_SNAPSHOT_EVENT = + "Eligible Snapshot for cleanup due to expired TTL."; + + private final SnapshotManager snapshotManager; + + /** + * Construct Snapshot Cleaner Chore with parameterized constructor + * + * @param stopper When {@link Stoppable#isStopped()} is true, this chore will cancel and 
cleanup + * @param configuration The configuration to set + * @param snapshotManager SnapshotManager instance to manage lifecycle of snapshot + */ + public SnapshotCleanerChore(Stoppable stopper, Configuration configuration, + SnapshotManager snapshotManager) { + super(SNAPSHOT_CLEANER_CHORE_NAME, stopper, configuration.getInt(SNAPSHOT_CLEANER_INTERVAL, + SNAPSHOT_CLEANER_DEFAULT_INTERVAL)); + this.snapshotManager = snapshotManager; + } + + @Override + protected void chore() { + if (LOG.isTraceEnabled()) { + LOG.trace("Snapshot Cleaner Chore is starting up..."); + } + try { + List completedSnapshotsList = + this.snapshotManager.getCompletedSnapshots(); + for (SnapshotDescription snapshotDescription : completedSnapshotsList) { + long snapshotCreatedTime = snapshotDescription.getCreationTime(); + long snapshotTtl = snapshotDescription.getTtl(); + /* + * Backward compatibility after the patch deployment on HMaster + * Any snapshot with ttl 0 is to be considered as snapshot to keep FOREVER + * Default ttl value specified by {@HConstants.DEFAULT_SNAPSHOT_TTL} + */ + if (snapshotCreatedTime > 0 && snapshotTtl > 0 && + snapshotTtl < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { + long currentTime = EnvironmentEdgeManager.currentTime(); + if ((snapshotCreatedTime + TimeUnit.SECONDS.toMillis(snapshotTtl)) < currentTime) { + LOG.info("Event: " + DELETE_SNAPSHOT_EVENT + " Name: " + snapshotDescription.getName() + + ", CreatedTime: " + snapshotCreatedTime + ", TTL: " + snapshotTtl + + ", currentTime: " + currentTime); + deleteExpiredSnapshot(snapshotDescription); + } + } + } + } catch (IOException e) { + LOG.error("Error while cleaning up Snapshots...", e); + } + if (LOG.isTraceEnabled()) { + LOG.trace("Snapshot Cleaner Chore is closing..."); + } + } + + private void deleteExpiredSnapshot(SnapshotDescription snapshotDescription) { + try { + this.snapshotManager.deleteSnapshot(snapshotDescription); + } catch (Exception e) { + LOG.error("Error while deleting Snapshot: " + 
snapshotDescription.getName(), e); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index 35242c3fd180..be7cf13232e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -22,6 +22,7 @@ import java.net.URI; import java.security.PrivilegedExceptionAction; import java.util.Collections; +import java.util.concurrent.TimeUnit; import com.google.common.collect.ListMultimap; import org.apache.commons.logging.Log; @@ -125,6 +126,8 @@ public CompletedSnaphotDirectoriesFilter(FileSystem fs) { /** Default value if no start time is specified */ public static final long NO_SNAPSHOT_START_TIME_SPECIFIED = 0; + // Default value if no ttl is specified for Snapshot + private static final long NO_SNAPSHOT_TTL_SPECIFIED = 0; public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = "hbase.snapshot.master.timeout.millis"; @@ -316,6 +319,21 @@ public static SnapshotDescription validate(SnapshotDescription snapshot, Configu builder.setCreationTime(time); snapshot = builder.build(); } + long ttl = snapshot.getTtl(); + // set default ttl(sec) if it is not set already or the value is out of the range + if (ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED || + ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { + final long defaultSnapshotTtl = conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, + HConstants.DEFAULT_SNAPSHOT_TTL); + if (LOG.isDebugEnabled()) { + LOG.debug("Snapshot current TTL value: " + ttl + " resetting it to default value: " + + defaultSnapshotTtl); + } + ttl = defaultSnapshotTtl; + } + SnapshotDescription.Builder builder = snapshot.toBuilder(); + builder.setTtl(ttl); + snapshot = builder.build(); // set the acl to snapshot if security feature is 
enabled. if(isSecurityAvailable(conf)){ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index 1b9a94ce0b74..d4a4e03192cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -361,12 +361,12 @@ public int run(String[] args) throws IOException, InterruptedException { // List Available Snapshots if (listSnapshots) { SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); - System.out.printf("%-20s | %-20s | %s%n", "SNAPSHOT", "CREATION TIME", "TABLE NAME"); + System.out.printf("%-20s | %-20s | %-20s | %s%n", "SNAPSHOT", "CREATION TIME", "TTL IN SEC", + "TABLE NAME"); for (SnapshotDescription desc: getSnapshotList(conf)) { - System.out.printf("%-20s | %20s | %s%n", - desc.getName(), - df.format(new Date(desc.getCreationTime())), - desc.getTable()); + System.out.printf("%-20s | %20s | %20s | %s%n", desc.getName(), + df.format(new Date(desc.getCreationTime())), desc.getTtl(), + desc.getTable()); } return 0; } @@ -424,6 +424,7 @@ private void printInfo() { System.out.println(" Table: " + snapshotDesc.getTable()); System.out.println(" Format: " + snapshotDesc.getVersion()); System.out.println("Created: " + df.format(new Date(snapshotDesc.getCreationTime()))); + System.out.println(" Ttl: " + snapshotDesc.getTtl()); System.out.println(" Owner: " + snapshotDesc.getOwner()); System.out.println(); } diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp index e13ac3b61c4f..ec90d1864876 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp @@ -37,12 +37,14 @@ SnapshotInfo.SnapshotStats stats = null; TableName snapshotTable = null; 
boolean tableExists = false; + long snapshotTtl = 0; try (Admin admin = master.getConnection().getAdmin()) { for (SnapshotDescription snapshotDesc: admin.listSnapshots()) { if (snapshotName.equals(snapshotDesc.getName())) { snapshot = snapshotDesc; stats = SnapshotInfo.getSnapshotStats(conf, snapshot); snapshotTable = TableName.valueOf(snapshot.getTable()); + snapshotTtl = snapshot.getTtl(); tableExists = admin.tableExists(snapshotTable); break; } @@ -128,6 +130,7 @@ Table Creation Time + Time To Live(Sec) Type Format Version State @@ -143,6 +146,13 @@ <% } %> <%= new Date(snapshot.getCreationTime()) %> + + <% if (snapshotTtl == 0) { %> + FOREVER + <% } else { %> + <%= snapshotTtl %> + <% } %> + <%= snapshot.getType() %> <%= snapshot.getVersion() %> <% if (stats.isSnapshotCorrupted()) { %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp index 7df4708f1420..6259fb5623d9 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp @@ -110,6 +110,7 @@ Snapshot Name Table Creation Time + TTL(Sec) Shared Storefile Size Archived Storefile Size @@ -134,6 +135,13 @@ <% } %> <%= new Date(snapshotDesc.getCreationTime()) %> + + <% if (snapshotDesc.getTtl() == 0) { %> + FOREVER + <% } else { %> + <%= snapshotDesc.getTtl() %> + <% } %> + <%= StringUtils.humanReadableInt(stats.getSharedStoreFilesSize()) %> <%= StringUtils.humanReadableInt(stats.getArchivedStoreFileSize()) %> (<%= StringUtils.humanReadableInt(stats.getNonSharedArchivedStoreFilesSize()) %>) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java new file mode 100644 index 000000000000..720387ee7b88 --- /dev/null +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.cleaner; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +/** + * Tests for SnapshotsCleanerChore + */ +@Category({MasterTests.class, SmallTests.class}) +public class TestSnapshotCleanerChore { + + private static final Log LOG = LogFactory.getLog(TestSnapshotCleanerChore.class); + + private static final 
HBaseTestingUtility HBASE_TESTING_UTILITY = new HBaseTestingUtility(); + + private SnapshotManager snapshotManager; + + private Configuration getSnapshotCleanerConf() { + Configuration conf = HBASE_TESTING_UTILITY.getConfiguration(); + conf.setInt("hbase.master.cleaner.snapshot.interval", 100); + return conf; + } + + + @Test + public void testSnapshotCleanerWithoutAnyCompletedSnapshot() throws IOException { + snapshotManager = Mockito.mock(SnapshotManager.class); + Stoppable stopper = new StoppableImplementation(); + Configuration conf = getSnapshotCleanerConf(); + SnapshotCleanerChore snapshotCleanerChore = + new SnapshotCleanerChore(stopper, conf, snapshotManager); + try { + snapshotCleanerChore.chore(); + } finally { + stopper.stop("Stopping Test Stopper"); + } + Mockito.verify(snapshotManager, Mockito.times(0)) + .deleteSnapshot(Mockito.any(SnapshotDescription.class)); + } + + @Test + public void testSnapshotCleanerWithNoTtlExpired() throws IOException { + snapshotManager = Mockito.mock(SnapshotManager.class); + Stoppable stopper = new StoppableImplementation(); + Configuration conf = getSnapshotCleanerConf(); + SnapshotCleanerChore snapshotCleanerChore = + new SnapshotCleanerChore(stopper, conf, snapshotManager); + List snapshotDescriptionList = new ArrayList<>(); + snapshotDescriptionList.add(getSnapshotDescription(-2, "snapshot01", "table01", + EnvironmentEdgeManager.currentTime() - 100000)); + snapshotDescriptionList.add(getSnapshotDescription(10, "snapshot02", "table02", + EnvironmentEdgeManager.currentTime())); + Mockito.when(snapshotManager.getCompletedSnapshots()).thenReturn(snapshotDescriptionList); + try { + LOG.info("2 Snapshots are completed but TTL is not expired for any of them"); + snapshotCleanerChore.chore(); + } finally { + stopper.stop("Stopping Test Stopper"); + } + Mockito.verify(snapshotManager, Mockito.times(0)) + .deleteSnapshot(Mockito.any(SnapshotDescription.class)); + } + + @Test + public void testSnapshotCleanerWithSomeTtlExpired() 
throws IOException { + snapshotManager = Mockito.mock(SnapshotManager.class); + Stoppable stopper = new StoppableImplementation(); + Configuration conf = getSnapshotCleanerConf(); + conf.setStrings("hbase.master.cleaner.snapshot.disable", "false"); + SnapshotCleanerChore snapshotCleanerChore = + new SnapshotCleanerChore(stopper, conf, snapshotManager); + List snapshotDescriptionList = new ArrayList<>(); + snapshotDescriptionList.add(getSnapshotDescription(10, "snapshot01", "table01", 1)); + snapshotDescriptionList.add(getSnapshotDescription(5, "snapshot02", "table02", 2)); + snapshotDescriptionList.add(getSnapshotDescription(30, "snapshot01", "table01", + EnvironmentEdgeManager.currentTime())); + snapshotDescriptionList.add(getSnapshotDescription(0, "snapshot02", "table02", + EnvironmentEdgeManager.currentTime())); + snapshotDescriptionList.add(getSnapshotDescription(40, "snapshot03", "table03", + EnvironmentEdgeManager.currentTime())); + Mockito.when(snapshotManager.getCompletedSnapshots()).thenReturn(snapshotDescriptionList); + try { + LOG.info("5 Snapshots are completed. TTL is expired for 2 them. Going to delete them"); + snapshotCleanerChore.chore(); + } finally { + stopper.stop("Stopping Test Stopper"); + } + Mockito.verify(snapshotManager, Mockito.times(2)) + .deleteSnapshot(Mockito.any(SnapshotDescription.class)); + } + + @Test + public void testSnapshotCleanerWithReadIOE() throws IOException { + snapshotManager = Mockito.mock(SnapshotManager.class); + Stoppable stopper = new StoppableImplementation(); + Configuration conf = new HBaseTestingUtility().getConfiguration(); + SnapshotCleanerChore snapshotCleanerChore = + new SnapshotCleanerChore(stopper, conf, snapshotManager); + Mockito.when(snapshotManager.getCompletedSnapshots()).thenThrow(IOException.class); + try { + LOG.info("While getting completed Snapshots, IOException would occur. 
Hence, No Snapshot" + + " should be deleted"); + snapshotCleanerChore.chore(); + } finally { + stopper.stop("Stopping Test Stopper"); + } + Mockito.verify(snapshotManager, Mockito.times(0)) + .deleteSnapshot(Mockito.any(SnapshotDescription.class)); + } + + @Test + public void testSnapshotChoreWithTtlOutOfRange() throws IOException { + snapshotManager = Mockito.mock(SnapshotManager.class); + Stoppable stopper = new StoppableImplementation(); + Configuration conf = getSnapshotCleanerConf(); + List snapshotDescriptionList = new ArrayList<>(); + snapshotDescriptionList.add(getSnapshotDescription(Long.MAX_VALUE, "snapshot01", "table01", 1)); + snapshotDescriptionList.add(getSnapshotDescription(5, "snapshot02", "table02", 2)); + Mockito.when(snapshotManager.getCompletedSnapshots()).thenReturn(snapshotDescriptionList); + SnapshotCleanerChore snapshotCleanerChore = + new SnapshotCleanerChore(stopper, conf, snapshotManager); + try { + LOG.info("Snapshot Chore is disabled. No cleanup performed for Expired Snapshots"); + snapshotCleanerChore.chore(); + } finally { + stopper.stop("Stopping Test Stopper"); + } + Mockito.verify(snapshotManager, Mockito.times(1)).getCompletedSnapshots(); + } + + private SnapshotDescription getSnapshotDescription(final long ttl, + final String snapshotName, final String tableName, final long snapshotCreationTime) { + SnapshotDescription.Builder snapshotDescriptionBuilder = + SnapshotDescription.newBuilder(); + snapshotDescriptionBuilder.setTtl(ttl); + snapshotDescriptionBuilder.setName(snapshotName); + snapshotDescriptionBuilder.setTable(tableName); + snapshotDescriptionBuilder.setType(SnapshotDescription.Type.FLUSH); + snapshotDescriptionBuilder.setCreationTime(snapshotCreationTime); + return snapshotDescriptionBuilder.build(); + } + + /** + * Simple helper class that just keeps track of whether or not its stopped. 
+ */ + private static class StoppableImplementation implements Stoppable { + + private volatile boolean stop = false; + + @Override + public void stop(String why) { + this.stop = true; + } + + @Override + public boolean isStopped() { + return this.stop; + } + + } + +} \ No newline at end of file diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 0403b8e8ee1f..062b6a322950 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -935,8 +935,13 @@ def snapshot(table, snapshot_name, *args) @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes) else args.each do |arg| + ttl = arg[TTL] + ttl = ttl ? ttl.to_java(:long) : -1 + snapshot_props = java.util.HashMap.new + snapshot_props.put("TTL", ttl) if arg[SKIP_FLUSH] == true - @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes, SnapshotDescription::Type::SKIPFLUSH) + @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes, + SnapshotDescription::Type::SKIPFLUSH, snapshot_props) else @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes) end diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc index 1fa05378988c..caff49042a28 100644 --- a/src/main/asciidoc/_chapters/hbase-default.adoc +++ b/src/main/asciidoc/_chapters/hbase-default.adoc @@ -2178,4 +2178,33 @@ The percent of region server RPC threads failed to abort RS. .Default `0.5` - + +[[hbase.master.cleaner.snapshot.interval]] +*`hbase.master.cleaner.snapshot.interval`*:: ++ +.Description + + Snapshot Cleanup chore interval in milliseconds. + The cleanup thread keeps running at this interval + to find all snapshots that are expired based on TTL + and delete them. + ++ +.Default +`1800000` + + +[[hbase.master.snapshot.ttl]] +*`hbase.master.snapshot.ttl`*:: ++ +.Description + + Default Snapshot TTL to be considered when the user + does not specify TTL while creating snapshot. 
+ Default value 0 indicates FOREVER - snapshot should not be + automatically deleted until it is manually deleted + + ++ +.Default +`0` diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index fec7e8a1d36e..9c1eaba7224e 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -2004,6 +2004,47 @@ A snapshot is only a representation of a table during a window of time. The amount of time the snapshot operation will take to reach each Region Server may vary from a few seconds to a minute, depending on the resource load and speed of the hardware or network, among other factors. There is also no way to know whether a given insert or update is in memory or has been flushed. + +.Take a Snapshot With TTL +Snapshots have a lifecycle that is independent from the table from which they are created. +Although data in a table may be stored with TTL the data files containing them become +frozen by the snapshot. Space consumed by expired cells will not be reclaimed by normal +table housekeeping like compaction. While this is expected it can be inconvenient at scale. +When many snapshots are under management and the data in various tables is expired by +TTL some notion of optional TTL (and optional default TTL) for snapshots could be useful. + + +---- +hbase> snapshot 'mytable', 'snapshot1234', {TTL => 86400} +---- + +The above command creates snapshot `snapshot1234` with TTL of 86400 sec(24 hours) +and hence, the snapshot is supposed to be cleaned up after 24 hours + + + +.Default Snapshot TTL: + +- FOREVER by default +- User specified Default TTL with config `hbase.master.snapshot.ttl` + + +While creating a Snapshot, if TTL in seconds is not specified, by default the snapshot +would not be deleted automatically. i.e. it would be retained forever until it is +manually deleted. 
However, the user can update this default TTL behavior by +providing default TTL in sec for key: `hbase.master.snapshot.ttl`. +Value 0 for this config indicates TTL: FOREVER + + + + +At any point in time, if Snapshot cleanup is supposed to be stopped due to +some snapshot restore activity, it is advisable to disable Snapshot Cleaner with + config: + +`hbase.master.cleaner.snapshot.disable`: "true" + + [[ops.snapshots.list]] === Listing Snapshots