HDDS-7911. Replace Time.now() with Instant.now()
ptlrs committed Nov 4, 2024
1 parent 58d1443 commit 8ad1f2e
Showing 107 changed files with 326 additions and 318 deletions.
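
Every change in this commit follows the same mechanical pattern: the Hadoop helper Time.now() is replaced by java.time.Instant.now().toEpochMilli(). Both return wall-clock milliseconds since the Unix epoch (Hadoop's Time.now() is a thin wrapper around System.currentTimeMillis()), so the stored timestamp values are unchanged; only the dependency on org.apache.hadoop.util.Time is dropped. A minimal before/after sketch, with an illustrative class and field name not taken from the patch:

import java.time.Instant;

class TimestampExample {
  // Before (removed in this commit): long cTime = org.apache.hadoop.util.Time.now();
  // After: the same epoch-millisecond value, obtained from java.time
  long cTime = Instant.now().toEpochMilli();
}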
@@ -23,7 +23,6 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -32,6 +31,7 @@
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.time.Instant;
import java.util.Properties;
import java.util.function.IntSupplier;

@@ -84,7 +84,7 @@ public Storage(NodeType type, File root, String sdName,
if (state == StorageState.INITIALIZED) {
this.storageInfo = new StorageInfo(type, getVersionFile());
} else {
-this.storageInfo = new StorageInfo(nodeType, id, Time.now(),
+this.storageInfo = new StorageInfo(nodeType, id, Instant.now().toEpochMilli(),
defaultLayoutVersion);
setNodeProperties();
}
@@ -21,6 +21,7 @@
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
+import java.time.Instant;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
@@ -66,7 +67,6 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.ServicePlugin;
-import org.apache.hadoop.util.Time;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -225,7 +225,7 @@ public void start() {
datanodeDetails.setIpAddress(ip);
datanodeDetails.setVersion(
HddsVersionInfo.HDDS_VERSION_INFO.getVersion());
-datanodeDetails.setSetupTime(Time.now());
+datanodeDetails.setSetupTime(Instant.now().toEpochMilli());
datanodeDetails.setRevision(
HddsVersionInfo.HDDS_VERSION_INFO.getRevision());
TracingUtil.initTracing(
@@ -30,11 +30,11 @@
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
-import org.apache.hadoop.util.Time;
import org.slf4j.Logger;

import java.io.File;
import java.io.IOException;
+import java.time.Instant;
import java.util.List;
import java.util.Properties;
import java.util.UUID;
@@ -137,7 +137,7 @@ public static long getCreationTime(Properties props, File versionFile)
String cTimeStr = getProperty(props, OzoneConsts.CTIME, versionFile);

long cTime = Long.parseLong(cTimeStr);
-long currentTime = Time.now();
+long currentTime = Instant.now().toEpochMilli();
if (cTime > currentTime || cTime < 0) {
throw new InconsistentStorageStateException("Invalid Creation time in " +
"Version File : " + versionFile + " - " + cTime + ". Current system" +
@@ -32,7 +32,6 @@
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.common.utils.DiskCheckUtil;
import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
-import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -41,6 +40,7 @@
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.time.Instant;
import java.util.LinkedList;
import java.util.Objects;
import java.util.Optional;
@@ -302,7 +302,7 @@ private VolumeState analyzeVolumeState() {
*/
private void createVersionFile() throws IOException {
this.storageID = StorageVolumeUtil.generateUuid();
-this.cTime = Time.now();
+this.cTime = Instant.now().toEpochMilli();
this.layoutVersion = getLatestVersion().getVersion();

if (this.clusterID == null || datanodeUuid == null) {
@@ -20,7 +20,6 @@
import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
import org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion;
import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
-import org.apache.hadoop.util.Time;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -29,6 +28,7 @@
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.time.Instant;
import java.util.Properties;
import java.util.UUID;

@@ -62,7 +62,7 @@ public void setup() throws IOException {
storageID = UUID.randomUUID().toString();
clusterID = UUID.randomUUID().toString();
datanodeUUID = UUID.randomUUID().toString();
-cTime = Time.now();
+cTime = Instant.now().toEpochMilli();
lv = HDDSVolumeLayoutVersion.getLatestVersion().getVersion();

dnVersionFile = new DatanodeVersionFile(
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.server.events;

+import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -30,7 +31,6 @@
import com.google.gson.FieldAttributes;
import org.apache.hadoop.hdds.scm.net.NodeImpl;
import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -249,7 +249,7 @@ public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
*/
@VisibleForTesting
public void processAll(long timeout) {
-long currentTime = Time.now();
+long currentTime = Instant.now().toEpochMilli();
while (true) {

if (!isRunning) {
@@ -277,7 +277,7 @@ public void processAll(long timeout) {
Thread.currentThread().interrupt();
}

-if (Time.now() > currentTime + timeout) {
+if (Instant.now().toEpochMilli() > currentTime + timeout) {
throw new AssertionError(
"Messages are not processed in the given timeframe. Queued: "
+ queuedCount.get() + " Processed: " + processed);
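
For context, processAll(long timeout) is a test-only drain helper (it is annotated @VisibleForTesting): it records a wall-clock start time, waits until every queued event has been handled, and throws an AssertionError once more than timeout milliseconds have elapsed. A hypothetical test usage, assuming an EventQueue instance with a handler already registered for an event called SOME_EVENT (the event and payload names are made up for illustration):

EventQueue queue = new EventQueue();
// ... a handler for SOME_EVENT is assumed to be registered here ...
queue.fireEvent(SOME_EVENT, somePayload);  // enqueue one event
queue.processAll(5_000);                   // blocks until drained, or throws after ~5 s of wall-clock time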
@@ -19,11 +19,11 @@
package org.apache.ozone.compaction.log;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CompactionLogEntryProto;
-import org.apache.hadoop.util.Time;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

+import java.time.Instant;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
@@ -55,14 +55,14 @@ private static Stream<Arguments> compactionLogEntryValidScenarios() {
return Stream.of(
Arguments.of("With compaction reason.",
1000,
-Time.now(),
+Instant.now().toEpochMilli(),
inputFiles,
outputFiles,
"compactionReason"
),
Arguments.of("Without compaction reason.",
2000,
-Time.now(),
+Instant.now().toEpochMilli(),
inputFiles,
outputFiles,
null
@@ -30,6 +30,7 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -89,7 +90,6 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT;
-import static org.apache.hadoop.util.Time.now;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COMPACTION_LOG_FILE_NAME_SUFFIX;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_DAG_LIVE_NODES;
@@ -239,11 +239,11 @@ private static Stream<Arguments> casesGetSSTDiffListWithoutDB() {

String compactionLog =
// Snapshot 0
"S 1000 df6410c7-151b-4e90-870e-5ef12875acd5 " + now() + " \n"
"S 1000 df6410c7-151b-4e90-870e-5ef12875acd5 " + Instant.now().toEpochMilli() + " \n"
// Additional "compaction" to trigger and test early exit condition
+ "C 1291 000001,000002:000062\n"
// Snapshot 1
+ "S 3008 ef6410c7-151b-4e90-870e-5ef12875acd5 " + now() + " \n"
+ "S 3008 ef6410c7-151b-4e90-870e-5ef12875acd5 " + Instant.now().toEpochMilli() + " \n"
// Regular compaction
+ "C 4023 000068,000062:000069\n"
// Trivial move
@@ -254,53 +254,53 @@ private static Stream<Arguments> casesGetSSTDiffListWithoutDB() {
// Deletion?
+ "C 12755 000093,000090,000083:\n"
// Snapshot 2
+ "S 14980 e7ad72f8-52df-4430-93f6-0ee91d4a47fd " + now() + "\n"
+ "S 14980 e7ad72f8-52df-4430-93f6-0ee91d4a47fd " + Instant.now().toEpochMilli() + "\n"
+ "C 16192 000098,000096,000085,000078,000071,000064,000060,000052"
+ ":000099\n"
+ "C 16762 000105,000095,000088:000107\n"
// Snapshot 3
+ "S 17975 4f084f6e-ed3d-4780-8362-f832303309ea " + now() + "\n";
+ "S 17975 4f084f6e-ed3d-4780-8362-f832303309ea " + Instant.now().toEpochMilli() + "\n";

List<CompactionLogEntry> compactionLogEntries = Arrays.asList(
// Additional "compaction" to trigger and test early exit condition
createCompactionEntry(1291,
-now(),
+Instant.now().toEpochMilli(),
Arrays.asList("000001", "000002"),
Collections.singletonList("000062")),
// Regular compaction
createCompactionEntry(4023,
-now(),
+Instant.now().toEpochMilli(),
Arrays.asList("000068", "000062"),
Collections.singletonList("000069")),
// Trivial move
createCompactionEntry(5547,
-now(),
+Instant.now().toEpochMilli(),
Arrays.asList("000071", "000064", "000060", "000052"),
Arrays.asList("000071", "000064", "000060", "000062")),
createCompactionEntry(5647,
-now(),
+Instant.now().toEpochMilli(),
Arrays.asList("000073", "000066"),
Collections.singletonList("000074")),
createCompactionEntry(7872,
-now(),
+Instant.now().toEpochMilli(),
Arrays.asList("000082", "000076", "000069"),
Collections.singletonList("000083")),
createCompactionEntry(9001,
-now(),
+Instant.now().toEpochMilli(),
Arrays.asList("000087", "000080", "000074"),
Collections.singletonList("000088")),
// Deletion
createCompactionEntry(12755,
-now(),
+Instant.now().toEpochMilli(),
Arrays.asList("000093", "000090", "000083"),
Collections.emptyList()),
createCompactionEntry(16192,
-now(),
+Instant.now().toEpochMilli(),
Arrays.asList("000098", "000096", "000085", "000078", "000071",
"000064", "000060", "000052"),
Collections.singletonList("000099")),
createCompactionEntry(16762,
-now(),
+Instant.now().toEpochMilli(),
Arrays.asList("000105", "000095", "000088"),
Collections.singletonList("000107"))
);
@@ -1415,7 +1415,7 @@ private static Stream<Arguments> sstFilePruningScenarios() {
"One level compaction.",
null,
Collections.singletonList(createCompactionEntry(1,
-now(),
+Instant.now().toEpochMilli(),
asList("000015", "000013", "000011", "000009"),
asList("000018", "000016", "000017"))),
initialFiles2,
@@ -1425,22 +1425,22 @@
"Multi-level compaction.",
null,
asList(createCompactionEntry(1,
-now(),
+Instant.now().toEpochMilli(),
asList("000015", "000013", "000011", "000009"),
asList("000018", "000016", "000017")),
createCompactionEntry(2,
-now(),
+Instant.now().toEpochMilli(),
asList("000018", "000016", "000017", "000026", "000024",
"000022", "000020"),
asList("000027", "000030", "000028", "000031", "000029")),
createCompactionEntry(3,
-now(),
+Instant.now().toEpochMilli(),
asList("000027", "000030", "000028", "000031", "000029",
"000039", "000037", "000035", "000033"),
asList("000040", "000044", "000042", "000043", "000046",
"000041", "000045")),
createCompactionEntry(4,
-now(),
+Instant.now().toEpochMilli(),
asList("000040", "000044", "000042", "000043", "000046",
"000041", "000045", "000054", "000052", "000050",
"000048"),
@@ -1611,7 +1611,7 @@ private void createFileWithContext(String fileName, String context)
"/volume/bucket3/key-0000099136",
"keyTable")),
null),
-new CompactionLogEntry(397, now(),
+new CompactionLogEntry(397, Instant.now().toEpochMilli(),
Arrays.asList(new CompactionFileInfo("000106",
"/volume/bucket1/key-0000000730",
"/volume/bucket3/key-0000099136",
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.container;

import java.io.IOException;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -49,7 +50,6 @@
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
-import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -253,7 +253,7 @@ private ContainerInfo allocateContainer(final Pipeline pipeline,
.setPipelineID(pipeline.getId().getProtobuf())
.setUsedBytes(0)
.setNumberOfKeys(0)
-.setStateEnterTime(Time.now())
+.setStateEnterTime(Instant.now().toEpochMilli())
.setOwner(owner)
.setContainerID(containerID.getId())
.setDeleteTransactionId(0)
@@ -52,7 +52,6 @@
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.protocolPB.OmTransport;
import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.Time;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.AfterEach;
@@ -62,6 +61,7 @@
import org.junit.jupiter.params.provider.EnumSource;

import java.io.IOException;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -133,8 +133,8 @@ private OmKeyInfo omKeyInfo(ReplicationType type, FileChecksum cachedChecksum, L
.setKeyName(null)
.setOmKeyLocationInfos(Collections.singletonList(
new OmKeyLocationInfoGroup(0, locationInfo)))
-.setCreationTime(Time.now())
-.setModificationTime(Time.now())
+.setCreationTime(Instant.now().toEpochMilli())
+.setModificationTime(Instant.now().toEpochMilli())
.setDataSize(0)
.setReplicationConfig(config)
.setFileEncryptionInfo(null)
(The remaining changed files in this commit are not shown in this view.)
