diff --git a/atlasdb-api/src/main/java/com/palantir/atlasdb/spi/AtlasDbFactory.java b/atlasdb-api/src/main/java/com/palantir/atlasdb/spi/AtlasDbFactory.java
index f21630f8a4f..90e5e6b2657 100644
--- a/atlasdb-api/src/main/java/com/palantir/atlasdb/spi/AtlasDbFactory.java
+++ b/atlasdb-api/src/main/java/com/palantir/atlasdb/spi/AtlasDbFactory.java
@@ -19,16 +19,17 @@
 import com.palantir.atlasdb.keyvalue.api.KeyValueService;
 import com.palantir.atlasdb.keyvalue.api.TableReference;
 import com.palantir.atlasdb.util.MetricsManager;
+import com.palantir.logsafe.SafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.refreshable.Refreshable;
 import com.palantir.timestamp.ManagedTimestampService;
 import com.palantir.timestamp.TimestampStoreInvalidator;
 import java.util.Optional;
 import java.util.function.LongSupplier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 public interface AtlasDbFactory {
-    Logger log = LoggerFactory.getLogger(AtlasDbFactory.class);
+    SafeLogger log = SafeLoggerFactory.get(AtlasDbFactory.class);

     long NO_OP_FAST_FORWARD_TIMESTAMP = Long.MIN_VALUE + 1; // Note: Long.MIN_VALUE itself is not allowed.
     boolean DEFAULT_INITIALIZE_ASYNC = false;
@@ -81,7 +82,9 @@ ManagedTimestampService createManagedTimestampService(
     default TimestampStoreInvalidator createTimestampStoreInvalidator(
             KeyValueService rawKvs, Optional tableReferenceOverride) {
         return () -> {
-            log.warn("AtlasDB doesn't yet support automated migration for KVS type {}.", getType());
+            log.warn(
+                    "AtlasDB doesn't yet support automated migration for KVS type {}.",
+                    SafeArg.of("kvsType", getType()));
             return NO_OP_FAST_FORWARD_TIMESTAMP;
         };
     }
diff --git a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/cassandra/CassandraAtlasDbFactory.java b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/cassandra/CassandraAtlasDbFactory.java
index 07673fb4749..7b81da2816e 100644
--- a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/cassandra/CassandraAtlasDbFactory.java
+++ b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/cassandra/CassandraAtlasDbFactory.java
@@ -31,6 +31,9 @@
 import com.palantir.atlasdb.spi.KeyValueServiceRuntimeConfig;
 import com.palantir.atlasdb.util.MetricsManager;
 import com.palantir.atlasdb.versions.AtlasDbVersion;
+import com.palantir.logsafe.SafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.refreshable.Refreshable;
 import com.palantir.timestamp.ManagedTimestampService;
 import com.palantir.timestamp.PersistentTimestampServiceImpl;
@@ -39,12 +42,10 @@
 import java.util.Optional;
 import java.util.function.LongSupplier;
 import java.util.function.Supplier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 @AutoService(AtlasDbFactory.class)
 public class CassandraAtlasDbFactory implements AtlasDbFactory {
-    private static Logger log = LoggerFactory.getLogger(CassandraAtlasDbFactory.class);
+    private static SafeLogger log = SafeLoggerFactory.get(CassandraAtlasDbFactory.class);

     private CassandraKeyValueServiceRuntimeConfig latestValidRuntimeConfig =
             CassandraKeyValueServiceRuntimeConfig.getDefault();
@@ -104,7 +105,7 @@ Supplier preprocessKvsRuntimeConfig(
                         "Invalid KeyValueServiceRuntimeConfig. Expected a KeyValueServiceRuntimeConfig of"
                                 + " type CassandraKeyValueServiceRuntimeConfig, found {}. Using latest valid"
                                 + " CassandraKeyValueServiceRuntimeConfig.",
-                        config.getClass());
+                        SafeArg.of("configClass", config.getClass()));
                 return latestValidRuntimeConfig;
             }
diff --git a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraTimestampBoundStore.java b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraTimestampBoundStore.java
index 1e94abc6170..d77d53a1dac 100644
--- a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraTimestampBoundStore.java
+++ b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraTimestampBoundStore.java
@@ -91,7 +91,7 @@ public static TimestampBoundStore create(CassandraKeyValueService kvs, boolean i
     private CassandraTimestampBoundStore(CassandraClientPool clientPool, CassandraKeyValueService kvs) {
         DebugLogger.logger.info(
                 "Creating CassandraTimestampBoundStore object on thread {}. This should only happen once.",
-                Thread.currentThread().getName());
+                SafeArg.of("thread", Thread.currentThread().getName()));
         this.clientPool = Preconditions.checkNotNull(clientPool, "clientPool cannot be null");
         this.kvs = kvs;
     }
@@ -125,7 +125,7 @@ public Long apply(CassandraClient client) {
                 if (result == null) {
                     DebugLogger.logger.info(
                             "[GET] Null result, setting timestamp limit to {}",
-                            CassandraTimestampUtils.INITIAL_VALUE);
+                            SafeArg.of("newLimit", CassandraTimestampUtils.INITIAL_VALUE));
                     cas(client, null, CassandraTimestampUtils.INITIAL_VALUE);
                     return CassandraTimestampUtils.INITIAL_VALUE;
                 }
@@ -152,14 +152,15 @@ private long extractUpperLimit(ColumnOrSuperColumn result) {
             }
         });

-        DebugLogger.logger.debug("[GET] Setting cached timestamp limit to {}.", currentLimit);
+        DebugLogger.logger.debug(
+                "[GET] Setting cached timestamp limit to {}.", SafeArg.of("currentLimit", currentLimit));
         currentLimit = upperLimit;
         return currentLimit;
     }

     @Override
     public synchronized void storeUpperLimit(final long limit) {
-        DebugLogger.logger.debug("[PUT] Storing upper limit of {}.", limit);
+        DebugLogger.logger.debug("[PUT] Storing upper limit of {}.", SafeArg.of("limit", limit));
         clientPool.runWithRetry(new FunctionCheckedException() {
             @GuardedBy("CassandraTimestampBoundStore.this")
@@ -175,7 +176,10 @@ public Void apply(CassandraClient client) {
     private void cas(CassandraClient client, Long oldVal, long newVal) {
         final CASResult result;
         try {
-            DebugLogger.logger.info("[CAS] Trying to set upper limit from {} to {}.", oldVal, newVal);
+            DebugLogger.logger.info(
+                    "[CAS] Trying to set upper limit from {} to {}.",
+                    SafeArg.of("oldLimit", oldVal),
+                    SafeArg.of("newLimit", newVal));
             result = client.cas(
                     AtlasDbConstants.TIMESTAMP_TABLE,
                     getRowName(),
@@ -226,7 +230,7 @@ private void cas(CassandraClient client, Long oldVal, long newVal) {
                 DebugLogger.logger.error("Thread dump: {}", SafeArg.of("threadDump", ThreadDumps.programmaticThreadDump()));
                 throw err;
             } else {
-                DebugLogger.logger.debug("[CAS] Setting cached limit to {}.", newVal);
+                DebugLogger.logger.debug("[CAS] Setting cached limit to {}.", SafeArg.of("newLimit", newVal));
                 currentLimit = newVal;
             }
         }
diff --git a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CqlExecutorImpl.java b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CqlExecutorImpl.java
index 5386988f139..46a38b6553c 100644
--- a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CqlExecutorImpl.java
+++ b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CqlExecutorImpl.java
@@ -29,6 +29,8 @@
 import com.palantir.logsafe.Arg;
 import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.UnsafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
@@ -51,12 +53,10 @@
 import org.apache.cassandra.thrift.CqlResult;
 import org.apache.cassandra.thrift.CqlRow;
 import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 public class CqlExecutorImpl implements CqlExecutor {
     private final QueryExecutor queryExecutor;

-    private static final Logger log = LoggerFactory.getLogger(CqlExecutorImpl.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(CqlExecutorImpl.class);

     public interface QueryExecutor {
         CqlResult execute(CqlQuery cqlQuery, byte[] rowHintForHostSelection);
@@ -151,7 +151,7 @@ private void scheduleSweepRowTask(
             // RejectedExecutionException are expected.
             // The executor is shutdown when we already fetched all the values we were interested
             // for the current iteration.
-            log.trace("Rejecting row {} because executor is closed", rows.get(rowIndex), e);
+            log.trace("Rejecting row {} because executor is closed", UnsafeArg.of("row", rows.get(rowIndex)), e);
         }
     }
diff --git a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/pool/CassandraService.java b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/pool/CassandraService.java
index e32aedb00ae..d32896b1de9 100644
--- a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/pool/CassandraService.java
+++ b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/pool/CassandraService.java
@@ -42,7 +42,10 @@
 import com.palantir.common.base.Throwables;
 import com.palantir.common.streams.KeyedStream;
 import com.palantir.logsafe.SafeArg;
+import com.palantir.logsafe.UnsafeArg;
 import com.palantir.logsafe.exceptions.SafeIllegalStateException;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
@@ -63,12 +66,10 @@
 import java.util.stream.Collectors;
 import org.apache.cassandra.thrift.EndpointDetails;
 import org.apache.cassandra.thrift.TokenRange;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 public class CassandraService implements AutoCloseable {
     // TODO(tboam): keep logging on old class?
-    private static final Logger log = LoggerFactory.getLogger(CassandraService.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(CassandraService.class);

     private static final Interner>> tokensInterner = Interners.newWeakInterner();
@@ -341,7 +342,7 @@ public void debugLogStateOfPool() {
                         activeCheckouts > 0 ? Integer.toString(activeCheckouts) : "(unknown)",
                         totalAllowed > 0 ? Integer.toString(totalAllowed) : "(not bounded)"));
             }
-            log.debug("Current pool state: {}", currentState.toString());
+            log.debug("Current pool state: {}", UnsafeArg.of("currentState", currentState.toString()));
         }
     }
diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/keyvalue/impl/SweepStatsKeyValueService.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/keyvalue/impl/SweepStatsKeyValueService.java
index b5e1e310e4a..ef1c05995fd 100644
--- a/atlasdb-client/src/main/java/com/palantir/atlasdb/keyvalue/impl/SweepStatsKeyValueService.java
+++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/keyvalue/impl/SweepStatsKeyValueService.java
@@ -33,6 +33,7 @@
 import com.palantir.atlasdb.keyvalue.api.RangeRequest;
 import com.palantir.atlasdb.keyvalue.api.TableReference;
 import com.palantir.atlasdb.keyvalue.api.Value;
+import com.palantir.atlasdb.logging.LoggingArgs;
 import com.palantir.atlasdb.schema.SweepSchema;
 import com.palantir.atlasdb.schema.generated.SweepPriorityTable;
 import com.palantir.atlasdb.schema.generated.SweepPriorityTable.SweepPriorityNamedColumn;
@@ -43,6 +44,8 @@
 import com.palantir.logsafe.Preconditions;
 import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.UnsafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.timestamp.TimestampService;
 import java.util.Collection;
 import java.util.Map;
@@ -55,8 +58,6 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Supplier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /**
  * This kvs wrapper tracks the approximate number of writes to every table
@@ -65,7 +66,7 @@
  */
 public final class SweepStatsKeyValueService extends ForwardingKeyValueService {

-    private static final Logger log = LoggerFactory.getLogger(SweepStatsKeyValueService.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(SweepStatsKeyValueService.class);
     private static final int CLEAR_WEIGHT = 1 << 14; // 16384
     private static final long FLUSH_DELAY_SECONDS = 42;
@@ -262,7 +263,7 @@ private void flushTask() {
             }
         } catch (Throwable t) {
             if (!Thread.interrupted()) {
-                log.warn("Error occurred while flushing sweep stats: {}", t, t);
+                log.warn("Error occurred while flushing sweep stats: {}", UnsafeArg.of("throwable", t), t);
             }
         }
     }
@@ -311,7 +312,11 @@ private void flushWrites(Multiset writes, Set cl
                         .hydrateFromBytes(oldValue.getContents())
                         .getValue();
                 long newValue = clears.contains(tableRef) ? writes.count(tableRef) : oldCount + writes.count(tableRef);
-                log.debug("Sweep priority for {} has {} writes (was {})", tableRef, newValue, oldCount);
+                log.debug(
+                        "Sweep priority for {} has {} writes (was {})",
+                        LoggingArgs.tableRef(tableRef),
+                        SafeArg.of("newValue", newValue),
+                        SafeArg.of("oldCount", oldCount));
                 newWriteCounts.put(
                         cell, SweepPriorityTable.WriteCount.of(newValue).persistValue());
             }
@@ -331,7 +336,11 @@ private void flushWrites(Multiset writes, Set cl
                 // ignore problems when sweep or transaction tables don't exist
                 log.warn("Ignoring failed sweep stats flush due to ", e);
             }
-            log.warn("Unable to flush sweep stats for writes {} and clears {}: ", writes, clears, e);
+            log.warn(
+                    "Unable to flush sweep stats for writes {} and clears {}: ",
+                    UnsafeArg.of("writes", writes),
+                    UnsafeArg.of("clears", clears),
+                    e);
             throw e;
         }
     }
diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/schema/KvsRangeMigrator.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/schema/KvsRangeMigrator.java
index bafdcc55d14..bb00ea735e1 100644
--- a/atlasdb-client/src/main/java/com/palantir/atlasdb/schema/KvsRangeMigrator.java
+++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/schema/KvsRangeMigrator.java
@@ -28,6 +28,7 @@
 import com.palantir.atlasdb.keyvalue.api.RowResult;
 import com.palantir.atlasdb.keyvalue.api.TableReference;
 import com.palantir.atlasdb.keyvalue.impl.Cells;
+import com.palantir.atlasdb.logging.LoggingArgs;
 import com.palantir.atlasdb.transaction.api.Transaction;
 import com.palantir.atlasdb.transaction.api.TransactionManager;
 import com.palantir.atlasdb.transaction.impl.TransactionConstants;
@@ -36,16 +37,18 @@
 import com.palantir.common.base.AbortingVisitors;
 import com.palantir.common.base.BatchingVisitable;
 import com.palantir.common.collect.Maps2;
+import com.palantir.logsafe.SafeArg;
+import com.palantir.logsafe.UnsafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.util.Mutable;
 import com.palantir.util.Mutables;
 import java.util.HashMap;
 import java.util.Map;
 import org.apache.commons.lang3.mutable.MutableLong;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 public class KvsRangeMigrator implements RangeMigrator {
-    private static final Logger log = LoggerFactory.getLogger(KvsRangeMigrator.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(KvsRangeMigrator.class);

     private final TableReference srcTable;
     private final TableReference destTable;
@@ -92,15 +95,18 @@ private void logStatus(Transaction tx, int numRangeBoundaries) {
             if (checkpoint != null) {
                 log.info(
                         "({}/{}) Migration from table {} to table {} will start/resume at {}",
-                        rangeId,
-                        numRangeBoundaries,
-                        srcTable,
-                        destTable,
-                        PtBytes.encodeHexString(checkpoint));
+                        SafeArg.of("rangeId", rangeId),
+                        SafeArg.of("numRangeBoundaries", numRangeBoundaries),
+                        LoggingArgs.tableRef("srcTable", srcTable),
+                        LoggingArgs.tableRef("destTable", destTable),
+                        UnsafeArg.of("checkpoint", PtBytes.encodeHexString(checkpoint)));
                 return;
             }
         }
-        log.info("Migration from table {} to {} has already been completed", srcTable, destTable);
+        log.info(
+                "Migration from table {} to {} has already been completed",
+                LoggingArgs.tableRef("srcTable", srcTable),
+                LoggingArgs.tableRef("destTable", destTable));
     }

     @Override
@@ -144,10 +150,14 @@ private byte[] copyOneTransactionInternal(RangeRequest range, long rangeId, Tran
         if (log.isTraceEnabled()) {
             log.trace(
                     "Copying table {} range {} from {} to {}",
-                    srcTable,
-                    rangeId,
-                    BaseEncoding.base16().lowerCase().encode(rangeToUse.getStartInclusive()),
-                    BaseEncoding.base16().lowerCase().encode(rangeToUse.getEndExclusive()));
+                    LoggingArgs.tableRef("srcTable", srcTable),
+                    SafeArg.of("rangeId", rangeId),
+                    UnsafeArg.of(
+                            "rangeStartInclusive",
+                            BaseEncoding.base16().lowerCase().encode(rangeToUse.getStartInclusive())),
+                    UnsafeArg.of(
+                            "rangeStartExclusive",
+                            BaseEncoding.base16().lowerCase().encode(rangeToUse.getEndExclusive())));
         }
         BatchingVisitable> bv = readT.getRange(srcTable, rangeToUse);

@@ -155,7 +165,11 @@ private byte[] copyOneTransactionInternal(RangeRequest range, long rangeId, Tran
         Map writeMap = new HashMap<>();
         byte[] lastRow = internalCopyRange(bv, maxBytes, writeMap);
         if (log.isTraceEnabled() && (lastRow != null)) {
-            log.trace("Copying {} bytes for range {} on table {}", lastRow.length, rangeId, srcTable);
+            log.trace(
+                    "Copying {} bytes for range {} on table {}",
+                    SafeArg.of("lengths", lastRow.length),
+                    SafeArg.of("rangeId", rangeId),
+                    LoggingArgs.tableRef("srcTable", srcTable));
         }
         writeToKvs(writeMap);
diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/schema/TransactionRangeMigrator.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/schema/TransactionRangeMigrator.java
index 957288aecf1..2ba0f93e8f7 100644
--- a/atlasdb-client/src/main/java/com/palantir/atlasdb/schema/TransactionRangeMigrator.java
+++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/schema/TransactionRangeMigrator.java
@@ -23,6 +23,7 @@
 import com.palantir.atlasdb.keyvalue.api.RowResult;
 import com.palantir.atlasdb.keyvalue.api.TableReference;
 import com.palantir.atlasdb.keyvalue.impl.Cells;
+import com.palantir.atlasdb.logging.LoggingArgs;
 import com.palantir.atlasdb.transaction.api.Transaction;
 import com.palantir.atlasdb.transaction.api.TransactionManager;
 import com.palantir.atlasdb.transaction.impl.TransactionConstants;
@@ -30,15 +31,17 @@
 import com.palantir.common.base.AbortingVisitor;
 import com.palantir.common.base.AbortingVisitors;
 import com.palantir.common.base.BatchingVisitable;
+import com.palantir.logsafe.SafeArg;
+import com.palantir.logsafe.UnsafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.util.Mutable;
 import com.palantir.util.Mutables;
 import java.util.Map;
 import org.apache.commons.lang3.mutable.MutableLong;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 public class TransactionRangeMigrator implements RangeMigrator {
-    private static final Logger log = LoggerFactory.getLogger(TransactionRangeMigrator.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(TransactionRangeMigrator.class);

     private final TableReference srcTable;
     private final TableReference destTable;
@@ -79,15 +82,18 @@ private void logStatus(Transaction tx, int numRangeBoundaries) {
             if (checkpoint != null) {
                 log.info(
                         "({}/{}) Migration from table {} to table {} will start/resume at {}",
-                        rangeId,
-                        numRangeBoundaries,
-                        srcTable,
-                        destTable,
-                        PtBytes.encodeHexString(checkpoint));
+                        SafeArg.of("rangeId", rangeId),
+                        SafeArg.of("numRangeBoundaries", numRangeBoundaries),
+                        LoggingArgs.tableRef("srcTable", srcTable),
+                        LoggingArgs.tableRef("destTable", destTable),
+                        UnsafeArg.of("checkpoint", PtBytes.encodeHexString(checkpoint)));
                 return;
             }
         }
-        log.info("Migration from table {} to {} has already been completed", srcTable, destTable);
+        log.info(
+                "Migration from table {} to {} has already been completed",
LoggingArgs.tableRef("srcTable", srcTable), + LoggingArgs.tableRef("destTable", destTable)); } @Override diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/stream/AbstractGenericStreamStore.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/stream/AbstractGenericStreamStore.java index c8eee8a1fef..1be1b1db8fa 100644 --- a/atlasdb-client/src/main/java/com/palantir/atlasdb/stream/AbstractGenericStreamStore.java +++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/stream/AbstractGenericStreamStore.java @@ -27,7 +27,11 @@ import com.palantir.atlasdb.transaction.api.TransactionManager; import com.palantir.common.base.Throwables; import com.palantir.common.compression.StreamCompression; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; import com.palantir.logsafe.exceptions.SafeIllegalArgumentException; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.util.ByteArrayIOStream; import java.io.ByteArrayInputStream; import java.io.File; @@ -41,11 +45,9 @@ import java.util.Set; import java.util.stream.Collectors; import javax.annotation.CheckForNull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public abstract class AbstractGenericStreamStore implements GenericStreamStore { - protected static final Logger log = LoggerFactory.getLogger(AbstractGenericStreamStore.class); + protected static final SafeLogger log = SafeLoggerFactory.get(AbstractGenericStreamStore.class); @CheckForNull protected final TransactionManager txnMgr; @@ -101,7 +103,7 @@ private InputStream getStream(Transaction transaction, T id, StreamMetadata meta try { return compression.decompress(tryGetStream(transaction, id, metadata)); } catch (FileNotFoundException e) { - log.error("Error opening temp file for stream {}", id, e); + log.error("Error opening temp file for stream {}", UnsafeArg.of("stream", id), e); throw Throwables.rewrapAndThrowUncheckedException("Could not open temp file to create stream.", e); } } @@ -169,17 +171,20 @@ private File loadToNewTempFile(Transaction transaction, T id, StreamMetadata met writeStreamToFile(transaction, id, metadata, file); return file; } catch (IOException e) { - log.error("Could not create temp file for stream id {}", id, e); + log.error("Could not create temp file for stream id {}", UnsafeArg.of("stream", id), e); throw Throwables.rewrapAndThrowUncheckedException("Could not create file to create stream.", e); } } private void checkStreamStored(T id, StreamMetadata metadata) { if (metadata == null) { - log.error("Error loading stream {} because it was never stored.", id); + log.error("Error loading stream {} because it was never stored.", UnsafeArg.of("stream", id)); throw new IllegalArgumentException("Unable to load stream " + id + " because it was never stored."); } else if (metadata.getStatus() != Status.STORED) { - log.error("Error loading stream {} because it has status {}", id, metadata.getStatus()); + log.error( + "Error loading stream {} because it has status {}", + UnsafeArg.of("stream", id), + SafeArg.of("status", metadata.getStatus())); throw new SafeIllegalArgumentException("Could not get stream because it was not fully stored."); } } @@ -190,7 +195,7 @@ private void writeStreamToFile(Transaction transaction, T id, StreamMetadata met try { tryWriteStreamToFile(transaction, id, metadata, fos); } catch (IOException e) { - log.error("Could not finish streaming blocks to file for stream {}", id, e); + log.error("Could not finish streaming blocks to 
file for stream {}", UnsafeArg.of("stream", id), e); throw Throwables.rewrapAndThrowUncheckedException("Error writing blocks while opening a stream.", e); } finally { try { diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/stream/AbstractPersistentStreamStore.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/stream/AbstractPersistentStreamStore.java index 2ba284f169e..5baede50bce 100644 --- a/atlasdb-client/src/main/java/com/palantir/atlasdb/stream/AbstractPersistentStreamStore.java +++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/stream/AbstractPersistentStreamStore.java @@ -32,6 +32,7 @@ import com.palantir.common.base.Throwables; import com.palantir.common.compression.StreamCompression; import com.palantir.common.streams.KeyedStream; +import com.palantir.logsafe.SafeArg; import com.palantir.util.Pair; import com.palantir.util.crypto.Sha256Hash; import java.io.IOException; @@ -184,7 +185,11 @@ protected final StreamMetadata storeBlocksAndGetHashlessMetadata( .setHash(ByteString.EMPTY) .build(); storeMetadataAndIndex(id, metadata); - log.error("Could not store stream {}. Failed after {} bytes.", id, length, e); + log.error( + "Could not store stream {}. Failed after {} bytes.", + SafeArg.of("stream", id), + SafeArg.of("length", length), + e); throw Throwables.rewrapAndThrowUncheckedException("Failed to store stream.", e); } diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/table/description/ColumnValueDescription.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/table/description/ColumnValueDescription.java index 1f7b87fea74..007c3246d78 100644 --- a/atlasdb-client/src/main/java/com/palantir/atlasdb/table/description/ColumnValueDescription.java +++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/table/description/ColumnValueDescription.java @@ -36,20 +36,21 @@ import com.palantir.common.persist.Persistable; import com.palantir.common.persist.Persistables; import com.palantir.logsafe.Preconditions; +import com.palantir.logsafe.UnsafeArg; import com.palantir.logsafe.exceptions.SafeIllegalArgumentException; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; import org.apache.commons.lang3.Validate; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @Immutable @SuppressWarnings("checkstyle:all") // too many warnings to fix public final class ColumnValueDescription { - private static final Logger log = LoggerFactory.getLogger(ColumnValueDescription.class); + private static final SafeLogger log = SafeLoggerFactory.get(ColumnValueDescription.class); public enum Format { PROTO, @@ -392,7 +393,9 @@ public TableMetadataPersistence.ColumnValueDescription.Builder persistToProto() builder.setProtoMessageName(protoDescriptor.getName()); builder.setProtoFileDescriptorTree(persistFileDescriptorTree(protoDescriptor.getFile())); if (protoDescriptor.getContainingType() != null) { - log.error("proto descriptors should be top level types: {}", protoDescriptor.getName()); + log.error( + "proto descriptors should be top level types: {}", + UnsafeArg.of("protoDescriptorName", protoDescriptor.getName())); } } return builder; diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/table/description/Schema.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/table/description/Schema.java index 
index 37503dbae05..37573ac7b77 100644
--- a/atlasdb-client/src/main/java/com/palantir/atlasdb/table/description/Schema.java
+++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/table/description/Schema.java
@@ -41,6 +41,9 @@
 import com.palantir.atlasdb.transaction.api.ConflictHandler;
 import com.palantir.lock.watch.LockWatchReferences;
 import com.palantir.lock.watch.LockWatchReferences.LockWatchReference;
+import com.palantir.logsafe.UnsafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
@@ -55,8 +58,6 @@
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /**
  * Defines a schema.
@@ -69,7 +70,7 @@
  */
 @SuppressWarnings("checkstyle:Indentation")
 public class Schema {
-    private static final Logger log = LoggerFactory.getLogger(Schema.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(Schema.class);

     private final String name;
     private final String packageName;
@@ -248,7 +249,7 @@ public void validate() {
             try {
                 entry.getValue().validate();
             } catch (Exception e) {
-                log.error("Failed to validate table {}.", entry.getKey(), e);
+                log.error("Failed to validate table {}.", UnsafeArg.of("tableName", entry.getKey()), e);
                 throw e;
             }
         }
@@ -259,7 +260,7 @@ public void validate() {
                 def.toIndexMetadata(indexEntry.getKey()).getTableMetadata();
                 def.validate();
             } catch (Exception e) {
-                log.error("Failed to validate index {}.", indexEntry.getKey(), e);
+                log.error("Failed to validate index {}.", UnsafeArg.of("indexName", indexEntry.getKey()), e);
                 throw e;
             }
         }
diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/CachingTransaction.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/CachingTransaction.java
index 6c408069e3f..840ed66a999 100644
--- a/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/CachingTransaction.java
+++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/CachingTransaction.java
@@ -35,6 +35,9 @@
 import com.palantir.atlasdb.transaction.api.TransactionFailedException;
 import com.palantir.atlasdb.transaction.service.TransactionService;
 import com.palantir.common.base.Throwables;
+import com.palantir.logsafe.SafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.util.Pair;
 import java.util.Collection;
 import java.util.Collections;
@@ -45,12 +48,10 @@
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.concurrent.ExecutionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 public class CachingTransaction extends ForwardingTransaction {

-    private static final Logger log = LoggerFactory.getLogger(CachingTransaction.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(CachingTransaction.class);
     private static final long DEFAULT_MAX_CACHED_CELLS = 10_000_000;

     private final Transaction delegate;
@@ -239,7 +240,7 @@ public void commit() throws TransactionFailedException {
             super.commit();
         } finally {
             if (log.isDebugEnabled()) {
-                log.debug("CachingTransaction cache stats on commit: {}", cellCache.stats());
+                log.debug("CachingTransaction cache stats on commit: {}", SafeArg.of("stats", cellCache.stats()));
             }
         }
     }
@@ -250,7 +251,9 @@ public void commit(TransactionService txService) throws TransactionFailedExcepti
             super.commit(txService);
         } finally {
             if (log.isDebugEnabled()) {
-                log.debug("CachingTransaction cache stats on commit(txService): {}", cellCache.stats());
+                log.debug(
+                        "CachingTransaction cache stats on commit(txService): {}",
+                        SafeArg.of("stats", cellCache.stats()));
             }
         }
     }
@@ -261,7 +264,7 @@ public void abort() {
             super.abort();
         } finally {
             if (log.isDebugEnabled()) {
-                log.debug("CachingTransaction cache stats on abort: {}", cellCache.stats());
+                log.debug("CachingTransaction cache stats on abort: {}", SafeArg.of("stats", cellCache.stats()));
             }
         }
     }
diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/ConflictDetectionManagers.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/ConflictDetectionManagers.java
index d574689061e..1adafc2732f 100644
--- a/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/ConflictDetectionManagers.java
+++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/ConflictDetectionManagers.java
@@ -19,14 +19,15 @@
 import com.google.common.collect.Maps;
 import com.palantir.atlasdb.keyvalue.api.KeyValueService;
 import com.palantir.atlasdb.keyvalue.api.TableReference;
+import com.palantir.atlasdb.logging.LoggingArgs;
 import com.palantir.atlasdb.table.description.TableMetadata;
 import com.palantir.atlasdb.transaction.api.ConflictHandler;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.util.Optional;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 public final class ConflictDetectionManagers {
-    private static final Logger log = LoggerFactory.getLogger(ConflictDetectionManagers.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(ConflictDetectionManagers.class);

     private ConflictDetectionManagers() {}
@@ -69,7 +70,9 @@ private static ConflictDetectionManager create(KeyValueService kvs, boolean warm
                 public Optional load(TableReference tableReference) throws Exception {
                     byte[] metadata = kvs.getMetadataForTable(tableReference);
                     if (metadata == null) {
-                        log.error("Tried to make a transaction over a table that has no metadata: {}.", tableReference);
+                        log.error(
+                                "Tried to make a transaction over a table that has no metadata: {}.",
+                                LoggingArgs.tableRef("tableReference", tableReference));
                         return Optional.empty();
                     } else {
                         return Optional.of(getConflictHandlerFromMetadata(metadata));
diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/NoDuplicateWritesTransaction.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/NoDuplicateWritesTransaction.java
index 33f932cf5c8..7a580655600 100644
--- a/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/NoDuplicateWritesTransaction.java
+++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/NoDuplicateWritesTransaction.java
@@ -23,6 +23,8 @@
 import com.palantir.atlasdb.keyvalue.api.Cell;
 import com.palantir.atlasdb.keyvalue.api.TableReference;
 import com.palantir.atlasdb.transaction.api.Transaction;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.util.AssertUtils;
 import java.util.Arrays;
 import java.util.Collections;
@@ -30,8 +32,6 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /**
  * Disallows the same cell from being written twice with different values within
@@ -39,7 +39,7 @@
  * are allowed.
  */
 public class NoDuplicateWritesTransaction extends ForwardingTransaction {
-    private static final Logger log = LoggerFactory.getLogger(NoDuplicateWritesTransaction.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(NoDuplicateWritesTransaction.class);

     final Transaction delegate;
     final ImmutableSet noDoubleWritesTables;
diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/util/AtlasDbMetrics.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/util/AtlasDbMetrics.java
index 786097d249a..9a40e71af71 100644
--- a/atlasdb-client/src/main/java/com/palantir/atlasdb/util/AtlasDbMetrics.java
+++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/util/AtlasDbMetrics.java
@@ -17,6 +17,9 @@
 import com.codahale.metrics.MetricRegistry;
 import com.github.benmanes.caffeine.cache.Cache;
+import com.palantir.logsafe.SafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.tritium.api.event.InstrumentationFilter;
 import com.palantir.tritium.event.InstrumentationFilters;
 import com.palantir.tritium.event.InvocationContext;
@@ -29,11 +32,10 @@
 import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
-import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 public final class AtlasDbMetrics {
-    private static final Logger log = LoggerFactory.getLogger(AtlasDbMetrics.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(AtlasDbMetrics.class);

     private AtlasDbMetrics() {}
@@ -98,8 +100,8 @@ public static void registerCache(MetricRegistry metricRegistry, Cache cach
         } else {
             log.info(
                     "Not registering cache with prefix '{}' as metric registry already contains metrics: {}",
-                    metricsPrefix,
-                    existingMetrics);
+                    SafeArg.of("metricsPrefix", metricsPrefix),
+                    SafeArg.of("existingMetrics", existingMetrics));
         }
     }
diff --git a/atlasdb-commons/src/main/java/com/palantir/common/base/PrefetchingBatchingVisitable.java b/atlasdb-commons/src/main/java/com/palantir/common/base/PrefetchingBatchingVisitable.java
index fdf6593d97e..27f047b61ca 100644
--- a/atlasdb-commons/src/main/java/com/palantir/common/base/PrefetchingBatchingVisitable.java
+++ b/atlasdb-commons/src/main/java/com/palantir/common/base/PrefetchingBatchingVisitable.java
@@ -17,6 +17,10 @@
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.Queues;
+import com.palantir.logsafe.SafeArg;
+import com.palantir.logsafe.UnsafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.util.List;
 import java.util.Queue;
 import java.util.concurrent.ExecutorService;
@@ -26,15 +30,13 @@
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /**
  * A {@link BatchingVisitable} that will prefetch in a background thread. If an exception happens on
  * the fetch thread, it will be thrown in batchAccept after all prefetched items have been visited.
  */
 public class PrefetchingBatchingVisitable implements BatchingVisitable {
-    private static final Logger log = LoggerFactory.getLogger(PrefetchingBatchingVisitable.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(PrefetchingBatchingVisitable.class);

     private final BatchingVisitable delegate;
     private final int capacity;
@@ -138,11 +140,11 @@ public boolean batchAccept(final int batchSize, AbortingVi
         } finally {
             log.debug(
                     "{} timings: fetch {}, fetchBlocked {}, visit {}, visitBlocked {}",
-                    name,
-                    fetchTime,
-                    fetchBlockedTime,
-                    visitTime,
-                    visitBlockedTime);
+                    UnsafeArg.of("name", name),
+                    SafeArg.of("fetchTime", fetchTime),
+                    SafeArg.of("fetchBlockedTime", fetchBlockedTime),
+                    SafeArg.of("visitTime", visitTime),
+                    SafeArg.of("visitBlockedTime", visitBlockedTime));
             future.cancel(true);
         }
     }
diff --git a/atlasdb-commons/src/main/java/com/palantir/common/base/Throwables.java b/atlasdb-commons/src/main/java/com/palantir/common/base/Throwables.java
index 5eb38d1fe4e..a3dbaebf87b 100644
--- a/atlasdb-commons/src/main/java/com/palantir/common/base/Throwables.java
+++ b/atlasdb-commons/src/main/java/com/palantir/common/base/Throwables.java
@@ -19,8 +19,12 @@
 import com.palantir.common.exception.PalantirRuntimeException;
 import com.palantir.exception.PalantirInterruptedException;
 import com.palantir.logsafe.Preconditions;
+import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.SafeLoggable;
+import com.palantir.logsafe.UnsafeArg;
 import com.palantir.logsafe.exceptions.SafeIllegalStateException;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.io.InterruptedIOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
@@ -30,8 +34,6 @@
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /**
  * Utilities for creating and propagating exceptions.
@@ -42,7 +44,7 @@
  */
 public final class Throwables {

-    private static Logger log = LoggerFactory.getLogger(Throwables.class);
+    private static SafeLogger log = SafeLoggerFactory.get(Throwables.class);

     private Throwables() {
         /* uninstantiable */
@@ -204,7 +206,10 @@ public static T rewrap(T throwable) {
      */
     public static T rewrap(final String newMessage, final T throwable) {
         Preconditions.checkNotNull(throwable);
-        log.info("Rewrapping throwable {} with newMessage {}", throwable, newMessage);
+        log.info(
+                "Rewrapping throwable {} with newMessage {}",
+                UnsafeArg.of("throwable", throwable),
+                UnsafeArg.of("newMessage", newMessage));
         try {
             Constructor[] constructors = throwable.getClass().getConstructors();
             // First see if we can create the exception in a way that lets us preserve the message text
@@ -224,7 +229,10 @@ public static T rewrap(final String newMessage, final T th
         } catch (Exception e) {
             // If something goes wrong when we try to rewrap the exception,
             // we should log and throw a runtime exception.
-            log.error("Unexpected error encountered while rewrapping throwable of class {}", throwable.getClass(), e);
+            log.error(
+                    "Unexpected error encountered while rewrapping throwable of class {}",
+                    SafeArg.of("throwableClass", throwable.getClass()),
+                    e);
             throw createPalantirRuntimeException(newMessage, throwable);
         }
     }
diff --git a/atlasdb-commons/src/main/java/com/palantir/common/pooling/AbstractPoolingContainer.java b/atlasdb-commons/src/main/java/com/palantir/common/pooling/AbstractPoolingContainer.java
index fa2bfbe2ebc..1aceea9b37f 100644
--- a/atlasdb-commons/src/main/java/com/palantir/common/pooling/AbstractPoolingContainer.java
+++ b/atlasdb-commons/src/main/java/com/palantir/common/pooling/AbstractPoolingContainer.java
@@ -19,20 +19,22 @@
 import com.palantir.common.base.FunctionCheckedException;
 import com.palantir.common.collect.EmptyQueue;
 import com.palantir.logsafe.Preconditions;
+import com.palantir.logsafe.SafeArg;
+import com.palantir.logsafe.UnsafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.util.Queue;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicLong;
 import javax.annotation.Nonnull;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /**
  * This class will pool resources up to the passedLimit. It will never block and instead will always allocate a new
  * resource if there are none in the pool.
  */
 public abstract class AbstractPoolingContainer implements PoolingContainer {
-    private static final Logger log = LoggerFactory.getLogger(AbstractPoolingContainer.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(AbstractPoolingContainer.class);

     private volatile Queue pool;
     protected final AtomicLong allocatedResources = new AtomicLong();
@@ -144,7 +146,7 @@ protected final void returnResource(T resource) {
         }
         boolean wasReturned = returnToQueue(resource);
         if (!wasReturned) {
-            log.debug("Pool full, releasing resource: {}", resource);
+            log.debug("Pool full, releasing resource: {}", UnsafeArg.of("resource", resource));
             allocatedResources.decrementAndGet();
             try {
                 cleanupForDiscard(resource);
@@ -159,9 +161,9 @@ private void logPoolStats() {
         if (log.isDebugEnabled()) {
             log.debug(
                     "Allocated {} instances, {} remaining in pool, {} max pool size",
-                    getAllocatedResources(),
-                    pool.size(),
-                    getMaxPoolSize());
+                    SafeArg.of("resourceCount", getAllocatedResources()),
+                    SafeArg.of("poolRemaining", pool.size()),
+                    SafeArg.of("poolMax", getMaxPoolSize()));
         }
     }
@@ -191,7 +193,7 @@ private void discardFromPool(Queue currentPool) {
             allocatedResources.decrementAndGet();
             try {
                 cleanupForDiscard(item);
-                log.debug("Discarded: {}", item);
+                log.debug("Discarded: {}", UnsafeArg.of("item", item));
             } catch (RuntimeException e) {
                 log.error("should not throw here", e);
             }
@@ -206,7 +208,7 @@ private boolean returnToQueue(T resource) {
             discardFromPool(currentPool);
         }
         if (ret) {
-            log.debug("Returned: {}", resource);
+            log.debug("Returned: {}", UnsafeArg.of("resource", resource));
         }
         return ret;
     }
diff --git a/atlasdb-commons/src/main/java/com/palantir/common/random/SecureRandomPool.java b/atlasdb-commons/src/main/java/com/palantir/common/random/SecureRandomPool.java
index 6e11135d8be..c08aa5589a4 100644
--- a/atlasdb-commons/src/main/java/com/palantir/common/random/SecureRandomPool.java
+++ b/atlasdb-commons/src/main/java/com/palantir/common/random/SecureRandomPool.java
@@ -15,17 +15,18 @@
  */
 package com.palantir.common.random;

+import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.exceptions.SafeIllegalArgumentException;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.security.NoSuchAlgorithmException;
 import java.security.SecureRandom;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 public class SecureRandomPool {
-    private static final Logger log = LoggerFactory.getLogger(SecureRandomPool.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(SecureRandomPool.class);

     private final List pool;
     private final SecureRandom seedSource;
@@ -62,7 +63,7 @@ public SecureRandomPool(String algorithm, int poolSize, SecureRandom seed) {
                 pool.add(random);
             }
         } catch (NoSuchAlgorithmException e) {
-            log.error("Error getting SecureRandom using {} algorithm.", algorithm, e);
+            log.error("Error getting SecureRandom using {} algorithm.", SafeArg.of("algorithm", algorithm), e);
             throw new RuntimeException(String.format("Error getting SecureRandom using %s algorithm.", algorithm), e);
         }
     }
@@ -92,7 +93,10 @@ private SecureRandom getSeedSource(String algorithm, SecureRandom seed) {
             seedSource.setSeed(seedBytes);
             return seedSource;
         } catch (NoSuchAlgorithmException e) {
-            log.error("Error getting SecureRandom using {} algorithm for seed source.", algorithm, e);
+            log.error(
+                    "Error getting SecureRandom using {} algorithm for seed source.",
+                    SafeArg.of("algorithm", algorithm),
+                    e);
             return seed;
         }
     }
diff --git a/atlasdb-commons/src/main/java/com/palantir/util/AssertUtils.java b/atlasdb-commons/src/main/java/com/palantir/util/AssertUtils.java
index 92f3584baf6..343080a59ff 100644
--- a/atlasdb-commons/src/main/java/com/palantir/util/AssertUtils.java
+++ b/atlasdb-commons/src/main/java/com/palantir/util/AssertUtils.java
@@ -15,6 +15,7 @@
  */
 package com.palantir.util;

+import com.palantir.logsafe.Arg;
 import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.exceptions.SafeRuntimeException;
 import com.palantir.logsafe.logger.SafeLogger;
@@ -23,6 +24,8 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -78,6 +81,12 @@ public static void assertAndLog(SafeLogger log, boolean cheapTest, String msg) {
         }
     }

+    public static void assertAndLog(SafeLogger log, boolean cheapTest, String msg, Arg... args) {
+        if (!cheapTest) {
+            assertAndLogWithException(log, false, msg, getDebuggingException(), args);
+        }
+    }
+
     /**
      * @deprecated Use {@link #assertAndLog(Logger, boolean, String)} instead.
      * This will make sure log events go to your logger instead of a hard-to-filter default.
@@ -118,6 +127,19 @@ public static void assertAndLogWithException(Logger log, boolean cheapTest, Stri
     public static void assertAndLogWithException(SafeLogger log, boolean cheapTest, String msg, Throwable t) {
         if (!cheapTest) {
             log.error("An error occurred", SafeArg.of("message", msg), t);
+            assert false : msg;
+        }
+    }
+
+    public static void assertAndLogWithException(
+            SafeLogger log, boolean cheapTest, String msg, Throwable t, Arg... args) {
+        if (!cheapTest) {
+            log.error(
+                    "An error occurred",
+                    Stream.concat(Stream.of(SafeArg.of("message", msg)), Arrays.stream(args))
+                            .collect(Collectors.toList()),
+                    t);
+            assert false : msg;
+        }
     }
 }
diff --git a/atlasdb-commons/src/main/java/com/palantir/util/JMXUtils.java b/atlasdb-commons/src/main/java/com/palantir/util/JMXUtils.java
index 95c5911e724..103d7ed7dbd 100644
--- a/atlasdb-commons/src/main/java/com/palantir/util/JMXUtils.java
+++ b/atlasdb-commons/src/main/java/com/palantir/util/JMXUtils.java
@@ -16,7 +16,10 @@
 package com.palantir.util;

 import com.google.common.collect.Collections2;
+import com.palantir.logsafe.UnsafeArg;
 import com.palantir.logsafe.exceptions.SafeIllegalStateException;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.ref.ReferenceQueue;
@@ -48,13 +51,11 @@
 import javax.management.remote.JMXConnectorServer;
 import javax.management.remote.JMXConnectorServerFactory;
 import javax.management.remote.JMXServiceURL;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /** */
 public final class JMXUtils {
-    private static final Logger log = LoggerFactory.getLogger(JMXUtils.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(JMXUtils.class);

     private JMXUtils() {
         /* empty */
@@ -98,9 +99,9 @@ public static void registerMBeanCatchAndLogExceptions(final Object mbean, final
             server.registerMBean(mbean, on);
         } catch (InstanceAlreadyExistsException e) {
             // The bean was registered concurrently; log a warning, but don't fail tests
-            log.warn("Failed to register mbean for name {}", objectName, e);
+            log.warn("Failed to register mbean for name {}", UnsafeArg.of("name", objectName), e);
         } catch (Exception e) {
-            log.warn("Unexpected exception registering mbean for name {}", objectName, e);
+            log.warn("Unexpected exception registering mbean for name {}", UnsafeArg.of("name", objectName), e);
         }
     }
@@ -139,7 +140,7 @@ public static DynamicMBean registerMBeanWeakRefPlusCatchAndLogExceptions(
             server.registerMBean(weakMBean, on);
             return bean;
         } catch (final Exception e) {
-            log.warn("Failed to register mbean for name {}", objectName, e);
+            log.warn("Failed to register mbean for name {}", UnsafeArg.of("name", objectName), e);
             return null;
         }
     }
diff --git a/atlasdb-commons/src/main/java/com/palantir/util/SoftCache.java b/atlasdb-commons/src/main/java/com/palantir/util/SoftCache.java
index 84bcd53948a..6edb9388bba 100644
--- a/atlasdb-commons/src/main/java/com/palantir/util/SoftCache.java
+++ b/atlasdb-commons/src/main/java/com/palantir/util/SoftCache.java
@@ -19,7 +19,10 @@
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
 import com.palantir.logsafe.Preconditions;
+import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.UnsafeArg;
+import com.palantir.logsafe.logger.SafeLogger;
+import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.lang.ref.Reference;
 import java.lang.ref.ReferenceQueue;
 import java.lang.ref.WeakReference;
@@ -33,8 +36,6 @@
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.concurrent.ConcurrentLinkedQueue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /**
  * Thread Safe
@@ -42,7 +43,7 @@ public class SoftCache extends MBeanCache {
     private static final int INITIAL_SIZE = 1000;

-    private static final Logger log = LoggerFactory.getLogger(SoftCache.class);
+    private static final SafeLogger log = SafeLoggerFactory.get(SoftCache.class);

     protected final Map> cacheEntries;
@@ -96,7 +97,7 @@ private synchronized void removeReference(Reference ref) {
         // (it could have already been replaced by a new entry)
         if (entry != null && entry.valueRef == ref) {
             if (log.isDebugEnabled()) {
-                log.debug("Removing from cache reference with key: {}", key);
+                log.debug("Removing from cache reference with key: {}", UnsafeArg.of("key", key));
             }

             cacheEntries.remove(key);
@@ -185,7 +186,7 @@ public synchronized V get(K key) {
         if (entry == null) {
             mbean.misses.incrementAndGet();
             if (log.isTraceEnabled()) {
-                log.trace("Cache miss (not cached) on {}", key);
+                log.trace("Cache miss (not cached) on {}", UnsafeArg.of("key", key));
             }
             return null;
         }
@@ -197,7 +198,7 @@ public synchronized V get(K key) {
         if (!entry.isValid()) {
             mbean.misses.incrementAndGet();
             if (log.isTraceEnabled()) {
-                log.trace("Cache miss (stale entry) on {}", key);
+                log.trace("Cache miss (stale entry) on {}", UnsafeArg.of("key", key));
             }
             cacheEntries.remove(key);
             return null;
@@ -205,7 +206,7 @@ public synchronized V get(K key) {

         // c) fresh and valid, return value
         if (log.isTraceEnabled()) {
-            log.trace("Cache hit on {}", key);
+            log.trace("Cache hit on {}", UnsafeArg.of("key", key));
         }
         mbean.hits.incrementAndGet();
         return ret;
@@ -270,7 +271,10 @@ public synchronized Set removeMatchingKeys(Predicate predicate) {
     public final void cleanup() {
         mbean.cleanups.incrementAndGet();
         if (log.isTraceEnabled()) {
-            log.trace("cleanup() called on {} of size: {}", getName(), cacheEntries.size());
+            log.trace(
+                    "cleanup() called on {} of size: {}",
+                    UnsafeArg.of("name", getName()),
+                    SafeArg.of("size", cacheEntries.size()));
         }

         int i = 0;
@@ -283,7 +287,10 @@ public final void cleanup() {
         }

         if (log.isTraceEnabled()) {
-            log.trace("cleanup() finished on {}. {} keys were cleaned up. ", getName(), i);
+            log.trace(
", + UnsafeArg.of("name", getName()), + SafeArg.of("cleanedUpCount", i)); } } diff --git a/atlasdb-commons/src/main/java/com/palantir/util/file/DeleteOnCloseFileInputStream.java b/atlasdb-commons/src/main/java/com/palantir/util/file/DeleteOnCloseFileInputStream.java index 471d2b58a24..e303f14b384 100644 --- a/atlasdb-commons/src/main/java/com/palantir/util/file/DeleteOnCloseFileInputStream.java +++ b/atlasdb-commons/src/main/java/com/palantir/util/file/DeleteOnCloseFileInputStream.java @@ -15,15 +15,16 @@ */ package com.palantir.util.file; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class DeleteOnCloseFileInputStream extends FileInputStream { - private static final Logger log = LoggerFactory.getLogger(DeleteOnCloseFileInputStream.class); + private static final SafeLogger log = SafeLoggerFactory.get(DeleteOnCloseFileInputStream.class); private File file; private boolean closed = false; @@ -44,9 +45,9 @@ public void close() throws IOException { } finally { if (!closed) { if (!file.delete()) { - log.warn("Failed to delete file {}", file.getAbsolutePath()); + log.warn("Failed to delete file {}", UnsafeArg.of("file", file.getAbsolutePath())); } else if (log.isDebugEnabled()) { - log.debug("Successfully deleted file {}", file.getAbsolutePath()); + log.debug("Successfully deleted file {}", UnsafeArg.of("file", file.getAbsolutePath())); } closed = true; } diff --git a/atlasdb-config/src/main/java/com/palantir/atlasdb/config/DiscoverableSubtypeResolver.java b/atlasdb-config/src/main/java/com/palantir/atlasdb/config/DiscoverableSubtypeResolver.java index c036135d3f6..ddc1e574527 100644 --- a/atlasdb-config/src/main/java/com/palantir/atlasdb/config/DiscoverableSubtypeResolver.java +++ b/atlasdb-config/src/main/java/com/palantir/atlasdb/config/DiscoverableSubtypeResolver.java @@ -33,6 +33,10 @@ import com.fasterxml.jackson.databind.jsontype.impl.StdSubtypeResolver; import com.google.common.collect.ImmutableList; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -42,8 +46,6 @@ import java.util.ArrayList; import java.util.Enumeration; import java.util.List; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * A Jackson subtype resolver which discovers subtypes via the META-INF/services directory @@ -51,7 +53,7 @@ */ class DiscoverableSubtypeResolver extends StdSubtypeResolver { private static final long serialVersionUID = 1L; - private static final Logger log = LoggerFactory.getLogger(DiscoverableSubtypeResolver.class); + private static final SafeLogger log = SafeLoggerFactory.get(DiscoverableSubtypeResolver.class); private final ImmutableList> discoveredSubtypes; @@ -91,13 +93,13 @@ private List> discoverServices(String className) { try { serviceClasses.add(getClassLoader().loadClass(line.trim())); } catch (ClassNotFoundException e) { - log.info("Unable to load {}", line, e); + log.info("Unable to load {}", UnsafeArg.of("line", line), e); } } } } } catch (IOException e) { - log.warn("Unable to load META-INF/services/{}", className, e); + log.warn("Unable to load META-INF/services/{}", 
SafeArg.of("className", className), e); } return serviceClasses; } diff --git a/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/ServiceDiscoveringAtlasSupplier.java b/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/ServiceDiscoveringAtlasSupplier.java index 604356a4a8a..bdb665b74c7 100644 --- a/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/ServiceDiscoveringAtlasSupplier.java +++ b/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/ServiceDiscoveringAtlasSupplier.java @@ -24,6 +24,10 @@ import com.palantir.atlasdb.spi.KeyValueServiceConfig; import com.palantir.atlasdb.spi.KeyValueServiceRuntimeConfig; import com.palantir.atlasdb.util.MetricsManager; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.refreshable.Refreshable; import com.palantir.timestamp.ManagedTimestampService; import com.palantir.timestamp.TimestampStoreInvalidator; @@ -38,11 +42,9 @@ import java.util.Optional; import java.util.function.LongSupplier; import java.util.function.Supplier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class ServiceDiscoveringAtlasSupplier { - private static final Logger log = LoggerFactory.getLogger(ServiceDiscoveringAtlasSupplier.class); + private static final SafeLogger log = SafeLoggerFactory.get(ServiceDiscoveringAtlasSupplier.class); private static String timestampServiceCreationInfo = null; @@ -91,7 +93,7 @@ public synchronized ManagedTimestampService getManagedTimestampService() { log.info( "[timestamp-service-creation] Fetching timestamp service from " + "thread {}. This should only happen once.", - Thread.currentThread().getName()); + SafeArg.of("threadName", Thread.currentThread().getName())); if (timestampServiceCreationInfo == null) { timestampServiceCreationInfo = ThreadDumps.programmaticThreadDump(); @@ -152,7 +154,7 @@ private void reportMultipleTimestampFetch(String path) { + " config. This means that you may soon encounter the MultipleRunningTimestampServices error." + " Thread dumps from both fetches of the timestamp service have been outputted to {}. If you" + " encounter a MultipleRunningTimestampServices error, please send this file to support.", - path); + UnsafeArg.of("path", path)); } else { log.warn( "[timestamp-service-creation] Timestamp service fetched for a second time. This is only OK if " @@ -162,7 +164,7 @@ private void reportMultipleTimestampFetch(String path) { + "Thread dumps from both fetches of the timestamp service have been outputted to {}. 
" + "If you encounter a MultipleRunningTimestampServices error, please send this file to " + "support.", - path); + UnsafeArg.of("path", path)); } } diff --git a/atlasdb-container-test-utils/src/main/java/com/palantir/atlasdb/containers/InterruptibleFileLogCollector.java b/atlasdb-container-test-utils/src/main/java/com/palantir/atlasdb/containers/InterruptibleFileLogCollector.java index 781ebcd44c3..1a08b0494d2 100644 --- a/atlasdb-container-test-utils/src/main/java/com/palantir/atlasdb/containers/InterruptibleFileLogCollector.java +++ b/atlasdb-container-test-utils/src/main/java/com/palantir/atlasdb/containers/InterruptibleFileLogCollector.java @@ -19,7 +19,11 @@ import com.palantir.docker.compose.execution.DockerCompose; import com.palantir.docker.compose.logging.LogCollector; import com.palantir.logsafe.Preconditions; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; import com.palantir.logsafe.exceptions.SafeRuntimeException; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.io.File; import java.io.FileOutputStream; @@ -28,12 +32,10 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.Validate; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @SuppressFBWarnings("SLF4J_ILLEGAL_PASSED_CLASS") public class InterruptibleFileLogCollector implements LogCollector { - private static final Logger log = LoggerFactory.getLogger(InterruptibleFileLogCollector.class); + private static final SafeLogger log = SafeLoggerFactory.get(InterruptibleFileLogCollector.class); private static final long STOP_TIMEOUT_IN_MILLIS = 50; @@ -66,7 +68,10 @@ public void collectLogs(DockerCompose dockerCompose) throws IOException, Interru .map(ContainerName::semanticName) .forEachOrdered(container -> executor.execute(() -> { File outputFile = new File(logDirectory, container + ".log"); - log.info("Writing logs for container '{}' to '{}'", container, outputFile.getAbsolutePath()); + log.info( + "Writing logs for container '{}' to '{}'", + SafeArg.of("container", container), + UnsafeArg.of("outputPath", outputFile.getAbsolutePath())); try (FileOutputStream outputStream = new FileOutputStream(outputFile)) { dockerCompose.writeLogs(container, outputStream); } catch (IOException e) { diff --git a/atlasdb-dbkvs-hikari/src/main/java/com/palantir/nexus/db/pool/HikariCPConnectionManager.java b/atlasdb-dbkvs-hikari/src/main/java/com/palantir/nexus/db/pool/HikariCPConnectionManager.java index 4196a4c8fff..f7cac8c7b33 100644 --- a/atlasdb-dbkvs-hikari/src/main/java/com/palantir/nexus/db/pool/HikariCPConnectionManager.java +++ b/atlasdb-dbkvs-hikari/src/main/java/com/palantir/nexus/db/pool/HikariCPConnectionManager.java @@ -20,6 +20,8 @@ import com.palantir.logsafe.SafeArg; import com.palantir.logsafe.UnsafeArg; import com.palantir.logsafe.exceptions.SafeRuntimeException; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.DBType; import com.palantir.nexus.db.pool.config.ConnectionConfig; import com.palantir.nexus.db.sql.ExceptionCheck; @@ -41,8 +43,6 @@ import javax.management.ObjectName; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.exception.ExceptionUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * HikariCP Connection Manager. 
@@ -51,7 +51,7 @@ */ @SuppressWarnings("checkstyle:AbbreviationAsWordInName") public class HikariCPConnectionManager extends BaseConnectionManager { - private static final Logger log = LoggerFactory.getLogger(HikariCPConnectionManager.class); + private static final SafeLogger log = SafeLoggerFactory.get(HikariCPConnectionManager.class); private final ConnectionConfig connConfig; private final HikariConfig hikariConfig; @@ -330,12 +330,15 @@ private HikariDataSource getDataSourcePool() { if (tzname.equals("Etc/Universal")) { // Really common failure case. UTC is both a name AND an abbreviation. - log.error("{} The timezone *name* should be UTC. {}", errorPreamble, errorAfterward); + log.error( + "{} The timezone *name* should be UTC. {}", + SafeArg.of("errorPreamble", errorPreamble), + SafeArg.of("errorAfterward", errorAfterward)); } else { log.error( "{} This is caused by using non-standard or unsupported timezone names. {}", - errorPreamble, - errorAfterward); + SafeArg.of("errorPreamble", errorPreamble), + SafeArg.of("errorAfterward", errorAfterward)); } } diff --git a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/DbKvs.java b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/DbKvs.java index c6c4ace0566..8ca93370508 100644 --- a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/DbKvs.java +++ b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/DbKvs.java @@ -85,6 +85,7 @@ import com.palantir.atlasdb.keyvalue.impl.Cells; import com.palantir.atlasdb.keyvalue.impl.IterablePartitioner; import com.palantir.atlasdb.keyvalue.impl.LocalRowColumnRangeIterator; +import com.palantir.atlasdb.logging.LoggingArgs; import com.palantir.common.annotation.Output; import com.palantir.common.base.ClosableIterator; import com.palantir.common.base.ClosableIterators; @@ -93,6 +94,9 @@ import com.palantir.common.concurrent.PTExecutors; import com.palantir.exception.PalantirSqlException; import com.palantir.logsafe.Preconditions; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.sql.AgnosticLightResultRow; import com.palantir.nexus.db.sql.AgnosticResultRow; import com.palantir.nexus.db.sql.AgnosticResultSet; @@ -128,11 +132,9 @@ import java.util.stream.Collectors; import javax.annotation.Nonnull; import javax.annotation.Nullable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class DbKvs extends AbstractKeyValueService implements DbKeyValueService { - private static final Logger log = LoggerFactory.getLogger(DbKvs.class); + private static final SafeLogger log = SafeLoggerFactory.get(DbKvs.class); public static final String ROW = "row_name"; public static final String COL = "col_name"; @@ -716,8 +718,8 @@ private TokenBackedBasicResultsPage>, Token> getTimestampsPa } finally { log.debug( "Call to KVS.getTimestampsPage on table {} took {} ms.", - tableRef, - watch.elapsed(TimeUnit.MILLISECONDS)); + LoggingArgs.tableRef(tableRef), + SafeArg.of("elapsed", watch.elapsed(TimeUnit.MILLISECONDS))); } } @@ -956,8 +958,8 @@ private Map>> getFirstRowsColumnRangePage( } finally { log.debug( "Call to KVS.getFirstRowColumnRangePage on table {} took {} ms.", - tableRef, - watch.elapsed(TimeUnit.MILLISECONDS)); + LoggingArgs.tableRef(tableRef), + SafeArg.of("elapsed", watch.elapsed(TimeUnit.MILLISECONDS))); } } diff --git 
a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/TableValueStyleCache.java b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/TableValueStyleCache.java index c2a350b142c..1a55bf4ca36 100644 --- a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/TableValueStyleCache.java +++ b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/TableValueStyleCache.java @@ -21,14 +21,15 @@ import com.google.common.cache.CacheBuilder; import com.google.common.collect.Iterables; import com.palantir.atlasdb.keyvalue.api.TableReference; +import com.palantir.atlasdb.logging.LoggingArgs; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.sql.AgnosticResultSet; import com.palantir.nexus.db.sql.SqlConnection; import java.util.concurrent.ExecutionException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class TableValueStyleCache { - private static final Logger log = LoggerFactory.getLogger(TableValueStyleCache.class); + private static final SafeLogger log = SafeLoggerFactory.get(TableValueStyleCache.class); private final Cache valueStyleByTableRef = CacheBuilder.newBuilder().build(); @@ -49,7 +50,10 @@ public TableValueStyle getTableType( Iterables.getOnlyElement(results.rows()).getInteger("table_size")); }); } catch (ExecutionException e) { - log.error("TableValueStyle for the table {} could not be retrieved.", tableRef.getQualifiedName(), e); + log.error( + "TableValueStyle for the table {} could not be retrieved.", + LoggingArgs.safeInternalTableName(tableRef.getQualifiedName()), + e); throw Throwables.propagate(e); } } diff --git a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleDdlTable.java b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleDdlTable.java index 8bfa41ce9d8..b26b1454163 100644 --- a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleDdlTable.java +++ b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleDdlTable.java @@ -35,6 +35,8 @@ import com.palantir.logsafe.SafeArg; import com.palantir.logsafe.UnsafeArg; import com.palantir.logsafe.exceptions.SafeIllegalArgumentException; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.sql.AgnosticResultSet; import com.palantir.nexus.db.sql.SqlConnection; import com.palantir.util.VersionStrings; @@ -42,11 +44,9 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class OracleDdlTable implements DbDdlTable { - private static final Logger log = LoggerFactory.getLogger(OracleDdlTable.class); + private static final SafeLogger log = SafeLoggerFactory.get(OracleDdlTable.class); private static final String MIN_ORACLE_VERSION = "11.2.0.2.3"; private final OracleDdlConfig config; @@ -181,7 +181,11 @@ private void insertTableMappingIgnoringPrimaryKeyViolation(String fullTableName, shortTableName); } catch (PalantirSqlException ex) { if (!isPrimaryKeyViolation(ex)) { - log.error("Error occurred trying to create table mapping {} -> {}", fullTableName, shortTableName, ex); + log.error( + "Error occurred trying to create table mapping {} -> {}", + UnsafeArg.of("fullTableName", fullTableName), + UnsafeArg.of("shortTableName", 
shortTableName), + ex); dropTableInternal(fullTableName, shortTableName); throw ex; } @@ -275,8 +279,8 @@ public void checkDatabaseVersion() { + " of oracle. The minimum supported version is {}" + ". If you absolutely need to use an older version of oracle," + " please contact Palantir support for assistance.", - version, - MIN_ORACLE_VERSION); + SafeArg.of("version", version), + SafeArg.of("minVersion", MIN_ORACLE_VERSION)); } } @@ -289,7 +293,7 @@ private void insertIgnoringConstraintViolation(boolean needsOverflow, String sql needsOverflow ? TableValueStyle.OVERFLOW.getId() : TableValueStyle.RAW.getId()); } catch (PalantirSqlException e) { if (!e.getMessage().contains(OracleErrorConstants.ORACLE_CONSTRAINT_VIOLATION_ERROR)) { - log.error("Error occurred trying to execute the Oracle query {}.", sql, e); + log.error("Error occurred trying to execute the Oracle query {}.", UnsafeArg.of("sql", sql), e); throw e; } } @@ -300,7 +304,7 @@ private void executeIgnoringError(String sql, String errorToIgnore) { conns.get().executeUnregisteredQuery(sql); } catch (PalantirSqlException e) { if (!e.getMessage().contains(errorToIgnore)) { - log.error("Error occurred trying to execute the Oracle query {}.", sql, e); + log.error("Error occurred trying to execute the Oracle query {}.", UnsafeArg.of("sql", sql), e); throw e; } } diff --git a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleOverflowWriteTable.java b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleOverflowWriteTable.java index db8f594efb6..3b79a980c4f 100644 --- a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleOverflowWriteTable.java +++ b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleOverflowWriteTable.java @@ -36,6 +36,10 @@ import com.palantir.atlasdb.keyvalue.dbkvs.impl.WhereClauses; import com.palantir.common.exception.TableMappingNotFoundException; import com.palantir.exception.PalantirSqlException; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.sql.ExceptionCheck; import com.palantir.nexus.db.sql.SqlConnection; import java.sql.SQLException; @@ -43,11 +47,9 @@ import java.util.Collection; import java.util.List; import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class OracleOverflowWriteTable implements DbWriteTable { - private static final Logger log = LoggerFactory.getLogger(OracleOverflowWriteTable.class); + private static final SafeLogger log = SafeLoggerFactory.get(OracleOverflowWriteTable.class); private final OracleDdlConfig config; private final ConnectionSupplier conns; @@ -240,9 +242,9 @@ public void delete(List> entries) { try { log.debug( "Got connection for delete on table {}: {}, autocommit={}", - shortTableName, - conn.getUnderlyingConnection(), - conn.getUnderlyingConnection().getAutoCommit()); + UnsafeArg.of("shortTableName", shortTableName), + UnsafeArg.of("connection", conn.getUnderlyingConnection()), + SafeArg.of("autocommit", conn.getUnderlyingConnection().getAutoCommit())); } catch (PalantirSqlException | SQLException e) { // } @@ -328,9 +330,9 @@ public void deleteAllTimestamps(Map deletes) { try { log.debug( "Got connection for deleteAllTimestamps on table {}: {}, autocommit={}", - shortTableName, - conn.getUnderlyingConnection(), - 
conn.getUnderlyingConnection().getAutoCommit()); + UnsafeArg.of("shortTableName", shortTableName), + UnsafeArg.of("connection", conn.getUnderlyingConnection()), + SafeArg.of("autocommit", conn.getUnderlyingConnection().getAutoCommit())); } catch (PalantirSqlException | SQLException e) { // } diff --git a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleTableInitializer.java b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleTableInitializer.java index d436cbaee6f..3301867d361 100644 --- a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleTableInitializer.java +++ b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/oracle/OracleTableInitializer.java @@ -21,11 +21,12 @@ import com.palantir.atlasdb.keyvalue.dbkvs.impl.ConnectionSupplier; import com.palantir.atlasdb.keyvalue.dbkvs.impl.DbTableInitializer; import com.palantir.exception.PalantirSqlException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; public class OracleTableInitializer implements DbTableInitializer { - private static final Logger log = LoggerFactory.getLogger(OracleTableInitializer.class); + private static final SafeLogger log = SafeLoggerFactory.get(OracleTableInitializer.class); private final ConnectionSupplier connectionSupplier; private final OracleDdlConfig config; @@ -94,7 +95,7 @@ private void executeIgnoringError(String sql, String errorToIgnore) { connectionSupplier.get().executeUnregisteredQuery(sql); } catch (PalantirSqlException e) { if (!e.getMessage().contains(errorToIgnore)) { - log.error("Error occurred trying to execute the query {}", sql, e); + log.error("Error occurred trying to execute the query {}", UnsafeArg.of("sql", sql), e); throw e; } } diff --git a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/postgres/PostgresDdlTable.java b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/postgres/PostgresDdlTable.java index 88244aa4e32..6e6682ac6ab 100644 --- a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/postgres/PostgresDdlTable.java +++ b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/postgres/PostgresDdlTable.java @@ -26,6 +26,10 @@ import com.palantir.atlasdb.keyvalue.dbkvs.impl.TableValueStyle; import com.palantir.atlasdb.keyvalue.dbkvs.impl.oracle.PrimaryKeyConstraintNames; import com.palantir.exception.PalantirSqlException; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.sql.AgnosticResultRow; import com.palantir.nexus.db.sql.AgnosticResultSet; import com.palantir.nexus.db.sql.ExceptionCheck; @@ -36,7 +40,10 @@ import org.slf4j.helpers.MessageFormatter; public class PostgresDdlTable implements DbDdlTable { - private static final Logger log = LoggerFactory.getLogger(PostgresDdlTable.class); + @SuppressWarnings("ConsistentLoggerName") + private static final Logger oldLog = LoggerFactory.getLogger(PostgresDdlTable.class); + + private static final SafeLogger log = SafeLoggerFactory.get(PostgresDdlTable.class); private static final int POSTGRES_NAME_LENGTH_LIMIT = 63; public static final int ATLASDB_POSTGRES_TABLE_NAME_LIMIT = POSTGRES_NAME_LENGTH_LIMIT - 
AtlasDbConstants.PRIMARY_KEY_CONSTRAINT_PREFIX.length(); @@ -85,9 +92,9 @@ public void create(byte[] tableMetadata) { } else if (prefixedTableName.length() > ATLASDB_POSTGRES_TABLE_NAME_LIMIT) { log.error( FAILED_TO_CREATE_TABLE_MESSAGE, - prefixedTableName, - ATLASDB_POSTGRES_TABLE_NAME_LIMIT, - ATLASDB_POSTGRES_TABLE_NAME_LIMIT, + UnsafeArg.of("prefixedTableName", prefixedTableName), + SafeArg.of("limit", ATLASDB_POSTGRES_TABLE_NAME_LIMIT), + SafeArg.of("limit2", ATLASDB_POSTGRES_TABLE_NAME_LIMIT), e); String exceptionMsg = MessageFormatter.arrayFormat(FAILED_TO_CREATE_TABLE_MESSAGE, new Object[] { prefixedTableName, ATLASDB_POSTGRES_TABLE_NAME_LIMIT, ATLASDB_POSTGRES_TABLE_NAME_LIMIT @@ -128,7 +135,7 @@ public void truncate() { public void checkDatabaseVersion() { AgnosticResultSet result = conns.get().selectResultSetUnregisteredQuery("SHOW server_version"); String version = result.get(0).getString("server_version"); - PostgresVersionCheck.checkDatabaseVersion(version, log); + PostgresVersionCheck.checkDatabaseVersion(version, oldLog); } @Override diff --git a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/postgres/PostgresTableInitializer.java b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/postgres/PostgresTableInitializer.java index b944f6ee1e8..afc72b4624b 100644 --- a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/postgres/PostgresTableInitializer.java +++ b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/impl/postgres/PostgresTableInitializer.java @@ -19,11 +19,12 @@ import com.palantir.atlasdb.keyvalue.dbkvs.impl.DbTableInitializer; import com.palantir.atlasdb.keyvalue.dbkvs.impl.oracle.PrimaryKeyConstraintNames; import com.palantir.exception.PalantirSqlException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; public class PostgresTableInitializer implements DbTableInitializer { - private static final Logger log = LoggerFactory.getLogger(PostgresTableInitializer.class); + private static final SafeLogger log = SafeLoggerFactory.get(PostgresTableInitializer.class); private final ConnectionSupplier connectionSupplier; @@ -69,7 +70,7 @@ private void executeIgnoringError(String sql, String errorToIgnore) { connectionSupplier.get().executeUnregisteredQuery(sql); } catch (PalantirSqlException e) { if (!e.getMessage().contains(errorToIgnore)) { - log.error("Error occurred trying to execute the Postgres query {}", sql, e); + log.error("Error occurred trying to execute the Postgres query {}", UnsafeArg.of("sql", sql), e); throw e; } } diff --git a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/timestamp/ConnectionDbTypes.java b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/timestamp/ConnectionDbTypes.java index 765bff1157f..af658133bf4 100644 --- a/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/timestamp/ConnectionDbTypes.java +++ b/atlasdb-dbkvs/src/main/java/com/palantir/atlasdb/keyvalue/dbkvs/timestamp/ConnectionDbTypes.java @@ -15,15 +15,16 @@ */ package com.palantir.atlasdb.keyvalue.dbkvs.timestamp; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.DBType; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.SQLException; -import org.slf4j.Logger; -import 
org.slf4j.LoggerFactory; public final class ConnectionDbTypes { - private static final Logger log = LoggerFactory.getLogger(ConnectionDbTypes.class); + private static final SafeLogger log = SafeLoggerFactory.get(ConnectionDbTypes.class); private ConnectionDbTypes() { // Utility class @@ -40,7 +41,9 @@ public static DBType getDbType(Connection conn) { } else if (driverName.contains("h2")) { return DBType.H2_MEMORY; } else { - log.error("Could not determine database type from connection with driver name {}", driverName); + log.error( + "Could not determine database type from connection with driver name {}", + SafeArg.of("driverName", driverName)); return null; } } catch (SQLException e) { diff --git a/atlasdb-ete-tests/src/main/java/com/palantir/atlasdb/todo/TodoClient.java b/atlasdb-ete-tests/src/main/java/com/palantir/atlasdb/todo/TodoClient.java index e4f38667ba5..36e1f29a370 100644 --- a/atlasdb-ete-tests/src/main/java/com/palantir/atlasdb/todo/TodoClient.java +++ b/atlasdb-ete-tests/src/main/java/com/palantir/atlasdb/todo/TodoClient.java @@ -47,6 +47,10 @@ import com.palantir.atlasdb.transaction.api.TransactionManager; import com.palantir.common.base.BatchingVisitable; import com.palantir.common.base.ClosableIterator; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.util.Pair; import com.palantir.util.crypto.Sha256Hash; import java.io.InputStream; @@ -59,11 +63,9 @@ import java.util.Set; import java.util.function.Supplier; import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class TodoClient { - private static final Logger log = LoggerFactory.getLogger(TodoClient.class); + private static final SafeLogger log = SafeLoggerFactory.get(TodoClient.class); private final TransactionManager transactionManager; private final Supplier kvs; @@ -142,13 +144,19 @@ public void storeSnapshot(InputStream snapshot) { maybeRow.ifPresent(latestSnapshot -> { Long latestStreamId = maybeRow.get().getStreamId(); - log.info("Marking stream {}, ref {}, as unused", latestStreamId, PtBytes.toString(streamReference)); + log.info( + "Marking stream {}, ref {}, as unused", + SafeArg.of("latestStreamId", latestStreamId), + UnsafeArg.of("streamReference", PtBytes.toString(streamReference))); Map theMap = ImmutableMap.of(latestStreamId, streamReference); streamStore.unmarkStreamsAsUsed(transaction, theMap); }); streamStore.markStreamAsUsed(transaction, newStreamId, streamReference); - log.info("Marked stream {} as used with reference {}", newStreamId, PtBytes.toString(streamReference)); + log.info( + "Marked stream {} as used with reference {}", + SafeArg.of("newStreamId", newStreamId), + UnsafeArg.of("streamReference", PtBytes.toString(streamReference))); // Record the latest snapshot latestSnapshotTable.putStreamId(row, newStreamId); @@ -161,7 +169,7 @@ private Long storeStreamAndGetId(InputStream snapshot, SnapshotsStreamStore stre log.info("Storing stream..."); Pair storedStream = streamStore.storeStream(snapshot); Long newStreamId = storedStream.getLhSide(); - log.info("Stored stream with ID {}", newStreamId); + log.info("Stored stream with ID {}", SafeArg.of("newStreamId", newStreamId)); return newStreamId; } diff --git a/atlasdb-ete-tests/src/test/java/com/palantir/atlasdb/ete/DockerClientOrchestrationRule.java b/atlasdb-ete-tests/src/test/java/com/palantir/atlasdb/ete/DockerClientOrchestrationRule.java index 
5af56078f96..8308e85eaca 100644 --- a/atlasdb-ete-tests/src/test/java/com/palantir/atlasdb/ete/DockerClientOrchestrationRule.java +++ b/atlasdb-ete-tests/src/test/java/com/palantir/atlasdb/ete/DockerClientOrchestrationRule.java @@ -28,6 +28,10 @@ import com.palantir.docker.compose.execution.ImmutableDockerComposeExecArgument; import com.palantir.docker.compose.logging.LogDirectory; import com.palantir.docker.proxy.DockerProxyRule; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.io.File; import java.io.IOException; import java.util.Arrays; @@ -37,11 +41,9 @@ import org.joda.time.Duration; import org.junit.rules.ExternalResource; import org.junit.rules.TemporaryFolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class DockerClientOrchestrationRule extends ExternalResource { - private static final Logger log = LoggerFactory.getLogger(DockerClientOrchestrationRule.class); + private static final SafeLogger log = SafeLoggerFactory.get(DockerClientOrchestrationRule.class); private static final String CONTAINER = "ete1"; private static final Duration WAIT_TIMEOUT = Duration.standardMinutes(5); @@ -135,7 +137,9 @@ private Map getEnvironment() { private String dockerExecOnClient(String... arguments) { for (int i = 1; i <= MAX_EXEC_TRIES; i++) { try { - log.info("Attempting docker-exec with arguments: {}", Arrays.asList(arguments)); + log.info( + "Attempting docker-exec with arguments: {}", + UnsafeArg.of("arguments", Arrays.asList(arguments))); return dockerComposeRule.exec( DockerComposeExecOption.noOptions(), CONTAINER, @@ -146,9 +150,13 @@ private String dockerExecOnClient(String... arguments) { if (i != MAX_EXEC_TRIES) { // I have seen very odd flakes where exec terminates with exit code 129 // i.e. they are interrupted with the hangup signal. 
- log.warn("Encountered error in docker-exec, retrying (attempt {} of {})", i, MAX_EXEC_TRIES, e); + log.warn( + "Encountered error in docker-exec, retrying (attempt {} of {})", + SafeArg.of("attempt", i), + SafeArg.of("maxAttempts", MAX_EXEC_TRIES), + e); } else { - log.error("Made {} attempts, and now giving up", MAX_EXEC_TRIES, e); + log.error("Made {} attempts, and now giving up", SafeArg.of("maxAttempts", MAX_EXEC_TRIES), e); throw e; } } diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/cleaner/Scrubber.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/cleaner/Scrubber.java index a17b43086b1..b387162ad5b 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/cleaner/Scrubber.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/cleaner/Scrubber.java @@ -35,6 +35,7 @@ import com.palantir.atlasdb.keyvalue.api.KeyValueService; import com.palantir.atlasdb.keyvalue.api.TableReference; import com.palantir.atlasdb.keyvalue.api.Value; +import com.palantir.atlasdb.logging.LoggingArgs; import com.palantir.atlasdb.table.description.UniformRowNamePartitioner; import com.palantir.atlasdb.table.description.ValueType; import com.palantir.atlasdb.transaction.api.Transaction; @@ -50,7 +51,11 @@ import com.palantir.common.concurrent.ExecutorInheritableThreadLocal; import com.palantir.common.concurrent.NamedThreadFactory; import com.palantir.common.concurrent.PTExecutors; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; import com.palantir.logsafe.exceptions.SafeIllegalStateException; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -69,8 +74,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; import javax.annotation.concurrent.GuardedBy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Scrubs individuals cells on-demand. @@ -82,7 +85,7 @@ */ @SuppressWarnings("checkstyle:FinalClass") // non-final for mocking public class Scrubber { - private static final Logger log = LoggerFactory.getLogger(Scrubber.class); + private static final SafeLogger log = SafeLoggerFactory.get(Scrubber.class); private static final int MAX_RETRY_ATTEMPTS = 100; private static final int RETRY_SLEEP_INTERVAL_IN_MILLIS = 1000; private static final int MAX_DELETES_IN_BATCH = 10_000; @@ -205,7 +208,9 @@ private synchronized void launchBackgroundScrubTask(final TransactionManager txM runBackgroundScrubTask(txManager); long sleepDuration = backgroundScrubFrequencyMillisSupplier.get(); - log.debug("Sleeping {} millis until next execution of scrub task", sleepDuration); + log.debug( + "Sleeping {} millis until next execution of scrub task", + SafeArg.of("sleepDuration", sleepDuration)); Thread.sleep(sleepDuration); } catch (InterruptedException e) { break; @@ -247,9 +252,9 @@ void runBackgroundScrubTask(final TransactionManager txManager) { aggressiveScrub ? 
immutableTimestamp : Math.min(unreadableTimestamp, immutableTimestamp); log.debug( "Scrub task immutableTimestamp: {}, unreadableTimestamp: {}, maxScrubTimestamp: {}", - immutableTimestamp, - unreadableTimestamp, - maxScrubTimestamp); + SafeArg.of("immutableTimestamp", immutableTimestamp), + SafeArg.of("unreadableTimestamp", unreadableTimestamp), + SafeArg.of("maxScrubTimestamp", maxScrubTimestamp)); final int batchSize = (int) Math.ceil(batchSizeSupplier.get() * ((double) threadCount / readThreadCount)); List rangeBoundaries = new ArrayList<>(); @@ -279,8 +284,8 @@ void runBackgroundScrubTask(final TransactionManager txManager) { int totalRead = totalCellsRead.addAndGet(numCellsRead); log.debug( "Scrub task processed {} cells in a batch, total {} processed so far.", - numCellsRead, - totalRead); + SafeArg.of("numCellsRead", numCellsRead), + SafeArg.of("totalRead", totalRead)); if (!isScrubEnabled.get()) { log.debug("Stopping scrub for banned hours."); break; @@ -298,8 +303,8 @@ void runBackgroundScrubTask(final TransactionManager txManager) { log.debug( "Scrub background task running at timestamp {} processed a total of {} cells", - maxScrubTimestamp, - totalCellsRead.get()); + SafeArg.of("maxScrubTimestamp", maxScrubTimestamp), + SafeArg.of("totalCellsRead", totalCellsRead.get())); log.debug("Finished scrub task"); } @@ -309,7 +314,7 @@ void runBackgroundScrubTask(final TransactionManager txManager) { final Multimap tableNameToCell, final long scrubTimestamp, final long commitTimestamp) { - log.debug("Scrubbing a total of {} cells immediately.", tableNameToCell.size()); + log.debug("Scrubbing a total of {} cells immediately.", SafeArg.of("count", tableNameToCell.size())); // Note that if the background scrub thread is also running at the same time, it will try to scrub // the same cells as the current thread (since these cells were queued for scrubbing right before @@ -320,8 +325,8 @@ void runBackgroundScrubTask(final TransactionManager txManager) { try { log.debug( "Sleeping because immutable timestamp {} has not advanced to at least commit timestamp {}", - nextImmutableTimestamp, - commitTimestamp); + SafeArg.of("nextImmutableTimestamp", nextImmutableTimestamp), + SafeArg.of("commitTimestamp", commitTimestamp)); Thread.sleep(AtlasDbConstants.SCRUBBER_RETRY_DELAY_MILLIS); } catch (InterruptedException e) { log.error("Interrupted while waiting for immutableTimestamp to advance past commitTimestamp", e); @@ -337,7 +342,7 @@ void runBackgroundScrubTask(final TransactionManager txManager) { } final Callable c = () -> { - log.debug("Scrubbing {} cells immediately.", batchMultimap.size()); + log.debug("Scrubbing {} cells immediately.", SafeArg.of("count", batchMultimap.size())); // Here we don't need to check scrub timestamps because we guarantee that scrubImmediately is called // AFTER the transaction commits scrubCells(txManager, batchMultimap, scrubTimestamp, TransactionType.AGGRESSIVE_HARD_DELETE); @@ -395,7 +400,7 @@ private long getCommitTimestampRollBackIfNecessary( + " before we could roll it back."; log.error( "This isn't a bug but it should be very infrequent. 
{}", - msg, + SafeArg.of("msg", msg), new TransactionFailedRetriableException(msg, e)); } commitTimestamp = transactionService.get(startTimestamp); @@ -421,7 +426,7 @@ private int scrubSomeCells( SortedMap> scrubTimestampToTableNameToCell, TransactionManager txManager, long maxScrubTimestamp) { - log.trace("Attempting to scrub cells: {}", scrubTimestampToTableNameToCell); + log.trace("Attempting to scrub cells: {}", UnsafeArg.of("cells", scrubTimestampToTableNameToCell)); if (log.isDebugEnabled()) { int numCells = 0; @@ -430,7 +435,10 @@ private int scrubSomeCells( tables.addAll(v.keySet()); numCells += v.size(); } - log.debug("Attempting to scrub {} cells from tables {}", numCells, tables); + log.debug( + "Attempting to scrub {} cells from tables {}", + SafeArg.of("numCells", numCells), + UnsafeArg.of("tables", tables)); } if (scrubTimestampToTableNameToCell.size() == 0) { @@ -494,7 +502,7 @@ private int scrubSomeCells( scrubberStore.markCellsAsScrubbed(failedWrites, batchSizeSupplier.get()); } - log.trace("Finished scrubbing cells: {}", scrubTimestampToTableNameToCell); + log.trace("Finished scrubbing cells: {}", UnsafeArg.of("cells", scrubTimestampToTableNameToCell)); if (log.isDebugEnabled()) { Set tables = new HashSet<>(); @@ -505,11 +513,11 @@ private int scrubSomeCells( long maxTimestamp = Collections.max(scrubTimestampToTableNameToCell.keySet()); log.debug( "Finished scrubbing {} cells at {} timestamps ({}...{}) from tables {}", - numCellsReadFromScrubTable, - scrubTimestampToTableNameToCell.size(), - minTimestamp, - maxTimestamp, - tables); + SafeArg.of("numCellsReadFromScrubTable", numCellsReadFromScrubTable), + SafeArg.of("tableCount", scrubTimestampToTableNameToCell.size()), + SafeArg.of("minTimestamp", minTimestamp), + SafeArg.of("maxTimestamp", maxTimestamp), + UnsafeArg.of("tables", tables)); } return numCellsReadFromScrubTable; @@ -527,8 +535,8 @@ private void scrubCells( TableReference tableRef = entry.getKey(); log.debug( "Attempting to immediately scrub {} cells from table {}", - entry.getValue().size(), - tableRef); + SafeArg.of("cellCount", entry.getValue().size()), + LoggingArgs.tableRef(tableRef)); for (List cells : Iterables.partition(entry.getValue(), batchSizeSupplier.get())) { Multimap allTimestamps = keyValueService.getAllTimestamps(tableRef, ImmutableSet.copyOf(cells), scrubTimestamp); @@ -547,8 +555,8 @@ private void scrubCells( } log.debug( "Immediately scrubbed {} cells from table {}", - entry.getValue().size(), - tableRef); + SafeArg.of("cellCount", entry.getValue().size()), + LoggingArgs.tableRef(tableRef)); } scrubberStore.markCellsAsScrubbed(allCellsToMarkScrubbed, batchSizeSupplier.get()); lazyWriteMetric(AtlasDbMetricNames.SCRUBBED_CELLS, allCellsToMarkScrubbed.size()); diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/internalschema/metrics/MetadataCoordinationServiceMetrics.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/internalschema/metrics/MetadataCoordinationServiceMetrics.java index 628ff331556..c84a1722003 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/internalschema/metrics/MetadataCoordinationServiceMetrics.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/internalschema/metrics/MetadataCoordinationServiceMetrics.java @@ -24,15 +24,15 @@ import com.palantir.atlasdb.internalschema.TimestampPartitioningMap; import com.palantir.atlasdb.monitoring.TrackerUtils; import com.palantir.atlasdb.util.MetricsManager; +import com.palantir.logsafe.logger.SafeLogger; +import 
com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.timestamp.TimestampService; import java.util.Optional; import java.util.function.Function; import java.util.function.Supplier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class MetadataCoordinationServiceMetrics { - private static final Logger log = LoggerFactory.getLogger(MetadataCoordinationServiceMetrics.class); + private static final SafeLogger log = SafeLoggerFactory.get(MetadataCoordinationServiceMetrics.class); private MetadataCoordinationServiceMetrics() { // utility class diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/ValidatingTransactionScopedCache.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/ValidatingTransactionScopedCache.java index ce523cda257..7ba5a1554d2 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/ValidatingTransactionScopedCache.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/ValidatingTransactionScopedCache.java @@ -31,8 +31,12 @@ import com.palantir.atlasdb.util.ByteArrayUtilities; import com.palantir.common.streams.KeyedStream; import com.palantir.lock.watch.CommitUpdate; +import com.palantir.logsafe.Arg; import com.palantir.logsafe.SafeArg; import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; +import java.util.Arrays; import java.util.Map; import java.util.NavigableMap; import java.util.Objects; @@ -43,11 +47,10 @@ import java.util.TreeSet; import java.util.function.BiFunction; import java.util.function.Function; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.stream.Collectors; final class ValidatingTransactionScopedCache implements TransactionScopedCache { - private static final Logger log = LoggerFactory.getLogger(ValidatingTransactionScopedCache.class); + private static final SafeLogger log = SafeLoggerFactory.get(ValidatingTransactionScopedCache.class); private final TransactionScopedCache delegate; private final Random random; @@ -191,11 +194,11 @@ private void validateCacheRowReads( } } - private void failAndLog(Object... args) { + private void failAndLog(Arg... 
args) { log.error( "Reading from lock watch cache returned a different result to a remote read - this indicates there " + "is a corruption bug in the caching logic", - args); + Arrays.stream(args).collect(Collectors.toList())); failureCallback.run(); throw new TransactionLockWatchFailedException( "Failed lock watch cache validation - will retry without caching"); diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/monitoring/TimestampTracker.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/monitoring/TimestampTracker.java index 5fd7021badb..ea15b9c2511 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/monitoring/TimestampTracker.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/monitoring/TimestampTracker.java @@ -20,12 +20,12 @@ import com.palantir.atlasdb.cleaner.api.Cleaner; import com.palantir.atlasdb.util.MetricsManager; import com.palantir.lock.v2.TimelockService; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.function.Supplier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class TimestampTracker { - private static final Logger log = LoggerFactory.getLogger(TimestampTracker.class); + private static final SafeLogger log = SafeLoggerFactory.get(TimestampTracker.class); private TimestampTracker() {} diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/monitoring/TrackerUtils.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/monitoring/TrackerUtils.java index 60d7d4fe75b..7f182e577d2 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/monitoring/TrackerUtils.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/monitoring/TrackerUtils.java @@ -21,12 +21,12 @@ import com.codahale.metrics.Gauge; import com.google.common.annotations.VisibleForTesting; import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.logger.SafeLogger; import java.time.Duration; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BinaryOperator; import java.util.function.Supplier; -import org.slf4j.Logger; public final class TrackerUtils { // We cache underlying calls, in case a hyper-aggressive metrics client repeatedly queries the values. 
@@ -56,7 +56,7 @@ private TrackerUtils() { * @return a caching, exception-handling gauge */ public static Gauge createCachingExceptionHandlingGauge( - Logger logger, Clock clock, String shortName, Supplier supplier) { + SafeLogger logger, Clock clock, String shortName, Supplier supplier) { return createCachingReducingGauge(logger, clock, shortName, supplier, null, (previous, current) -> current); } @@ -74,12 +74,12 @@ public static Gauge createCachingExceptionHandlingGauge( * @return a caching, monotonically increasing gauge */ public static Gauge createCachingMonotonicIncreasingGauge( - Logger logger, Clock clock, String shortName, Supplier supplier) { + SafeLogger logger, Clock clock, String shortName, Supplier supplier) { return createCachingReducingGauge(logger, clock, shortName, supplier, Long.MIN_VALUE, Math::max); } private static Gauge createCachingReducingGauge( - Logger logger, + SafeLogger logger, Clock clock, String shortName, Supplier supplier, diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/persistentlock/LockStoreImpl.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/persistentlock/LockStoreImpl.java index dbfa1edecd3..ec10c72a99d 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/persistentlock/LockStoreImpl.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/persistentlock/LockStoreImpl.java @@ -29,13 +29,14 @@ import com.palantir.atlasdb.keyvalue.api.RowResult; import com.palantir.atlasdb.keyvalue.api.Value; import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.List; import java.util.Objects; import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * LockStore manages {@link LockEntry} objects, specifically for the "Backup Lock" (to be taken out by backup and @@ -77,7 +78,7 @@ protected String getInitializingClassName() { } } - private static final Logger log = LoggerFactory.getLogger(LockStoreImpl.class); + private static final SafeLogger log = SafeLoggerFactory.get(LockStoreImpl.class); private static final String BACKUP_LOCK_NAME = "BackupLock"; private final InitializingWrapper wrapper = new InitializingWrapper(); @@ -195,7 +196,7 @@ void populate() { "Encountered a CheckAndSetException when creating the LockStore. This means that two" + " LockStore objects were created near-simultaneously, and is probably not a problem." 
+ " For the record, we observed these values: {}", - values, + UnsafeArg.of("values", values), e); } } diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/sweep/priority/NextTableToSweepProvider.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/sweep/priority/NextTableToSweepProvider.java index efc4e9b5cec..86ca3028504 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/sweep/priority/NextTableToSweepProvider.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/sweep/priority/NextTableToSweepProvider.java @@ -27,6 +27,8 @@ import com.palantir.lock.SingleLockService; import com.palantir.logsafe.SafeArg; import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -34,11 +36,9 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class NextTableToSweepProvider { - private static final Logger log = LoggerFactory.getLogger(NextTableToSweepProvider.class); + private static final SafeLogger log = SafeLoggerFactory.get(NextTableToSweepProvider.class); private final LockService lockService; private final StreamStoreRemappingSweepPriorityCalculator calculator; @@ -109,19 +109,19 @@ private Optional attemptToChooseTableFromPrioritisedList( if (sweepLockForTable.haveLocks()) { log.info( "Decided to start sweeping {} because {}.", - LoggingArgs.safeTableOrPlaceholder(tableRefToSweep), - reason); + LoggingArgs.tableRef(LoggingArgs.safeTableOrPlaceholder(tableRefToSweep)), + SafeArg.of("reason", reason)); return Optional.of(TableToSweep.newTable(tableRefToSweep, sweepLockForTable)); } } catch (InterruptedException e) { log.info( "Got interrupted while attempting to lock {} for sweeping.", - LoggingArgs.safeTableOrPlaceholder(tableRefToSweep), + LoggingArgs.tableRef(LoggingArgs.safeTableOrPlaceholder(tableRefToSweep)), e); } log.info( "Did not start sweeping {}, because it is being swept elsewhere. 
Another table will be chosen.", - LoggingArgs.safeTableOrPlaceholder(tableRefToSweep)); + LoggingArgs.tableRef(LoggingArgs.safeTableOrPlaceholder(tableRefToSweep))); } return Optional.empty(); diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/table/common/RangeVisitor.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/table/common/RangeVisitor.java index fd1fa2292e7..a3bfac723e7 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/table/common/RangeVisitor.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/table/common/RangeVisitor.java @@ -22,12 +22,16 @@ import com.palantir.atlasdb.keyvalue.api.RangeRequests; import com.palantir.atlasdb.keyvalue.api.RowResult; import com.palantir.atlasdb.keyvalue.api.TableReference; +import com.palantir.atlasdb.logging.LoggingArgs; import com.palantir.atlasdb.transaction.api.LockAwareTransactionTask; import com.palantir.atlasdb.transaction.api.Transaction; import com.palantir.atlasdb.transaction.api.TransactionManager; import com.palantir.common.base.Throwables; import com.palantir.common.concurrent.BlockingWorkerPool; import com.palantir.lock.HeldLocksToken; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.math.BigInteger; import java.util.ArrayList; import java.util.Arrays; @@ -35,11 +39,9 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicLong; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class RangeVisitor { - private static final Logger log = LoggerFactory.getLogger(RangeVisitor.class); + private static final SafeLogger log = SafeLoggerFactory.get(RangeVisitor.class); private final TransactionManager txManager; private final TableReference tableRef; private byte[] startRow = new byte[0]; @@ -125,9 +127,9 @@ public String toString() { counter.addAndGet(numVisited); log.info( "Visited {} rows from {} in {} ms.", - numVisited, - tableRef.getQualifiedName(), - System.currentTimeMillis() - startTime); + SafeArg.of("numVisited", numVisited), + LoggingArgs.tableRef(tableRef), + SafeArg.of("duration", System.currentTimeMillis() - startTime)); } catch (InterruptedException e) { throw Throwables.rewrapAndThrowUncheckedException(e); } diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/AdvisoryLockConditionSuppliers.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/AdvisoryLockConditionSuppliers.java index b4999218057..33d170dc8ac 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/AdvisoryLockConditionSuppliers.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/AdvisoryLockConditionSuppliers.java @@ -23,14 +23,15 @@ import com.palantir.lock.LockRequest; import com.palantir.lock.LockService; import com.palantir.logsafe.Preconditions; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.Set; import java.util.function.Supplier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class AdvisoryLockConditionSuppliers { - private static final Logger log = LoggerFactory.getLogger(AdvisoryLockConditionSuppliers.class); + private static final SafeLogger log = SafeLoggerFactory.get(AdvisoryLockConditionSuppliers.class); private static final int NUM_RETRIES = 10; @@ -81,7 +82,7 @@ 
private static HeldLocksToken acquireLock(LockService lockService, LockRequest l log.warn("Could not lock successfully", ex); ++failureCount; if (failureCount >= NUM_RETRIES) { - log.warn("Failing after {} tries", failureCount, ex); + log.warn("Failing after {} tries", SafeArg.of("failureCount", failureCount), ex); throw ex; } break; diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/BatchSizeIncreasingIterator.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/BatchSizeIncreasingIterator.java index 68f6a34dea7..fe1460e3d42 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/BatchSizeIncreasingIterator.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/BatchSizeIncreasingIterator.java @@ -21,15 +21,15 @@ import com.palantir.common.base.ClosableIterator; import com.palantir.common.base.ClosableIterators; import com.palantir.logsafe.Preconditions; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.util.AssertUtils; import java.util.List; import javax.annotation.Nullable; import org.immutables.value.Value; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class BatchSizeIncreasingIterator { - private static final Logger log = LoggerFactory.getLogger(BatchSizeIncreasingIterator.class); + private static final SafeLogger log = SafeLoggerFactory.get(BatchSizeIncreasingIterator.class); private final int originalBatchSize; private final BatchProvider batchProvider; diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/ReadOnlyTransactionManager.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/ReadOnlyTransactionManager.java index bd48f030405..d0a08da60a9 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/ReadOnlyTransactionManager.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/ReadOnlyTransactionManager.java @@ -37,15 +37,16 @@ import com.palantir.lock.LockRequest; import com.palantir.lock.LockService; import com.palantir.lock.v2.TimelockService; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.timestamp.TimestampManagementService; import com.palantir.timestamp.TimestampService; import java.util.List; import java.util.function.Supplier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class ReadOnlyTransactionManager extends AbstractLockAwareTransactionManager { - private static final Logger log = LoggerFactory.getLogger(ReadOnlyTransactionManager.class); + private static final SafeLogger log = SafeLoggerFactory.get(ReadOnlyTransactionManager.class); private final MetricsManager metricsManager; private final KeyValueService keyValueService; @@ -135,7 +136,9 @@ public KeyValueServiceStatus getKeyValueServiceStatus() { case TERMINAL: return KeyValueServiceStatus.TERMINAL; default: - log.warn("The kvs returned a non-standard availability status: {}", clusterAvailabilityStatus); + log.warn( + "The kvs returned a non-standard availability status: {}", + SafeArg.of("status", clusterAvailabilityStatus)); return KeyValueServiceStatus.UNHEALTHY; } } diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransaction.java 
b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransaction.java index 4cbff86a02e..2bbd874c487 100644 --- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransaction.java +++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransaction.java @@ -190,7 +190,7 @@ * different rows and using range scans. */ public class SnapshotTransaction extends AbstractTransaction implements ConstraintCheckingTransaction { - private static final Logger log = LoggerFactory.getLogger(SnapshotTransaction.class); + private static final SafeLogger log = SafeLoggerFactory.get(SnapshotTransaction.class); private static final Logger perfLogger = LoggerFactory.getLogger("dualschema.perf"); private static final SafeLogger constraintLogger = SafeLoggerFactory.get("dualschema.constraints"); @@ -1952,7 +1952,7 @@ private void throwIfImmutableTsOrCommitLocksExpired(@Nullable LockToken commitLo final String baseMsg = "Locks acquired as part of the transaction protocol are no longer valid. "; String expiredLocksErrorString = getExpiredLocksErrorString(commitLocksToken, expiredLocks); TransactionLockTimeoutException ex = new TransactionLockTimeoutException(baseMsg + expiredLocksErrorString); - log.warn(baseMsg + "{}", expiredLocksErrorString, ex); + log.warn(baseMsg + "{}", UnsafeArg.of("expiredLocksErrorString", expiredLocksErrorString), ex); transactionOutcomeMetrics.markLocksExpired(); throw ex; } @@ -2115,7 +2115,7 @@ protected Map detectWriteAlreadyCommittedInternal( Cell key = e.getKey(); long theirStartTimestamp = e.getValue(); AssertUtils.assertAndLog( - log, theirStartTimestamp != getStartTimestamp(), "Timestamp reuse is bad:%d", getStartTimestamp()); + log, theirStartTimestamp != getStartTimestamp(), "Timestamp reuse is bad:" + getStartTimestamp()); Long theirCommitTimestamp = commitTimestamps.get(theirStartTimestamp); if (theirCommitTimestamp == null || theirCommitTimestamp == TransactionConstants.FAILED_COMMIT_TS) { @@ -2126,7 +2126,7 @@ protected Map detectWriteAlreadyCommittedInternal( } AssertUtils.assertAndLog( - log, theirCommitTimestamp != getStartTimestamp(), "Timestamp reuse is bad:%d", getStartTimestamp()); + log, theirCommitTimestamp != getStartTimestamp(), "Timestamp reuse is bad:" + getStartTimestamp()); if (theirStartTimestamp > getStartTimestamp()) { dominatingWrites.add(Cells.createConflictWithMetadata( keyValueService, tableRef, key, theirStartTimestamp, theirCommitTimestamp)); diff --git a/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/monitoring/TrackerUtilsTest.java b/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/monitoring/TrackerUtilsTest.java index 743f3b44615..e899de6bc54 100644 --- a/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/monitoring/TrackerUtilsTest.java +++ b/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/monitoring/TrackerUtilsTest.java @@ -24,14 +24,14 @@ import com.codahale.metrics.Clock; import com.codahale.metrics.Gauge; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.function.Supplier; import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @SuppressWarnings("unchecked") // Mocks of known types public class TrackerUtilsTest { - private static final Logger log = LoggerFactory.getLogger(TrackerUtilsTest.class); + private static final SafeLogger log = SafeLoggerFactory.get(TrackerUtilsTest.class); private static final String SHORT_NAME = 
"shortName"; private final Supplier testSupplier = mock(Supplier.class); diff --git a/atlasdb-jepsen-tests/src/main/java/com/palantir/atlasdb/jepsen/lock/LockCorrectnessChecker.java b/atlasdb-jepsen-tests/src/main/java/com/palantir/atlasdb/jepsen/lock/LockCorrectnessChecker.java index fcb2490d916..cab12acb0d6 100644 --- a/atlasdb-jepsen-tests/src/main/java/com/palantir/atlasdb/jepsen/lock/LockCorrectnessChecker.java +++ b/atlasdb-jepsen-tests/src/main/java/com/palantir/atlasdb/jepsen/lock/LockCorrectnessChecker.java @@ -27,14 +27,16 @@ import com.palantir.atlasdb.jepsen.events.OkEvent; import com.palantir.atlasdb.jepsen.events.RequestType; import com.palantir.atlasdb.jepsen.utils.EventUtils; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; import com.palantir.logsafe.exceptions.SafeIllegalStateException; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.util.Pair; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Checker verifying that whenever a lock is granted, there was a time point between the request and the @@ -66,7 +68,7 @@ */ public class LockCorrectnessChecker implements Checker { - private static final Logger log = LoggerFactory.getLogger(LockCorrectnessChecker.class); + private static final SafeLogger log = SafeLoggerFactory.get(LockCorrectnessChecker.class); @Override public CheckerResult check(List events) { @@ -146,10 +148,10 @@ private void verifyLockCorrectness() { log.error( "Lock {} granted to process {} between {} and {}, but lock was already held by " + "another process.", - lockName, - invokeEvent.process(), - invokeEvent.time(), - okEvent.time()); + UnsafeArg.of("lockName", lockName), + SafeArg.of("process", invokeEvent.process()), + SafeArg.of("invokeTime", invokeEvent.time()), + SafeArg.of("okTime", okEvent.time())); errors.add(invokeEvent); errors.add(okEvent); } diff --git a/atlasdb-jepsen-tests/src/main/java/com/palantir/atlasdb/jepsen/lock/RefreshCorrectnessChecker.java b/atlasdb-jepsen-tests/src/main/java/com/palantir/atlasdb/jepsen/lock/RefreshCorrectnessChecker.java index 73853e566ed..ee832924c39 100644 --- a/atlasdb-jepsen-tests/src/main/java/com/palantir/atlasdb/jepsen/lock/RefreshCorrectnessChecker.java +++ b/atlasdb-jepsen-tests/src/main/java/com/palantir/atlasdb/jepsen/lock/RefreshCorrectnessChecker.java @@ -27,13 +27,15 @@ import com.palantir.atlasdb.jepsen.events.OkEvent; import com.palantir.atlasdb.jepsen.events.RequestType; import com.palantir.atlasdb.jepsen.utils.EventUtils; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; import com.palantir.logsafe.exceptions.SafeIllegalStateException; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * This checker verifies that refreshes of locks do not cause two processes to simultaneously hold the same lock. 
@@ -41,7 +43,7 @@ */ public class RefreshCorrectnessChecker implements Checker { - private static final Logger log = LoggerFactory.getLogger(RefreshCorrectnessChecker.class); + private static final SafeLogger log = SafeLoggerFactory.get(RefreshCorrectnessChecker.class); @Override public CheckerResult check(List events) { @@ -113,14 +115,14 @@ public Void visit(OkEvent event) { "A {} request for lock {} by process {} invoked at time {} was granted at " + "time {}, but another process was granted the lock between {} and {} " + "(last known time the lock was held by {})", - invokeEvent.function(), - invokeEvent.value(), - invokeEvent.process(), - invokeEvent.time(), - event.time(), - lastLockTime, - invokeEvent.time(), - invokeEvent.process()); + UnsafeArg.of("function", invokeEvent.function()), + UnsafeArg.of("value", invokeEvent.value()), + SafeArg.of("process", invokeEvent.process()), + SafeArg.of("invokeTime", invokeEvent.time()), + SafeArg.of("eventTime", event.time()), + SafeArg.of("lastLockTime", lastLockTime), + SafeArg.of("invokeTime2", invokeEvent.time()), + SafeArg.of("process2", invokeEvent.process())); errors.add(invokeEvent); errors.add(event); } diff --git a/atlasdb-tests-shared/src/test/java/com/palantir/atlasdb/keyvalue/impl/TracingKvsTest.java b/atlasdb-tests-shared/src/test/java/com/palantir/atlasdb/keyvalue/impl/TracingKvsTest.java index 1f023b06c08..050d6b3902d 100644 --- a/atlasdb-tests-shared/src/test/java/com/palantir/atlasdb/keyvalue/impl/TracingKvsTest.java +++ b/atlasdb-tests-shared/src/test/java/com/palantir/atlasdb/keyvalue/impl/TracingKvsTest.java @@ -17,6 +17,10 @@ import static org.assertj.core.api.Assertions.assertThat; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.tracing.Tracer; import com.palantir.tracing.api.Span; import com.palantir.tracing.api.SpanObserver; @@ -28,15 +32,13 @@ import java.util.Optional; import java.util.stream.Collectors; import org.junit.ClassRule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class TracingKvsTest extends AbstractKeyValueServiceTest { @ClassRule public static final TestResourceManager TRM = new TestResourceManager(() -> TracingKeyValueService.create(new InMemoryKeyValueService(false))); - private static final Logger log = LoggerFactory.getLogger(TracingKvsTest.class); + private static final SafeLogger log = SafeLoggerFactory.get(TracingKvsTest.class); private static final String TEST_OBSERVER_NAME = TracingKvsTest.class.getName(); public TracingKvsTest() { @@ -60,8 +62,8 @@ public void tearDown() throws Exception { List spans = ((TestSpanObserver) observer).spans(); log.warn( "{} spans: {}", - spans.size(), - spans.stream().map(Span::getOperation).collect(Collectors.toList())); + SafeArg.of("spanCount", spans.size()), + UnsafeArg.of("spans", spans.stream().map(Span::getOperation).collect(Collectors.toList()))); if (Tracer.isTraceObservable()) { assertThat(finishedSpan).isPresent(); assertThat(finishedSpan.get().getOperation()).isEqualTo("test"); @@ -89,7 +91,7 @@ private static final class TestSpanObserver implements SpanObserver { @Override public void consume(Span span) { - log.warn("{}", span); + log.warn("{}", UnsafeArg.of("span", span)); spans.add(span); } diff --git a/changelog/@unreleased/pr-5619.v2.yml b/changelog/@unreleased/pr-5619.v2.yml new file mode 100644 index 00000000000..e77e7d63513 --- /dev/null +++ 
b/changelog/@unreleased/pr-5619.v2.yml @@ -0,0 +1,5 @@ +type: improvement +improvement: + description: Refactor Logger to SafeLogger, part 1 + links: + - https://github.com/palantir/atlasdb/pull/5619 diff --git a/commons-db/src/main/java/com/palantir/nexus/db/ThreadConfinedProxy.java b/commons-db/src/main/java/com/palantir/nexus/db/ThreadConfinedProxy.java index e9d42aecd69..bab01e16eac 100644 --- a/commons-db/src/main/java/com/palantir/nexus/db/ThreadConfinedProxy.java +++ b/commons-db/src/main/java/com/palantir/nexus/db/ThreadConfinedProxy.java @@ -19,6 +19,8 @@ import com.google.common.reflect.AbstractInvocationHandler; import com.palantir.common.proxy.DelegatingInvocationHandler; import com.palantir.logsafe.Preconditions; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.util.AssertUtils; import java.lang.reflect.InvocationHandler; import java.lang.reflect.InvocationTargetException; @@ -27,8 +29,6 @@ import java.util.concurrent.Callable; import javax.annotation.concurrent.GuardedBy; import org.apache.commons.lang3.Validate; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Dynamic Proxy for confining an object to a particular thread, but allowing explicit handoff. @@ -39,7 +39,7 @@ */ @SuppressWarnings("ProxyNonConstantType") public final class ThreadConfinedProxy extends AbstractInvocationHandler implements DelegatingInvocationHandler { - private static final Logger log = LoggerFactory.getLogger(ThreadConfinedProxy.class); + private static final SafeLogger log = SafeLoggerFactory.get(ThreadConfinedProxy.class); public enum Strictness { ASSERT_AND_LOG, diff --git a/commons-db/src/main/java/com/palantir/nexus/db/monitoring/profiler/SimpleSqlProfiler.java b/commons-db/src/main/java/com/palantir/nexus/db/monitoring/profiler/SimpleSqlProfiler.java index d700934c15b..13b76a3081c 100644 --- a/commons-db/src/main/java/com/palantir/nexus/db/monitoring/profiler/SimpleSqlProfiler.java +++ b/commons-db/src/main/java/com/palantir/nexus/db/monitoring/profiler/SimpleSqlProfiler.java @@ -17,6 +17,8 @@ import com.google.common.collect.ImmutableList; import com.palantir.common.concurrent.ExecutorInheritableThreadLocal; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.util.AssertUtils; import com.palantir.util.sql.SqlCallStats; import java.util.Collection; @@ -24,13 +26,11 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import javax.annotation.Nullable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; enum SimpleSqlProfiler implements SqlProfiler { INSTANCE; - private static final Logger log = LoggerFactory.getLogger(SimpleSqlProfiler.class); + private static final SafeLogger log = SafeLoggerFactory.get(SimpleSqlProfiler.class); private final ExecutorInheritableThreadLocal> currentTrace = new ExecutorInheritableThreadLocal>(); diff --git a/commons-db/src/main/java/com/palantir/nexus/db/pool/CloseTracking.java b/commons-db/src/main/java/com/palantir/nexus/db/pool/CloseTracking.java index 3f7a9901f10..ec51a5fafe4 100644 --- a/commons-db/src/main/java/com/palantir/nexus/db/pool/CloseTracking.java +++ b/commons-db/src/main/java/com/palantir/nexus/db/pool/CloseTracking.java @@ -18,16 +18,17 @@ import com.google.common.base.FinalizableReferenceQueue; import com.google.common.base.FinalizableWeakReference; import com.google.common.collect.Sets; +import com.palantir.logsafe.UnsafeArg; +import 
com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.ResourceCreationLocation; import java.sql.Connection; import java.util.Arrays; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class CloseTracking { - private static final Logger log = LoggerFactory.getLogger(CloseTracking.class); + private static final SafeLogger log = SafeLoggerFactory.get(CloseTracking.class); private CloseTracking() { // nope @@ -75,7 +76,10 @@ public synchronized void close() { @SuppressWarnings("BadAssert") // only fail close check with asserts enabled public synchronized void check() { if (!closed) { - log.error("{} never closed!", typeName, createTrace); + log.error( + "{} never closed!", + UnsafeArg.of("typeName", typeName), + UnsafeArg.of("createTrace", createTrace)); assert false : typeName + " never closed!" + "\n" + Arrays.toString(createTrace.getStackTrace()); } } diff --git a/commons-db/src/main/java/com/palantir/nexus/db/pool/RetriableTransactions.java b/commons-db/src/main/java/com/palantir/nexus/db/pool/RetriableTransactions.java index 6bca1713606..be72c006c1c 100644 --- a/commons-db/src/main/java/com/palantir/nexus/db/pool/RetriableTransactions.java +++ b/commons-db/src/main/java/com/palantir/nexus/db/pool/RetriableTransactions.java @@ -19,6 +19,10 @@ import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.palantir.logsafe.Preconditions; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -27,11 +31,9 @@ import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import javax.annotation.Nullable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class RetriableTransactions { - private static final Logger log = LoggerFactory.getLogger(RetriableTransactions.class); + private static final SafeLogger log = SafeLoggerFactory.get(RetriableTransactions.class); private RetriableTransactions() { // hidden @@ -166,9 +168,9 @@ public TransactionResult run() { log.trace( "Got exception for retriable write transaction, startTimeMs = {}, attemptTimeMs =" + " {}, now = {}", - startTimeMs, - attemptTimeMs, - now, + SafeArg.of("startTimeMs", startTimeMs), + SafeArg.of("attemptTimeMs", attemptTimeMs), + SafeArg.of("now", now), e); } if (cm.isClosed()) { @@ -181,8 +183,8 @@ public TransactionResult run() { log.info( "Swallowing possible transient exception for retriable transaction, last attempt" + " took {} ms, total attempts have taken {}", - attemptLengthMs, - totalLengthMs, + SafeArg.of("attemptLengthMs", attemptLengthMs), + SafeArg.of("totalLengthMs", totalLengthMs), e); continue; } @@ -262,7 +264,10 @@ private boolean attemptVerify() throws SQLException { } private void squelch(SQLException e) { - log.warn("Squelching SQLException while trying to clean up retriable write transaction id {}", id, e); + log.warn( + "Squelching SQLException while trying to clean up retriable write transaction id {}", + UnsafeArg.of("id", id), + e); } } return new LexicalHelper().run(); @@ -315,7 +320,9 @@ private static void createTxTable(ConnectionManager cm) throws SQLException { } // Great, that worked, fallthrough to below but don't commit. 
} catch (SQLException e) { - log.info("The table {} has not been created yet, so we will try to create it.", TABLE_NAME); + log.info( + "The table {} has not been created yet, so we will try to create it.", + UnsafeArg.of("tableName", TABLE_NAME)); log.debug( "To check whether the table exists we tried to use it. This caused an exception indicating that it" + " did not exist. The exception was: ", diff --git a/commons-db/src/main/java/com/palantir/nexus/db/sql/AbstractAgnosticResultRow.java b/commons-db/src/main/java/com/palantir/nexus/db/sql/AbstractAgnosticResultRow.java index 359e485e7ef..aeef8413ba1 100644 --- a/commons-db/src/main/java/com/palantir/nexus/db/sql/AbstractAgnosticResultRow.java +++ b/commons-db/src/main/java/com/palantir/nexus/db/sql/AbstractAgnosticResultRow.java @@ -17,6 +17,9 @@ import com.palantir.common.base.Throwables; import com.palantir.exception.PalantirSqlException; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.DBType; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -24,12 +27,10 @@ import java.io.InputStreamReader; import java.sql.SQLException; import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public abstract class AbstractAgnosticResultRow implements AgnosticResultRow { - private static final Logger log = LoggerFactory.getLogger(AbstractAgnosticResultRow.class); + private static final SafeLogger log = SafeLoggerFactory.get(AbstractAgnosticResultRow.class); protected DBType dbType; protected Map columnMap; @@ -64,7 +65,7 @@ protected int findColumn(String colname) { log.debug( "Column name '{}' supplied in mixed case - leads to slower execution of" + " AgnosticResultRow.findColumn", - colname); //$NON-NLS-1$ + UnsafeArg.of("colname", colname)); // $NON-NLS-1$ } } return colIdx.intValue(); diff --git a/commons-db/src/main/java/com/palantir/nexus/db/sql/AgnosticLightResultSetImpl.java b/commons-db/src/main/java/com/palantir/nexus/db/sql/AgnosticLightResultSetImpl.java index bf3a6c38b5d..c020b5d2014 100644 --- a/commons-db/src/main/java/com/palantir/nexus/db/sql/AgnosticLightResultSetImpl.java +++ b/commons-db/src/main/java/com/palantir/nexus/db/sql/AgnosticLightResultSetImpl.java @@ -19,6 +19,7 @@ import com.palantir.common.visitor.Visitor; import com.palantir.exception.PalantirInterruptedException; import com.palantir.exception.PalantirSqlException; +import com.palantir.logsafe.UnsafeArg; import com.palantir.logsafe.logger.SafeLogger; import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.nexus.db.DBType; @@ -31,8 +32,6 @@ import java.sql.SQLException; import java.util.Iterator; import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * This result set only loads one row at a time, and thus provides a @@ -42,7 +41,7 @@ * */ class AgnosticLightResultSetImpl implements AgnosticLightResultSet { - private static final Logger log = LoggerFactory.getLogger(AgnosticLightResultSetImpl.class); + private static final SafeLogger log = SafeLoggerFactory.get(AgnosticLightResultSetImpl.class); private static final SafeLogger sqlExceptionlog = SafeLoggerFactory.get("sqlException." 
+ AgnosticLightResultSetImpl.class.getName()); @@ -91,7 +90,7 @@ public void close() { stmt.close(); results.close(); hasBeenClosed = true; - log.debug("Closed {}", this); + log.debug("Closed {}", UnsafeArg.of("this", this)); } catch (SQLException sqlex) { log.error("Caught SQLException", sqlex); // $NON-NLS-1$ } diff --git a/flake-rule/src/main/java/com/palantir/flake/FlakeRetryingRule.java b/flake-rule/src/main/java/com/palantir/flake/FlakeRetryingRule.java index 91676ebdb34..0318e0a7f33 100644 --- a/flake-rule/src/main/java/com/palantir/flake/FlakeRetryingRule.java +++ b/flake-rule/src/main/java/com/palantir/flake/FlakeRetryingRule.java @@ -17,13 +17,14 @@ import com.google.common.base.Preconditions; import com.google.common.base.Throwables; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.Arrays; import java.util.Optional; import org.junit.rules.TestRule; import org.junit.runner.Description; import org.junit.runners.model.Statement; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * A {@link TestRule} that retries methods and classes annotated with the {@link ShouldRetry} annotation. @@ -52,7 +53,7 @@ * Note: Please be very careful about ordering when chaining this with other JUnit test rules. */ public class FlakeRetryingRule implements TestRule { - private static final Logger log = LoggerFactory.getLogger(FlakeRetryingRule.class); + private static final SafeLogger log = SafeLoggerFactory.get(FlakeRetryingRule.class); @Override public Statement apply(Statement base, Description description) { @@ -88,10 +89,10 @@ private static void runStatementWithRetry(ShouldRetry retryAnnotation, Statement base.evaluate(); log.info( "Test {}.{} succeeded on attempt {} of {}.", - description.getClassName(), - description.getMethodName(), - attempt, - retryAnnotation.numAttempts()); + SafeArg.of("testClass", description.getClassName()), + SafeArg.of("testMethod", description.getMethodName()), + SafeArg.of("attempt", attempt), + SafeArg.of("maxAttempts", retryAnnotation.numAttempts())); return; } catch (Throwable t) { if (Arrays.stream(retryAnnotation.retryableExceptions()).anyMatch(type -> causeHasType(t, type))) { @@ -111,10 +112,10 @@ private static void logFailureAndThrowIfNeeded( ShouldRetry retryAnnotation, Description description, int attempt, Throwable throwable) { log.info( "Test {}.{} failed on attempt {} of {}.", - description.getClassName(), - description.getMethodName(), - attempt, - retryAnnotation.numAttempts(), + SafeArg.of("testClass", description.getClassName()), + SafeArg.of("testMethod", description.getMethodName()), + SafeArg.of("attempt", attempt), + SafeArg.of("maxAttempts", retryAnnotation.numAttempts()), throwable); if (attempt == retryAnnotation.numAttempts()) { throw Throwables.propagate(throwable); diff --git a/flake-rule/src/test/java/com/palantir/flake/fail/ExpectedFailureRule.java b/flake-rule/src/test/java/com/palantir/flake/fail/ExpectedFailureRule.java index 842cfbed0e1..033ccba845d 100644 --- a/flake-rule/src/test/java/com/palantir/flake/fail/ExpectedFailureRule.java +++ b/flake-rule/src/test/java/com/palantir/flake/fail/ExpectedFailureRule.java @@ -15,14 +15,15 @@ */ package com.palantir.flake.fail; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import org.junit.rules.TestRule; import org.junit.runner.Description; import 
org.junit.runners.model.Statement; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class ExpectedFailureRule implements TestRule { - private static final Logger log = LoggerFactory.getLogger(ExpectedFailureRule.class); + private static final SafeLogger log = SafeLoggerFactory.get(ExpectedFailureRule.class); @Override public Statement apply(Statement base, Description description) { @@ -44,7 +45,9 @@ private void evaluateBaseExpectingFailure() { String.format("%s was expected to fail, but passed!", getTestName(description))); } catch (Throwable t) { // PASS - a failure was expected - log.info("Test {} failed, which is in line with expectations.", getTestName(description)); + log.info( + "Test {} failed, which is in line with expectations.", + SafeArg.of("testName", getTestName(description))); } } }; diff --git a/leader-election-impl/src/main/java/com/palantir/paxos/PaxosAcceptorImpl.java b/leader-election-impl/src/main/java/com/palantir/paxos/PaxosAcceptorImpl.java index f7a41dc556c..628765fe554 100644 --- a/leader-election-impl/src/main/java/com/palantir/paxos/PaxosAcceptorImpl.java +++ b/leader-election-impl/src/main/java/com/palantir/paxos/PaxosAcceptorImpl.java @@ -16,15 +16,15 @@ package com.palantir.paxos; import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.io.IOException; import java.util.Optional; import java.util.OptionalLong; import java.util.concurrent.ConcurrentSkipListMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class PaxosAcceptorImpl implements PaxosAcceptor { - private static final Logger log = LoggerFactory.getLogger(PaxosAcceptorImpl.class); + private static final SafeLogger log = SafeLoggerFactory.get(PaxosAcceptorImpl.class); public static PaxosAcceptor newAcceptor(String logDir) { PaxosStateLog stateLog = new PaxosStateLogImpl<>(logDir); @@ -61,7 +61,7 @@ public PaxosPromise prepare(long seq, PaxosProposalId pid) { try { checkLogIfNeeded(seq); } catch (Exception e) { - log.error("log read failed for request: {}", seq, e); + log.error("log read failed for request: {}", SafeArg.of("seq", seq), e); return PaxosPromise.reject(pid); } diff --git a/lock-api/src/main/java/com/palantir/lock/client/LockRefreshingLockService.java b/lock-api/src/main/java/com/palantir/lock/client/LockRefreshingLockService.java index 65e3c759779..c5771e99dbe 100644 --- a/lock-api/src/main/java/com/palantir/lock/client/LockRefreshingLockService.java +++ b/lock-api/src/main/java/com/palantir/lock/client/LockRefreshingLockService.java @@ -28,6 +28,10 @@ import com.palantir.lock.SimpleHeldLocksToken; import com.palantir.lock.SimplifyingLockService; import com.palantir.logsafe.Preconditions; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -35,12 +39,10 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public final class LockRefreshingLockService extends SimplifyingLockService { public static final int REFRESH_BATCH_SIZE = 500_000; - private static final Logger log = LoggerFactory.getLogger(LockRefreshingLockService.class); + private static final SafeLogger log = 
SafeLoggerFactory.get(LockRefreshingLockService.class); private static final ScheduledExecutorService executor = PTExecutors.newScheduledThreadPool(1, PTExecutors.newNamedThreadFactory(true)); @@ -65,13 +67,13 @@ public static LockRefreshingLockService create(LockService delegate) { if (elapsed > LockRequest.getDefaultLockTimeout().toMillis() / 2) { log.warn( "Refreshing locks took {} milliseconds" + " for tokens: {}", - elapsed, - ret.toRefresh); + SafeArg.of("elapsed", elapsed), + UnsafeArg.of("tokens", ret.toRefresh)); } else if (elapsed > ret.refreshFrequencyMillis) { log.info( "Refreshing locks took {} milliseconds" + " for tokens: {}", - elapsed, - ret.toRefresh); + SafeArg.of("elapsed", elapsed), + UnsafeArg.of("tokens", ret.toRefresh)); } } }, @@ -143,7 +145,7 @@ private void refreshLocks() { } for (LockRefreshToken token : refreshCopy) { if (!refreshedTokens.contains(token) && toRefresh.contains(token)) { - log.warn("failed to refresh lock: {}", token); + log.warn("failed to refresh lock: {}", UnsafeArg.of("token", token)); toRefresh.remove(token); } } diff --git a/lock-api/src/main/java/com/palantir/lock/client/LockRefreshingRemoteLockService.java b/lock-api/src/main/java/com/palantir/lock/client/LockRefreshingRemoteLockService.java index 0171ddb6019..986dc08e7ff 100644 --- a/lock-api/src/main/java/com/palantir/lock/client/LockRefreshingRemoteLockService.java +++ b/lock-api/src/main/java/com/palantir/lock/client/LockRefreshingRemoteLockService.java @@ -22,17 +22,19 @@ import com.palantir.lock.LockRefreshToken; import com.palantir.lock.LockRequest; import com.palantir.lock.RemoteLockService; +import com.palantir.logsafe.SafeArg; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.io.IOException; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @SuppressWarnings("checkstyle:FinalClass") // Avoid breaking API in case someone extended this public class LockRefreshingRemoteLockService extends ForwardingRemoteLockService { - private static final Logger log = LoggerFactory.getLogger(LockRefreshingRemoteLockService.class); + private static final SafeLogger log = SafeLoggerFactory.get(LockRefreshingRemoteLockService.class); final RemoteLockService delegate; final Set toRefresh; @@ -55,13 +57,13 @@ public static LockRefreshingRemoteLockService create(RemoteLockService delegate) if (elapsed > LockRequest.getDefaultLockTimeout().toMillis() / 2) { log.warn( "Refreshing locks took {} milliseconds" + " for tokens: {}", - elapsed, - ret.toRefresh); + SafeArg.of("elapsed", elapsed), + UnsafeArg.of("tokens", ret.toRefresh)); } else if (elapsed > ret.refreshFrequencyMillis) { log.info( "Refreshing locks took {} milliseconds" + " for tokens: {}", - elapsed, - ret.toRefresh); + SafeArg.of("elapsed", elapsed), + UnsafeArg.of("tokens", ret.toRefresh)); } } }, @@ -114,7 +116,7 @@ private void refreshLocks() { Set refreshedTokens = delegate().refreshLockRefreshTokens(refreshCopy); for (LockRefreshToken token : refreshCopy) { if (!refreshedTokens.contains(token) && toRefresh.contains(token)) { - log.warn("failed to refresh lock: {}", token); + log.warn("failed to refresh lock: {}", UnsafeArg.of("token", token)); toRefresh.remove(token); } } diff --git a/lock-api/src/main/java/com/palantir/lock/client/ProfilingTimelockService.java 
b/lock-api/src/main/java/com/palantir/lock/client/ProfilingTimelockService.java index 5194bbbea0b..2451cdb0f6b 100644 --- a/lock-api/src/main/java/com/palantir/lock/client/ProfilingTimelockService.java +++ b/lock-api/src/main/java/com/palantir/lock/client/ProfilingTimelockService.java @@ -31,6 +31,8 @@ import com.palantir.lock.v2.WaitForLocksResponse; import com.palantir.logsafe.SafeArg; import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.timestamp.TimestampRange; import java.time.Duration; import java.util.List; @@ -40,8 +42,6 @@ import java.util.function.BooleanSupplier; import java.util.function.Supplier; import org.immutables.value.Value; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Logs operations on an underlying {@link TimelockService} that take a long time (defined as longer than @@ -59,14 +59,14 @@ * clients. */ public class ProfilingTimelockService implements AutoCloseable, TimelockService { - private static final Logger log = LoggerFactory.getLogger(ProfilingTimelockService.class); + private static final SafeLogger log = SafeLoggerFactory.get(ProfilingTimelockService.class); @VisibleForTesting static final Duration SLOW_THRESHOLD = Duration.ofSeconds(1); private static final Duration LOGGING_TIME_WINDOW = Duration.ofSeconds(10); - private final Logger logger; + private final SafeLogger logger; private final TimelockService delegate; private final Supplier stopwatchSupplier; @@ -75,7 +75,7 @@ public class ProfilingTimelockService implements AutoCloseable, TimelockService @VisibleForTesting ProfilingTimelockService( - Logger logger, + SafeLogger logger, TimelockService delegate, Supplier stopwatchSupplier, BooleanSupplier loggingPermissionSupplier) { diff --git a/lock-api/src/test/java/com/palantir/lock/client/ProfilingTimelockServiceTest.java b/lock-api/src/test/java/com/palantir/lock/client/ProfilingTimelockServiceTest.java index 307dde887c2..409471c64ce 100644 --- a/lock-api/src/test/java/com/palantir/lock/client/ProfilingTimelockServiceTest.java +++ b/lock-api/src/test/java/com/palantir/lock/client/ProfilingTimelockServiceTest.java @@ -33,6 +33,7 @@ import com.palantir.lock.v2.TimelockService; import com.palantir.lock.v2.WaitForLocksRequest; import com.palantir.logsafe.Arg; +import com.palantir.logsafe.logger.SafeLogger; import java.time.Duration; import java.time.temporal.ChronoUnit; import java.util.Optional; @@ -40,7 +41,6 @@ import java.util.function.BooleanSupplier; import org.junit.Test; import org.mockito.ArgumentCaptor; -import org.slf4j.Logger; public class ProfilingTimelockServiceTest { private static final Duration SHORT_DURATION = ProfilingTimelockService.SLOW_THRESHOLD.dividedBy(5); @@ -49,7 +49,7 @@ public class ProfilingTimelockServiceTest { ChronoUnit.CENTURIES.getDuration().multipliedBy(2); @SuppressWarnings("PreferStaticLoggers") - private final Logger logger = mock(Logger.class); + private final SafeLogger logger = mock(SafeLogger.class); private final TimelockService delegate = mock(TimelockService.class); @@ -199,13 +199,20 @@ private void verifyLoggerNeverInvoked() { @SuppressWarnings({ "unchecked", // Captors of generics - "Slf4jConstantLogMessage" + "CompileTimeConstant" }) // Logger verify private void verifyLoggerInvokedOnceWithSpecificProfile(String operation, Duration duration) { // XXX Maybe there's a better way? 
The approaches I tried didn't work because of conflict with other methods ArgumentCaptor> messageCaptor = ArgumentCaptor.forClass(Arg.class); ArgumentCaptor> durationCaptor = ArgumentCaptor.forClass(Arg.class); - verify(logger).info(any(String.class), messageCaptor.capture(), durationCaptor.capture(), any(), any(), any()); + verify(logger) + .info( + any(String.class), + messageCaptor.capture(), + durationCaptor.capture(), + any(), + any(), + any(Arg.class)); assertThat(messageCaptor.getValue().getValue()).isEqualTo(operation); assertThat(durationCaptor.getValue().getValue()).isEqualTo(duration); @@ -213,7 +220,7 @@ private void verifyLoggerInvokedOnceWithSpecificProfile(String operation, Durati @SuppressWarnings({ "unchecked", // Captors of generics - "Slf4jConstantLogMessage" + "CompileTimeConstant" }) // Logger verify private void verifyLoggerInvokedOnceWithException(Exception exception) { ArgumentCaptor>> exceptionCaptor = ArgumentCaptor.forClass(Arg.class); @@ -221,9 +228,9 @@ private void verifyLoggerInvokedOnceWithException(Exception exception) { assertThat(exceptionCaptor.getValue().getValue()).contains(exception); } - @SuppressWarnings("Slf4jConstantLogMessage") // only for tests + @SuppressWarnings("CompileTimeConstant") // only for tests private void verifyLoggerInvokedSpecificNumberOfTimes(int times) { - verify(logger, times(times)).info(any(String.class), any(), any(), any(), any(), any()); + verify(logger, times(times)).info(any(String.class), any(), any(), any(), any(), any(Arg.class)); } private void makeOperationsTakeSpecifiedDuration(Duration duration) { diff --git a/lock-impl/src/main/java/com/palantir/lock/impl/LockServerLock.java b/lock-impl/src/main/java/com/palantir/lock/impl/LockServerLock.java index 5252a105562..82b81abaf84 100644 --- a/lock-impl/src/main/java/com/palantir/lock/impl/LockServerLock.java +++ b/lock-impl/src/main/java/com/palantir/lock/impl/LockServerLock.java @@ -20,12 +20,13 @@ import com.palantir.lock.LockDescriptor; import com.palantir.lock.LockMode; import com.palantir.logsafe.Preconditions; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import java.util.concurrent.TimeUnit; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class LockServerLock implements ClientAwareReadWriteLock { - private static final Logger log = LoggerFactory.getLogger(LockServerLock.class); + private static final SafeLogger log = SafeLoggerFactory.get(LockServerLock.class); private final LockDescriptor descriptor; private final LockServerSync sync; @@ -78,7 +79,7 @@ public String toSanitizedString() { static IllegalMonitorStateException throwIllegalMonitorStateException(String message) { IllegalMonitorStateException ex = new IllegalMonitorStateException(message); - log.error("Illegal monitor state exception: {}", message, ex); + log.error("Illegal monitor state exception: {}", UnsafeArg.of("message", message), ex); throw ex; } diff --git a/lock-impl/src/main/java/com/palantir/lock/impl/LockServiceImpl.java b/lock-impl/src/main/java/com/palantir/lock/impl/LockServiceImpl.java index 01c25b6cbbe..aea350045db 100644 --- a/lock-impl/src/main/java/com/palantir/lock/impl/LockServiceImpl.java +++ b/lock-impl/src/main/java/com/palantir/lock/impl/LockServiceImpl.java @@ -109,8 +109,6 @@ import javax.annotation.concurrent.ThreadSafe; import org.joda.time.DateTime; import org.joda.time.format.ISODateTimeFormat; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import 
org.slf4j.helpers.MessageFormatter; /** @@ -126,7 +124,7 @@ public final class LockServiceImpl RemoteLockService, LockServiceImplMBean { - private static final Logger log = LoggerFactory.getLogger(LockServiceImpl.class); + private static final SafeLogger log = SafeLoggerFactory.get(LockServiceImpl.class); private static final SafeLogger requestLogger = SafeLoggerFactory.get("lock.request"); private static final String GRANT_MESSAGE = "Lock client {} tried to use a lock grant that" + " doesn't correspond to any held locks (grantId: {});" @@ -249,7 +247,7 @@ public static LockServiceImpl create(LockServerOptions options, ExecutorService private static LockServiceImpl create(LockServerOptions options, Ownable executor) { if (log.isTraceEnabled()) { - log.trace("Creating LockService with options={}", options); + log.trace("Creating LockService with options={}", SafeArg.of("options", options)); } final String jmxBeanRegistrationName = "com.palantir.lock:type=LockServer_" + instanceCount.getAndIncrement(); LockServiceImpl lockService = new LockServiceImpl( @@ -570,18 +568,14 @@ protected void logSlowLockAcquisition(String lockId, LockClient currentHolder, l UnsafeArg.of("lockId", lockId), SafeArg.of("outcome", currentHolder == null ? "successfully" : "unsuccessfully")); } else if (log.isDebugEnabled() && durationMillis > DEBUG_SLOW_LOG_TRIGGER_MILLIS) { - log.debug(slowLockLogMessage, constructSlowLockLogParams(lockId, currentHolder, durationMillis)); + log.debug( + slowLockLogMessage, + SafeArg.of("durationMillis", durationMillis), + UnsafeArg.of("lockId", lockId), + SafeArg.of("outcome", currentHolder == null ? "successfully" : "unsuccessfully")); } } - private Object[] constructSlowLockLogParams(String lockId, LockClient currentHolder, long durationMillis) { - return ImmutableList.of( - SafeArg.of("durationMillis", durationMillis), - UnsafeArg.of("lockId", lockId), - SafeArg.of("outcome", currentHolder == null ? 
"successfully" : "unsuccessfully")) - .toArray(); - } - @VisibleForTesting protected boolean isSlowLogEnabled() { return slowLogTriggerMillis > 0; @@ -615,7 +609,7 @@ public boolean unlock(HeldLocksToken token) { com.palantir.logsafe.Preconditions.checkNotNull(token); boolean success = unlockInternal(token, heldLocksTokenMap); if (log.isTraceEnabled()) { - log.trace(".unlock({}) returns {}", token, success); + log.trace(".unlock({}) returns {}", UnsafeArg.of("token", token), SafeArg.of("success", success)); } return success; } @@ -643,7 +637,7 @@ public boolean unlockAndFreeze(HeldLocksToken token) { @Nullable HeldLocks heldLocks = heldLocksTokenMap.remove(token); if (heldLocks == null) { if (log.isTraceEnabled()) { - log.trace(".unlockAndFreeze({}) returns false", token); + log.trace(".unlockAndFreeze({}) returns false", UnsafeArg.of("token", token)); } return false; } @@ -651,7 +645,7 @@ public boolean unlockAndFreeze(HeldLocksToken token) { if (client.isAnonymous()) { heldLocksTokenMap.put(token, heldLocks); lockTokenReaperQueue.add(token); - log.warn(UNLOCK_AND_FREEZE_FROM_ANONYMOUS_CLIENT, heldLocks.realToken); + log.warn(UNLOCK_AND_FREEZE_FROM_ANONYMOUS_CLIENT, UnsafeArg.of("token", heldLocks.realToken)); throw new IllegalArgumentException( MessageFormatter.format(UNLOCK_AND_FREEZE_FROM_ANONYMOUS_CLIENT, heldLocks.realToken) .getMessage()); @@ -659,7 +653,7 @@ public boolean unlockAndFreeze(HeldLocksToken token) { if (heldLocks.locks.hasReadLock()) { heldLocksTokenMap.put(token, heldLocks); lockTokenReaperQueue.add(token); - log.warn(UNLOCK_AND_FREEZE, heldLocks.realToken); + log.warn(UNLOCK_AND_FREEZE, UnsafeArg.of("token", heldLocks.realToken)); throw new IllegalArgumentException(MessageFormatter.format(UNLOCK_AND_FREEZE, heldLocks.realToken) .getMessage()); } @@ -671,7 +665,7 @@ public boolean unlockAndFreeze(HeldLocksToken token) { versionIdMap.remove(client, heldLocks.realToken.getVersionId()); } if (log.isTraceEnabled()) { - log.trace(".unlockAndFreeze({}) returns true", token); + log.trace(".unlockAndFreeze({}) returns true", UnsafeArg.of("token", token)); } return true; } @@ -724,7 +718,9 @@ public Set getTokens(LockClient client) { ImmutableSet tokenSet = tokens.build(); if (log.isTraceEnabled()) { log.trace( - ".getTokens({}) returns {}", client, Collections2.transform(tokenSet, LockServiceImpl::tokenToId)); + ".getTokens({}) returns {}", + UnsafeArg.of("client", client), + UnsafeArg.of("tokens", Collections2.transform(tokenSet, LockServiceImpl::tokenToId))); } return tokenSet; } @@ -743,8 +739,8 @@ public Set refreshTokens(Iterable tokens) { if (log.isTraceEnabled()) { log.trace( ".refreshTokens({}) returns {}", - Iterables.transform(tokens, LockServiceImpl::tokenToId), - Collections2.transform(refreshedTokenSet, LockServiceImpl::tokenToId)); + UnsafeArg.of("oldTokens", Iterables.transform(tokens, LockServiceImpl::tokenToId)), + UnsafeArg.of("newTokens", Collections2.transform(refreshedTokenSet, LockServiceImpl::tokenToId))); } return refreshedTokenSet; } @@ -808,7 +804,9 @@ public HeldLocksGrant refreshGrant(HeldLocksGrant grant) { @Nullable HeldLocks heldLocks = heldLocksGrantMap.get(grant); if (heldLocks == null) { if (log.isTraceEnabled()) { - log.trace(".refreshGrant({}) returns null", grant.getGrantId().toString(Character.MAX_RADIX)); + log.trace( + ".refreshGrant({}) returns null", + UnsafeArg.of("grant", grant.getGrantId().toString(Character.MAX_RADIX))); } return null; } @@ -821,7 +819,9 @@ public HeldLocksGrant refreshGrant(HeldLocksGrant grant) { heldLocks = 
heldLocksGrantMap.get(grant); if (heldLocks == null) { if (log.isTraceEnabled()) { - log.trace(".refreshGrant({}) returns null", grant.getGrantId().toString(Character.MAX_RADIX)); + log.trace( + ".refreshGrant({}) returns null", + UnsafeArg.of("grant", grant.getGrantId().toString(Character.MAX_RADIX))); } return null; } @@ -830,8 +830,8 @@ public HeldLocksGrant refreshGrant(HeldLocksGrant grant) { if (log.isTraceEnabled()) { log.trace( ".refreshGrant({}) returns {}", - grant.getGrantId().toString(Character.MAX_RADIX), - refreshedGrant.getGrantId().toString(Character.MAX_RADIX)); + UnsafeArg.of("oldGrant", grant.getGrantId().toString(Character.MAX_RADIX)), + UnsafeArg.of("newGrant", refreshedGrant.getGrantId().toString(Character.MAX_RADIX))); } return refreshedGrant; } @@ -966,7 +966,10 @@ public HeldLocksToken useGrant(LockClient client, BigInteger grantId) { HeldLocksGrant grant = new HeldLocksGrant(grantId); @Nullable HeldLocks heldLocks = heldLocksGrantMap.remove(grant); if (heldLocks == null) { - log.warn(GRANT_MESSAGE, client, grantId.toString(Character.MAX_RADIX)); + log.warn( + GRANT_MESSAGE, + UnsafeArg.of("client", client), + UnsafeArg.of("grant", grantId.toString(Character.MAX_RADIX))); String formattedMessage = MessageFormatter.format( GRANT_MESSAGE, client, grantId.toString(Character.MAX_RADIX)) .getMessage(); @@ -982,7 +985,11 @@ public HeldLocksToken useGrant(LockClient client, BigInteger grantId) { realGrant.getVersionId(), "Converted from Grant, Missing Thread Name"); if (log.isTraceEnabled()) { - log.trace(".useGrant({}, {}) returns {}", client, grantId.toString(Character.MAX_RADIX), token); + log.trace( + ".useGrant({}, {}) returns {}", + UnsafeArg.of("client", client), + UnsafeArg.of("grant", grantId.toString(Character.MAX_RADIX)), + UnsafeArg.of("token", token)); } return token; } @@ -1049,7 +1056,7 @@ public Long getMinLockedInVersionId(LockClient client) { } } if (log.isTraceEnabled()) { - log.trace(".getMinLockedInVersionId() returns {}", versionId); + log.trace(".getMinLockedInVersionId() returns {}", UnsafeArg.of("versionId", versionId)); } return versionId; } @@ -1131,7 +1138,7 @@ public LockServerOptions getLockServerOptions() { .build(); if (log.isTraceEnabled()) { - log.trace(".getLockServerOptions() returns {}", options); + log.trace(".getLockServerOptions() returns {}", SafeArg.of("options", options)); } return options; } @@ -1178,7 +1185,7 @@ private List getLockHolders(List readHolders, Optional existingBucket = getExistingBucketForParameters(); if (existingBucket.isPresent()) { this.bucket = existingBucket.get(); - log.info("found existing test data under bucket {}", bucket); + log.info("found existing test data under bucket {}", UnsafeArg.of("bucket", bucket)); return; } this.bucket = UUID.randomUUID().toString(); - log.info("creating new test data under bucket {}", bucket); + log.info("creating new test data under bucket {}", UnsafeArg.of("bucket", bucket)); writeData(); diff --git a/timelock-server/src/main/java/com/palantir/atlasdb/timelock/TimeLockServerLauncher.java b/timelock-server/src/main/java/com/palantir/atlasdb/timelock/TimeLockServerLauncher.java index 7ec0b765c06..54696de4c14 100644 --- a/timelock-server/src/main/java/com/palantir/atlasdb/timelock/TimeLockServerLauncher.java +++ b/timelock-server/src/main/java/com/palantir/atlasdb/timelock/TimeLockServerLauncher.java @@ -29,6 +29,9 @@ import com.palantir.conjure.java.api.config.service.UserAgent; import com.palantir.conjure.java.serialization.ObjectMappers; import 
com.palantir.conjure.java.server.jersey.ConjureJerseyFeature; +import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import com.palantir.refreshable.Refreshable; import com.palantir.sls.versions.OrderableSlsVersion; import com.palantir.timelock.config.TimeLockRuntimeConfiguration; @@ -48,15 +51,13 @@ import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; import org.eclipse.jetty.util.component.LifeCycle; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Provides a way of launching an embedded TimeLock server using Dropwizard. Should only be used in tests. */ public class TimeLockServerLauncher extends Application { - private static final Logger log = LoggerFactory.getLogger(TimeLockServerLauncher.class); + private static final SafeLogger log = SafeLoggerFactory.get(TimeLockServerLauncher.class); private static final UserAgent USER_AGENT = UserAgent.of(UserAgent.Agent.of("TimeLockServerLauncher", "0.0.0")); @@ -92,10 +93,12 @@ public void run(CombinedTimeLockServerConfiguration configuration, Environment e log.info( "Paxos configuration\n{}", - environment - .getObjectMapper() - .writerWithDefaultPrettyPrinter() - .writeValueAsString(configuration.install().paxos())); + UnsafeArg.of( + "paxosConfig", + environment + .getObjectMapper() + .writerWithDefaultPrettyPrinter() + .writeValueAsString(configuration.install().paxos()))); TimeLockRuntimeConfiguration runtime = configuration.runtime(); TimeLockAgent timeLockAgent = TimeLockAgent.create( metricsManager, diff --git a/timestamp-impl/src/main/java/com/palantir/timestamp/DebugLogger.java b/timestamp-impl/src/main/java/com/palantir/timestamp/DebugLogger.java index 6754eebe9c4..752bcffcfca 100644 --- a/timestamp-impl/src/main/java/com/palantir/timestamp/DebugLogger.java +++ b/timestamp-impl/src/main/java/com/palantir/timestamp/DebugLogger.java @@ -17,9 +17,9 @@ import com.palantir.logsafe.SafeArg; import com.palantir.logsafe.UnsafeArg; +import com.palantir.logsafe.logger.SafeLogger; +import com.palantir.logsafe.logger.SafeLoggerFactory; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * This is a logger intended for use tracking down problems arising from @@ -29,8 +29,7 @@ */ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE") public final class DebugLogger { - // TODO(nziebart): move remaining logging calls into this class - public static final Logger logger = LoggerFactory.getLogger(DebugLogger.class); + public static final SafeLogger logger = SafeLoggerFactory.get(DebugLogger.class); private DebugLogger() { // Logging utility class @@ -53,7 +52,7 @@ public static void createdPersistentTimestamp() { } public static void willStoreNewUpperLimit(long newLimit) { - logger.trace("storing new upper limit: {}.", newLimit); + logger.trace("storing new upper limit: {}.", SafeArg.of("newLimit", newLimit)); } public static void didStoreNewUpperLimit(long newLimit) {