diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java index 60189aba160..5301eb54b48 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java @@ -1268,11 +1268,8 @@ void handleUncaughtException(Exception throwable) { final BasePerformanceEntry basePerformanceEntry = initialFilterExecution.getBasePerformanceEntry(); if (basePerformanceEntry != null) { - final QueryPerformanceNugget outerNugget = - QueryPerformanceRecorder.getInstance().getOuterNugget(); - if (outerNugget != null) { - outerNugget.addBaseEntry(basePerformanceEntry); - } + QueryPerformanceRecorder.getInstance().getEnclosingNugget() + .accumulate(basePerformanceEntry); } } currentMapping.initializePreviousValue(); @@ -1516,11 +1513,7 @@ this, mode, columns, rowSet, getModifiedColumnSetForUpdates(), publishTheseSourc } finally { final BasePerformanceEntry baseEntry = jobScheduler.getAccumulatedPerformance(); if (baseEntry != null) { - final QueryPerformanceNugget outerNugget = - QueryPerformanceRecorder.getInstance().getOuterNugget(); - if (outerNugget != null) { - outerNugget.addBaseEntry(baseEntry); - } + QueryPerformanceRecorder.getInstance().getEnclosingNugget().accumulate(baseEntry); } } } @@ -3572,12 +3565,9 @@ public static void checkInitiateBinaryOperation(@NotNull final Table first, @Not } private R applyInternal(@NotNull final Function function) { - final QueryPerformanceNugget nugget = - QueryPerformanceRecorder.getInstance().getNugget("apply(" + function + ")"); - try { + try (final SafeCloseable ignored = + QueryPerformanceRecorder.getInstance().getNugget("apply(" + function + ")")) { return function.apply(this); - } finally { - nugget.done(); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/SelectOrUpdateListener.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/SelectOrUpdateListener.java index 724fe1ff0cc..1cab4f19722 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/SelectOrUpdateListener.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/SelectOrUpdateListener.java @@ -134,11 +134,9 @@ private void completionRoutine(TableUpdate upstream, JobScheduler jobScheduler, getUpdateGraph().addNotification(new TerminalNotification() { @Override public void run() { - synchronized (accumulated) { - final PerformanceEntry entry = getEntry(); - if (entry != null) { - entry.accumulate(accumulated); - } + final PerformanceEntry entry = getEntry(); + if (entry != null) { + entry.accumulate(accumulated); } } }); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java index d63ce199cac..4da70a34572 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java @@ -7,6 +7,7 @@ import io.deephaven.base.log.LogOutputAppendable; import io.deephaven.base.verify.Assert; import io.deephaven.util.profiling.ThreadProfiler; +import org.jetbrains.annotations.NotNull; import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.minus; import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.plus; @@ -15,13 +16,13 @@ * A smaller entry that simply 
records usage data, meant for aggregating into the larger entry. */ public class BasePerformanceEntry implements LogOutputAppendable { - private long intervalUsageNanos; + private long usageNanos; - private long intervalCpuNanos; - private long intervalUserCpuNanos; + private long cpuNanos; + private long userCpuNanos; - private long intervalAllocatedBytes; - private long intervalPoolAllocatedBytes; + private long allocatedBytes; + private long poolAllocatedBytes; private long startTimeNanos; @@ -31,26 +32,26 @@ public class BasePerformanceEntry implements LogOutputAppendable { private long startAllocatedBytes; private long startPoolAllocatedBytes; - public void onBaseEntryStart() { + public synchronized void onBaseEntryStart() { startAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); - startPoolAllocatedBytes = QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(); + startPoolAllocatedBytes = QueryPerformanceRecorderState.getPoolAllocatedBytesForCurrentThread(); startUserCpuNanos = ThreadProfiler.DEFAULT.getCurrentThreadUserTime(); startCpuNanos = ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(); startTimeNanos = System.nanoTime(); } - public void onBaseEntryEnd() { - intervalUserCpuNanos = plus(intervalUserCpuNanos, + public synchronized void onBaseEntryEnd() { + userCpuNanos = plus(userCpuNanos, minus(ThreadProfiler.DEFAULT.getCurrentThreadUserTime(), startUserCpuNanos)); - intervalCpuNanos = - plus(intervalCpuNanos, minus(ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(), startCpuNanos)); + cpuNanos = + plus(cpuNanos, minus(ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(), startCpuNanos)); - intervalUsageNanos += System.nanoTime() - startTimeNanos; + usageNanos += System.nanoTime() - startTimeNanos; - intervalPoolAllocatedBytes = plus(intervalPoolAllocatedBytes, - minus(QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(), startPoolAllocatedBytes)); - intervalAllocatedBytes = plus(intervalAllocatedBytes, + poolAllocatedBytes = plus(poolAllocatedBytes, + minus(QueryPerformanceRecorderState.getPoolAllocatedBytesForCurrentThread(), startPoolAllocatedBytes)); + allocatedBytes = plus(allocatedBytes, minus(ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(), startAllocatedBytes)); startAllocatedBytes = 0; @@ -61,46 +62,76 @@ public void onBaseEntryEnd() { startTimeNanos = 0; } - void baseEntryReset() { + synchronized void baseEntryReset() { Assert.eqZero(startTimeNanos, "startTimeNanos"); - intervalUsageNanos = 0; + usageNanos = 0; - intervalCpuNanos = 0; - intervalUserCpuNanos = 0; + cpuNanos = 0; + userCpuNanos = 0; - intervalAllocatedBytes = 0; - intervalPoolAllocatedBytes = 0; + allocatedBytes = 0; + poolAllocatedBytes = 0; } - public long getIntervalUsageNanos() { - return intervalUsageNanos; + /** + * Get the aggregate usage in nanoseconds. This getter should be called by exclusive owners of the entry, and never + * concurrently with mutators. + * + * @return total wall clock time in nanos + */ + public long getUsageNanos() { + return usageNanos; } - public long getIntervalCpuNanos() { - return intervalCpuNanos; + /** + * Get the aggregate cpu time in nanoseconds. This getter should be called by exclusive owners of the entry, and + * never concurrently with mutators. + * + * @return total cpu time in nanos + */ + public long getCpuNanos() { + return cpuNanos; } - public long getIntervalUserCpuNanos() { - return intervalUserCpuNanos; + /** + * Get the aggregate cpu user time in nanoseconds. 
This getter should be called by exclusive owners of the entry, + * and never concurrently with mutators. + * + * @return total cpu user time in nanos + */ + public long getUserCpuNanos() { + return userCpuNanos; } - public long getIntervalAllocatedBytes() { - return intervalAllocatedBytes; + /** + * Get the aggregate allocated memory in bytes. This getter should be called by exclusive owners of the entry, and + * never concurrently with mutators. + * + * @return The bytes of allocated memory attributed to the instrumented operation. + */ + public long getAllocatedBytes() { + return allocatedBytes; } - public long getIntervalPoolAllocatedBytes() { - return intervalPoolAllocatedBytes; + /** + * Get allocated pooled/reusable memory attributed to the instrumented operation in bytes. This getter should be + * called by exclusive owners of the entry, and never concurrently with mutators. + * + * @return total pool allocated memory in bytes + */ + public long getPoolAllocatedBytes() { + return poolAllocatedBytes; } @Override - public LogOutput append(LogOutput logOutput) { + public LogOutput append(@NotNull final LogOutput logOutput) { final LogOutput currentValues = logOutput.append("BasePerformanceEntry{") - .append(", intervalUsageNanos=").append(intervalUsageNanos) - .append(", intervalCpuNanos=").append(intervalCpuNanos) - .append(", intervalUserCpuNanos=").append(intervalUserCpuNanos) - .append(", intervalAllocatedBytes=").append(intervalAllocatedBytes) - .append(", intervalPoolAllocatedBytes=").append(intervalPoolAllocatedBytes); + .append(", intervalUsageNanos=").append(usageNanos) + .append(", intervalCpuNanos=").append(cpuNanos) + .append(", intervalUserCpuNanos=").append(userCpuNanos) + .append(", intervalAllocatedBytes=").append(allocatedBytes) + .append(", intervalPoolAllocatedBytes=").append(poolAllocatedBytes); return appendStart(currentValues) .append('}'); } @@ -114,12 +145,17 @@ LogOutput appendStart(LogOutput logOutput) { .append(", startPoolAllocatedBytes=").append(startPoolAllocatedBytes); } - public void accumulate(BasePerformanceEntry entry) { - this.intervalUsageNanos += entry.intervalUsageNanos; - this.intervalCpuNanos = plus(this.intervalCpuNanos, entry.intervalCpuNanos); - this.intervalUserCpuNanos = plus(this.intervalUserCpuNanos, entry.intervalUserCpuNanos); - - this.intervalAllocatedBytes = plus(this.intervalAllocatedBytes, entry.intervalAllocatedBytes); - this.intervalPoolAllocatedBytes = plus(this.intervalPoolAllocatedBytes, entry.intervalPoolAllocatedBytes); + /** + * Accumulate the values from another entry into this one. The provided entry will not be mutated. 
+ * + * @param entry the entry to accumulate + */ + public synchronized void accumulate(@NotNull final BasePerformanceEntry entry) { + this.usageNanos += entry.usageNanos; + this.cpuNanos = plus(this.cpuNanos, entry.cpuNanos); + this.userCpuNanos = plus(this.userCpuNanos, entry.userCpuNanos); + + this.allocatedBytes = plus(this.allocatedBytes, entry.allocatedBytes); + this.poolAllocatedBytes = plus(this.poolAllocatedBytes, entry.poolAllocatedBytes); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java index fb03b488ffb..889581ab928 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java @@ -13,13 +13,14 @@ import io.deephaven.io.log.impl.LogOutputStringImpl; import io.deephaven.time.DateTimeUtils; import io.deephaven.util.QueryConstants; +import org.jetbrains.annotations.NotNull; /** * Entry class for tracking the performance characteristics of a single recurring update event. */ public class PerformanceEntry extends BasePerformanceEntry implements TableListener.Entry { - private final int id; - private final int evaluationNumber; + private final long id; + private final long evaluationNumber; private final int operationNumber; private final String description; private final String callerLine; @@ -42,7 +43,7 @@ public class PerformanceEntry extends BasePerformanceEntry implements TableListe private final RuntimeMemory.Sample startSample; private final RuntimeMemory.Sample endSample; - PerformanceEntry(final int id, final int evaluationNumber, final int operationNumber, + PerformanceEntry(final long id, final long evaluationNumber, final int operationNumber, final String description, final String callerLine, final String updateGraphName) { this.id = id; this.evaluationNumber = evaluationNumber; @@ -114,7 +115,7 @@ public String toString() { } @Override - public LogOutput append(final LogOutput logOutput) { + public LogOutput append(@NotNull final LogOutput logOutput) { final LogOutput beginning = logOutput.append("PerformanceEntry{") .append(", id=").append(id) .append(", evaluationNumber=").append(evaluationNumber) @@ -122,16 +123,16 @@ public LogOutput append(final LogOutput logOutput) { .append(", description='").append(description).append('\'') .append(", callerLine='").append(callerLine).append('\'') .append(", authContext=").append(authContext) - .append(", intervalUsageNanos=").append(getIntervalUsageNanos()) - .append(", intervalCpuNanos=").append(getIntervalCpuNanos()) - .append(", intervalUserCpuNanos=").append(getIntervalUserCpuNanos()) + .append(", intervalUsageNanos=").append(getUsageNanos()) + .append(", intervalCpuNanos=").append(getCpuNanos()) + .append(", intervalUserCpuNanos=").append(getUserCpuNanos()) .append(", intervalInvocationCount=").append(intervalInvocationCount) .append(", intervalAdded=").append(intervalAdded) .append(", intervalRemoved=").append(intervalRemoved) .append(", intervalModified=").append(intervalModified) .append(", intervalShifted=").append(intervalShifted) - .append(", intervalAllocatedBytes=").append(getIntervalAllocatedBytes()) - .append(", intervalPoolAllocatedBytes=").append(getIntervalPoolAllocatedBytes()) + .append(", intervalAllocatedBytes=").append(getAllocatedBytes()) + .append(", intervalPoolAllocatedBytes=").append(getPoolAllocatedBytes()) .append(", 
maxTotalMemory=").append(maxTotalMemory) .append(", minFreeMemory=").append(minFreeMemory) .append(", collections=").append(collections) @@ -140,11 +141,11 @@ public LogOutput append(final LogOutput logOutput) { .append('}'); } - public int getId() { + public long getId() { return id; } - public int getEvaluationNumber() { + public long getEvaluationNumber() { return evaluationNumber; } @@ -217,7 +218,7 @@ public long getIntervalInvocationCount() { */ boolean shouldLogEntryInterval() { return intervalInvocationCount > 0 && - UpdatePerformanceTracker.LOG_THRESHOLD.shouldLog(getIntervalUsageNanos()); + UpdatePerformanceTracker.LOG_THRESHOLD.shouldLog(getUsageNanos()); } public void accumulate(PerformanceEntry entry) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index 76761e8b561..950e279af2a 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -4,15 +4,20 @@ package io.deephaven.engine.table.impl.perf; import io.deephaven.auth.AuthContext; +import io.deephaven.base.clock.SystemClock; +import io.deephaven.base.log.LogOutput; +import io.deephaven.base.verify.Assert; import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.io.log.impl.LogOutputStringImpl; import io.deephaven.time.DateTimeUtils; import io.deephaven.engine.table.impl.util.RuntimeMemory; import io.deephaven.util.QueryConstants; -import io.deephaven.util.profiling.ThreadProfiler; +import io.deephaven.util.SafeCloseable; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; -import java.io.Serializable; +import java.util.function.Consumer; -import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.minus; import static io.deephaven.util.QueryConstants.*; /** @@ -20,78 +25,185 @@ * intimate relationship with another class, {@link QueryPerformanceRecorder}. Changes to either should take this lack * of encapsulation into account. */ -public class QueryPerformanceNugget implements Serializable, AutoCloseable { - private static final QueryPerformanceLogThreshold LOG_THRESHOLD = new QueryPerformanceLogThreshold("", 1_000_000); - private static final QueryPerformanceLogThreshold UNINSTRUMENTED_LOG_THRESHOLD = - new QueryPerformanceLogThreshold("Uninstrumented", 1_000_000_000); +public class QueryPerformanceNugget extends BasePerformanceEntry implements SafeCloseable { private static final int MAX_DESCRIPTION_LENGTH = 16 << 10; - private static final long serialVersionUID = 2L; - /** * A re-usable "dummy" nugget which will never collect any information or be recorded. */ - static final QueryPerformanceNugget DUMMY_NUGGET = new QueryPerformanceNugget(); + static final QueryPerformanceNugget DUMMY_NUGGET = new QueryPerformanceNugget() { + @Override + public void accumulate(@NotNull BasePerformanceEntry entry) { + // non-synchronized no-op override + } + + @Override + public boolean shouldLog() { + return false; + } + }; + + public interface Factory { + /** + * Factory method for query-level nuggets. 
+ * + * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation + * @param description The operation description + * @param sessionId The gRPC client session-id if applicable + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget + * should be logged. + * @return A new nugget + */ + default QueryPerformanceNugget createForQuery( + final long evaluationNumber, + @NotNull final String description, + @Nullable final String sessionId, + @NotNull final Consumer onCloseCallback) { + return new QueryPerformanceNugget(evaluationNumber, NULL_LONG, NULL_INT, NULL_INT, NULL_INT, description, + sessionId, false, NULL_LONG, onCloseCallback); + } + + /** + * Factory method for sub-query-level nuggets. + * + * @param parentQuery The parent query nugget + * @param evaluationNumber A unique identifier for the sub-query evaluation that triggered this nugget creation + * @param description The operation description + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget + * should be logged. + * @return A new nugget + */ + default QueryPerformanceNugget createForSubQuery( + @NotNull final QueryPerformanceNugget parentQuery, + final long evaluationNumber, + @NotNull final String description, + @NotNull final Consumer onCloseCallback) { + Assert.eqTrue(parentQuery.isQueryLevel(), "parentQuery.isQueryLevel()"); + return new QueryPerformanceNugget(evaluationNumber, parentQuery.getEvaluationNumber(), NULL_INT, NULL_INT, + NULL_INT, description, parentQuery.getSessionId(), false, NULL_LONG, onCloseCallback); + } + + /** + * Factory method for operation-level nuggets. + * + * @param parentQueryOrOperation The parent query / operation nugget + * @param operationNumber A query-unique identifier for the operation + * @param description The operation description + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget + * should be logged. + * @return A new nugget + */ + default QueryPerformanceNugget createForOperation( + @NotNull final QueryPerformanceNugget parentQueryOrOperation, + final int operationNumber, + final String description, + final long inputSize, + @NotNull final Consumer onCloseCallback) { + int depth = parentQueryOrOperation.getDepth(); + if (depth == NULL_INT) { + depth = 0; + } else { + ++depth; + } + + return new QueryPerformanceNugget( + parentQueryOrOperation.getEvaluationNumber(), + parentQueryOrOperation.getParentEvaluationNumber(), + operationNumber, + parentQueryOrOperation.getOperationNumber(), + depth, + description, + parentQueryOrOperation.getSessionId(), + true, // operations are always user + inputSize, + onCloseCallback); + } + + /** + * Factory method for catch-all nuggets. + * + * @param parentQuery The parent query nugget + * @param operationNumber A query-unique identifier for the operation + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget + * should be logged. 
+ * @return A new nugget + */ + default QueryPerformanceNugget createForCatchAll( + @NotNull final QueryPerformanceNugget parentQuery, + final int operationNumber, + @NotNull final Consumer onCloseCallback) { + Assert.eqTrue(parentQuery.isQueryLevel(), "parentQuery.isQueryLevel()"); + return new QueryPerformanceNugget( + parentQuery.getEvaluationNumber(), + parentQuery.getParentEvaluationNumber(), + operationNumber, + NULL_INT, // catch all has no parent operation + 0, // catch all is a root operation + QueryPerformanceRecorder.UNINSTRUMENTED_CODE_DESCRIPTION, + parentQuery.getSessionId(), + false, // catch all is not user + NULL_LONG, + onCloseCallback); // catch all has no input size + } + } - private final int evaluationNumber; + public static final Factory DEFAULT_FACTORY = new Factory() {}; + + private final long evaluationNumber; + private final long parentEvaluationNumber; + private final int operationNumber; + private final int parentOperationNumber; private final int depth; private final String description; + private final String sessionId; private final boolean isUser; private final long inputSize; - + private final Consumer onCloseCallback; private final AuthContext authContext; private final String callerLine; - private final long startClockTime; + private long startClockEpochNanos; + private long endClockEpochNanos; - private final long startTimeNanos; - private final long startCpuNanos; - private final long startUserCpuNanos; - private final long startAllocatedBytes; - private final long startPoolAllocatedBytes; private volatile QueryState state; - private Long totalTimeNanos; - private long diffCpuNanos; - private long diffUserCpuNanos; - private long diffAllocatedBytes; - private long diffPoolAllocatedBytes; - - private final RuntimeMemory.Sample startMemorySample; - private final RuntimeMemory.Sample endMemorySample; - - private boolean shouldLogMeAndStackParents; + private RuntimeMemory.Sample startMemorySample; + private RuntimeMemory.Sample endMemorySample; - /** - * For threaded operations we want to accumulate the CPU time, allocations, and read operations to the enclosing - * nugget of the main operation. For the initialization we ignore the wall clock time taken in the thread pool. - */ - private BasePerformanceEntry basePerformanceEntry; - - /** - * Constructor for query-level nuggets. - * - * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation - * @param description The operation description - */ - QueryPerformanceNugget(final int evaluationNumber, final String description) { - this(evaluationNumber, NULL_INT, description, false, NULL_LONG); - } + /** whether this nugget triggers the logging of itself and every other nugget in its stack of nesting operations */ + private boolean shouldLog; /** * Full constructor for nuggets. 
* * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation + * @param parentEvaluationNumber The unique identifier of the parent evaluation or {@link QueryConstants#NULL_LONG} + * if none + * @param operationNumber A unique identifier for the operation within a query evaluation + * @param parentOperationNumber The unique identifier of the parent operation or {@link QueryConstants#NULL_INT} if + * none * @param depth Depth in the evaluation chain for the respective operation * @param description The operation description * @param isUser Whether this is a "user" nugget or one created by the system * @param inputSize The size of the input data + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget should + * be logged. */ - QueryPerformanceNugget(final int evaluationNumber, final int depth, - final String description, final boolean isUser, final long inputSize) { - startMemorySample = new RuntimeMemory.Sample(); - endMemorySample = new RuntimeMemory.Sample(); + protected QueryPerformanceNugget( + final long evaluationNumber, + final long parentEvaluationNumber, + final int operationNumber, + final int parentOperationNumber, + final int depth, + @NotNull final String description, + @Nullable final String sessionId, + final boolean isUser, + final long inputSize, + @NotNull final Consumer onCloseCallback) { this.evaluationNumber = evaluationNumber; + this.parentEvaluationNumber = parentEvaluationNumber; + this.operationNumber = operationNumber; + this.parentOperationNumber = parentOperationNumber; this.depth = depth; if (description.length() > MAX_DESCRIPTION_LENGTH) { this.description = description.substring(0, MAX_DESCRIPTION_LENGTH) + " ... [truncated " @@ -99,85 +211,92 @@ public class QueryPerformanceNugget implements Serializable, AutoCloseable { } else { this.description = description; } + this.sessionId = sessionId; this.isUser = isUser; this.inputSize = inputSize; + this.onCloseCallback = onCloseCallback; authContext = ExecutionContext.getContext().getAuthContext(); callerLine = QueryPerformanceRecorder.getCallerLine(); - final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); - runtimeMemory.read(startMemorySample); - - startAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); - startPoolAllocatedBytes = QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(); - - startClockTime = System.currentTimeMillis(); - startTimeNanos = System.nanoTime(); + startClockEpochNanos = NULL_LONG; + endClockEpochNanos = NULL_LONG; - startCpuNanos = ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(); - startUserCpuNanos = ThreadProfiler.DEFAULT.getCurrentThreadUserTime(); - - state = QueryState.RUNNING; - shouldLogMeAndStackParents = false; + state = QueryState.NOT_STARTED; } /** * Construct a "dummy" nugget, which will never gather any information or be recorded. 
*/ private QueryPerformanceNugget() { - startMemorySample = null; - endMemorySample = null; - evaluationNumber = NULL_INT; + evaluationNumber = NULL_LONG; + parentEvaluationNumber = NULL_LONG; + operationNumber = NULL_INT; + parentOperationNumber = NULL_INT; depth = 0; description = null; + sessionId = null; isUser = false; inputSize = NULL_LONG; - + onCloseCallback = null; authContext = null; callerLine = null; - startAllocatedBytes = NULL_LONG; - startPoolAllocatedBytes = NULL_LONG; - - startClockTime = NULL_LONG; - startTimeNanos = NULL_LONG; + startClockEpochNanos = NULL_LONG; + endClockEpochNanos = NULL_LONG; - startCpuNanos = NULL_LONG; - startUserCpuNanos = NULL_LONG; + state = QueryState.NOT_STARTED; + } - basePerformanceEntry = null; + /** + * Start clock epoch nanos is set if this is the first time this nugget has been started. + */ + @Override + public synchronized void onBaseEntryStart() { + // note that we explicitly do not call super.onBaseEntryStart() on query level nuggets as all top level nuggets + // accumulate into it to account for parallelized execution + if (operationNumber != NULL_INT) { + super.onBaseEntryStart(); + } + if (state == QueryState.RUNNING) { + throw new IllegalStateException("Nugget was already started"); + } + if (startClockEpochNanos == NULL_LONG) { + startClockEpochNanos = SystemClock.systemUTC().currentTimeNanos(); + } + startMemorySample = new RuntimeMemory.Sample(); + endMemorySample = new RuntimeMemory.Sample(); + final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); + runtimeMemory.read(startMemorySample); - state = null; // This turns close into a no-op. - shouldLogMeAndStackParents = false; + state = QueryState.RUNNING; } - public void done() { - done(QueryPerformanceRecorder.getInstance()); + @Override + public synchronized void onBaseEntryEnd() { + if (state != QueryState.RUNNING) { + throw new IllegalStateException("Nugget isn't running"); + } + state = QueryState.SUSPENDED; + // note that we explicitly do not call super.onBaseEntryEnd() on query level nuggets as all top level nuggets + // accumulate into it to account for parallelized execution + if (operationNumber != NULL_INT) { + super.onBaseEntryEnd(); + } } /** * Mark this nugget {@link QueryState#FINISHED} and notify the recorder. - * - * @param recorder The recorder to notify - * @return if the nugget passes logging thresholds. - */ - public boolean done(final QueryPerformanceRecorder recorder) { - return close(QueryState.FINISHED, recorder); - } - - /** - * AutoCloseable implementation - wraps the no-argument version of done() used by query code outside of the - * QueryPerformance(Recorder/Nugget), reporting successful completion to the thread-local QueryPerformanceRecorder - * instance. + *

+ * {@link SafeCloseable} implementation for try-with-resources. */ @Override public void close() { - done(); + close(QueryState.FINISHED); } - @SuppressWarnings("WeakerAccess") - public boolean abort(final QueryPerformanceRecorder recorder) { - return close(QueryState.INTERRUPTED, recorder); + public void abort() { + close(QueryState.INTERRUPTED); } /** @@ -185,71 +304,87 @@ public boolean abort(final QueryPerformanceRecorder recorder) { * * @param closingState The current query state. If it is anything other than {@link QueryState#RUNNING} nothing will * happen and it will return false; - * - * @param recorderToNotify The {@link QueryPerformanceRecorder} to notify this nugget is closing. - * @return If the nugget passes criteria for logging. */ - private boolean close(final QueryState closingState, final QueryPerformanceRecorder recorderToNotify) { - final long currentThreadUserTime = ThreadProfiler.DEFAULT.getCurrentThreadUserTime(); - final long currentThreadCpuTime = ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(); + private void close(final QueryState closingState) { if (state != QueryState.RUNNING) { - return false; + return; } synchronized (this) { if (state != QueryState.RUNNING) { - return false; + return; } - diffUserCpuNanos = minus(currentThreadUserTime, startUserCpuNanos); - diffCpuNanos = minus(currentThreadCpuTime, startCpuNanos); - - totalTimeNanos = System.nanoTime() - startTimeNanos; + onBaseEntryEnd(); + endClockEpochNanos = SystemClock.systemUTC().currentTimeNanos(); final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); runtimeMemory.read(endMemorySample); - diffPoolAllocatedBytes = - minus(QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(), startPoolAllocatedBytes); - diffAllocatedBytes = minus(ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(), startAllocatedBytes); - - if (basePerformanceEntry != null) { - diffUserCpuNanos += basePerformanceEntry.getIntervalUserCpuNanos(); - diffCpuNanos += basePerformanceEntry.getIntervalCpuNanos(); - - diffAllocatedBytes += basePerformanceEntry.getIntervalAllocatedBytes(); - diffPoolAllocatedBytes += basePerformanceEntry.getIntervalPoolAllocatedBytes(); - } - state = closingState; - return recorderToNotify.releaseNugget(this); + onCloseCallback.accept(this); } } @Override public String toString() { - return evaluationNumber - + ":" + description - + ":" + callerLine; + return new LogOutputStringImpl().append(this).toString(); } - public int getEvaluationNumber() { + @Override + public LogOutput append(@NotNull final LogOutput logOutput) { + // override BasePerformanceEntry's impl + return logOutput.append(evaluationNumber) + .append(":").append(isQueryLevel() ? 
"query_level" : Integer.toString(operationNumber)) + .append(":").append(description) + .append(":").append(callerLine); + } + + public long getEvaluationNumber() { return evaluationNumber; } + public long getParentEvaluationNumber() { + return parentEvaluationNumber; + } + + public int getOperationNumber() { + return operationNumber; + } + + public int getParentOperationNumber() { + return parentOperationNumber; + } + public int getDepth() { return depth; } - public String getName() { + public String getDescription() { return description; } + @Nullable + public String getSessionId() { + return sessionId; + } + public boolean isUser() { return isUser; } - public boolean isTopLevel() { + public boolean isQueryLevel() { + return operationNumber == NULL_INT; + } + + @SuppressWarnings("unused") + public boolean isTopLevelQuery() { + return isQueryLevel() && parentEvaluationNumber == NULL_LONG; + } + + @SuppressWarnings("unused") + public boolean isTopLevelOperation() { + // note that query level nuggets have depth == NULL_INT return depth == 0; } @@ -269,37 +404,17 @@ public String getCallerLine() { } /** - * @return nanoseconds elapsed, once state != QueryState.RUNNING() has been called. + * @return wall clock start time in nanoseconds from the epoch */ - public Long getTotalTimeNanos() { - return totalTimeNanos; + public long getStartClockEpochNanos() { + return startClockEpochNanos; } /** - * @return wall clock time in milliseconds from the epoch + * @return wall clock end time in nanoseconds from the epoch */ - public long getStartClockTime() { - return startClockTime; - } - - /** - * Get nanoseconds of CPU time attributed to the instrumented operation. - * - * @return The nanoseconds of CPU time attributed to the instrumented operation, or {@link QueryConstants#NULL_LONG} - * if not enabled/supported. - */ - public long getCpuNanos() { - return diffCpuNanos; - } - - /** - * Get nanoseconds of user mode CPU time attributed to the instrumented operation. - * - * @return The nanoseconds of user mode CPU time attributed to the instrumented operation, or - * {@link QueryConstants#NULL_LONG} if not enabled/supported. - */ - public long getUserCpuNanos() { - return diffUserCpuNanos; + public long getEndClockEpochNanos() { + return endClockEpochNanos; } /** @@ -324,7 +439,7 @@ public long getDiffFreeMemory() { } /** - * @return total (allocated high water mark) memory difference between time of completion and creation + * @return total (allocated high watermark) memory difference between time of completion and creation */ public long getDiffTotalMemory() { return endMemorySample.totalMemory - startMemorySample.totalMemory; @@ -345,26 +460,6 @@ public long getDiffCollectionTimeNanos() { .millisToNanos(endMemorySample.totalCollectionTimeMs - startMemorySample.totalCollectionTimeMs); } - /** - * Get bytes of allocated memory attributed to the instrumented operation. - * - * @return The bytes of allocated memory attributed to the instrumented operation, or - * {@link QueryConstants#NULL_LONG} if not enabled/supported. - */ - public long getAllocatedBytes() { - return diffAllocatedBytes; - } - - /** - * Get bytes of allocated pooled/reusable memory attributed to the instrumented operation. - * - * @return The bytes of allocated pooled/reusable memory attributed to the instrumented operation, or - * {@link QueryConstants#NULL_LONG} if not enabled/supported. 
- */ - public long getPoolAllocatedBytes() { - return diffPoolAllocatedBytes; - } - /** * @return true if this nugget was interrupted by an abort() call. */ @@ -375,54 +470,15 @@ public boolean wasInterrupted() { /** * Ensure this nugget gets logged, alongside its stack of nesting operations. */ - public void setShouldLogMeAndStackParents() { - shouldLogMeAndStackParents = true; + void setShouldLog() { + shouldLog = true; } /** * @return true if this nugget triggers the logging of itself and every other nugget in its stack of nesting * operations. */ - public boolean shouldLogMenAndStackParents() { - return shouldLogMeAndStackParents; - } - - /** - * When we track data from other threads that should be attributed to this operation, we tack extra - * BasePerformanceEntry values onto this nugget when it is closed. - * - * The CPU time, reads, and allocations are counted against this nugget. Wall clock time is ignored. - */ - public void addBaseEntry(BasePerformanceEntry baseEntry) { - if (this.basePerformanceEntry == null) { - this.basePerformanceEntry = baseEntry; - } else { - this.basePerformanceEntry.accumulate(baseEntry); - } - } - - /** - * Suppress de minimus performance nuggets using the properties defined above. - * - * @param isUninstrumented this nugget for uninstrumented code? If so the thresholds for inclusion in the logs are - * configured distinctly. - * - * @return if this nugget is significant enough to be logged. - */ - boolean shouldLogNugget(final boolean isUninstrumented) { - if (shouldLogMeAndStackParents) { - return true; - } - // Nuggets will have a null value for total time if they weren't closed for a RUNNING query; this is an abnormal - // condition and the nugget should be logged - if (getTotalTimeNanos() == null) { - return true; - } - - if (isUninstrumented) { - return UNINSTRUMENTED_LOG_THRESHOLD.shouldLog(getTotalTimeNanos()); - } else { - return LOG_THRESHOLD.shouldLog(getTotalTimeNanos()); - } + boolean shouldLog() { + return shouldLog; } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index f92c2b15a98..6a91ae9e201 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -3,428 +3,269 @@ */ package io.deephaven.engine.table.impl.perf; -import io.deephaven.base.verify.Assert; -import io.deephaven.configuration.Configuration; -import io.deephaven.datastructures.util.CollectionUtil; -import io.deephaven.chunk.util.pools.ChunkPoolInstrumentation; -import io.deephaven.engine.exceptions.CancellationException; -import io.deephaven.engine.table.Table; -import io.deephaven.engine.util.TableTools; -import io.deephaven.engine.updategraph.UpdateGraphLock; import io.deephaven.util.QueryConstants; +import io.deephaven.util.SafeCloseable; +import io.deephaven.util.annotations.FinalDefault; import io.deephaven.util.function.ThrowingRunnable; import io.deephaven.util.function.ThrowingSupplier; -import io.deephaven.util.profiling.ThreadProfiler; -import org.apache.commons.lang3.mutable.MutableLong; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; -import java.io.*; -import java.net.URL; import java.util.*; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; -import static 
io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.minus; -import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.plus; - /** * Query performance instrumentation tools. Manages a hierarchy of {@link QueryPerformanceNugget} instances. - *

- * Thread-safety note: This used to be thread-safe only by virtue of using a thread-local instance. Now it's - * aggressively synchronized so we can abort it from outside the "owner" thread. */ -public class QueryPerformanceRecorder implements Serializable { - - public static final String UNINSTRUMENTED_CODE_DESCRIPTION = "Uninstrumented code"; - - private static final long serialVersionUID = 2L; - private static final String[] packageFilters; - - private QueryPerformanceNugget queryNugget; - private final ArrayList operationNuggets = new ArrayList<>(); - - private QueryState state; - private transient QueryPerformanceNugget catchAllNugget; - private final transient Deque userNuggetStack = new ArrayDeque<>(); - - private static final AtomicInteger queriesProcessed = new AtomicInteger(0); - - private static final ThreadLocal theLocal = - ThreadLocal.withInitial(QueryPerformanceRecorder::new); - private static final ThreadLocal poolAllocatedBytes = ThreadLocal.withInitial( - () -> new MutableLong(ThreadProfiler.DEFAULT.memoryProfilingAvailable() ? 0L - : io.deephaven.util.QueryConstants.NULL_LONG)); - private static final ThreadLocal cachedCallsite = new ThreadLocal<>(); - - static { - final Configuration config = Configuration.getInstance(); - final Set filters = new HashSet<>(); - - final String propVal = config.getProperty("QueryPerformanceRecorder.packageFilter.internal"); - final URL path = QueryPerformanceRecorder.class.getResource("/" + propVal); - if (path == null) { - throw new RuntimeException("Can not locate package filter file " + propVal + " in classpath"); - } - - try (final BufferedReader reader = new BufferedReader(new InputStreamReader(path.openStream()))) { - String line; - while ((line = reader.readLine()) != null) { - if (!line.isEmpty()) { - filters.add(line); - } - } - } catch (IOException e) { - throw new UncheckedIOException("Error reading file " + propVal, e); - } +public interface QueryPerformanceRecorder { - packageFilters = filters.toArray(CollectionUtil.ZERO_LENGTH_STRING_ARRAY); - } + String UNINSTRUMENTED_CODE_DESCRIPTION = "Uninstrumented code"; - public static QueryPerformanceRecorder getInstance() { - return theLocal.get(); - } + ///////////////////////////////////// + // Core Engine Instrumentation API // + ///////////////////////////////////// - public static void resetInstance() { - // clear interrupted - because this is a good place to do it - no cancellation exception here though - // noinspection ResultOfMethodCallIgnored - Thread.interrupted(); - theLocal.remove(); + static QueryPerformanceRecorder getInstance() { + return QueryPerformanceRecorderState.getInstance(); } /** - * Start a query. - * - * @param description A description for the query. + * Create a nugget at the top of the user stack. May return a {@link QueryPerformanceNugget#DUMMY_NUGGET} if no + * recorder is installed. * - * @return a unique evaluation number to identify this query execution. - */ - public synchronized int startQuery(final String description) { - clear(); - final int evaluationNumber = queriesProcessed.getAndIncrement(); - queryNugget = new QueryPerformanceNugget(evaluationNumber, description); - state = QueryState.RUNNING; - startCatchAll(evaluationNumber); - return evaluationNumber; - } - - /** - * Abort a query. + * @param name the nugget name + * @return A new QueryPerformanceNugget to encapsulate user query operations. {@link QueryPerformanceNugget#close()} + * must be called on the nugget. 
*/ - public synchronized void abortQuery() { - if (state != QueryState.RUNNING) { - return; - } - state = QueryState.INTERRUPTED; - if (catchAllNugget != null) { - stopCatchAll(true); - } else { - while (!userNuggetStack.isEmpty()) { - userNuggetStack.peekLast().abort(this); - } - } - queryNugget.abort(this); + @FinalDefault + default QueryPerformanceNugget getNugget(@NotNull String name) { + return getNugget(name, QueryConstants.NULL_LONG); } /** - * Return the query's current state - * - * @return the query's state or null if it isn't initialized yet + * Create a nugget at the top of the user stack. May return a {@link QueryPerformanceNugget#DUMMY_NUGGET} if no + * recorder is installed. + * + * @param name the nugget name + * @param inputSize the nugget's input size + * @return A new QueryPerformanceNugget to encapsulate user query operations. {@link QueryPerformanceNugget#close()} + * must be called on the nugget. */ - public synchronized QueryState getState() { - return state; - } + QueryPerformanceNugget getNugget(@NotNull String name, long inputSize); /** - * End a query. + * This is the nugget enclosing the current operation. It may belong to the dummy recorder, or a real one. + * + * @return Either a "catch-all" nugget, or the top of the user nugget stack. */ - public synchronized boolean endQuery() { - if (state != QueryState.RUNNING) { - return false; - } - - state = QueryState.FINISHED; - Assert.neqNull(catchAllNugget, "catchAllNugget"); - Assert.neqNull(queryNugget, "queryNugget"); - stopCatchAll(false); - return queryNugget.done(this); - } + QueryPerformanceNugget getEnclosingNugget(); - private void startCatchAll(final int evaluationNumber) { - catchAllNugget = new QueryPerformanceNugget( - evaluationNumber, 0, UNINSTRUMENTED_CODE_DESCRIPTION, false, QueryConstants.NULL_LONG); - } - private void stopCatchAll(final boolean abort) { - final boolean shouldLog; - if (abort) { - shouldLog = catchAllNugget.abort(this); - } else { - shouldLog = catchAllNugget.done(this); - } - if (shouldLog) { - operationNuggets.add(catchAllNugget); - } - catchAllNugget = null; + interface QueryDataConsumer { + void accept(long evaluationNumber, int operationNumber, boolean uninstrumented); } /** - * @param name the nugget name - * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. + * Provide current query data via the consumer. + * + * @param consumer a callback to receive query data */ - public QueryPerformanceNugget getNugget(String name) { - return getNugget(name, QueryConstants.NULL_LONG); - } + void supplyQueryData(@NotNull QueryDataConsumer consumer); /** - * @param name the nugget name - * @param inputSize the nugget's input size - * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. + * @return The current callsite. This is the last set callsite or the line number of the user's detected callsite. 
*/ - public synchronized QueryPerformanceNugget getNugget(final String name, final long inputSize) { - if (state != QueryState.RUNNING) { - return QueryPerformanceNugget.DUMMY_NUGGET; - } - if (Thread.interrupted()) { - throw new CancellationException("interrupted in QueryPerformanceNugget"); - } - if (catchAllNugget != null) { - stopCatchAll(false); - } - final QueryPerformanceNugget nugget = new QueryPerformanceNugget( - queryNugget.getEvaluationNumber(), userNuggetStack.size(), - name, true, inputSize); - operationNuggets.add(nugget); - userNuggetStack.addLast(nugget); - return nugget; + static String getCallerLine() { + return QueryPerformanceRecorderState.getCallerLine(); } /** - * Note: Do not call this directly - it's for nugget use only. Call nugget.done(), instead. TODO: Reverse the - * disclaimer above - I think it's much better for the recorder to support done/abort(nugget), rather than - * continuing to have the nugget support done/abort(recorder). - * - * @param nugget the nugget to be released - * @return If the nugget passes criteria for logging. + * Attempt to set the thread local callsite so that invocations of {@link #getCallerLine()} will not spend time + * trying to recompute. + *

+ * This method returns whether the value was successfully set. When it returns true, it is the + * responsibility of the caller to invoke {@link #clearCallsite()} when the operation is complete. + *

+ * It is good practice to do this with a try/finally block + * +

+     * final boolean shouldClear = QueryPerformanceRecorder.setCallsite("CALLSITE");
+     * try {
+     *     // Do work
+     * } finally {
+     *     if (shouldClear) {
+     *         QueryPerformanceRecorder.clearCallsite();
+     *     }
+     * }
+     * 
+ * + * @param callsite The call site to use. + * + * @return true if successfully set, false otherwise */ - synchronized boolean releaseNugget(QueryPerformanceNugget nugget) { - boolean shouldLog = nugget.shouldLogNugget(nugget == catchAllNugget); - if (!nugget.isUser()) { - return shouldLog; - } - - final QueryPerformanceNugget removed = userNuggetStack.removeLast(); - if (nugget != removed) { - throw new IllegalStateException( - "Released query performance nugget " + nugget + " (" + System.identityHashCode(nugget) + - ") didn't match the top of the user nugget stack " + removed + " (" - + System.identityHashCode(removed) + - ") - did you follow the correct try/finally pattern?"); - } - - if (removed.shouldLogMenAndStackParents()) { - shouldLog = true; - if (userNuggetStack.size() > 0) { - userNuggetStack.getLast().setShouldLogMeAndStackParents(); - } - } - if (!shouldLog) { - // If we have filtered this nugget, by our filter design we will also have filtered any nuggets it encloses. - // This means it *must* be the last entry in operationNuggets, so we can safely remove it in O(1). - final QueryPerformanceNugget lastNugget = operationNuggets.remove(operationNuggets.size() - 1); - if (nugget != lastNugget) { - throw new IllegalStateException( - "Filtered query performance nugget " + nugget + " (" + System.identityHashCode(nugget) + - ") didn't match the last operation nugget " + lastNugget + " (" - + System.identityHashCode(lastNugget) + - ")"); - } - } - - if (userNuggetStack.isEmpty() && queryNugget != null && state == QueryState.RUNNING) { - startCatchAll(queryNugget.getEvaluationNumber()); - } - - return shouldLog; - } - - public interface EntrySetter { - void set(int evaluationNumber, int operationNumber, boolean uninstrumented); - } - - public synchronized QueryPerformanceNugget getOuterNugget() { - return userNuggetStack.peekLast(); + static boolean setCallsite(@NotNull final String callsite) { + return QueryPerformanceRecorderState.setCallsite(callsite); } - // returns true if uninstrumented code data was captured. - public void setQueryData(final EntrySetter setter) { - final int evaluationNumber; - final int operationNumber; - boolean uninstrumented = false; - synchronized (this) { - if (state != QueryState.RUNNING) { - setter.set(QueryConstants.NULL_INT, QueryConstants.NULL_INT, false); - return; - } - evaluationNumber = queryNugget.getEvaluationNumber(); - operationNumber = operationNuggets.size(); - if (operationNumber > 0) { - // ensure UPL and QOPL are consistent/joinable. - if (userNuggetStack.size() > 0) { - userNuggetStack.getLast().setShouldLogMeAndStackParents(); - } else { - uninstrumented = true; - if (catchAllNugget != null) { - catchAllNugget.setShouldLogMeAndStackParents(); - } - } - } - } - setter.set(evaluationNumber, operationNumber, uninstrumented); + /** + * Attempt to compute and set the thread local callsite so that invocations of {@link #getCallerLine()} will not + * spend time trying to recompute. + *

+ * Users should follow the best practice as described by {@link #setCallsite(String)} + * + * @return true if the callsite was computed and set. + */ + static boolean setCallsite() { + return QueryPerformanceRecorderState.setCallsite(); } - private void clear() { - queryNugget = null; - catchAllNugget = null; - operationNuggets.clear(); - userNuggetStack.clear(); + /** + * Clear any previously set callsite. See {@link #setCallsite(String)} + */ + static void clearCallsite() { + QueryPerformanceRecorderState.clearCallsite(); } - public synchronized QueryPerformanceNugget getQueryLevelPerformanceData() { - return queryNugget; - } + //////////////////////////////////////////// + // Server-Level Performance Recording API // + //////////////////////////////////////////// - public synchronized List getOperationLevelPerformanceData() { - return operationNuggets; + /** + * Construct a QueryPerformanceRecorder for a top-level query. + * + * @param description the query description + * @param nuggetFactory the nugget factory + * @return a new QueryPerformanceRecorder + */ + static QueryPerformanceRecorder newQuery( + @NotNull final String description, + @Nullable final String sessionId, + @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { + return new QueryPerformanceRecorderImpl(description, sessionId, null, nuggetFactory); } - @SuppressWarnings("unused") - public synchronized Table getTimingResultsAsTable() { - final int count = operationNuggets.size(); - final String[] names = new String[count]; - final Long[] timeNanos = new Long[count]; - final String[] callerLine = new String[count]; - final Boolean[] isTopLevel = new Boolean[count]; - final Boolean[] isCompileTime = new Boolean[count]; - - for (int i = 0; i < operationNuggets.size(); i++) { - timeNanos[i] = operationNuggets.get(i).getTotalTimeNanos(); - names[i] = operationNuggets.get(i).getName(); - callerLine[i] = operationNuggets.get(i).getCallerLine(); - isTopLevel[i] = operationNuggets.get(i).isTopLevel(); - isCompileTime[i] = operationNuggets.get(i).getName().startsWith("Compile:"); - } - return TableTools.newTable( - TableTools.col("names", names), - TableTools.col("line", callerLine), - TableTools.col("timeNanos", timeNanos), - TableTools.col("isTopLevel", isTopLevel), - TableTools.col("isCompileTime", isCompileTime)); + /** + * Construct a QueryPerformanceRecorder for a sub-level query. + * + * @param description the query description + * @param nuggetFactory the nugget factory + * @return a new QueryPerformanceRecorder + */ + static QueryPerformanceRecorder newSubQuery( + @NotNull final String description, + @Nullable final QueryPerformanceRecorder parent, + @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { + return new QueryPerformanceRecorderImpl(description, null, parent, nuggetFactory); } /** - * Install {@link QueryPerformanceRecorder#recordPoolAllocation(java.util.function.Supplier)} as the allocation - * recorder for {@link io.deephaven.chunk.util.pools.ChunkPool chunk pools}. + * Return the query's current state + * + * @return the query's state */ - public static void installPoolAllocationRecorder() { - ChunkPoolInstrumentation.setAllocationRecorder(QueryPerformanceRecorder::recordPoolAllocation); - } + QueryState getState(); /** - * Install this {@link QueryPerformanceRecorder} as the lock action recorder for {@link UpdateGraphLock}. + * Starts a query. + *

+ * A query is {@link QueryState#RUNNING RUNNING} if it has been started or {@link #resumeQuery() resumed} without a + * subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or {@link #abortQuery() abort}. + * + * @throws IllegalStateException if the query state isn't {@link QueryState#NOT_STARTED NOT_STARTED} or another + * query is running on this thread */ - public static void installUpdateGraphLockInstrumentation() { - UpdateGraphLock.installInstrumentation(new UpdateGraphLock.Instrumentation() { - - @Override - public void recordAction(@NotNull String description, @NotNull Runnable action) { - QueryPerformanceRecorder.withNugget(description, action::run); - } - - @Override - public void recordActionInterruptibly(@NotNull String description, - @NotNull ThrowingRunnable action) - throws InterruptedException { - QueryPerformanceRecorder.withNuggetThrowing(description, action); - } - }); - } + SafeCloseable startQuery(); /** - * Record a single-threaded operation's allocations as "pool" allocated memory attributable to the current thread. + * End a query. + *

+ * A query is {@link QueryState#RUNNING RUNNING} if it has been {@link #startQuery() started} or + * {@link #resumeQuery() resumed} without a subsequent end, {@link #suspendQuery() suspend}, or {@link #abortQuery() + * abort}. * - * @param operation The operation to record allocation for - * @return The result of the operation. + * @return whether the query should be logged + * @throws IllegalStateException if the query state isn't {@link QueryState#RUNNING RUNNING}, + * {@link QueryState#INTERRUPTED INTERRUPTED}, or was not running on this thread */ - public static RESULT_TYPE recordPoolAllocation(@NotNull final Supplier operation) { - final long startThreadAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); - try { - return operation.get(); - } finally { - final long endThreadAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); - final MutableLong poolAllocatedBytesForCurrentThread = poolAllocatedBytes.get(); - poolAllocatedBytesForCurrentThread.setValue(plus(poolAllocatedBytesForCurrentThread.longValue(), - minus(endThreadAllocatedBytes, startThreadAllocatedBytes))); - } - } + boolean endQuery(); /** - * Get the total bytes of pool-allocated memory attributed to this thread via - * {@link #recordPoolAllocation(Supplier)}. + * Suspends a query. + *

+ * A query is {@link QueryState#RUNNING RUNNING} if it has been {@link #startQuery() started} or + * {@link #resumeQuery() resumed} without a subsequent {@link #endQuery() end}, suspend, or {@link #abortQuery() + * abort}. + * + * @throws IllegalStateException if the query state isn't {@link QueryState#RUNNING RUNNING} or was not running on + * this thread + */ + void suspendQuery(); + + /** + * Resumes a suspended query. + *

+ * A query is {@link QueryState#RUNNING RUNNING} if it has been {@link #startQuery() started} or resumed without a + * subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or {@link #abortQuery() abort}. + * + * @throws IllegalStateException if the query state isn't {@link QueryState#SUSPENDED SUSPENDED} or another query is + * running on this thread + */ + SafeCloseable resumeQuery(); - if (callerLineCandidate == null) { - final StackTraceElement[] stack = (new Exception()).getStackTrace(); - for (int i = stack.length - 1; i > 0; i--) { - final String className = stack[i].getClassName(); + /** + * Abort a query. + *

+ * A query is {@link QueryState#RUNNING RUNNING} if it has been {@link #startQuery() started} or + * {@link #resumeQuery() resumed} without a subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or + * abort. + *

+ * Note that this method is invoked out-of-band and does not throw if the query has been completed. + */ + @SuppressWarnings("unused") + void abortQuery(); - if (className.startsWith("io.deephaven.engine.util.GroovyDeephavenSession")) { - callerLineCandidate = "Groovy Script"; - } else if (Arrays.stream(packageFilters).noneMatch(className::startsWith)) { - callerLineCandidate = stack[i].getFileName() + ":" + stack[i].getLineNumber(); - } - } - } + /** + * @return the query level performance data + */ + QueryPerformanceNugget getQueryLevelPerformanceData(); - return callerLineCandidate == null ? "Internal" : callerLineCandidate; - } + /** + * This getter should be called by exclusive owners of the recorder, and never concurrently with mutators. + * + * @return A list of loggable operation performance data. + */ + List getOperationLevelPerformanceData(); - /*------------------------------------------------------------------------------------------------------------------ - * TODO: the following execute-around methods might be better in a separate class or interface - */ + /** + * Accumulate performance information from another recorder into this one. The provided recorder will not be + * mutated. + * + * @param subQuery the recorder to accumulate into this + */ + void accumulate(@NotNull QueryPerformanceRecorder subQuery); - private static void finishAndClear(QueryPerformanceNugget nugget, boolean needClear) { - if (nugget != null) { - nugget.done(); - } + /** + * @return whether a sub-query was ever accumulated into this recorder + */ + @SuppressWarnings("unused") + boolean hasSubQueries(); - if (needClear) { - clearCallsite(); - } - } + /////////////////////////////////////////////////// + // Convenience Methods for Recording Performance // + /////////////////////////////////////////////////// /** * Surround the given code with a Performance Nugget - * + * * @param name the nugget name * @param r the stuff to run */ - public static void withNugget(final String name, final Runnable r) { + static void withNugget(final String name, final Runnable r) { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - - try { - nugget = getInstance().getNugget(name); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name)) { r.run(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } @@ -435,15 +276,12 @@ public static void withNugget(final String name, final Runnable r) { * @param r the stuff to run * @return the result of the stuff to run */ - public static T withNugget(final String name, final Supplier r) { + static T withNugget(final String name, final Supplier r) { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - - try { - nugget = getInstance().getNugget(name); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name)) { return r.get(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } @@ -453,16 +291,14 @@ public static T withNugget(final String name, final Supplier r) { * @param r the stuff to run * @throws T exception of type T */ - public static void withNuggetThrowing( + static void withNuggetThrowing( final String name, final ThrowingRunnable r) throws T { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - try { - nugget = getInstance().getNugget(name); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name)) { r.run(); } finally { - finishAndClear(nugget, needClear); + 
+            maybeClearCallsite(needClear);
         }
     }
@@ -474,33 +310,29 @@ public static void withNuggetThrowing(
      * @return the result of the stuff to run
      * @throws ExceptionType exception of type ExceptionType
      */
-    public static <R, ExceptionType extends Exception> R withNuggetThrowing(
+    static <R, ExceptionType extends Exception> R withNuggetThrowing(
            final String name,
            final ThrowingSupplier<R, ExceptionType> r) throws ExceptionType {
         final boolean needClear = setCallsite();
-        QueryPerformanceNugget nugget = null;
-        try {
-            nugget = getInstance().getNugget(name);
+        try (final QueryPerformanceNugget ignored = getInstance().getNugget(name)) {
            return r.get();
         } finally {
-            finishAndClear(nugget, needClear);
+            maybeClearCallsite(needClear);
         }
     }
     /**
      * Surround the given code with a Performance Nugget
-     *
+     *
      * @param name the nugget name
      * @param r the stuff to run
      */
-    public static void withNugget(final String name, final long inputSize, final Runnable r) {
+    static void withNugget(final String name, final long inputSize, final Runnable r) {
         final boolean needClear = setCallsite();
-        QueryPerformanceNugget nugget = null;
-        try {
-            nugget = getInstance().getNugget(name, inputSize);
+        try (final QueryPerformanceNugget ignored = getInstance().getNugget(name, inputSize)) {
            r.run();
         } finally {
-            finishAndClear(nugget, needClear);
+            maybeClearCallsite(needClear);
         }
     }
@@ -511,14 +343,12 @@ public static void withNugget(final String name, final long inputSize, final Runnable r) {
      * @param r the stuff to run
      * @return the result of the stuff to run
      */
-    public static <T> T withNugget(final String name, final long inputSize, final Supplier<T> r) {
+    static <T> T withNugget(final String name, final long inputSize, final Supplier<T> r) {
         final boolean needClear = setCallsite();
-        QueryPerformanceNugget nugget = null;
-        try {
-            nugget = getInstance().getNugget(name, inputSize);
+        try (final QueryPerformanceNugget ignored = getInstance().getNugget(name, inputSize)) {
            return r.get();
         } finally {
-            finishAndClear(nugget, needClear);
+            maybeClearCallsite(needClear);
         }
     }
@@ -529,17 +359,15 @@ public static <T> T withNugget(final String name, final long inputSize, final Supplier<T> r) {
      * @throws T exception of type T
      */
     @SuppressWarnings("unused")
-    public static <T extends Exception> void withNuggetThrowing(
+    static <T extends Exception> void withNuggetThrowing(
            final String name,
            final long inputSize,
            final ThrowingRunnable<T> r) throws T {
         final boolean needClear = setCallsite();
-        QueryPerformanceNugget nugget = null;
-        try {
-            nugget = getInstance().getNugget(name, inputSize);
+        try (final QueryPerformanceNugget ignored = getInstance().getNugget(name, inputSize)) {
            r.run();
         } finally {
-            finishAndClear(nugget, needClear);
+            maybeClearCallsite(needClear);
         }
     }
@@ -552,86 +380,26 @@ public static void withNuggetThrowing(
      * @param r the stuff to run
      * @return the result of the stuff to run
      * @throws ExceptionType exception of type ExceptionType
      */
     @SuppressWarnings("unused")
-    public static <R, ExceptionType extends Exception> R withNuggetThrowing(
+    static <R, ExceptionType extends Exception> R withNuggetThrowing(
            final String name,
            final long inputSize,
            final ThrowingSupplier<R, ExceptionType> r) throws ExceptionType {
         final boolean needClear = setCallsite();
-        QueryPerformanceNugget nugget = null;
-        try {
-            nugget = getInstance().getNugget(name, inputSize);
+        try (final QueryPerformanceNugget ignored = getInstance().getNugget(name, inputSize)) {
            return r.get();
         } finally {
-            finishAndClear(nugget, needClear);
+            maybeClearCallsite(needClear);
         }
     }
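All of these execute-around overloads follow one shape: capture the callsite if this frame is the first to set it, open a nugget for the duration of the user code via try-with-resources, and clear the callsite only if this frame set it. A minimal caller-side sketch of the two common flavors (the nugget names and work are made up for illustration):

    import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;

    final class WithNuggetSketch {
        static long example() {
            // Runnable flavor: the nugget brackets the block and is closed automatically.
            QueryPerformanceRecorder.withNugget("Example: side effects", () -> {
                // ... instrumented work ...
            });
            // Supplier flavor: the instrumented block's result is passed through to the caller.
            return QueryPerformanceRecorder.withNugget("Example: compute a value", () -> 42L);
        }
    }

Note that because these methods now live on the QueryPerformanceRecorder interface, dropping the explicit public modifier does not change their visibility; interface members are implicitly public.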

    /**
-     *
-     * Attempt to set the thread local callsite so that invocations of {@link #getCallerLine()} will not spend time
-     * trying to recompute.
-     *

- * - *

- * This method returns a boolean if the value was successfully set. In the event this returns true, it's the - * responsibility of the caller to invoke {@link #clearCallsite()} when the operation is complete. - *

- * - *

- * It is good practice to do this with try{} finally{} block - *

+ * Clear the callsite if needed. * - *
-     * final boolean shouldClear = QueryPerformanceRecorder.setCallsite("CALLSITE");
-     * try {
-     *     // Do work
-     * } finally {
-     *     if (shouldClear) {
-     *         QueryPerformanceRecorder.clearCallsite();
-     *     }
-     * }
-     * 
- * - * @param callsite The call site to use. - * - * @return true if successfully set, false otherwise/ + * @param needClear true if the callsite needs to be cleared */ - public static boolean setCallsite(String callsite) { - if (cachedCallsite.get() == null) { - cachedCallsite.set(callsite); - return true; - } - - return false; - } - - /** - *

- * Attempt to compute and set the thread local callsite so that invocations of {@link #getCallerLine()} will not - * spend time trying to recompute. - *

- * - *

- * Users should follow the best practice as described by {@link #setCallsite(String)} - *

- * - * @return true if the callsite was computed and set. - */ - public static boolean setCallsite() { - // This is very similar to the other getCallsite, but we don't want to invoke getCallerLine() unless we - // really need to. - if (cachedCallsite.get() == null) { - cachedCallsite.set(getCallerLine()); - return true; + private static void maybeClearCallsite(final boolean needClear) { + if (needClear) { + clearCallsite(); } - - return false; - } - - /** - * Clear any previously set callsite. See {@link #setCallsite(String)} - */ - public static void clearCallsite() { - cachedCallsite.remove(); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java new file mode 100644 index 00000000000..1900a70bcdf --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -0,0 +1,344 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.engine.table.impl.perf; + +import io.deephaven.base.verify.Assert; +import io.deephaven.engine.exceptions.CancellationException; +import io.deephaven.util.SafeCloseable; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import java.util.*; + +import static io.deephaven.util.QueryConstants.NULL_LONG; + +/** + * Query performance instrumentation implementation. Manages a hierarchy of {@link QueryPerformanceNugget} instances. + *

+ * Many methods are synchronized to (1) support external aborts of the query and (2) support scenarios where the
+ * query is suspended and resumed on another thread.
+ */
+public class QueryPerformanceRecorderImpl implements QueryPerformanceRecorder {
+    private static final QueryPerformanceLogThreshold LOG_THRESHOLD = new QueryPerformanceLogThreshold("", 1_000_000);
+    private static final QueryPerformanceLogThreshold UNINSTRUMENTED_LOG_THRESHOLD =
+            new QueryPerformanceLogThreshold("Uninstrumented", 1_000_000_000);
+
+    @Nullable
+    private final QueryPerformanceRecorder parent;
+    private final QueryPerformanceNugget queryNugget;
+    private final QueryPerformanceNugget.Factory nuggetFactory;
+    private final ArrayList<QueryPerformanceNugget> operationNuggets = new ArrayList<>();
+    private final Deque<QueryPerformanceNugget> userNuggetStack = new ArrayDeque<>();
+
+    private QueryState state = QueryState.NOT_STARTED;
+    private volatile boolean hasSubQueries;
+    private QueryPerformanceNugget catchAllNugget;
+
+    /**
+     * Constructs a QueryPerformanceRecorderImpl.
+     *
+     * @param description a description for the query
+     * @param sessionId the session id, if any
+     * @param parent the parent query if it exists
+     * @param nuggetFactory the factory to use for creating new nuggets
+     */
+    QueryPerformanceRecorderImpl(
+            @NotNull final String description,
+            @Nullable final String sessionId,
+            @Nullable final QueryPerformanceRecorder parent,
+            @NotNull final QueryPerformanceNugget.Factory nuggetFactory) {
+        if (parent == null) {
+            queryNugget = nuggetFactory.createForQuery(
+                    QueryPerformanceRecorderState.QUERIES_PROCESSED.getAndIncrement(), description, sessionId,
+                    this::releaseNugget);
+        } else {
+            queryNugget = nuggetFactory.createForSubQuery(
+                    parent.getQueryLevelPerformanceData(),
+                    QueryPerformanceRecorderState.QUERIES_PROCESSED.getAndIncrement(), description,
+                    this::releaseNugget);
+        }
+        this.parent = parent;
+        this.nuggetFactory = nuggetFactory;
+    }
+
+    @Override
+    public synchronized void abortQuery() {
+        // TODO (https://github.com/deephaven/deephaven-core/issues/53): support out-of-order abort
+        if (state != QueryState.RUNNING) {
+            return;
+        }
+        state = QueryState.INTERRUPTED;
+        if (catchAllNugget != null) {
+            stopCatchAll(true);
+        } else {
+            while (!userNuggetStack.isEmpty()) {
+                userNuggetStack.peekLast().abort();
+            }
+        }
+        queryNugget.abort();
+    }
+
+    /**
+     * Return the query's current state.
+     *
+     * @return the query's state
+     */
+    @Override
+    public synchronized QueryState getState() {
+        return state;
+    }
+
+    @Override
+    public synchronized SafeCloseable startQuery() {
+        if (state != QueryState.NOT_STARTED) {
+            throw new IllegalStateException("Can't start a query that has already started");
+        }
+        return resumeInternal();
+    }
+
+    @Override
+    public synchronized boolean endQuery() {
+        if (state != QueryState.RUNNING) {
+            if (state != QueryState.INTERRUPTED) {
+                // We only allow the query to be RUNNING or INTERRUPTED when we end it; else we are in an illegal state.
+                throw new IllegalStateException("Can't end a query that isn't running or interrupted");
+            }
+            return false;
+        }
+        state = QueryState.FINISHED;
+        suspendInternal();
+
+        queryNugget.close();
+        if (parent != null) {
+            parent.accumulate(this);
+        }
+        return shouldLogNugget(queryNugget) || !operationNuggets.isEmpty() || hasSubQueries;
+    }
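The start/end contract above is easiest to see end-to-end. The following is an illustrative driver only (not the engine's actual session plumbing), and it assumes the recorder was just constructed and is not yet installed on any thread:

    import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
    import io.deephaven.util.SafeCloseable;

    final class QueryLifecycleSketch {
        // Returns true if the query produced something worth logging.
        static boolean runToCompletion(final QueryPerformanceRecorder recorder, final Runnable userCode) {
            try (final SafeCloseable ignored = recorder.startQuery()) {
                userCode.run();
                // endQuery() uninstalls the thread-local state itself and accumulates
                // the query-level nugget into the parent recorder, if one exists.
                return recorder.endQuery();
            }
            // If userCode throws, the closeable still resets the thread-local state; the
            // recorder is left RUNNING and would normally be cleaned up via abortQuery().
        }
    }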

+
+    /**
+     * Suspends a query.
+     * <p>
+     * This resets the thread local and assumes that this performance nugget may be resumed on another thread.
+     */
+    public synchronized void suspendQuery() {
+        if (state != QueryState.RUNNING) {
+            throw new IllegalStateException("Can't suspend a query that isn't running");
+        }
+        state = QueryState.SUSPENDED;
+        suspendInternal();
+        queryNugget.onBaseEntryEnd();
+    }
+
+    private void suspendInternal() {
+        final QueryPerformanceRecorder threadLocalInstance = QueryPerformanceRecorderState.getInstance();
+        if (threadLocalInstance != this) {
+            throw new IllegalStateException("Can't suspend a query that doesn't belong to this thread");
+        }
+
+        Assert.neqNull(catchAllNugget, "catchAllNugget");
+        stopCatchAll(false);
+
+        // uninstall this instance from the thread local
+        QueryPerformanceRecorderState.resetInstance();
+    }
+
+    /**
+     * Resumes a suspended query.
+     *

+     * It is an error to resume a query while another query is running on this thread.
+     *
+     * @return a SafeCloseable that resets this thread's recorder state when closed
+     */
+    public synchronized SafeCloseable resumeQuery() {
+        if (state != QueryState.SUSPENDED) {
+            throw new IllegalStateException("Can't resume a query that isn't suspended");
+        }
+
+        return resumeInternal();
+    }
+
+    private SafeCloseable resumeInternal() {
+        final QueryPerformanceRecorder threadLocalInstance = QueryPerformanceRecorderState.getInstance();
+        if (threadLocalInstance != QueryPerformanceRecorderState.DUMMY_RECORDER) {
+            throw new IllegalStateException("Can't resume a query while another query is in operation");
+        }
+        QueryPerformanceRecorderState.THE_LOCAL.set(this);
+
+        queryNugget.onBaseEntryStart();
+        state = QueryState.RUNNING;
+        Assert.eqNull(catchAllNugget, "catchAllNugget");
+        startCatchAll();
+
+        return QueryPerformanceRecorderState::resetInstance;
+    }
+
+    private void startCatchAll() {
+        catchAllNugget = nuggetFactory.createForCatchAll(queryNugget, operationNuggets.size(), this::releaseNugget);
+        catchAllNugget.onBaseEntryStart();
+    }
+
+    private void stopCatchAll(final boolean abort) {
+        if (abort) {
+            catchAllNugget.abort();
+        } else {
+            catchAllNugget.close();
+        }
+        if (catchAllNugget.shouldLog()) {
+            Assert.eq(operationNuggets.size(), "operationNuggets.size()",
+                    catchAllNugget.getOperationNumber(), "catchAllNugget.getOperationNumber()");
+            operationNuggets.add(catchAllNugget);
+        }
+        catchAllNugget = null;
+    }
+
+    /**
+     * @param name the nugget name
+     * @param inputSize the nugget's input size
+     * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget.
+     */
+    public synchronized QueryPerformanceNugget getNugget(@NotNull final String name, final long inputSize) {
+        Assert.eq(state, "state", QueryState.RUNNING, "QueryState.RUNNING");
+        if (Thread.interrupted()) {
+            throw new CancellationException("interrupted in QueryPerformanceNugget");
+        }
+        if (catchAllNugget != null) {
+            stopCatchAll(false);
+        }
+
+        final QueryPerformanceNugget parent;
+        if (userNuggetStack.isEmpty()) {
+            parent = queryNugget;
+        } else {
+            parent = userNuggetStack.peekLast();
+            parent.onBaseEntryEnd();
+        }
+
+        final QueryPerformanceNugget nugget = nuggetFactory.createForOperation(
+                parent, operationNuggets.size(), name, inputSize, this::releaseNugget);
+        nugget.onBaseEntryStart();
+        operationNuggets.add(nugget);
+        userNuggetStack.addLast(nugget);
+        return nugget;
+    }
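suspendQuery() and resumeQuery() exist so that accounting can follow a query across threads. A minimal sketch of that hand-off, assuming the caller owns the recorder and that error handling lives elsewhere:

    import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
    import io.deephaven.util.SafeCloseable;
    import java.util.concurrent.Executor;

    final class SuspendResumeSketch {
        static void handOff(final QueryPerformanceRecorder recorder, final Executor executor,
                final Runnable remainingWork) {
            recorder.suspendQuery(); // stops the catch-all nugget and uninstalls the thread local
            executor.execute(() -> {
                try (final SafeCloseable ignored = recorder.resumeQuery()) {
                    remainingWork.run(); // accounted against the same query-level nugget
                    recorder.suspendQuery(); // or endQuery() once the query is complete
                }
            });
        }
    }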
+
+    /**
+     * This is our onCloseCallback from the nugget.
+     *
+     * @param nugget the nugget to be released
+     */
+    private synchronized void releaseNugget(@NotNull final QueryPerformanceNugget nugget) {
+        final boolean shouldLog = shouldLogNugget(nugget);
+        if (!nugget.isUser()) {
+            return;
+        }
+
+        final QueryPerformanceNugget removed = userNuggetStack.removeLast();
+        if (nugget != removed) {
+            throw new IllegalStateException(
+                    "Released query performance nugget " + nugget + " (" + System.identityHashCode(nugget)
+                            + ") didn't match the top of the user nugget stack " + removed + " ("
+                            + System.identityHashCode(removed)
+                            + ") - did you follow the correct try/finally pattern?");
+        }
+
+        // accumulate into the parent and resume it
+        if (!userNuggetStack.isEmpty()) {
+            final QueryPerformanceNugget parent = userNuggetStack.getLast();
+            parent.accumulate(nugget);
+
+            if (shouldLog) {
+                parent.setShouldLog();
+            }
+
+            // resume the parent
+            parent.onBaseEntryStart();
+        } else {
+            queryNugget.accumulate(nugget);
+        }
+
+        if (!shouldLog) {
+            // If we have filtered this nugget, by our filter design we will also have filtered any nuggets it encloses.
+            // This means it *must* be the last entry in operationNuggets, so we can safely remove it in O(1).
+            final QueryPerformanceNugget lastNugget = operationNuggets.remove(operationNuggets.size() - 1);
+            if (nugget != lastNugget) {
+                throw new IllegalStateException(
+                        "Filtered query performance nugget " + nugget + " (" + System.identityHashCode(nugget)
+                                + ") didn't match the last operation nugget " + lastNugget + " ("
+                                + System.identityHashCode(lastNugget)
+                                + ")");
+            }
+        }
+
+        if (userNuggetStack.isEmpty() && queryNugget != null && state == QueryState.RUNNING) {
+            startCatchAll();
+        }
+    }
+
+    private boolean shouldLogNugget(@NotNull QueryPerformanceNugget nugget) {
+        if (nugget.shouldLog()) {
+            return true;
+        } else if (nugget.getEndClockEpochNanos() == NULL_LONG) {
+            // Nuggets will have a null value for end time if they weren't closed for a RUNNING query; this is an
+            // abnormal condition and the nugget should be logged
+            return true;
+        } else if (nugget == catchAllNugget) {
+            return UNINSTRUMENTED_LOG_THRESHOLD.shouldLog(nugget.getUsageNanos());
+        } else {
+            return LOG_THRESHOLD.shouldLog(nugget.getUsageNanos());
+        }
+    }
+
+    @Override
+    public synchronized QueryPerformanceNugget getEnclosingNugget() {
+        if (userNuggetStack.isEmpty()) {
+            Assert.neqNull(catchAllNugget, "catchAllNugget");
+            return catchAllNugget;
+        }
+        return userNuggetStack.peekLast();
+    }
+
+    @Override
+    public void supplyQueryData(final @NotNull QueryDataConsumer consumer) {
+        final long evaluationNumber;
+        final int operationNumber;
+        boolean uninstrumented = false;
+        synchronized (this) {
+            // we should never be called if we're not running
+            Assert.eq(state, "state", QueryState.RUNNING, "QueryState.RUNNING");
+            evaluationNumber = queryNugget.getEvaluationNumber();
+            operationNumber = operationNuggets.size();
+            if (operationNumber > 0) {
+                // ensure UPL and QOPL are consistent/joinable.
+                if (!userNuggetStack.isEmpty()) {
+                    userNuggetStack.getLast().setShouldLog();
+                } else {
+                    uninstrumented = true;
+                    Assert.neqNull(catchAllNugget, "catchAllNugget");
+                    catchAllNugget.setShouldLog();
+                }
+            }
+        }
+        consumer.accept(evaluationNumber, operationNumber, uninstrumented);
+    }
+
+    @Override
+    public QueryPerformanceNugget getQueryLevelPerformanceData() {
+        return queryNugget;
+    }
+
+    @Override
+    public List<QueryPerformanceNugget> getOperationLevelPerformanceData() {
+        return operationNuggets;
+    }
+
+    @Override
+    public void accumulate(@NotNull final QueryPerformanceRecorder subQuery) {
+        hasSubQueries = true;
+        queryNugget.accumulate(subQuery.getQueryLevelPerformanceData());
+    }
+
+    @Override
+    public boolean hasSubQueries() {
+        return hasSubQueries;
+    }
+}
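Note the contrast with the old getOuterNugget() call sites: getEnclosingNugget() never returns null while a query is RUNNING, because there is always either an open user nugget or the catch-all. That is what lets callers fold a worker thread's BasePerformanceEntry back in without a null check, as the UpdateBy and job-scheduler hunks below do. A sketch of the pattern:

    import io.deephaven.engine.table.impl.perf.BasePerformanceEntry;
    import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;

    final class AccumulateSketch {
        static void foldBack(final BasePerformanceEntry workerEntry) {
            if (workerEntry != null) {
                // Either the innermost user nugget or the catch-all nugget.
                QueryPerformanceRecorder.getInstance().getEnclosingNugget().accumulate(workerEntry);
            }
        }
    }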
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java new file mode 100644 index 00000000000..958cbf4ac0f --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java
@@ -0,0 +1,269 @@
+/**
+ * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
+ */
+package io.deephaven.engine.table.impl.perf;
+
+import io.deephaven.chunk.util.pools.ChunkPoolInstrumentation;
+import io.deephaven.configuration.Configuration;
+import io.deephaven.datastructures.util.CollectionUtil;
+import io.deephaven.engine.updategraph.UpdateGraphLock;
+import io.deephaven.util.QueryConstants;
+import io.deephaven.util.SafeCloseable;
+import io.deephaven.util.function.ThrowingRunnable;
+import io.deephaven.util.profiling.ThreadProfiler;
+import org.apache.commons.lang3.mutable.MutableLong;
+import org.jetbrains.annotations.NotNull;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.UncheckedIOException;
+import java.net.URL;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+
+import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.minus;
+import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.plus;
+
+public abstract class QueryPerformanceRecorderState {
+
+    static final QueryPerformanceRecorder DUMMY_RECORDER = new DummyQueryPerformanceRecorder();
+    static final AtomicLong QUERIES_PROCESSED = new AtomicLong(0);
+    static final ThreadLocal<QueryPerformanceRecorder> THE_LOCAL = ThreadLocal.withInitial(() -> DUMMY_RECORDER);
+
+    private static final String[] PACKAGE_FILTERS;
+    private static final ThreadLocal<String> CACHED_CALLSITE = new ThreadLocal<>();
+    private static final ThreadLocal<MutableLong> POOL_ALLOCATED_BYTES = ThreadLocal.withInitial(
+            () -> new MutableLong(ThreadProfiler.DEFAULT.memoryProfilingAvailable()
+                    ? 0L
+                    : io.deephaven.util.QueryConstants.NULL_LONG));
+
+    static {
+        // initialize the packages to skip when determining the callsite
+
+        final Configuration config = Configuration.getInstance();
+        final Set<String> filters = new HashSet<>();
+
+        final String propVal = config.getProperty("QueryPerformanceRecorder.packageFilter.internal");
+        final URL path = QueryPerformanceRecorder.class.getResource("/" + propVal);
+        if (path == null) {
+            throw new RuntimeException("Can not locate package filter file " + propVal + " in classpath");
+        }
+
+        try (final BufferedReader reader = new BufferedReader(new InputStreamReader(path.openStream()))) {
+            String line;
+            while ((line = reader.readLine()) != null) {
+                if (!line.isEmpty()) {
+                    filters.add(line);
+                }
+            }
+        } catch (IOException e) {
+            throw new UncheckedIOException("Error reading file " + propVal, e);
+        }
+
+        PACKAGE_FILTERS = filters.toArray(CollectionUtil.ZERO_LENGTH_STRING_ARRAY);
+    }
+
+    private QueryPerformanceRecorderState() {
+        throw new UnsupportedOperationException("static use only");
+    }
+
+    public static QueryPerformanceRecorder getInstance() {
+        return THE_LOCAL.get();
+    }
+
+    static void resetInstance() {
+        // clear interrupted - because this is a good place to do it - no cancellation exception here though
+        // noinspection ResultOfMethodCallIgnored
+        Thread.interrupted();
+        THE_LOCAL.remove();
+    }
+
+    /**
+     * Install {@link QueryPerformanceRecorderState#recordPoolAllocation(java.util.function.Supplier)} as the allocation
+     * recorder for {@link io.deephaven.chunk.util.pools.ChunkPool chunk pools}.
+     */
+    public static void installPoolAllocationRecorder() {
+        ChunkPoolInstrumentation.setAllocationRecorder(QueryPerformanceRecorderState::recordPoolAllocation);
+    }
+
+    /**
+     * Use nuggets from the current {@link QueryPerformanceRecorder} as the lock action recorder for
+     * {@link UpdateGraphLock}.
+     */
+    public static void installUpdateGraphLockInstrumentation() {
+        UpdateGraphLock.installInstrumentation(new UpdateGraphLock.Instrumentation() {
+
+            @Override
+            public void recordAction(@NotNull final String description, @NotNull final Runnable action) {
+                QueryPerformanceRecorder.withNugget(description, action);
+            }
+
+            @Override
+            public void recordActionInterruptibly(
+                    @NotNull final String description,
+                    @NotNull final ThrowingRunnable<InterruptedException> action) throws InterruptedException {
+                QueryPerformanceRecorder.withNuggetThrowing(description, action);
+            }
+        });
+    }
+
+    /**
+     * Record a single-threaded operation's allocations as "pool" allocated memory attributable to the current thread.
+     *
+     * @param operation The operation to record allocation for
+     * @return The result of the operation.
+     */
+    private static <RESULT_TYPE> RESULT_TYPE recordPoolAllocation(@NotNull final Supplier<RESULT_TYPE> operation) {
+        final long startThreadAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes();
+        try {
+            return operation.get();
+        } finally {
+            final long endThreadAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes();
+            final MutableLong poolAllocatedBytesForCurrentThread = POOL_ALLOCATED_BYTES.get();
+            poolAllocatedBytesForCurrentThread.setValue(plus(poolAllocatedBytesForCurrentThread.longValue(),
+                    minus(endThreadAllocatedBytes, startThreadAllocatedBytes)));
+        }
+    }
+
+    /**
+     * Get the total bytes of pool-allocated memory attributed to this thread via
+     * {@link #recordPoolAllocation(Supplier)}.
+     *
+     * @return The total bytes of pool-allocated memory attributed to this thread.
+ */ + static long getPoolAllocatedBytesForCurrentThread() { + return POOL_ALLOCATED_BYTES.get().longValue(); + } + + /** + * See {@link QueryPerformanceRecorder#getCallerLine()}. + */ + static String getCallerLine() { + String callerLineCandidate = CACHED_CALLSITE.get(); + + if (callerLineCandidate == null) { + final StackTraceElement[] stack = (new Exception()).getStackTrace(); + for (int i = stack.length - 1; i > 0; i--) { + final String className = stack[i].getClassName(); + + if (className.startsWith("io.deephaven.engine.util.GroovyDeephavenSession")) { + callerLineCandidate = "Groovy Script"; + } else if (Arrays.stream(PACKAGE_FILTERS).noneMatch(className::startsWith)) { + callerLineCandidate = stack[i].getFileName() + ":" + stack[i].getLineNumber(); + } + } + } + + return callerLineCandidate == null ? "Internal" : callerLineCandidate; + } + + /** + * See {@link QueryPerformanceRecorder#setCallsite(String)}. + */ + static boolean setCallsite(String callsite) { + if (CACHED_CALLSITE.get() == null) { + CACHED_CALLSITE.set(callsite); + return true; + } + + return false; + } + + /** + * See {@link QueryPerformanceRecorder#setCallsite()}. + */ + static boolean setCallsite() { + // This is very similar to the other setCallsite overload, but we don't want to invoke getCallerLine() unless we + // really need to. + if (CACHED_CALLSITE.get() == null) { + CACHED_CALLSITE.set(getCallerLine()); + return true; + } + + return false; + } + + /** + * Clear any previously set callsite. See {@link #setCallsite(String)} + */ + static void clearCallsite() { + CACHED_CALLSITE.remove(); + } + + /** + * Dummy recorder for use when no recorder is installed. + */ + private static class DummyQueryPerformanceRecorder implements QueryPerformanceRecorder { + + @Override + public QueryPerformanceNugget getNugget(@NotNull final String name, long inputSize) { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + public QueryPerformanceNugget getEnclosingNugget() { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + public void supplyQueryData(final @NotNull QueryDataConsumer consumer) { + consumer.accept(QueryConstants.NULL_LONG, QueryConstants.NULL_INT, false); + } + + @Override + public QueryPerformanceNugget getQueryLevelPerformanceData() { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + public List getOperationLevelPerformanceData() { + return Collections.emptyList(); + } + + @Override + public void accumulate(@NotNull QueryPerformanceRecorder subQuery) { + // no-op + } + + @Override + public boolean hasSubQueries() { + return false; + } + + @Override + public QueryState getState() { + throw new UnsupportedOperationException("Dummy recorder does not support getState()"); + } + + @Override + public SafeCloseable startQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support startQuery()"); + } + + @Override + public boolean endQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support endQuery()"); + } + + @Override + public void suspendQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support suspendQuery()"); + } + + @Override + public SafeCloseable resumeQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support resumeQuery()"); + } + + @Override + public void abortQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support abortQuery()"); + } + } +} diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java deleted file mode 100644 index afb475ec9ce..00000000000 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.engine.table.impl.perf; - -import io.deephaven.util.QueryConstants; - -import java.io.Serializable; - -public class QueryProcessingResults implements Serializable { - - private static final long serialVersionUID = 2L; - - private final QueryPerformanceRecorder recorder; - - private volatile Boolean isReplayer = QueryConstants.NULL_BOOLEAN; - private volatile String exception = null; - - - public QueryProcessingResults(final QueryPerformanceRecorder recorder) { - this.recorder = recorder; - } - - public Boolean isReplayer() { - return isReplayer; - } - - public void setReplayer(Boolean replayer) { - isReplayer = replayer; - } - - public String getException() { - return exception; - } - - public void setException(String exception) { - this.exception = exception; - } - - public QueryPerformanceRecorder getRecorder() { - return recorder; - } -} diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java index baa9341e116..8585b970436 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java @@ -5,5 +5,5 @@ public enum QueryState { - RUNNING, FINISHED, INTERRUPTED + NOT_STARTED, RUNNING, FINISHED, SUSPENDED, INTERRUPTED } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java index 268ac0ba3ec..4139aefabdf 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java @@ -9,7 +9,6 @@ import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.impl.perf.UpdatePerformanceTracker.IntervalLevelDetails; import io.deephaven.engine.table.impl.sources.ArrayBackedColumnSource; -import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.stream.StreamChunkUtils; import io.deephaven.stream.StreamConsumer; import io.deephaven.stream.StreamPublisher; @@ -20,9 +19,8 @@ class UpdatePerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( - ColumnDefinition.ofString("ProcessUniqueId"), - ColumnDefinition.ofInt("EntryId"), - ColumnDefinition.ofInt("EvaluationNumber"), + ColumnDefinition.ofLong("EntryId"), + ColumnDefinition.ofLong("EvaluationNumber"), ColumnDefinition.ofInt("OperationNumber"), ColumnDefinition.ofString("EntryDescription"), ColumnDefinition.ofString("EntryCallerLine"), @@ -68,33 +66,57 @@ public void register(@NotNull StreamConsumer consumer) { } public synchronized void add(IntervalLevelDetails intervalLevelDetails, PerformanceEntry performanceEntry) { - chunks[0].asWritableObjectChunk().add(EngineMetrics.getProcessInfo().getId().value()); - 
chunks[1].asWritableIntChunk().add(performanceEntry.getId());
-        chunks[2].asWritableIntChunk().add(performanceEntry.getEvaluationNumber());
-        chunks[3].asWritableIntChunk().add(performanceEntry.getOperationNumber());
-        chunks[4].asWritableObjectChunk().add(performanceEntry.getDescription());
-        chunks[5].asWritableObjectChunk().add(performanceEntry.getCallerLine());
-        chunks[6].asWritableLongChunk()
+        // ColumnDefinition.ofLong("EntryId"),
+        chunks[0].asWritableLongChunk().add(performanceEntry.getId());
+        // ColumnDefinition.ofLong("EvaluationNumber"),
+        chunks[1].asWritableLongChunk().add(performanceEntry.getEvaluationNumber());
+        // ColumnDefinition.ofInt("OperationNumber"),
+        chunks[2].asWritableIntChunk().add(performanceEntry.getOperationNumber());
+        // ColumnDefinition.ofString("EntryDescription"),
+        chunks[3].asWritableObjectChunk().add(performanceEntry.getDescription());
+        // ColumnDefinition.ofString("EntryCallerLine"),
+        chunks[4].asWritableObjectChunk().add(performanceEntry.getCallerLine());
+        // ColumnDefinition.ofTime("IntervalStartTime"),
+        chunks[5].asWritableLongChunk()
                .add(DateTimeUtils.millisToNanos(intervalLevelDetails.getIntervalStartTimeMillis()));
-        chunks[7].asWritableLongChunk()
+        // ColumnDefinition.ofTime("IntervalEndTime"),
+        chunks[6].asWritableLongChunk()
                .add(DateTimeUtils.millisToNanos(intervalLevelDetails.getIntervalEndTimeMillis()));
-        chunks[8].asWritableLongChunk().add(intervalLevelDetails.getIntervalDurationNanos());
-        chunks[9].asWritableLongChunk().add(performanceEntry.getIntervalUsageNanos());
-        chunks[10].asWritableLongChunk().add(performanceEntry.getIntervalCpuNanos());
-        chunks[11].asWritableLongChunk().add(performanceEntry.getIntervalUserCpuNanos());
-        chunks[12].asWritableLongChunk().add(performanceEntry.getIntervalAdded());
-        chunks[13].asWritableLongChunk().add(performanceEntry.getIntervalRemoved());
-        chunks[14].asWritableLongChunk().add(performanceEntry.getIntervalModified());
-        chunks[15].asWritableLongChunk().add(performanceEntry.getIntervalShifted());
-        chunks[16].asWritableLongChunk().add(performanceEntry.getIntervalInvocationCount());
-        chunks[17].asWritableLongChunk().add(performanceEntry.getMinFreeMemory());
-        chunks[18].asWritableLongChunk().add(performanceEntry.getMaxTotalMemory());
-        chunks[19].asWritableLongChunk().add(performanceEntry.getCollections());
-        chunks[20].asWritableLongChunk().add(performanceEntry.getCollectionTimeNanos());
-        chunks[21].asWritableLongChunk().add(performanceEntry.getIntervalAllocatedBytes());
-        chunks[22].asWritableLongChunk().add(performanceEntry.getIntervalPoolAllocatedBytes());
-        chunks[23].asWritableObjectChunk().add(Objects.toString(performanceEntry.getAuthContext()));
-        chunks[24].asWritableObjectChunk().add(Objects.toString(performanceEntry.getUpdateGraphName()));
+        // ColumnDefinition.ofLong("IntervalDurationNanos"),
+        chunks[7].asWritableLongChunk().add(intervalLevelDetails.getIntervalDurationNanos());
+        // ColumnDefinition.ofLong("EntryIntervalUsage"),
+        chunks[8].asWritableLongChunk().add(performanceEntry.getUsageNanos());
+        // ColumnDefinition.ofLong("EntryIntervalCpuNanos"),
+        chunks[9].asWritableLongChunk().add(performanceEntry.getCpuNanos());
+        // ColumnDefinition.ofLong("EntryIntervalUserCpuNanos"),
+        chunks[10].asWritableLongChunk().add(performanceEntry.getUserCpuNanos());
+        // ColumnDefinition.ofLong("EntryIntervalAdded"),
+        chunks[11].asWritableLongChunk().add(performanceEntry.getIntervalAdded());
+        // ColumnDefinition.ofLong("EntryIntervalRemoved"),
+
chunks[12].asWritableLongChunk().add(performanceEntry.getIntervalRemoved()); + // ColumnDefinition.ofLong("EntryIntervalModified"), + chunks[13].asWritableLongChunk().add(performanceEntry.getIntervalModified()); + // ColumnDefinition.ofLong("EntryIntervalShifted"), + chunks[14].asWritableLongChunk().add(performanceEntry.getIntervalShifted()); + // ColumnDefinition.ofLong("EntryIntervalInvocationCount"), + chunks[15].asWritableLongChunk().add(performanceEntry.getIntervalInvocationCount()); + // ColumnDefinition.ofLong("MinFreeMemory"), + chunks[16].asWritableLongChunk().add(performanceEntry.getMinFreeMemory()); + // ColumnDefinition.ofLong("MaxTotalMemory"), + chunks[17].asWritableLongChunk().add(performanceEntry.getMaxTotalMemory()); + // ColumnDefinition.ofLong("Collections"), + chunks[18].asWritableLongChunk().add(performanceEntry.getCollections()); + // ColumnDefinition.ofLong("CollectionTimeNanos"), + chunks[19].asWritableLongChunk().add(performanceEntry.getCollectionTimeNanos()); + // ColumnDefinition.ofLong("EntryIntervalAllocatedBytes"), + chunks[20].asWritableLongChunk().add(performanceEntry.getAllocatedBytes()); + // ColumnDefinition.ofLong("EntryIntervalPoolAllocatedBytes"), + chunks[21].asWritableLongChunk().add(performanceEntry.getPoolAllocatedBytes()); + // ColumnDefinition.ofString("AuthContext"), + chunks[22].asWritableObjectChunk().add(Objects.toString(performanceEntry.getAuthContext())); + // ColumnDefinition.ofString("UpdateGraph")); + chunks[23].asWritableObjectChunk().add(Objects.toString(performanceEntry.getUpdateGraphName())); + if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java index df7dff85b61..332104884ef 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java @@ -32,7 +32,7 @@ import java.util.Objects; import java.util.Queue; import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; /** *

@@ -122,7 +122,7 @@ private synchronized void publish(
         }
     }
-    private static final AtomicInteger entryIdCounter = new AtomicInteger(1);
+    private static final AtomicLong entryIdCounter = new AtomicLong(1);
     private final UpdateGraph updateGraph;
     private final PerformanceEntry aggregatedSmallUpdatesEntry;
@@ -137,10 +137,10 @@ private synchronized void publish(
     public UpdatePerformanceTracker(final UpdateGraph updateGraph) {
         this.updateGraph = Objects.requireNonNull(updateGraph);
         this.aggregatedSmallUpdatesEntry = new PerformanceEntry(
-                QueryConstants.NULL_INT, QueryConstants.NULL_INT, QueryConstants.NULL_INT,
+                QueryConstants.NULL_LONG, QueryConstants.NULL_LONG, QueryConstants.NULL_INT,
                "Aggregated Small Updates", null, updateGraph.getName());
         this.flushEntry = new PerformanceEntry(
-                QueryConstants.NULL_INT, QueryConstants.NULL_INT, QueryConstants.NULL_INT,
+                QueryConstants.NULL_LONG, QueryConstants.NULL_LONG, QueryConstants.NULL_INT,
                "UpdatePerformanceTracker Flush", null, updateGraph.getName());
     }
@@ -192,7 +192,7 @@ public final PerformanceEntry getEntry(final String description) {
         final QueryPerformanceRecorder qpr = QueryPerformanceRecorder.getInstance();
         final MutableObject<PerformanceEntry> entryMu = new MutableObject<>();
-        qpr.setQueryData((evaluationNumber, operationNumber, uninstrumented) -> {
+        qpr.supplyQueryData((evaluationNumber, operationNumber, uninstrumented) -> {
             final String effectiveDescription;
             if (StringUtils.isNullOrEmpty(description) && uninstrumented) {
                effectiveDescription = QueryPerformanceRecorder.UNINSTRUMENTED_CODE_DESCRIPTION;
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/ConditionFilter.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/ConditionFilter.java index d77db0aeb98..c66db402565 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/ConditionFilter.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/ConditionFilter.java
@@ -18,7 +18,6 @@
 import io.deephaven.engine.table.impl.util.codegen.CodeGenerator;
 import io.deephaven.engine.context.QueryScopeParam;
 import io.deephaven.time.TimeLiteralReplacedExpression;
-import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget;
 import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
 import io.deephaven.engine.table.ColumnSource;
 import io.deephaven.chunk.*;
@@ -382,8 +381,7 @@ protected void generateFilterCode(
         final StringBuilder classBody = getClassBody(tableDefinition, timeConversionResult, result);
         if (classBody == null)
            return;
-        final QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance().getNugget("Compile:" + formula);
-        try {
+        try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("Compile:" + formula)) {
             final List<Class<?>> paramClasses = new ArrayList<>();
             final Consumer<Class<?>> addParamClass = (cls) -> {
                if (cls != null) {
@@ -409,8 +407,6 @@ protected void generateFilterCode(
             filterKernelClass = ExecutionContext.getContext().getQueryCompiler()
                    .compile("GeneratedFilterKernel", this.classBody = classBody.toString(),
                            QueryCompiler.FORMULA_PREFIX, QueryScopeParamTypeUtil.expandParameterClasses(paramClasses));
-        } finally {
-            nugget.done();
-        }
     }
 }
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/DhFormulaColumn.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/DhFormulaColumn.java index f9b97f739fe..c1caeede3c2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/DhFormulaColumn.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/DhFormulaColumn.java
@@ -14,7 +14,6 @@
 import io.deephaven.engine.table.impl.MatchPair;
 import io.deephaven.engine.table.Table;
 import io.deephaven.engine.table.impl.lang.QueryLanguageParser;
-import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget;
 import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
 import io.deephaven.engine.table.impl.select.codegen.FormulaAnalyzer;
 import io.deephaven.engine.table.impl.select.codegen.JavaKernelBuilder;
@@ -32,6 +31,7 @@
 import io.deephaven.internal.log.LoggerFactory;
 import io.deephaven.io.logger.Logger;
 import io.deephaven.time.TimeLiteralReplacedExpression;
+import io.deephaven.util.SafeCloseable;
 import io.deephaven.util.type.TypeUtils;
 import io.deephaven.vector.ObjectVector;
 import io.deephaven.vector.Vector;
@@ -772,8 +772,7 @@ private FormulaFactory createFormulaFactory() {
     @SuppressWarnings("SameParameterValue")
     private Class<?> compileFormula(final String what, final String classBody, final String className) {
         // System.out.printf("compileFormula: what is %s. Code is...%n%s%n", what, classBody);
-        try (final QueryPerformanceNugget ignored =
-                QueryPerformanceRecorder.getInstance().getNugget("Compile:" + what)) {
+        try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("Compile:" + what)) {
             // Compilation needs to take place with elevated privileges, but the created object should not have them.
             final List<Class<?>> paramClasses = new ArrayList<>();
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/codegen/JavaKernelBuilder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/codegen/JavaKernelBuilder.java index e34be36fc18..94702af0e11 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/codegen/JavaKernelBuilder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/codegen/JavaKernelBuilder.java
@@ -5,9 +5,9 @@
 import io.deephaven.engine.context.QueryCompiler;
 import io.deephaven.engine.context.ExecutionContext;
+import io.deephaven.util.SafeCloseable;
 import io.deephaven.vector.Vector;
 import io.deephaven.engine.context.QueryScopeParam;
-import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget;
 import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
 import io.deephaven.engine.table.impl.select.Formula;
 import io.deephaven.engine.table.impl.select.DhFormulaColumn;
@@ -260,8 +260,7 @@ private List visitFormulaParameters(
     @SuppressWarnings("SameParameterValue")
     private static Class<?> compileFormula(final String what, final String classBody, final String className) {
         // System.out.printf("compileFormula: formulaString is %s. Code is...%n%s%n", what, classBody);
-        try (final QueryPerformanceNugget nugget =
-                QueryPerformanceRecorder.getInstance().getNugget("Compile:" + what)) {
+        try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("Compile:" + what)) {
             // Compilation needs to take place with elevated privileges, but the created object should not have them.
final QueryCompiler compiler = ExecutionContext.getContext().getQueryCompiler(); return compiler.compile(className, classBody, QueryCompiler.FORMULA_PREFIX); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java index 6cf58505ba4..245d312793f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java @@ -22,7 +22,6 @@ import io.deephaven.engine.table.impl.*; import io.deephaven.engine.table.impl.perf.BasePerformanceEntry; import io.deephaven.engine.table.impl.perf.PerformanceEntry; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.sources.*; import io.deephaven.engine.table.impl.sources.sparse.SparseConstants; @@ -908,19 +907,14 @@ private void cleanUpAndNotify(final Runnable onCleanupComplete) { final BasePerformanceEntry accumulated = jobScheduler.getAccumulatedPerformance(); if (accumulated != null) { if (initialStep) { - final QueryPerformanceNugget outerNugget = QueryPerformanceRecorder.getInstance().getOuterNugget(); - if (outerNugget != null) { - outerNugget.addBaseEntry(accumulated); - } + QueryPerformanceRecorder.getInstance().getEnclosingNugget().accumulate(accumulated); } else { source.getUpdateGraph().addNotification(new TerminalNotification() { @Override public void run() { - synchronized (accumulated) { - final PerformanceEntry entry = sourceListener().getEntry(); - if (entry != null) { - entry.accumulate(accumulated); - } + final PerformanceEntry entry = sourceListener().getEntry(); + if (entry != null) { + entry.accumulate(accumulated); } } }); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorImpl.java index f13d3799136..45cf2166e00 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorImpl.java @@ -39,7 +39,7 @@ public void add( @Nullable TableListener.Entry entry, @Nullable TableListener.Entry sourceEntry, Throwable originalException) { - final int evaluationNumber; + final long evaluationNumber; final int operationNumber; final String description; if (entry instanceof PerformanceEntry) { @@ -52,7 +52,7 @@ public void add( operationNumber = QueryConstants.NULL_INT; description = null; } - final int sourceEvaluationNumber; + final long sourceEvaluationNumber; final int sourceOperationNumber; final String sourceDescription; if (sourceEntry instanceof PerformanceEntry) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorStreamPublisher.java index f719fe2d623..d891df9e041 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorStreamPublisher.java @@ -20,10 +20,10 @@ class AsyncErrorStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( ColumnDefinition.ofTime("Time"), - ColumnDefinition.ofInt("EvaluationNumber"), + 
ColumnDefinition.ofLong("EvaluationNumber"), ColumnDefinition.ofInt("OperationNumber"), ColumnDefinition.ofString("Description"), - ColumnDefinition.ofInt("SourceQueryEvaluationNumber"), + ColumnDefinition.ofLong("SourceQueryEvaluationNumber"), ColumnDefinition.ofInt("SourceQueryOperationNumber"), ColumnDefinition.ofString("SourceQueryDescription"), ColumnDefinition.of("Cause", Type.ofCustom(Throwable.class))); @@ -51,18 +51,18 @@ public void register(@NotNull StreamConsumer consumer) { public synchronized void add( long timeNanos, - int evaluationNumber, + long evaluationNumber, int operationNumber, String description, - int sourceQueryEvaluationNumber, + long sourceQueryEvaluationNumber, int sourceQueryOperationNumber, String sourceQueryDescription, Throwable cause) { chunks[0].asWritableLongChunk().add(timeNanos); - chunks[1].asWritableIntChunk().add(evaluationNumber); + chunks[1].asWritableLongChunk().add(evaluationNumber); chunks[2].asWritableIntChunk().add(operationNumber); chunks[3].asWritableObjectChunk().add(description); - chunks[4].asWritableIntChunk().add(sourceQueryEvaluationNumber); + chunks[4].asWritableLongChunk().add(sourceQueryEvaluationNumber); chunks[5].asWritableIntChunk().add(sourceQueryOperationNumber); chunks[6].asWritableObjectChunk().add(sourceQueryDescription); chunks[7].asWritableObjectChunk().add(cause); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java index ee32cea7f69..e55dbcc3c8e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java @@ -4,9 +4,12 @@ package io.deephaven.engine.table.impl.util; import io.deephaven.base.clock.Clock; +import io.deephaven.base.verify.Require; import io.deephaven.configuration.Configuration; import io.deephaven.engine.table.impl.BlinkTableTools; import io.deephaven.engine.table.impl.QueryTable; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.tablelogger.EngineTableLoggers; import io.deephaven.engine.tablelogger.QueryOperationPerformanceLogLogger; import io.deephaven.engine.tablelogger.QueryPerformanceLogLogger; @@ -16,10 +19,14 @@ import io.deephaven.process.ProcessInfoConfig; import io.deephaven.stats.Driver; import io.deephaven.stats.StatsIntradayLogger; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.io.IOException; +import java.util.List; public class EngineMetrics { + private static final Logger log = LoggerFactory.getLogger(EngineMetrics.class); private static final boolean STATS_LOGGING_ENABLED = Configuration.getInstance().getBooleanWithDefault( "statsLoggingEnabled", true); private static volatile ProcessInfo PROCESS_INFO; @@ -68,9 +75,8 @@ private EngineMetrics() { } catch (IOException e) { log.fatal().append("Failed to configure process info: ").append(e.toString()).endl(); } - qpImpl = new QueryPerformanceImpl(pInfo.getId(), tableLoggerFactory.queryPerformanceLogLogger()); - qoplImpl = new QueryOperationPerformanceImpl(pInfo.getId(), - tableLoggerFactory.queryOperationPerformanceLogLogger()); + qpImpl = new QueryPerformanceImpl(tableLoggerFactory.queryPerformanceLogLogger()); + qoplImpl = new QueryOperationPerformanceImpl(tableLoggerFactory.queryOperationPerformanceLogLogger()); if 
(STATS_LOGGING_ENABLED) {
             statsImpl = new StatsImpl(pInfo.getId(), tableLoggerFactory.processMetricsLogLogger());
         } else {
@@ -106,6 +112,31 @@ private StatsIntradayLogger getStatsLogger() {
         return statsImpl;
     }
+    public void logQueryProcessingResults(
+            @NotNull final QueryPerformanceRecorder recorder,
+            @Nullable final Exception exception) {
+        final QueryPerformanceLogLogger qplLogger = getQplLogger();
+        final QueryOperationPerformanceLogLogger qoplLogger = getQoplLogger();
+        try {
+            final QueryPerformanceNugget queryNugget = Require.neqNull(
+                    recorder.getQueryLevelPerformanceData(),
+                    "recorder.getQueryLevelPerformanceData()");
+
+            synchronized (qplLogger) {
+                qplLogger.log(queryNugget, exception);
+            }
+            final List<QueryPerformanceNugget> nuggets =
+                    recorder.getOperationLevelPerformanceData();
+            synchronized (qoplLogger) {
+                for (QueryPerformanceNugget nugget : nuggets) {
+                    qoplLogger.log(nugget);
+                }
+            }
+        } catch (final Exception e) {
+            log.error().append("Failed to log query performance data: ").append(e).endl();
+        }
+    }
+
     public static boolean maybeStartStatsCollection() {
         if (!EngineMetrics.STATS_LOGGING_ENABLED) {
            return false;
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java index 7037dd34811..2722d61fd35 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java
@@ -32,9 +32,7 @@ public void submit(
                throw e;
             } finally {
                basePerformanceEntry.onBaseEntryEnd();
-                synchronized (accumulatedBaseEntry) {
-                    accumulatedBaseEntry.accumulate(basePerformanceEntry);
-                }
+                accumulatedBaseEntry.accumulate(basePerformanceEntry);
             }
         });
     }
 }
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java index c95c4b0a236..d43d8ab1ce2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java
@@ -7,23 +7,21 @@
 import io.deephaven.engine.table.Table;
 import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget;
 import io.deephaven.engine.tablelogger.QueryOperationPerformanceLogLogger;
-import io.deephaven.process.ProcessUniqueId;
 import io.deephaven.stream.StreamToBlinkTableAdapter;
 import io.deephaven.tablelogger.Row.Flags;
+import org.jetbrains.annotations.NotNull;
 import java.io.IOException;
 import java.util.Objects;
 class QueryOperationPerformanceImpl implements QueryOperationPerformanceLogLogger {
-    private final ProcessUniqueId id;
     private final QueryOperationPerformanceLogLogger qoplLogger;
     private final QueryOperationPerformanceStreamPublisher publisher;
     @SuppressWarnings("FieldCanBeLocal")
     private final StreamToBlinkTableAdapter adapter;
     private final Table blink;
-    public QueryOperationPerformanceImpl(ProcessUniqueId id, QueryOperationPerformanceLogLogger qoplLogger) {
-        this.id = Objects.requireNonNull(id);
+    public QueryOperationPerformanceImpl(QueryOperationPerformanceLogLogger qoplLogger) {
         this.qoplLogger = Objects.requireNonNull(qoplLogger);
         this.publisher = new QueryOperationPerformanceStreamPublisher();
         this.adapter = new
StreamToBlinkTableAdapter( @@ -39,8 +37,10 @@ public Table blinkTable() { } @Override - public void log(Flags flags, int operationNumber, QueryPerformanceNugget nugget) throws IOException { - publisher.add(id.value(), operationNumber, nugget); - qoplLogger.log(flags, operationNumber, nugget); + public void log( + @NotNull final Flags flags, + @NotNull final QueryPerformanceNugget nugget) throws IOException { + publisher.add(nugget); + qoplLogger.log(flags, nugget); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java index 223549a6fc9..c0529be8681 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java @@ -12,9 +12,7 @@ import io.deephaven.stream.StreamChunkUtils; import io.deephaven.stream.StreamConsumer; import io.deephaven.stream.StreamPublisher; -import io.deephaven.time.DateTimeUtils; import io.deephaven.util.BooleanUtils; -import io.deephaven.util.QueryConstants; import org.jetbrains.annotations.NotNull; import java.util.Objects; @@ -22,19 +20,22 @@ class QueryOperationPerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( - ColumnDefinition.ofString("ProcessUniqueId"), - ColumnDefinition.ofInt("EvaluationNumber"), + ColumnDefinition.ofLong("EvaluationNumber"), + ColumnDefinition.ofLong("ParentEvaluationNumber"), ColumnDefinition.ofInt("OperationNumber"), + ColumnDefinition.ofInt("ParentOperationNumber"), ColumnDefinition.ofInt("Depth"), ColumnDefinition.ofString("Description"), + ColumnDefinition.ofString("SessionId"), ColumnDefinition.ofString("CallerLine"), - ColumnDefinition.ofBoolean("IsTopLevel"), ColumnDefinition.ofBoolean("IsCompilation"), ColumnDefinition.ofTime("StartTime"), ColumnDefinition.ofTime("EndTime"), ColumnDefinition.ofLong("DurationNanos"), ColumnDefinition.ofLong("CpuNanos"), ColumnDefinition.ofLong("UserCpuNanos"), + ColumnDefinition.ofLong("FreeMemory"), + ColumnDefinition.ofLong("TotalMemory"), ColumnDefinition.ofLong("FreeMemoryChange"), ColumnDefinition.ofLong("TotalMemoryChange"), ColumnDefinition.ofLong("Collections"), @@ -65,36 +66,83 @@ public void register(@NotNull StreamConsumer consumer) { this.consumer = Objects.requireNonNull(consumer); } - public synchronized void add( - final String id, - final int operationNumber, - final QueryPerformanceNugget nugget) { - - chunks[0].asWritableObjectChunk().add(id); - chunks[1].asWritableIntChunk().add(nugget.getEvaluationNumber()); - chunks[2].asWritableIntChunk().add(operationNumber); - chunks[3].asWritableIntChunk().add(nugget.getDepth()); - chunks[4].asWritableObjectChunk().add(nugget.getName()); - chunks[5].asWritableObjectChunk().add(nugget.getCallerLine()); - chunks[6].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.isTopLevel())); - chunks[7].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getName().startsWith("Compile:"))); - chunks[8].asWritableLongChunk().add(DateTimeUtils.millisToNanos(nugget.getStartClockTime())); - // this is a lie; timestamps should _NOT_ be created based on adding nano time durations to timestamps. - chunks[9].asWritableLongChunk().add(nugget.getTotalTimeNanos() == null ? 
QueryConstants.NULL_LONG - : DateTimeUtils.millisToNanos(nugget.getStartClockTime()) + nugget.getTotalTimeNanos()); - chunks[10].asWritableLongChunk() - .add(nugget.getTotalTimeNanos() == null ? QueryConstants.NULL_LONG : nugget.getTotalTimeNanos()); - chunks[11].asWritableLongChunk().add(nugget.getCpuNanos()); - chunks[12].asWritableLongChunk().add(nugget.getUserCpuNanos()); - chunks[13].asWritableLongChunk().add(nugget.getEndFreeMemory()); - chunks[14].asWritableLongChunk().add(nugget.getEndTotalMemory()); - chunks[15].asWritableLongChunk().add(nugget.getDiffFreeMemory()); - chunks[16].asWritableLongChunk().add(nugget.getDiffTotalMemory()); - chunks[17].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); - chunks[18].asWritableLongChunk().add(nugget.getAllocatedBytes()); - chunks[19].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); - chunks[20].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); - chunks[21].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + public synchronized void add(final QueryPerformanceNugget nugget) { + + // ColumnDefinition.ofLong("EvaluationNumber"), + chunks[0].asWritableLongChunk().add(nugget.getEvaluationNumber()); + + // ColumnDefinition.ofLong("ParentEvaluationNumber"), + chunks[1].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); + + // ColumnDefinition.ofInt("OperationNumber"), + chunks[2].asWritableIntChunk().add(nugget.getOperationNumber()); + + // ColumnDefinition.ofInt("ParentOperationNumber"), + chunks[3].asWritableIntChunk().add(nugget.getParentOperationNumber()); + + // ColumnDefinition.ofInt("Depth"), + chunks[4].asWritableIntChunk().add(nugget.getDepth()); + + // ColumnDefinition.ofString("Description"), + chunks[5].asWritableObjectChunk().add(nugget.getDescription()); + + // ColumnDefinition.ofString("SessionId"), + chunks[6].asWritableObjectChunk().add(nugget.getSessionId()); + + // ColumnDefinition.ofString("CallerLine"), + chunks[7].asWritableObjectChunk().add(nugget.getCallerLine()); + + // ColumnDefinition.ofBoolean("IsCompilation"), + chunks[8].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getDescription().startsWith("Compile:"))); + + // ColumnDefinition.ofTime("StartTime"), + chunks[9].asWritableLongChunk().add(nugget.getStartClockEpochNanos()); + + // ColumnDefinition.ofTime("EndTime"), + chunks[10].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); + + // ColumnDefinition.ofLong("DurationNanos"), + chunks[11].asWritableLongChunk().add(nugget.getUsageNanos()); + + // ColumnDefinition.ofLong("CpuNanos"), + chunks[12].asWritableLongChunk().add(nugget.getCpuNanos()); + + // ColumnDefinition.ofLong("UserCpuNanos"), + chunks[13].asWritableLongChunk().add(nugget.getUserCpuNanos()); + + // ColumnDefinition.ofLong("FreeMemory"), + chunks[14].asWritableLongChunk().add(nugget.getEndFreeMemory()); + + // ColumnDefinition.ofLong("TotalMemory"), + chunks[15].asWritableLongChunk().add(nugget.getEndTotalMemory()); + + // ColumnDefinition.ofLong("FreeMemoryChange"), + chunks[16].asWritableLongChunk().add(nugget.getDiffFreeMemory()); + + // ColumnDefinition.ofLong("TotalMemoryChange"), + chunks[17].asWritableLongChunk().add(nugget.getDiffTotalMemory()); + + // ColumnDefinition.ofLong("Collections") + chunks[18].asWritableLongChunk().add(nugget.getDiffCollections()); + + // ColumnDefinition.ofLong("CollectionTimeNanos"), + chunks[19].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); + + // ColumnDefinition.ofLong("AllocatedBytes"), + 
chunks[20].asWritableLongChunk().add(nugget.getAllocatedBytes()); + + // ColumnDefinition.ofLong("PoolAllocatedBytes"), + chunks[21].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); + + // ColumnDefinition.ofLong("InputSizeLong"), + chunks[22].asWritableLongChunk().add(nugget.getInputSize()); + + // ColumnDefinition.ofBoolean("WasInterrupted") + chunks[23].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); + + // ColumnDefinition.ofString("AuthContext") + chunks[24].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java index b092073921b..355dabe72ae 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java @@ -6,25 +6,23 @@ import io.deephaven.engine.context.ExecutionContext; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.tablelogger.QueryPerformanceLogLogger; -import io.deephaven.process.ProcessUniqueId; import io.deephaven.stream.StreamToBlinkTableAdapter; import io.deephaven.tablelogger.Row.Flags; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.io.IOException; import java.util.Objects; class QueryPerformanceImpl implements QueryPerformanceLogLogger { - private final ProcessUniqueId id; private final QueryPerformanceLogLogger qplLogger; private final QueryPerformanceStreamPublisher publisher; @SuppressWarnings("FieldCanBeLocal") private final StreamToBlinkTableAdapter adapter; private final Table blink; - public QueryPerformanceImpl(ProcessUniqueId id, QueryPerformanceLogLogger qplLogger) { - this.id = Objects.requireNonNull(id); + public QueryPerformanceImpl(QueryPerformanceLogLogger qplLogger) { this.qplLogger = Objects.requireNonNull(qplLogger); this.publisher = new QueryPerformanceStreamPublisher(); this.adapter = new StreamToBlinkTableAdapter( @@ -40,9 +38,11 @@ public Table blinkTable() { } @Override - public void log(Flags flags, long evaluationNumber, QueryProcessingResults queryProcessingResults, - QueryPerformanceNugget nugget) throws IOException { - publisher.add(id.value(), evaluationNumber, queryProcessingResults, nugget); - qplLogger.log(flags, evaluationNumber, queryProcessingResults, nugget); + public void log( + @NotNull final Flags flags, + @NotNull final QueryPerformanceNugget nugget, + @Nullable final Exception exception) throws IOException { + publisher.add(nugget, exception); + qplLogger.log(flags, nugget, exception); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java index adb4511a71e..6923360f032 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java @@ -8,23 +8,23 @@ import io.deephaven.engine.table.ColumnDefinition; import io.deephaven.engine.table.TableDefinition; import 
io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.sources.ArrayBackedColumnSource; import io.deephaven.stream.StreamChunkUtils; import io.deephaven.stream.StreamConsumer; import io.deephaven.stream.StreamPublisher; -import io.deephaven.time.DateTimeUtils; import io.deephaven.util.BooleanUtils; -import io.deephaven.util.QueryConstants; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.util.Objects; class QueryPerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( - ColumnDefinition.ofString("ProcessUniqueId"), ColumnDefinition.ofLong("EvaluationNumber"), + ColumnDefinition.ofLong("ParentEvaluationNumber"), + ColumnDefinition.ofString("Description"), + ColumnDefinition.ofString("SessionId"), ColumnDefinition.ofTime("StartTime"), ColumnDefinition.ofTime("EndTime"), ColumnDefinition.ofLong("DurationNanos"), @@ -39,7 +39,6 @@ class QueryPerformanceStreamPublisher implements StreamPublisher { ColumnDefinition.ofLong("AllocatedBytes"), ColumnDefinition.ofLong("PoolAllocatedBytes"), ColumnDefinition.ofBoolean("WasInterrupted"), - ColumnDefinition.ofBoolean("IsReplayer"), ColumnDefinition.ofString("Exception"), ColumnDefinition.ofString("AuthContext")); private static final int CHUNK_SIZE = ArrayBackedColumnSource.BLOCK_SIZE; @@ -64,69 +63,68 @@ public void register(@NotNull StreamConsumer consumer) { } public synchronized void add( - final String id, - final long evaluationNumber, - final QueryProcessingResults queryProcessingResults, - final QueryPerformanceNugget nugget) { - // ColumnDefinition.ofString("ProcessUniqueId"), - chunks[0].asWritableObjectChunk().add(id); + @NotNull final QueryPerformanceNugget nugget, + @Nullable final Exception exception) { // ColumnDefinition.ofLong("EvaluationNumber") - chunks[1].asWritableLongChunk().add(evaluationNumber); + chunks[0].asWritableLongChunk().add(nugget.getEvaluationNumber()); + + // ColumnDefinition.ofLong("ParentEvaluationNumber") + chunks[1].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); + + // ColumnDefinition.ofString("Description") + chunks[2].asWritableObjectChunk().add(nugget.getDescription()); + + // ColumnDefinition.ofString("SessionId") + chunks[3].asWritableObjectChunk().add(nugget.getSessionId()); // ColumnDefinition.ofTime("StartTime"); - chunks[2].asWritableLongChunk().add(DateTimeUtils.millisToNanos(nugget.getStartClockTime())); + chunks[4].asWritableLongChunk().add(nugget.getStartClockEpochNanos()); // ColumnDefinition.ofTime("EndTime") - // this is a lie; timestamps should _NOT_ be created based on adding nano time durations to timestamps. - chunks[3].asWritableLongChunk().add(nugget.getTotalTimeNanos() == null ? QueryConstants.NULL_LONG - : DateTimeUtils.millisToNanos(nugget.getStartClockTime()) + nugget.getTotalTimeNanos()); + chunks[5].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); // ColumnDefinition.ofLong("DurationNanos") - chunks[4].asWritableLongChunk() - .add(nugget.getTotalTimeNanos() == null ? 
QueryConstants.NULL_LONG : nugget.getTotalTimeNanos()); + chunks[6].asWritableLongChunk().add(nugget.getUsageNanos()); // ColumnDefinition.ofLong("CpuNanos") - chunks[5].asWritableLongChunk().add(nugget.getCpuNanos()); + chunks[7].asWritableLongChunk().add(nugget.getCpuNanos()); // ColumnDefinition.ofLong("UserCpuNanos") - chunks[6].asWritableLongChunk().add(nugget.getUserCpuNanos()); + chunks[8].asWritableLongChunk().add(nugget.getUserCpuNanos()); // ColumnDefinition.ofLong("FreeMemory") - chunks[7].asWritableLongChunk().add(nugget.getEndFreeMemory()); + chunks[9].asWritableLongChunk().add(nugget.getEndFreeMemory()); // ColumnDefinition.ofLong("TotalMemory") - chunks[8].asWritableLongChunk().add(nugget.getEndTotalMemory()); + chunks[10].asWritableLongChunk().add(nugget.getEndTotalMemory()); // ColumnDefinition.ofLong("FreeMemoryChange") - chunks[9].asWritableLongChunk().add(nugget.getDiffFreeMemory()); + chunks[11].asWritableLongChunk().add(nugget.getDiffFreeMemory()); // ColumnDefinition.ofLong("TotalMemoryChange") - chunks[10].asWritableLongChunk().add(nugget.getDiffTotalMemory()); + chunks[12].asWritableLongChunk().add(nugget.getDiffTotalMemory()); // ColumnDefinition.ofLong("Collections") - chunks[11].asWritableLongChunk().add(nugget.getDiffCollections()); + chunks[13].asWritableLongChunk().add(nugget.getDiffCollections()); // ColumnDefinition.ofLong("CollectionTimeNanos") - chunks[12].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); + chunks[14].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); // ColumnDefinition.ofLong("AllocatedBytes") - chunks[13].asWritableLongChunk().add(nugget.getAllocatedBytes()); + chunks[15].asWritableLongChunk().add(nugget.getAllocatedBytes()); // ColumnDefinition.ofLong("PoolAllocatedBytes") - chunks[14].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); + chunks[16].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); // ColumnDefinition.ofBoolean("WasInterrupted") - chunks[15].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); - - // ColumnDefinition.ofBoolean("IsReplayer") - chunks[16].asWritableByteChunk().add(BooleanUtils.booleanAsByte(queryProcessingResults.isReplayer())); + chunks[17].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); // ColumnDefinition.ofString("Exception") - chunks[17].asWritableObjectChunk().add(queryProcessingResults.getException()); + chunks[18].asWritableObjectChunk().add(exception == null ? 
null : exception.getMessage()); // ColumnDefinition.ofString("AuthContext") - chunks[18].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + chunks[19].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/UpdateGraphJobScheduler.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/UpdateGraphJobScheduler.java index 2d799e0582b..345b08aa24e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/UpdateGraphJobScheduler.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/UpdateGraphJobScheduler.java @@ -47,9 +47,7 @@ public void run() { throw e; } finally { baseEntry.onBaseEntryEnd(); - synchronized (accumulatedBaseEntry) { - accumulatedBaseEntry.accumulate(baseEntry); - } + accumulatedBaseEntry.accumulate(baseEntry); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java index a981c646b09..1b828022a00 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java @@ -3,6 +3,7 @@ import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.tablelogger.Row; import io.deephaven.tablelogger.Row.Flags; +import org.jetbrains.annotations.NotNull; import java.io.IOException; @@ -13,18 +14,18 @@ * queries. */ public interface QueryOperationPerformanceLogLogger { - default void log(final int operationNumber, final QueryPerformanceNugget nugget) throws IOException { - log(DEFAULT_INTRADAY_LOGGER_FLAGS, operationNumber, nugget); + default void log(@NotNull final QueryPerformanceNugget nugget) throws IOException { + log(DEFAULT_INTRADAY_LOGGER_FLAGS, nugget); } - void log(final Row.Flags flags, final int operationNumber, final QueryPerformanceNugget nugget) throws IOException; + void log(@NotNull Row.Flags flags, @NotNull QueryPerformanceNugget nugget) throws IOException; enum Noop implements QueryOperationPerformanceLogLogger { INSTANCE; @Override - public void log(Flags flags, int operationNumber, QueryPerformanceNugget nugget) throws IOException { - - } + public void log( + @NotNull final Flags flags, + @NotNull final QueryPerformanceNugget nugget) throws IOException {} } } diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java index 28a46f61a61..97a7aa0c834 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java @@ -1,34 +1,37 @@ package io.deephaven.engine.tablelogger; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.tablelogger.Row; -import io.deephaven.tablelogger.Row.Flags; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.io.IOException; +import static io.deephaven.tablelogger.TableLogger.DEFAULT_INTRADAY_LOGGER_FLAGS; + /** * Logs data that describes the query-level performance for each worker. 
A given worker may be running multiple queries; * each will have its own set of query performance log entries. */ -import static io.deephaven.tablelogger.TableLogger.DEFAULT_INTRADAY_LOGGER_FLAGS; - public interface QueryPerformanceLogLogger { - default void log(final long evaluationNumber, final QueryProcessingResults queryProcessingResults, - final QueryPerformanceNugget nugget) throws IOException { - log(DEFAULT_INTRADAY_LOGGER_FLAGS, evaluationNumber, queryProcessingResults, nugget); + default void log( + @NotNull final QueryPerformanceNugget nugget, + @Nullable final Exception exception) throws IOException { + log(DEFAULT_INTRADAY_LOGGER_FLAGS, nugget, exception); } - void log(final Row.Flags flags, final long evaluationNumber, final QueryProcessingResults queryProcessingResults, - final QueryPerformanceNugget nugget) throws IOException; + void log( + @NotNull final Row.Flags flags, + @NotNull final QueryPerformanceNugget nugget, + @Nullable final Exception exception) throws IOException; enum Noop implements QueryPerformanceLogLogger { INSTANCE; @Override - public void log(Flags flags, long evaluationNumber, QueryProcessingResults queryProcessingResults, - QueryPerformanceNugget nugget) throws IOException { - - } + public void log( + @NotNull final Row.Flags flags, + @NotNull final QueryPerformanceNugget nugget, + @Nullable final Exception exception) {} } } diff --git a/engine/table/src/main/java/io/deephaven/engine/util/TableShowTools.java b/engine/table/src/main/java/io/deephaven/engine/util/TableShowTools.java index ac65b727082..4867ab01de6 100644 --- a/engine/table/src/main/java/io/deephaven/engine/util/TableShowTools.java +++ b/engine/table/src/main/java/io/deephaven/engine/util/TableShowTools.java @@ -5,11 +5,11 @@ import io.deephaven.datastructures.util.CollectionUtil; import io.deephaven.engine.table.Table; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.rowset.RowSet; import io.deephaven.time.DateTimeUtils; +import io.deephaven.util.SafeCloseable; import io.deephaven.util.type.ArrayTypeUtils; import java.io.PrintStream; @@ -28,8 +28,7 @@ class TableShowTools { static void showInternal(Table source, long firstRow, long lastRowExclusive, ZoneId timeZone, String delimiter, PrintStream out, boolean showRowSet, String[] columns) { - final QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance().getNugget("TableTools.show()"); - try { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("TableTools.show()")) { if (columns.length == 0) { final List columnNames = source.getDefinition().getColumnNames(); columns = columnNames.toArray(CollectionUtil.ZERO_LENGTH_STRING_ARRAY); @@ -107,8 +106,6 @@ static void showInternal(Table source, long firstRow, long lastRowExclusive, Zon } out.println(); out.flush(); - } finally { - nugget.done(); } } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConstantFormulaEvaluation.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConstantFormulaEvaluation.java index c3db84fd0bd..da3be26e696 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConstantFormulaEvaluation.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConstantFormulaEvaluation.java @@ -7,12 +7,12 @@ import io.deephaven.engine.table.Table; import 
io.deephaven.engine.table.impl.QueryTable; import io.deephaven.engine.table.impl.lang.JavaExpressionParser; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.sources.SingleValueColumnSource; import io.deephaven.engine.testutil.TstUtils; import io.deephaven.engine.testutil.junit4.EngineCleanup; import io.deephaven.engine.util.TableTools; +import io.deephaven.util.SafeCloseable; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; @@ -190,8 +190,7 @@ public void constantLongValueTest() { private void singleColumnConstantValueFormulaTest(final String formula, final Class columnType, final T columnRowValue, final int tableLength, final String description) { - final QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance().getNugget(description); - try { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget(description)) { final Table source = TableTools.emptyTable(tableLength).update(formula); String[] columns = source.getDefinition().getColumnNamesArray(); Assert.assertEquals("length of columns = 1", 1, columns.length); @@ -202,8 +201,6 @@ private void singleColumnConstantValueFormulaTest(final String formula, fina Assert.assertEquals(columnType, source.getColumnSource(columns[0]).getType()); Assert.assertEquals(columnRowValue, source.getColumnSource(columns[0]).get(key)); }); - } finally { - nugget.done(); } } @@ -230,8 +227,7 @@ public void threeColumnConstantValueFormulaTest() { private void threeColumnConstantValueFormulaTest(final String[] formulas, final Class calculatedColType, final T expectedConstValue, final ColumnFormula columnFormula, final int tableLength, final String description) { - final QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance().getNugget(description); - try { + try (final SafeCloseable nugget = QueryPerformanceRecorder.getInstance().getNugget(description)) { final Table source = TableTools.emptyTable(tableLength).update(formulas); String[] columns = source.getDefinition().getColumnNamesArray(); boolean constantValueColFound = false; @@ -262,16 +258,12 @@ private void threeColumnConstantValueFormulaTest(final String[] formulas, fi (T) source.getColumnSource(columns[1]).get(key)); Assert.assertEquals(expected, source.getColumnSource(columns[2]).get(key)); }); - } finally { - nugget.done(); } } @Test public void queryScopeForAtomicIntPlusConstantFormulaTest() { - final QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance() - .getNugget("queryScopeForAtomicInt"); - try { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("queryScopeForAtomicInt")) { final AtomicInteger atomicValue = new AtomicInteger(1); QueryScope.addParam("atomicValue", atomicValue); String[] formulas = new String[] { @@ -309,8 +301,6 @@ public void queryScopeForAtomicIntPlusConstantFormulaTest() { Assert.assertEquals("Calculate Col verification", expectedCalculatedColValue, source.getColumnSource(columns[2]).get(key)); }); - } finally { - nugget.done(); } } @@ -379,9 +369,7 @@ public void testRefreshingTableForConstantFormulaColumnSource() { @SuppressWarnings("SameParameterValue") private void checkConstantFormula(final Table source, final Set expectedConstValueColumns, final T[] expectedConstValues, final Class calculatedColType) { - final QueryPerformanceNugget nugget = - 
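
(Aside: every call site in this patch migrates the same way: the nugget obtained from the recorder is now held in try-with-resources rather than closed in an explicit finally block. A hedged sketch of that conversion, with a stand-in resource type playing the role of the SafeCloseable nugget:)

```java
import java.util.function.Function;

// Hedged sketch of the try-with-resources conversion applied across this patch;
// NuggetSketch is a hypothetical stand-in, not the Deephaven nugget API.
final class NuggetSketch implements AutoCloseable {
    private final String description;

    NuggetSketch(final String description) {
        this.description = description;
        System.out.println("start: " + description);
    }

    @Override
    public void close() { // takes over for the old finally { nugget.done(); }
        System.out.println("done: " + description);
    }

    static <R> R apply(final Function<String, R> function, final String arg) {
        // The resource is released on every exit path, including exceptions,
        // without the explicit try/finally the old call sites carried.
        try (final NuggetSketch ignored = new NuggetSketch("apply(" + arg + ")")) {
            return function.apply(arg);
        }
    }

    public static void main(String[] args) {
        System.out.println(apply(String::toUpperCase, "hello"));
    }
}
```
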
QueryPerformanceRecorder.getInstance().getNugget("queryScopeForAtomicInt"); - try { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("queryScopeForAtomicInt")) { int count = 0; int[] constantColIndex = new int[expectedConstValues.length]; String[] columns = source.getDefinition().getColumnNamesArray(); @@ -412,8 +400,6 @@ private void checkConstantFormula(final Table source, final Set expe source.getColumnSource(columns[constantColIndex[i]]).get(key)); } }); - } finally { - nugget.done(); } } } diff --git a/extensions/csv/src/main/java/io/deephaven/csv/CsvTools.java b/extensions/csv/src/main/java/io/deephaven/csv/CsvTools.java index 9e9a431923b..305bdc9a8f9 100644 --- a/extensions/csv/src/main/java/io/deephaven/csv/CsvTools.java +++ b/extensions/csv/src/main/java/io/deephaven/csv/CsvTools.java @@ -41,7 +41,6 @@ import io.deephaven.engine.table.impl.DataAccessHelpers; import io.deephaven.engine.table.WritableColumnSource; import io.deephaven.engine.table.impl.InMemoryTable; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.sources.BooleanArraySource; import io.deephaven.engine.table.impl.sources.ByteArraySource; @@ -59,6 +58,7 @@ import io.deephaven.time.DateTimeUtils; import io.deephaven.util.BooleanUtils; import io.deephaven.util.QueryConstants; +import io.deephaven.util.SafeCloseable; import io.deephaven.util.annotations.ScriptApi; import org.jetbrains.annotations.Nullable; @@ -917,9 +917,8 @@ private static void writeCsvContentsSeq( final boolean nullsAsEmpty, final char separator, @Nullable final BiConsumer progress) throws IOException { - final QueryPerformanceNugget nugget = - QueryPerformanceRecorder.getInstance().getNugget("CsvTools.writeCsvContentsSeq()"); - try { + try (final SafeCloseable ignored = + QueryPerformanceRecorder.getInstance().getNugget("CsvTools.writeCsvContentsSeq()")) { String separatorStr = String.valueOf(separator); for (long i = 0; i < size; i++) { for (int j = 0; j < cols.length; j++) { @@ -945,8 +944,6 @@ private static void writeCsvContentsSeq( progress.accept(i, size); } } - } finally { - nugget.done(); } } diff --git a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueries.java b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueries.java index 14e6177f8be..0ec22a8b8bb 100644 --- a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueries.java +++ b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueries.java @@ -6,6 +6,7 @@ import com.google.auto.service.AutoService; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.hierarchical.TreeTable; import io.deephaven.engine.util.GroovyDeephavenSession; import io.deephaven.util.annotations.ScriptApi; @@ -68,6 +69,27 @@ public static Table queryOperationPerformance(final long evaluationNumber) { evaluationNumber); } + /** + * Converts the query performance table into a tree table. + * + * @return query performance tree table. + */ + @ScriptApi + public static TreeTable queryPerformanceAsTreeTable() { + return PerformanceQueriesGeneral.queryPerformanceAsTreeTable(TableLoggers.queryPerformanceLog()); + } + + /** + * Merges the query performance and query operation performance tables into a single tree table. + * + * @return query operation performance tree table. 
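
(Aside: the two @ScriptApi wrappers added to PerformanceQueries take no arguments and are meant to be called from a server-side script; a hedged usage sketch, assuming a running Deephaven engine session:)

```java
import io.deephaven.engine.table.hierarchical.TreeTable;
import io.deephaven.engine.table.impl.util.PerformanceQueries;

// Usage sketch only: assumes it runs inside a Deephaven server script where the
// engine and the performance log tables are already available.
public class TreeTableUsage {
    public static void main(String[] args) {
        // One row per query, parented by ParentEvaluationNumber.
        final TreeTable queries = PerformanceQueries.queryPerformanceAsTreeTable();
        // Queries merged with their operations, parented by the synthetic
        // EvalKey / ParentEvalKey columns built in PerformanceQueriesGeneral.
        final TreeTable operations = PerformanceQueries.queryOperationPerformanceAsTreeTable();
    }
}
```
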
+ */ + @ScriptApi + public static TreeTable queryOperationPerformanceAsTreeTable() { + return PerformanceQueriesGeneral.queryOperationPerformanceAsTreeTable( + TableLoggers.queryPerformanceLog(), TableLoggers.queryOperationPerformanceLog()); + } + /** * Gets the information for a process. * diff --git a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java index 43024345ed2..b06e5946845 100644 --- a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java +++ b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java @@ -3,16 +3,20 @@ */ package io.deephaven.engine.table.impl.util; +import com.google.common.collect.Sets; import io.deephaven.engine.table.Table; -import io.deephaven.engine.table.impl.DataAccessHelpers; +import io.deephaven.engine.table.hierarchical.TreeTable; +import io.deephaven.engine.util.TableTools; import io.deephaven.plot.Figure; import io.deephaven.plot.PlottingConvenience; import io.deephaven.util.QueryConstants; +import org.jetbrains.annotations.NotNull; import java.util.Arrays; import java.util.HashMap; import java.util.Map; -import java.util.OptionalLong; +import java.util.Set; +import java.util.stream.Stream; import static io.deephaven.api.agg.Aggregation.AggFirst; import static io.deephaven.api.agg.Aggregation.AggMax; @@ -23,7 +27,11 @@ * Generalizes {@link PerformanceQueries} to accept table parameters and make evaluation number parameter optional. */ public class PerformanceQueriesGeneral { - private static boolean formatPctColumns = true; + private static final boolean FORMAT_PCT_COLUMNS = true; + private static final Set ALLOWED_MISSING_COLUMN_NAMES = Sets.newHashSet( + "ProcessUniqueId", + "ParentEvaluationNumber", + "ParentOperationNumber"); public static Table queryPerformance(Table queryPerformanceLog, final long evaluationNumber) { @@ -35,21 +43,22 @@ public static Table queryPerformance(Table queryPerformanceLog, final long evalu queryPerformanceLog = queryPerformanceLog .updateView( "WorkerHeapSize = " + workerHeapSizeBytes + "L", - "TimeSecs = nanosToMillis(EndTime - StartTime) / 1000d", // How long this query ran for, in - // seconds + // How long this query ran for, in seconds + "TimeSecs = nanosToMillis(EndTime - StartTime) / 1000d", "NetMemoryChange = FreeMemoryChange - TotalMemoryChange", - "QueryMemUsed = TotalMemory - FreeMemory", // Memory in use by the query. (Only - // includes active heap memory.) - "QueryMemUsedPct = QueryMemUsed / WorkerHeapSize", // Memory usage as a percenage of max heap - // size (-Xmx) - "QueryMemFree = WorkerHeapSize - QueryMemUsed" // Remaining memory until the query runs into the - // max heap size - ) - .moveColumnsUp( - "ProcessUniqueId", "EvaluationNumber", - "QueryMemUsed", "QueryMemFree", "QueryMemUsedPct", - "EndTime", "TimeSecs", "NetMemoryChange"); - if (formatPctColumns) { + // Memory in use by the query. (Only includes active heap memory.) 
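
(Aside: the derived columns built by this updateView are plain arithmetic over the log's memory columns; a worked example with made-up inputs, where the double cast exists only to make the ratio readable:)

```java
// Worked example of the derived memory columns, using made-up inputs.
public class MemoryColumnsExample {
    public static void main(String[] args) {
        final long workerHeapSize = 4L * 1024 * 1024 * 1024; // -Xmx4g
        final long totalMemory = 3L * 1024 * 1024 * 1024;    // committed heap
        final long freeMemory = 1L * 1024 * 1024 * 1024;     // unused committed heap

        // QueryMemUsed = TotalMemory - FreeMemory
        final long queryMemUsed = totalMemory - freeMemory;
        // QueryMemUsedPct = QueryMemUsed / WorkerHeapSize
        final double queryMemUsedPct = (double) queryMemUsed / workerHeapSize;
        // QueryMemFree = WorkerHeapSize - QueryMemUsed
        final long queryMemFree = workerHeapSize - queryMemUsed;

        System.out.printf("used=%d bytes, pct=%.2f, free=%d bytes%n",
                queryMemUsed, queryMemUsedPct, queryMemFree); // pct=0.50
    }
}
```
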
+ "QueryMemUsed = TotalMemory - FreeMemory", + // Memory usage as a percenage of max heap size (-Xmx) + "QueryMemUsedPct = QueryMemUsed / WorkerHeapSize", + // Remaining memory until the query runs into the max heap size + "QueryMemFree = WorkerHeapSize - QueryMemUsed"); + + queryPerformanceLog = maybeMoveColumnsUp(queryPerformanceLog, + "ProcessUniqueId", "EvaluationNumber", "ParentEvaluationNumber", + "QueryMemUsed", "QueryMemFree", "QueryMemUsedPct", + "EndTime", "TimeSecs", "NetMemoryChange"); + + if (FORMAT_PCT_COLUMNS) { queryPerformanceLog = formatColumnsAsPct(queryPerformanceLog, "QueryMemUsedPct"); } return queryPerformanceLog; @@ -64,15 +73,16 @@ public static Table queryOperationPerformance(Table queryOps, final long evaluat queryOps = queryOps.where(whereConditionForEvaluationNumber(evaluationNumber)); } - return queryOps + queryOps = queryOps .updateView( "TimeSecs = nanosToMillis(EndTime - StartTime) / 1000d", - "NetMemoryChange = FreeMemoryChange - TotalMemoryChange" // Change in memory usage delta while - // this query was executing - ) - .moveColumnsUp( - "ProcessUniqueId", "EvaluationNumber", "OperationNumber", - "EndTime", "TimeSecs", "NetMemoryChange"); + // Change in memory usage delta while this query was executing + "NetMemoryChange = FreeMemoryChange - TotalMemoryChange"); + + return maybeMoveColumnsUp(queryOps, + "ProcessUniqueId", "EvaluationNumber", "ParentEvaluationNumber", + "OperationNumber", "ParentOperationNumber", + "EndTime", "TimeSecs", "NetMemoryChange"); } public static Table queryOperationPerformance(final Table queryOps) { @@ -84,11 +94,7 @@ public static String processInfo(Table processInfo, final String processInfoId, processInfo = processInfo .where("Id = `" + processInfoId + "`", "Type = `" + type + "`", "Key = `" + key + "`") .select("Value"); - try { - return (String) DataAccessHelpers.getColumn(processInfo, 0).get(0); - } catch (Exception e) { - return null; - } + return processInfo.getColumnSource("Value").get(processInfo.getRowSet().firstRowKey()); } public static Table queryUpdatePerformance(Table queryUpdatePerformance, final long evaluationNumber, @@ -101,25 +107,27 @@ public static Table queryUpdatePerformance(Table queryUpdatePerformance, final l queryUpdatePerformance = queryUpdatePerformance .updateView( "WorkerHeapSize = " + workerHeapSizeBytes + "L", - "Ratio = EntryIntervalUsage / IntervalDurationNanos", // % of time during this interval that the - // operation was using CPU - "QueryMemUsed = MaxTotalMemory - MinFreeMemory", // Memory in use by the query. (Only - // includes active heap memory.) 
- "QueryMemUsedPct = QueryMemUsed / WorkerHeapSize", // Memory usage as a percenage of the max - // heap size (-Xmx) - "QueryMemFree = WorkerHeapSize - QueryMemUsed", // Remaining memory until the query runs into - // the max heap size - "NRows = EntryIntervalAdded + EntryIntervalRemoved + EntryIntervalModified", // Total number of - // changed rows - "RowsPerSec = round(NRows / IntervalDurationNanos * 1.0e9)", // Average rate data is ticking at - "RowsPerCPUSec = round(NRows / EntryIntervalUsage * 1.0e9)" // Approximation of how fast CPU - // handles row changes - ) - .moveColumnsUp( - "ProcessUniqueId", "EvaluationNumber", "OperationNumber", - "Ratio", "QueryMemUsed", "QueryMemUsedPct", "IntervalEndTime", - "RowsPerSec", "RowsPerCPUSec", "EntryDescription"); - if (formatPctColumnsLocal && formatPctColumns) { + // % of time during this interval that the operation was using CPU + "Ratio = EntryIntervalUsage / IntervalDurationNanos", + // Memory in use by the query. (Only includes active heap memory.) + "QueryMemUsed = MaxTotalMemory - MinFreeMemory", + // Memory usage as a percentage of the max heap size (-Xmx) + "QueryMemUsedPct = QueryMemUsed / WorkerHeapSize", + // Remaining memory until the query runs into the max heap size + "QueryMemFree = WorkerHeapSize - QueryMemUsed", + // Total number of changed rows + "NRows = EntryIntervalAdded + EntryIntervalRemoved + EntryIntervalModified", + // Average rate data is ticking at + "RowsPerSec = round(NRows / IntervalDurationNanos * 1.0e9)", + // Approximation of how fast CPU handles row changes + "RowsPerCPUSec = round(NRows / EntryIntervalUsage * 1.0e9)"); + + queryUpdatePerformance = maybeMoveColumnsUp(queryUpdatePerformance, + "ProcessUniqueId", "EvaluationNumber", "OperationNumber", + "Ratio", "QueryMemUsed", "QueryMemUsedPct", "IntervalEndTime", + "RowsPerSec", "RowsPerCPUSec", "EntryDescription"); + + if (formatPctColumnsLocal && FORMAT_PCT_COLUMNS) { queryUpdatePerformance = formatColumnsAsPctUpdatePerformance(queryUpdatePerformance); } return queryUpdatePerformance; @@ -149,6 +157,7 @@ public static Map queryUpdatePerformanceMap(final Table queryUpda "EntryIntervalAdded", "EntryIntervalRemoved", "EntryIntervalModified", + "EntryIntervalShifted", "NRows"); // Create a table showing the 'worst' updates, i.e. 
the operations with the greatest 'Ratio' @@ -174,7 +183,7 @@ public static Map queryUpdatePerformanceMap(final Table queryUpda AggPct(0.50, "Ratio_50_Percentile = Ratio", "QueryMemUsedPct_50_Percentile = QueryMemUsedPct"), AggMax("Ratio_Max = Ratio", "QueryMemUsedPct_Max = QueryMemUsedPct"))); - if (formatPctColumns) { + if (FORMAT_PCT_COLUMNS) { qup = formatColumnsAsPctUpdatePerformance(qup); worstInterval = formatColumnsAsPct(worstInterval, "Ratio"); updateWorst = formatColumnsAsPctUpdatePerformance(updateWorst); @@ -270,7 +279,7 @@ public static Map serverStateWithPlots(final Table pml) { final Table pm = serverState(pml); resultMap.put("ServerState", pm); - int maxMemMiB = DataAccessHelpers.getColumn(pm, "MaxMemMiB").getInt(0); + int maxMemMiB = pm.getColumnSource("MaxMemMiB").getInt(pm.getRowSet().firstRowKey()); if (maxMemMiB == QueryConstants.NULL_INT) { maxMemMiB = 4096; } @@ -310,6 +319,35 @@ public static Map serverStateWithPlots(final Table pml) { return resultMap; } + public static TreeTable queryPerformanceAsTreeTable(@NotNull final Table qpl) { + return qpl.tree("EvaluationNumber", "ParentEvaluationNumber"); + } + + public static TreeTable queryOperationPerformanceAsTreeTable( + @NotNull final Table qpl, @NotNull final Table qopl) { + // TODO (https://github.com/deephaven/deephaven-core/issues/4814): use NULL_INT for ParentOperationNumber and + // Depth once we can prevent any compilation or at least reduce multiple usages to a single formula + Table mergeWithAggKeys = TableTools.merge( + qpl.updateView( + "EvalKey = Long.toString(EvaluationNumber)", + "ParentEvalKey = ParentEvaluationNumber == null ? null : (Long.toString(ParentEvaluationNumber))", + "OperationNumber = NULL_INT", + "ParentOperationNumber = OperationNumber", + "Depth = OperationNumber", + "CallerLine = (String) null", + "IsCompilation = NULL_BOOLEAN", + "InputSizeLong = NULL_LONG"), + qopl.updateView( + "EvalKey = EvaluationNumber + `:` + OperationNumber", + "ParentEvalKey = EvaluationNumber + (ParentOperationNumber == null ? `` : (`:` + ParentOperationNumber))", + "Exception = (String) null")) + .moveColumnsUp("EvalKey", "ParentEvalKey") + .moveColumnsDown("EvaluationNumber", "ParentEvaluationNumber", "OperationNumber", + "ParentOperationNumber"); + + return mergeWithAggKeys.tree("EvalKey", "ParentEvalKey"); + } + private static Table formatColumnsAsPct(final Table t, final String... cols) { final String[] formats = new String[cols.length]; for (int i = 0; i < cols.length; ++i) { @@ -323,11 +361,17 @@ private static Table formatColumnsAsPctUpdatePerformance(final Table updatePerfo } private static long getWorkerHeapSizeBytes() { - final OptionalLong opt = EngineMetrics.getProcessInfo().getMemoryInfo().heap().max(); - return opt.orElse(0); + return EngineMetrics.getProcessInfo().getMemoryInfo().heap().max().orElse(0); } private static String whereConditionForEvaluationNumber(final long evaluationNumber) { - return "EvaluationNumber = " + evaluationNumber + ""; + return "EvaluationNumber = " + evaluationNumber; + } + + private static Table maybeMoveColumnsUp(final Table source, final String... 
cols) {
+        return source.moveColumnsUp(Stream.of(cols)
+                .filter(columnName -> !ALLOWED_MISSING_COLUMN_NAMES.contains(columnName)
+                        || source.hasColumns(columnName))
+                .toArray(String[]::new));
     }
 }
diff --git a/props/configs/src/main/resources/defaultPackageFilters.qpr b/props/configs/src/main/resources/defaultPackageFilters.qpr
index 866e95bcd36..df0b4c41167 100644
--- a/props/configs/src/main/resources/defaultPackageFilters.qpr
+++ b/props/configs/src/main/resources/defaultPackageFilters.qpr
@@ -1,6 +1,10 @@
 java.
 sun.
-groovy.lang
-org.codehaus.groovy
+groovy.lang.
+org.codehaus.groovy.
 io.deephaven.
-io.deephaven.engine
+io.grpc.
+com.google.common.
+org.eclipse.
+jdk.internal.
+org.jpy.
diff --git a/py/server/deephaven/perfmon.py b/py/server/deephaven/perfmon.py
index ccf602a35b4..4f9b76c1483 100644
--- a/py/server/deephaven/perfmon.py
+++ b/py/server/deephaven/perfmon.py
@@ -11,7 +11,8 @@
 from deephaven import DHError
 from deephaven.jcompat import j_map_to_dict
-from deephaven.table import Table
+from deephaven.table import Table, TreeTable
+from deephaven.update_graph import auto_locking_ctx
 
 _JPerformanceQueries = jpy.get_type("io.deephaven.engine.table.impl.util.PerformanceQueries")
 _JMetricsManager = jpy.get_type("io.deephaven.util.metrics.MetricsManager")
@@ -95,6 +96,40 @@ def query_performance_log() -> Table:
     except Exception as e:
         raise DHError(e, "failed to obtain the query performance log table.") from e
 
+def query_operation_performance_tree_table() -> TreeTable:
+    """ Returns a tree table with Deephaven performance data for individual subqueries.
+
+    Returns:
+        a TreeTable
+
+    Raises:
+        DHError
+    """
+    try:
+        with auto_locking_ctx(query_performance_log()):
+            return TreeTable(j_tree_table=_JPerformanceQueries.queryOperationPerformanceAsTreeTable(),
+                             id_col="EvalKey", parent_col="ParentEvalKey")
+    except Exception as e:
+        raise DHError(e, "failed to obtain the query operation performance log as tree table.") from e
+
+
+def query_performance_tree_table() -> TreeTable:
+    """ Returns a tree table with Deephaven query performance data. Performance data for individual sub-operations as
+    a tree table is available from calling `query_operation_performance_tree_table`.
+
+    Returns:
+        a TreeTable
+
+    Raises:
+        DHError
+    """
+    try:
+        with auto_locking_ctx(query_performance_log()):
+            return TreeTable(j_tree_table=_JPerformanceQueries.queryPerformanceAsTreeTable(),
+                             id_col="EvaluationNumber", parent_col="ParentEvaluationNumber")
+    except Exception as e:
+        raise DHError(e, "failed to obtain the query performance log as tree table.") from e
+
 def update_performance_log() -> Table:
     """ Returns a table with Deephaven update performance data.
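
(Aside: the gRPC service changes in the remaining files all follow one lifecycle: create a recorder for the request, hold it open with try-with-resources around the setup work, attach it to the submitted export, and log the result once the query ends. A hedged, self-contained sketch of that shape; Recorder here is a hypothetical stand-in, not the QueryPerformanceRecorder API:)

```java
// Hedged sketch of the request-recording lifecycle used by the gRPC handlers;
// Recorder is an illustrative stand-in, not the Deephaven recorder API.
final class Recorder implements AutoCloseable {
    private final String description;
    private long startNanos;
    private long totalNanos;

    Recorder(final String description) {
        this.description = description;
    }

    Recorder start() { // analogous to startQuery()
        startNanos = System.nanoTime();
        return this;
    }

    @Override
    public void close() { // analogous to the SafeCloseable returned by startQuery()
        totalNanos += System.nanoTime() - startNanos;
    }

    void log(final Exception error) { // analogous to logging the query result
        System.out.println(description + " took " + totalNanos + "ns"
                + (error == null ? "" : " and failed: " + error.getMessage()));
    }

    public static void main(String[] args) {
        final Recorder recorder = new Recorder("FlightService#getFlightInfo(...)");
        Exception failure = null;
        try (final Recorder ignored = recorder.start()) {
            // resolve tickets and submit the export job here
        } catch (Exception e) {
            failure = e;
        }
        recorder.log(failure);
    }
}
```
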
diff --git a/py/server/tests/test_perfmon.py b/py/server/tests/test_perfmon.py index 121b774aea4..60b864cd696 100644 --- a/py/server/tests/test_perfmon.py +++ b/py/server/tests/test_perfmon.py @@ -7,7 +7,7 @@ from deephaven import empty_table from deephaven.perfmon import process_info_log, process_metrics_log, server_state_log, \ query_operation_performance_log, query_performance_log, update_performance_log, metrics_get_counters, \ - metrics_reset_counters + metrics_reset_counters, query_performance_tree_table, query_operation_performance_tree_table from deephaven.perfmon import query_update_performance, query_performance, query_operation_performance, server_state from tests.testbase import BaseTestCase @@ -62,6 +62,8 @@ def test_query_logs(self): self.assertTrue(log_table.to_string()) log_table = update_performance_log() self.assertTrue(log_table.to_string()) + log_table = query_performance_tree_table() + self.assertIsNotNone(log_table) def test_performance_queries(self): q = query_performance(1) @@ -72,6 +74,8 @@ def test_performance_queries(self): self.assertTrue(q.to_string()) q = query_update_performance(1) self.assertTrue(q.to_string()) + q = query_operation_performance_tree_table() + self.assertIsNotNone(q) if __name__ == '__main__': diff --git a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java index c2351dfeae2..73df0eead12 100644 --- a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java +++ b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java @@ -19,6 +19,8 @@ import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.BaseTable; import io.deephaven.engine.table.impl.QueryTable; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.util.BarrageMessage; import io.deephaven.engine.updategraph.UpdateGraph; import io.deephaven.extensions.barrage.BarragePerformanceLog; @@ -71,37 +73,44 @@ public static void DoGetCustom( final Flight.Ticket request, final StreamObserver observer) { - final SessionState.ExportObject> export = - ticketRouter.resolve(session, request, "request"); - - final BarragePerformanceLog.SnapshotMetricsHelper metrics = - new BarragePerformanceLog.SnapshotMetricsHelper(); - - final long queueStartTm = System.nanoTime(); - session.nonExport() - .require(export) - .onError(observer) - .submit(() -> { - metrics.queueNanos = System.nanoTime() - queueStartTm; - final BaseTable table = export.get(); - metrics.tableId = Integer.toHexString(System.identityHashCode(table)); - metrics.tableKey = BarragePerformanceLog.getKeyFor(table); - - // create an adapter for the response observer - final StreamObserver listener = - ArrowModule.provideListenerAdapter().adapt(observer); - - // push the schema to the listener - listener.onNext(streamGeneratorFactory.getSchemaView( - fbb -> BarrageUtil.makeTableSchemaPayload(fbb, DEFAULT_SNAPSHOT_DESER_OPTIONS, - table.getDefinition(), table.getAttributes()))); - - // shared code between `DoGet` and `BarrageSnapshotRequest` - BarrageUtil.createAndSendSnapshot(streamGeneratorFactory, table, null, null, false, - DEFAULT_SNAPSHOT_DESER_OPTIONS, listener, metrics); - - listener.onCompleted(); - }); + final String description = "FlightService#DoGet(table=" + ticketRouter.getLogNameFor(request, "table") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = 
QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject> tableExport = + ticketRouter.resolve(session, request, "table"); + + final BarragePerformanceLog.SnapshotMetricsHelper metrics = + new BarragePerformanceLog.SnapshotMetricsHelper(); + + final long queueStartTm = System.nanoTime(); + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(tableExport) + .onError(observer) + .submit(() -> { + metrics.queueNanos = System.nanoTime() - queueStartTm; + final BaseTable table = tableExport.get(); + metrics.tableId = Integer.toHexString(System.identityHashCode(table)); + metrics.tableKey = BarragePerformanceLog.getKeyFor(table); + + // create an adapter for the response observer + final StreamObserver listener = + ArrowModule.provideListenerAdapter().adapt(observer); + + // push the schema to the listener + listener.onNext(streamGeneratorFactory.getSchemaView( + fbb -> BarrageUtil.makeTableSchemaPayload(fbb, DEFAULT_SNAPSHOT_DESER_OPTIONS, + table.getDefinition(), table.getAttributes()))); + + // shared code between `DoGet` and `BarrageSnapshotRequest` + BarrageUtil.createAndSendSnapshot(streamGeneratorFactory, table, null, null, false, + DEFAULT_SNAPSHOT_DESER_OPTIONS, listener, metrics); + + listener.onCompleted(); + }); + } } /** @@ -478,69 +487,78 @@ public void handleMessage(@NotNull final BarrageProtoUtil.MessageInfo message) { final BarrageSnapshotRequest snapshotRequest = BarrageSnapshotRequest .getRootAsBarrageSnapshotRequest(message.app_metadata.msgPayloadAsByteBuffer()); - final SessionState.ExportObject> parent = - ticketRouter.resolve(session, snapshotRequest.ticketAsByteBuffer(), "ticket"); - - final BarragePerformanceLog.SnapshotMetricsHelper metrics = - new BarragePerformanceLog.SnapshotMetricsHelper(); - - final long queueStartTm = System.nanoTime(); - session.nonExport() - .require(parent) - .onError(listener) - .submit(() -> { - metrics.queueNanos = System.nanoTime() - queueStartTm; - final BaseTable table = parent.get(); - metrics.tableId = Integer.toHexString(System.identityHashCode(table)); - metrics.tableKey = BarragePerformanceLog.getKeyFor(table); - - if (table.isFailed()) { - throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, - "Table is already failed"); - } - - // push the schema to the listener - listener.onNext(streamGeneratorFactory.getSchemaView( - fbb -> BarrageUtil.makeTableSchemaPayload(fbb, - snapshotOptAdapter.adapt(snapshotRequest), - table.getDefinition(), table.getAttributes()))); - - // collect the viewport and columnsets (if provided) - final boolean hasColumns = snapshotRequest.columnsVector() != null; - final BitSet columns = - hasColumns ? BitSet.valueOf(snapshotRequest.columnsAsByteBuffer()) : null; - - final boolean hasViewport = snapshotRequest.viewportVector() != null; - RowSet viewport = - hasViewport - ? 
BarrageProtoUtil.toRowSet(snapshotRequest.viewportAsByteBuffer()) - : null; - - final boolean reverseViewport = snapshotRequest.reverseViewport(); - - // leverage common code for `DoGet` and `BarrageSnapshotOptions` - BarrageUtil.createAndSendSnapshot(streamGeneratorFactory, table, columns, viewport, - reverseViewport, snapshotOptAdapter.adapt(snapshotRequest), listener, metrics); - HalfClosedState newState = halfClosedState.updateAndGet(current -> { - switch (current) { - case DONT_CLOSE: - // record that we have finished sending - return HalfClosedState.FINISHED_SENDING; - case CLIENT_HALF_CLOSED: - // since streaming has now finished, and client already half-closed, time to - // half close from server - return HalfClosedState.CLOSED; - case FINISHED_SENDING: - case CLOSED: - throw new IllegalStateException("Can't finish streaming twice"); - default: - throw new IllegalStateException("Unknown state " + current); + final String description = "FlightService#DoExchange(snapshot, table=" + + ticketRouter.getLogNameFor(snapshotRequest.ticketAsByteBuffer(), "table") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject> tableExport = + ticketRouter.resolve(session, snapshotRequest.ticketAsByteBuffer(), "table"); + + final BarragePerformanceLog.SnapshotMetricsHelper metrics = + new BarragePerformanceLog.SnapshotMetricsHelper(); + + final long queueStartTm = System.nanoTime(); + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(tableExport) + .onError(listener) + .submit(() -> { + metrics.queueNanos = System.nanoTime() - queueStartTm; + final BaseTable table = tableExport.get(); + metrics.tableId = Integer.toHexString(System.identityHashCode(table)); + metrics.tableKey = BarragePerformanceLog.getKeyFor(table); + + if (table.isFailed()) { + throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, + "Table is already failed"); + } + + // push the schema to the listener + listener.onNext(streamGeneratorFactory.getSchemaView( + fbb -> BarrageUtil.makeTableSchemaPayload(fbb, + snapshotOptAdapter.adapt(snapshotRequest), + table.getDefinition(), table.getAttributes()))); + + // collect the viewport and columnsets (if provided) + final boolean hasColumns = snapshotRequest.columnsVector() != null; + final BitSet columns = + hasColumns ? BitSet.valueOf(snapshotRequest.columnsAsByteBuffer()) : null; + + final boolean hasViewport = snapshotRequest.viewportVector() != null; + RowSet viewport = + hasViewport + ? 
BarrageProtoUtil.toRowSet(snapshotRequest.viewportAsByteBuffer()) + : null; + + final boolean reverseViewport = snapshotRequest.reverseViewport(); + + // leverage common code for `DoGet` and `BarrageSnapshotOptions` + BarrageUtil.createAndSendSnapshot(streamGeneratorFactory, table, columns, viewport, + reverseViewport, snapshotOptAdapter.adapt(snapshotRequest), listener, + metrics); + HalfClosedState newState = halfClosedState.updateAndGet(current -> { + switch (current) { + case DONT_CLOSE: + // record that we have finished sending + return HalfClosedState.FINISHED_SENDING; + case CLIENT_HALF_CLOSED: + // since streaming has now finished, and client already half-closed, + // time to half close from server + return HalfClosedState.CLOSED; + case FINISHED_SENDING: + case CLOSED: + throw new IllegalStateException("Can't finish streaming twice"); + default: + throw new IllegalStateException("Unknown state " + current); + } + }); + if (newState == HalfClosedState.CLOSED) { + listener.onCompleted(); } }); - if (newState == HalfClosedState.CLOSED) { - listener.onCompleted(); - } - }); + } } } @@ -623,14 +641,23 @@ public void handleMessage(@NotNull final MessageInfo message) { preExportSubscriptions = new ArrayDeque<>(); preExportSubscriptions.add(subscriptionRequest); - final SessionState.ExportObject parent = - ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "ticket"); - - synchronized (this) { - onExportResolvedContinuation = session.nonExport() - .require(parent) - .onErrorHandler(DoExchangeMarshaller.this::onError) - .submit(() -> onExportResolved(parent)); + + final String description = "FlightService#DoExchange(subscription, table=" + + ticketRouter.getLogNameFor(subscriptionRequest.ticketAsByteBuffer(), "table") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject table = + ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "table"); + + synchronized (this) { + onExportResolvedContinuation = session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(table) + .onErrorHandler(DoExchangeMarshaller.this::onError) + .submit(() -> onExportResolved(table)); + } } } } diff --git a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java index 66a7de0d37a..ca0b55a69b5 100644 --- a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java @@ -10,6 +10,9 @@ import io.deephaven.auth.AuthenticationException; import io.deephaven.auth.AuthenticationRequestHandler; import io.deephaven.auth.BasicAuthMarshaller; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; +import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.extensions.barrage.BarrageStreamGenerator; import io.deephaven.extensions.barrage.util.GrpcUtil; import io.deephaven.internal.log.LoggerFactory; @@ -22,6 +25,8 @@ import io.deephaven.server.session.SessionState; import io.deephaven.server.session.TicketRouter; import io.deephaven.auth.AuthContext; +import io.deephaven.util.SafeCloseable; +import io.grpc.StatusRuntimeException; import 
io.grpc.stub.StreamObserver; import org.apache.arrow.flight.impl.Flight; import org.apache.arrow.flight.impl.FlightServiceGrpc; @@ -170,30 +175,43 @@ public void getFlightInfo( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getOptionalSession(); - final SessionState.ExportObject export = - ticketRouter.flightInfoFor(session, request, "request"); - - if (session != null) { - session.nonExport() - .require(export) - .onError(responseObserver) - .submit(() -> { - responseObserver.onNext(export.get()); - responseObserver.onCompleted(); - }); - } else { + final String description = "FlightService#getFlightInfo(request=" + request + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session == null ? null : session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject export = + ticketRouter.flightInfoFor(session, request, "request"); + + if (session != null) { + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(export) + .onError(responseObserver) + .submit(() -> { + responseObserver.onNext(export.get()); + responseObserver.onCompleted(); + }); + return; + } + + StatusRuntimeException exception = null; if (export.tryRetainReference()) { try { if (export.getState() == ExportNotification.State.EXPORTED) { - responseObserver.onNext(export.get()); - responseObserver.onCompleted(); + GrpcUtil.safelyOnNext(responseObserver, export.get()); + GrpcUtil.safelyComplete(responseObserver); } } finally { export.dropReference(); } } else { - responseObserver.onError( - Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info")); + exception = Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info"); + GrpcUtil.safelyError(responseObserver, exception); + } + + if (queryPerformanceRecorder.endQuery() || exception != null) { + EngineMetrics.getInstance().logQueryProcessingResults(queryPerformanceRecorder, exception); } } } @@ -204,33 +222,48 @@ public void getSchema( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getOptionalSession(); - final SessionState.ExportObject export = - ticketRouter.flightInfoFor(session, request, "request"); + final String description = "FlightService#getSchema(request=" + request + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session == null ? 
null : session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject export = + ticketRouter.flightInfoFor(session, request, "request"); - if (session != null) { - session.nonExport() - .require(export) - .onError(responseObserver) - .submit(() -> { - responseObserver.onNext(Flight.SchemaResult.newBuilder() + if (session != null) { + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(export) + .onError(responseObserver) + .submit(() -> { + responseObserver.onNext(Flight.SchemaResult.newBuilder() + .setSchema(export.get().getSchema()) + .build()); + responseObserver.onCompleted(); + }); + return; + } + + StatusRuntimeException exception = null; + if (export.tryRetainReference()) { + try { + if (export.getState() == ExportNotification.State.EXPORTED) { + GrpcUtil.safelyOnNext(responseObserver, Flight.SchemaResult.newBuilder() .setSchema(export.get().getSchema()) .build()); - responseObserver.onCompleted(); - }); - } else if (export.tryRetainReference()) { - try { - if (export.getState() == ExportNotification.State.EXPORTED) { - responseObserver.onNext(Flight.SchemaResult.newBuilder() - .setSchema(export.get().getSchema()) - .build()); - responseObserver.onCompleted(); + GrpcUtil.safelyComplete(responseObserver); + } + } finally { + export.dropReference(); } - } finally { - export.dropReference(); + } else { + exception = Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info"); + responseObserver.onError(exception); + } + + if (queryPerformanceRecorder.endQuery() || exception != null) { + EngineMetrics.getInstance().logQueryProcessingResults(queryPerformanceRecorder, exception); } - } else { - responseObserver.onError( - Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info")); } } diff --git a/server/src/main/java/io/deephaven/server/barrage/BarrageMessageProducer.java b/server/src/main/java/io/deephaven/server/barrage/BarrageMessageProducer.java index d06c189d4be..c8ad8192fed 100644 --- a/server/src/main/java/io/deephaven/server/barrage/BarrageMessageProducer.java +++ b/server/src/main/java/io/deephaven/server/barrage/BarrageMessageProducer.java @@ -151,7 +151,7 @@ public Operation( @Override public String getDescription() { - return "BarrageMessageProducer(" + updateIntervalMs + ")"; + return "BarrageMessageProducer(" + updateIntervalMs + "," + System.identityHashCode(parent) + ")"; } @Override diff --git a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java index e99e14f1efb..3fec26cab7b 100644 --- a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java @@ -8,6 +8,8 @@ import io.deephaven.base.LockFreeArrayQueue; import io.deephaven.configuration.Configuration; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.util.RuntimeMemory; import io.deephaven.engine.table.impl.util.RuntimeMemory.Sample; import io.deephaven.engine.updategraph.DynamicNode; @@ -35,6 +37,7 @@ import io.deephaven.server.session.SessionState.ExportBuilder; import io.deephaven.server.session.TicketRouter; import 
io.deephaven.server.util.Scheduler; +import io.deephaven.util.SafeCloseable; import io.grpc.stub.ServerCallStreamObserver; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; @@ -164,29 +167,38 @@ public void executeCommand( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No consoleId supplied"); } - SessionState.ExportObject exportedConsole = - ticketRouter.resolve(session, consoleId, "consoleId"); - session.nonExport() - .requiresSerialQueue() - .require(exportedConsole) - .onError(responseObserver) - .submit(() -> { - ScriptSession scriptSession = exportedConsole.get(); - ScriptSession.Changes changes = scriptSession.evaluateScript(request.getCode()); - ExecuteCommandResponse.Builder diff = ExecuteCommandResponse.newBuilder(); - FieldsChangeUpdate.Builder fieldChanges = FieldsChangeUpdate.newBuilder(); - changes.created.entrySet() - .forEach(entry -> fieldChanges.addCreated(makeVariableDefinition(entry))); - changes.updated.entrySet() - .forEach(entry -> fieldChanges.addUpdated(makeVariableDefinition(entry))); - changes.removed.entrySet() - .forEach(entry -> fieldChanges.addRemoved(makeVariableDefinition(entry))); - if (changes.error != null) { - diff.setErrorMessage(Throwables.getStackTraceAsString(changes.error)); - log.error().append("Error running script: ").append(changes.error).endl(); - } - safelyComplete(responseObserver, diff.setChanges(fieldChanges).build()); - }); + final String description = "ConsoleService#executeCommand(console=" + + ticketRouter.getLogNameFor(consoleId, "consoleId") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject exportedConsole = + ticketRouter.resolve(session, consoleId, "consoleId"); + + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder) + .requiresSerialQueue() + .require(exportedConsole) + .onError(responseObserver) + .submit(() -> { + ScriptSession scriptSession = exportedConsole.get(); + ScriptSession.Changes changes = scriptSession.evaluateScript(request.getCode()); + ExecuteCommandResponse.Builder diff = ExecuteCommandResponse.newBuilder(); + FieldsChangeUpdate.Builder fieldChanges = FieldsChangeUpdate.newBuilder(); + changes.created.entrySet() + .forEach(entry -> fieldChanges.addCreated(makeVariableDefinition(entry))); + changes.updated.entrySet() + .forEach(entry -> fieldChanges.addUpdated(makeVariableDefinition(entry))); + changes.removed.entrySet() + .forEach(entry -> fieldChanges.addRemoved(makeVariableDefinition(entry))); + if (changes.error != null) { + diff.setErrorMessage(Throwables.getStackTraceAsString(changes.error)); + log.error().append("Error running script: ").append(changes.error).endl(); + } + safelyComplete(responseObserver, diff.setChanges(fieldChanges).build()); + }); + } } @Override @@ -236,36 +248,48 @@ public void bindTableToVariable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - Ticket tableId = request.getTableId(); + final Ticket tableId = request.getTableId(); if (tableId.getTicket().isEmpty()) { throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No source tableId supplied"); } - final SessionState.ExportObject exportedTable = ticketRouter.resolve(session, tableId, "tableId"); - final SessionState.ExportObject 
<ScriptSession> exportedConsole; - ExportBuilder exportBuilder = session.nonExport() - .requiresSerialQueue() - .onError(responseObserver); + final String description = "ConsoleService#bindTableToVariable(table=" + + ticketRouter.getLogNameFor(tableId, "tableId") + ", variableName=" + request.getVariableName() + + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject<Table>
exportedTable = + ticketRouter.resolve(session, tableId, "tableId"); + + final SessionState.ExportObject exportedConsole; + + ExportBuilder exportBuilder = session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder) + .requiresSerialQueue() + .onError(responseObserver); + + if (request.hasConsoleId()) { + exportedConsole = ticketRouter.resolve(session, request.getConsoleId(), "consoleId"); + exportBuilder.require(exportedTable, exportedConsole); + } else { + exportedConsole = null; + exportBuilder.require(exportedTable); + } - if (request.hasConsoleId()) { - exportedConsole = ticketRouter.resolve(session, request.getConsoleId(), "consoleId"); - exportBuilder.require(exportedTable, exportedConsole); - } else { - exportedConsole = null; - exportBuilder.require(exportedTable); + exportBuilder.submit(() -> { + ScriptSession scriptSession = + exportedConsole != null ? exportedConsole.get() : scriptSessionProvider.get(); + Table table = exportedTable.get(); + scriptSession.setVariable(request.getVariableName(), table); + if (DynamicNode.notDynamicOrIsRefreshing(table)) { + scriptSession.manage(table); + } + responseObserver.onNext(BindTableToVariableResponse.getDefaultInstance()); + responseObserver.onCompleted(); + }); } - - exportBuilder.submit(() -> { - ScriptSession scriptSession = - exportedConsole != null ? exportedConsole.get() : scriptSessionProvider.get(); - Table table = exportedTable.get(); - scriptSession.setVariable(request.getVariableName(), table); - if (DynamicNode.notDynamicOrIsRefreshing(table)) { - scriptSession.manage(table); - } - responseObserver.onNext(BindTableToVariableResponse.getDefaultInstance()); - responseObserver.onCompleted(); - }); } @Override diff --git a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java index 8824b4f358e..96d5d018b09 100644 --- a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java @@ -18,6 +18,8 @@ import io.deephaven.engine.table.impl.AbsoluteSortColumnConventions; import io.deephaven.engine.table.impl.BaseGridAttributes; import io.deephaven.engine.table.impl.hierarchical.RollupTableImpl; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.select.WhereFilter; import io.deephaven.extensions.barrage.util.ExportUtil; import io.deephaven.internal.log.LoggerFactory; @@ -31,6 +33,7 @@ import io.deephaven.server.table.ops.AggregationAdapter; import io.deephaven.server.table.ops.FilterTableGrpcImpl; import io.deephaven.server.table.ops.filter.FilterFactory; +import io.deephaven.util.SafeCloseable; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -73,31 +76,39 @@ public void rollup( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportObject
<Table> sourceTableExport = ticketRouter.resolve( - session, request.getSourceTableId(), "rollup.sourceTableId"); - - session.newExport(request.getResultRollupTableId(), "rollup.resultRollupTableId") - .require(sourceTableExport) - .onError(responseObserver) - .submit(() -> { - final Table sourceTable = sourceTableExport.get(); - - authWiring.checkPermissionRollup(session.getAuthContext(), request, List.of(sourceTable)); - - final Collection<? extends Aggregation> aggregations = request.getAggregationsList().stream() - .map(AggregationAdapter::adapt) - .collect(Collectors.toList()); - final boolean includeConstituents = request.getIncludeConstituents(); - final Collection<ColumnName> groupByColumns = request.getGroupByColumnsList().stream() - .map(ColumnName::of) - .collect(Collectors.toList()); - final RollupTable result = sourceTable.rollup( - aggregations, includeConstituents, groupByColumns); - - final RollupTable transformedResult = authTransformation.transform(result); - safelyComplete(responseObserver, RollupResponse.getDefaultInstance()); - return transformedResult; - }); + final String description = "HierarchicalTableService#rollup(table=" + + ticketRouter.getLogNameFor(request.getSourceTableId(), "sourceTableId") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject<Table>
sourceTableExport = + ticketRouter.resolve(session, request.getSourceTableId(), "sourceTableId"); + + session.newExport(request.getResultRollupTableId(), "resultRollupTableId") + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(sourceTableExport) + .onError(responseObserver) + .submit(() -> { + final Table sourceTable = sourceTableExport.get(); + + authWiring.checkPermissionRollup(session.getAuthContext(), request, List.of(sourceTable)); + + final Collection<? extends Aggregation> aggregations = request.getAggregationsList().stream() + .map(AggregationAdapter::adapt) + .collect(Collectors.toList()); + final boolean includeConstituents = request.getIncludeConstituents(); + final Collection<ColumnName> groupByColumns = request.getGroupByColumnsList().stream() + .map(ColumnName::of) + .collect(Collectors.toList()); + final RollupTable result = sourceTable.rollup( + aggregations, includeConstituents, groupByColumns); + + final RollupTable transformedResult = authTransformation.transform(result); + safelyComplete(responseObserver, RollupResponse.getDefaultInstance()); + return transformedResult; + }); + } } private static void validate(@NotNull final RollupRequest request) {
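Every service entry point in this change follows the same instrumentation shape: build a log-friendly description, open a new recorder for the RPC, wrap the synchronous portion in try-with-resources, and hand the recorder to the export builder so the deferred work is attributed to the same query. A minimal sketch of that shape, using only APIs that appear in this diff (the service name, request accessors, and doWork are illustrative placeholders, not real Deephaven symbols):

    final String description = "MyService#myMethod(table=" +
            ticketRouter.getLogNameFor(request.getSourceId(), "sourceId") + ")";
    final QueryPerformanceRecorder recorder = QueryPerformanceRecorder.newQuery(
            description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY);
    try (final SafeCloseable ignored = recorder.startQuery()) {
        // ticket resolution is timed against this query while the gRPC thread holds it open
        final SessionState.ExportObject<Table> source =
                ticketRouter.resolve(session, request.getSourceId(), "sourceId");
        session.newExport(request.getResultId(), "resultId")
                .queryPerformanceRecorder(recorder) // suspended at submit, resumed by the export runner
                .require(source)
                .onError(responseObserver)
                .submit(() -> doWork(source.get()));
    }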
@@ -117,35 +128,43 @@ public void tree( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportObject<Table> sourceTableExport = ticketRouter.resolve( - session, request.getSourceTableId(), "tree.sourceTableId"); + final String description = "HierarchicalTableService#tree(table=" + + ticketRouter.getLogNameFor(request.getSourceTableId(), "sourceTableId") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); - session.newExport(request.getResultTreeTableId(), "tree.resultTreeTableId") - .require(sourceTableExport) - .onError(responseObserver) - .submit(() -> { - final Table sourceTable = sourceTableExport.get(); + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject<Table>
sourceTableExport = + ticketRouter.resolve(session, request.getSourceTableId(), "sourceTableId"); - authWiring.checkPermissionTree(session.getAuthContext(), request, List.of(sourceTable)); + session.newExport(request.getResultTreeTableId(), "resultTreeTableId") + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(sourceTableExport) + .onError(responseObserver) + .submit(() -> { + final Table sourceTable = sourceTableExport.get(); - final ColumnName identifierColumn = ColumnName.of(request.getIdentifierColumn()); - final ColumnName parentIdentifierColumn = ColumnName.of(request.getParentIdentifierColumn()); + authWiring.checkPermissionTree(session.getAuthContext(), request, List.of(sourceTable)); - final Table sourceTableToUse; - if (request.getPromoteOrphans()) { - sourceTableToUse = TreeTable.promoteOrphans( - sourceTable, identifierColumn.name(), parentIdentifierColumn.name()); - } else { - sourceTableToUse = sourceTable; - } + final ColumnName identifierColumn = ColumnName.of(request.getIdentifierColumn()); + final ColumnName parentIdentifierColumn = ColumnName.of(request.getParentIdentifierColumn()); - final TreeTable result = sourceTableToUse.tree( - identifierColumn.name(), parentIdentifierColumn.name()); + final Table sourceTableToUse; + if (request.getPromoteOrphans()) { + sourceTableToUse = TreeTable.promoteOrphans( + sourceTable, identifierColumn.name(), parentIdentifierColumn.name()); + } else { + sourceTableToUse = sourceTable; + } - final TreeTable transformedResult = authTransformation.transform(result); - safelyComplete(responseObserver, TreeResponse.getDefaultInstance()); - return transformedResult; - }); + final TreeTable result = sourceTableToUse.tree( + identifierColumn.name(), parentIdentifierColumn.name()); + + final TreeTable transformedResult = authTransformation.transform(result); + safelyComplete(responseObserver, TreeResponse.getDefaultInstance()); + return transformedResult; + }); + } } private static void validate(@NotNull final TreeRequest request) { @@ -166,79 +185,87 @@ public void apply( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportObject> inputHierarchicalTableExport = ticketRouter.resolve( - session, request.getInputHierarchicalTableId(), "apply.inputHierarchicalTableId"); - - session.newExport(request.getResultHierarchicalTableId(), "apply.resultHierarchicalTableId") - .require(inputHierarchicalTableExport) - .onError(responseObserver) - .submit(() -> { - final HierarchicalTable inputHierarchicalTable = inputHierarchicalTableExport.get(); - - authWiring.checkPermissionApply(session.getAuthContext(), request, - List.of(inputHierarchicalTable.getSource())); - - if (request.getFiltersCount() == 0 && request.getSortsCount() == 0) { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No operations specified"); - } - final Collection finishedConditions = request.getFiltersCount() == 0 - ? null - : FilterTableGrpcImpl.finishConditions(request.getFiltersList()); - final Collection translatedSorts = - translateAndValidateSorts(request, (BaseGridAttributes) inputHierarchicalTable); - - final HierarchicalTable result; - if (inputHierarchicalTable instanceof RollupTable) { - RollupTable rollupTable = (RollupTable) inputHierarchicalTable; - // Rollups only support filtering on the group-by columns, so we can safely use the - // aggregated node definition here. 
- final TableDefinition nodeDefinition = - rollupTable.getNodeDefinition(RollupTable.NodeType.Aggregated); - if (finishedConditions != null) { - final Collection filters = - makeWhereFilters(finishedConditions, nodeDefinition); - RollupTableImpl.initializeAndValidateFilters( - rollupTable.getSource(), - rollupTable.getGroupByColumns(), - filters, - message -> Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, message)); - rollupTable = rollupTable.withFilter(Filter.and(filters)); + final String description = "HierarchicalTableService#apply(table=" + + ticketRouter.getLogNameFor(request.getInputHierarchicalTableId(), "inputHierarchicalTableId") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject> inputHierarchicalTableExport = + ticketRouter.resolve(session, request.getInputHierarchicalTableId(), "inputHierarchicalTableId"); + + session.newExport(request.getResultHierarchicalTableId(), "resultHierarchicalTableId") + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(inputHierarchicalTableExport) + .onError(responseObserver) + .submit(() -> { + final HierarchicalTable inputHierarchicalTable = inputHierarchicalTableExport.get(); + + authWiring.checkPermissionApply(session.getAuthContext(), request, + List.of(inputHierarchicalTable.getSource())); + + if (request.getFiltersCount() == 0 && request.getSortsCount() == 0) { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No operations specified"); } - if (translatedSorts != null) { - RollupTable.NodeOperationsRecorder aggregatedSorts = - rollupTable.makeNodeOperationsRecorder(RollupTable.NodeType.Aggregated); - aggregatedSorts = aggregatedSorts.sort(translatedSorts); - if (rollupTable.includesConstituents()) { - final RollupTable.NodeOperationsRecorder constituentSorts = rollupTable - .translateAggregatedNodeOperationsForConstituentNodes(aggregatedSorts); - rollupTable = rollupTable.withNodeOperations(aggregatedSorts, constituentSorts); - } else { - rollupTable = rollupTable.withNodeOperations(aggregatedSorts); + final Collection finishedConditions = request.getFiltersCount() == 0 + ? null + : FilterTableGrpcImpl.finishConditions(request.getFiltersList()); + final Collection translatedSorts = + translateAndValidateSorts(request, (BaseGridAttributes) inputHierarchicalTable); + + final HierarchicalTable result; + if (inputHierarchicalTable instanceof RollupTable) { + RollupTable rollupTable = (RollupTable) inputHierarchicalTable; + // Rollups only support filtering on the group-by columns, so we can safely use the + // aggregated node definition here. 
+ final TableDefinition nodeDefinition = + rollupTable.getNodeDefinition(RollupTable.NodeType.Aggregated); + if (finishedConditions != null) { + final Collection filters = + makeWhereFilters(finishedConditions, nodeDefinition); + RollupTableImpl.initializeAndValidateFilters( + rollupTable.getSource(), + rollupTable.getGroupByColumns(), + filters, + message -> Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, message)); + rollupTable = rollupTable.withFilter(Filter.and(filters)); } + if (translatedSorts != null) { + RollupTable.NodeOperationsRecorder aggregatedSorts = + rollupTable.makeNodeOperationsRecorder(RollupTable.NodeType.Aggregated); + aggregatedSorts = aggregatedSorts.sort(translatedSorts); + if (rollupTable.includesConstituents()) { + final RollupTable.NodeOperationsRecorder constituentSorts = rollupTable + .translateAggregatedNodeOperationsForConstituentNodes(aggregatedSorts); + rollupTable = rollupTable.withNodeOperations(aggregatedSorts, constituentSorts); + } else { + rollupTable = rollupTable.withNodeOperations(aggregatedSorts); + } + } + result = rollupTable; + } else if (inputHierarchicalTable instanceof TreeTable) { + TreeTable treeTable = (TreeTable) inputHierarchicalTable; + final TableDefinition nodeDefinition = treeTable.getNodeDefinition(); + if (finishedConditions != null) { + treeTable = treeTable + .withFilter(Filter.and(makeWhereFilters(finishedConditions, nodeDefinition))); + } + if (translatedSorts != null) { + TreeTable.NodeOperationsRecorder treeSorts = treeTable.makeNodeOperationsRecorder(); + treeSorts = treeSorts.sort(translatedSorts); + treeTable = treeTable.withNodeOperations(treeSorts); + } + result = treeTable; + } else { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Input is not a supported HierarchicalTable type"); } - result = rollupTable; - } else if (inputHierarchicalTable instanceof TreeTable) { - TreeTable treeTable = (TreeTable) inputHierarchicalTable; - final TableDefinition nodeDefinition = treeTable.getNodeDefinition(); - if (finishedConditions != null) { - treeTable = treeTable - .withFilter(Filter.and(makeWhereFilters(finishedConditions, nodeDefinition))); - } - if (translatedSorts != null) { - TreeTable.NodeOperationsRecorder treeSorts = treeTable.makeNodeOperationsRecorder(); - treeSorts = treeSorts.sort(translatedSorts); - treeTable = treeTable.withNodeOperations(treeSorts); - } - result = treeTable; - } else { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Input is not a supported HierarchicalTable type"); - } - - final HierarchicalTable transformedResult = authTransformation.transform(result); - safelyComplete(responseObserver, HierarchicalTableApplyResponse.getDefaultInstance()); - return transformedResult; - }); + + final HierarchicalTable transformedResult = authTransformation.transform(result); + safelyComplete(responseObserver, HierarchicalTableApplyResponse.getDefaultInstance()); + return transformedResult; + }); + } } private static void validate(@NotNull final HierarchicalTableApplyRequest request) { @@ -315,9 +342,6 @@ public void view( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportBuilder resultExportBuilder = - session.newExport(request.getResultViewId(), "view.resultViewId"); - final boolean usedExisting; final Ticket targetTicket; switch (request.getTargetCase()) { @@ -331,65 +355,78 @@ public void view( break; case TARGET_NOT_SET: default: - throw new IllegalStateException(); - } - final SessionState.ExportObject targetExport = 
ticketRouter.resolve( - session, targetTicket, "view.target"); - - final SessionState.ExportObject<Table>
keyTableExport; - if (request.hasExpansions()) { - keyTableExport = ticketRouter.resolve( - session, request.getExpansions().getKeyTableId(), "view.expansions.keyTableId"); - resultExportBuilder.require(targetExport, keyTableExport); - } else { - keyTableExport = null; - resultExportBuilder.require(targetExport); + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No target specified"); } - resultExportBuilder.onError(responseObserver) - .submit(() -> { - final Table keyTable = keyTableExport == null ? null : keyTableExport.get(); - final Object target = targetExport.get(); - final HierarchicalTableView targetExistingView = usedExisting - ? (HierarchicalTableView) target - : null; - final HierarchicalTable targetHierarchicalTable = usedExisting - ? targetExistingView.getHierarchicalTable() - : (HierarchicalTable) target; - - authWiring.checkPermissionView(session.getAuthContext(), request, keyTable == null - ? List.of(targetHierarchicalTable.getSource()) - : List.of(keyTable, targetHierarchicalTable.getSource())); - - final HierarchicalTableView result; - if (usedExisting) { - if (keyTable != null) { - result = HierarchicalTableView.makeFromExistingView( - targetExistingView, - keyTable, - request.getExpansions().hasKeyTableActionColumn() - ? ColumnName.of(request.getExpansions().getKeyTableActionColumn()) - : null); - } else { - result = HierarchicalTableView.makeFromExistingView(targetExistingView); - } - } else { - if (keyTable != null) { - result = HierarchicalTableView.makeFromHierarchicalTable( - targetHierarchicalTable, - keyTable, - request.getExpansions().hasKeyTableActionColumn() - ? ColumnName.of(request.getExpansions().getKeyTableActionColumn()) - : null); + final String description = "HierarchicalTableService#view(table=" + + ticketRouter.getLogNameFor(targetTicket, "targetTableId") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportBuilder resultExportBuilder = + session.newExport(request.getResultViewId(), "resultViewId"); + + final SessionState.ExportObject targetExport = + ticketRouter.resolve(session, targetTicket, "targetTableId"); + + final SessionState.ExportObject
keyTableExport; + if (request.hasExpansions()) { + keyTableExport = ticketRouter.resolve( + session, request.getExpansions().getKeyTableId(), "expansions.keyTableId"); + resultExportBuilder.require(targetExport, keyTableExport); + } else { + keyTableExport = null; + resultExportBuilder.require(targetExport); + } + + resultExportBuilder + .queryPerformanceRecorder(queryPerformanceRecorder) + .onError(responseObserver) + .submit(() -> { + final Table keyTable = keyTableExport == null ? null : keyTableExport.get(); + final Object target = targetExport.get(); + final HierarchicalTableView targetExistingView = usedExisting + ? (HierarchicalTableView) target + : null; + final HierarchicalTable targetHierarchicalTable = usedExisting + ? targetExistingView.getHierarchicalTable() + : (HierarchicalTable) target; + + authWiring.checkPermissionView(session.getAuthContext(), request, keyTable == null + ? List.of(targetHierarchicalTable.getSource()) + : List.of(keyTable, targetHierarchicalTable.getSource())); + + final HierarchicalTableView result; + if (usedExisting) { + if (keyTable != null) { + result = HierarchicalTableView.makeFromExistingView( + targetExistingView, + keyTable, + request.getExpansions().hasKeyTableActionColumn() + ? ColumnName.of(request.getExpansions().getKeyTableActionColumn()) + : null); + } else { + result = HierarchicalTableView.makeFromExistingView(targetExistingView); + } } else { - result = HierarchicalTableView.makeFromHierarchicalTable(targetHierarchicalTable); + if (keyTable != null) { + result = HierarchicalTableView.makeFromHierarchicalTable( + targetHierarchicalTable, + keyTable, + request.getExpansions().hasKeyTableActionColumn() + ? ColumnName.of(request.getExpansions().getKeyTableActionColumn()) + : null); + } else { + result = HierarchicalTableView.makeFromHierarchicalTable(targetHierarchicalTable); + } } - } - final HierarchicalTableView transformedResult = authTransformation.transform(result); - safelyComplete(responseObserver, HierarchicalTableViewResponse.getDefaultInstance()); - return transformedResult; - }); + final HierarchicalTableView transformedResult = authTransformation.transform(result); + safelyComplete(responseObserver, HierarchicalTableViewResponse.getDefaultInstance()); + return transformedResult; + }); + } } private static void validate(@NotNull final HierarchicalTableViewRequest request) { @@ -421,24 +458,32 @@ public void exportSource( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportObject> hierarchicalTableExport = ticketRouter.resolve( - session, request.getHierarchicalTableId(), "exportSource.hierarchicalTableId"); - - session.newExport(request.getResultTableId(), "exportSource.resultTableId") - .require(hierarchicalTableExport) - .onError(responseObserver) - .submit(() -> { - final HierarchicalTable hierarchicalTable = hierarchicalTableExport.get(); - - final Table result = hierarchicalTable.getSource(); - authWiring.checkPermissionExportSource(session.getAuthContext(), request, List.of(result)); - - final Table transformedResult = authTransformation.transform(result); - final ExportedTableCreationResponse response = - ExportUtil.buildTableCreationResponse(request.getResultTableId(), transformedResult); - safelyComplete(responseObserver, response); - return transformedResult; - }); + final String description = "HierarchicalTableService#exportSource(table=" + + ticketRouter.getLogNameFor(request.getHierarchicalTableId(), "hierarchicalTableId") + ")"; + final QueryPerformanceRecorder 
queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject> hierarchicalTableExport = + ticketRouter.resolve(session, request.getHierarchicalTableId(), "hierarchicalTableId"); + + session.newExport(request.getResultTableId(), "resultTableId") + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(hierarchicalTableExport) + .onError(responseObserver) + .submit(() -> { + final HierarchicalTable hierarchicalTable = hierarchicalTableExport.get(); + + final Table result = hierarchicalTable.getSource(); + authWiring.checkPermissionExportSource(session.getAuthContext(), request, List.of(result)); + + final Table transformedResult = authTransformation.transform(result); + final ExportedTableCreationResponse response = + ExportUtil.buildTableCreationResponse(request.getResultTableId(), transformedResult); + safelyComplete(responseObserver, response); + return transformedResult; + }); + } } private static void validate(@NotNull final HierarchicalTableSourceExportRequest request) { diff --git a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java index 56e56388eb3..9b0fa5560d8 100644 --- a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java @@ -8,6 +8,8 @@ import io.deephaven.base.verify.Assert; import io.deephaven.engine.liveness.LivenessScope; import io.deephaven.engine.liveness.LivenessScopeStack; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.extensions.barrage.util.GrpcUtil; import io.deephaven.plugin.type.ObjectCommunicationException; import io.deephaven.plugin.type.ObjectType; @@ -257,55 +259,65 @@ public void fetchObject( if (request.getSourceId().getTicket().getTicket().isEmpty()) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No ticket supplied"); } - final SessionState.ExportObject object = ticketRouter.resolve( - session, request.getSourceId().getTicket(), "sourceId"); - session.nonExport() - .require(object) - .onError(responseObserver) - .submit(() -> { - final Object o = object.get(); - ObjectType objectTypeInstance = getObjectTypeInstance(type, o); - - AtomicReference singleResponse = new AtomicReference<>(); - AtomicBoolean isClosed = new AtomicBoolean(false); - StreamObserver wrappedResponseObserver = new StreamObserver<>() { - @Override - public void onNext(StreamResponse value) { - singleResponse.set(FetchObjectResponse.newBuilder() - .setType(type) - .setData(value.getData().getPayload()) - .addAllTypedExportIds(value.getData().getExportedReferencesList()) - .build()); - } - @Override - public void onError(Throwable t) { - responseObserver.onError(t); - } + final String description = "ObjectService#fetchObject(object=" + + ticketRouter.getLogNameFor(request.getSourceId().getTicket(), "sourceId") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject object = + ticketRouter.resolve(session, 
request.getSourceId().getTicket(), "sourceId"); + + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(object) + .onError(responseObserver) + .submit(() -> { + final Object o = object.get(); + ObjectType objectTypeInstance = getObjectTypeInstance(type, o); + + AtomicReference singleResponse = new AtomicReference<>(); + AtomicBoolean isClosed = new AtomicBoolean(false); + StreamObserver wrappedResponseObserver = new StreamObserver<>() { + @Override + public void onNext(StreamResponse value) { + singleResponse.set(FetchObjectResponse.newBuilder() + .setType(type) + .setData(value.getData().getPayload()) + .addAllTypedExportIds(value.getData().getExportedReferencesList()) + .build()); + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } - @Override - public void onCompleted() { - isClosed.set(true); + @Override + public void onCompleted() { + isClosed.set(true); + } + }; + PluginMessageSender connection = new PluginMessageSender(wrappedResponseObserver, session); + objectTypeInstance.clientConnection(o, connection); + + FetchObjectResponse message = singleResponse.get(); + if (message == null) { + connection.onClose(); + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Plugin didn't send a response before returning from clientConnection()"); } - }; - PluginMessageSender connection = new PluginMessageSender(wrappedResponseObserver, session); - objectTypeInstance.clientConnection(o, connection); - - FetchObjectResponse message = singleResponse.get(); - if (message == null) { - connection.onClose(); - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Plugin didn't send a response before returning from clientConnection()"); - } - if (!isClosed.get()) { - connection.onClose(); - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Plugin didn't close response, use MessageStream instead for this object"); - } - GrpcUtil.safelyComplete(responseObserver, message); + if (!isClosed.get()) { + connection.onClose(); + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Plugin didn't close response, use MessageStream instead for this object"); + } + GrpcUtil.safelyComplete(responseObserver, message); - return null; - }); + return null; + }); + } } @Override diff --git a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java index 91effc96849..87590cb6de6 100644 --- a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java @@ -7,6 +7,8 @@ import io.deephaven.auth.codegen.impl.PartitionedTableServiceContextualAuthWiring; import io.deephaven.engine.table.PartitionedTable; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; import io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse; @@ -18,6 +20,7 @@ import io.deephaven.proto.util.Exceptions; import io.deephaven.server.auth.AuthorizationProvider; import io.deephaven.server.session.*; +import io.deephaven.util.SafeCloseable; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; @@ -55,20 +58,28 @@ public void 
partitionBy( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - SessionState.ExportObject
<Table> targetTable = - ticketRouter.resolve(session, request.getTableId(), "tableId"); - - session.newExport(request.getResultId(), "resultId") - .require(targetTable) - .onError(responseObserver) - .submit(() -> { - authWiring.checkPermissionPartitionBy(session.getAuthContext(), request, - Collections.singletonList(targetTable.get())); - PartitionedTable partitionedTable = targetTable.get().partitionBy(request.getDropKeys(), - request.getKeyColumnNamesList().toArray(String[]::new)); - safelyComplete(responseObserver, PartitionByResponse.getDefaultInstance()); - return partitionedTable; - }); + final String description = "PartitionedTableService#partitionBy(table=" + + ticketRouter.getLogNameFor(request.getTableId(), "tableId") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject<Table> targetTable = + ticketRouter.resolve(session, request.getTableId(), "tableId"); + + session.newExport(request.getResultId(), "resultId") + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(targetTable) + .onError(responseObserver) + .submit(() -> { + authWiring.checkPermissionPartitionBy(session.getAuthContext(), request, + Collections.singletonList(targetTable.get())); + PartitionedTable partitionedTable = targetTable.get().partitionBy(request.getDropKeys(), + request.getKeyColumnNamesList().toArray(String[]::new)); + safelyComplete(responseObserver, PartitionByResponse.getDefaultInstance()); + return partitionedTable; + }); + } }
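Not every RPC hands its recorder off to an export. When a call can complete synchronously on the gRPC thread, as in the getSchema change earlier in this diff, the handler ends the query itself and logs the results either when endQuery() reports that the query should be logged or when the call failed. Condensed from that hunk (here exception is whatever StatusRuntimeException the handler produced, or null on success):

    if (queryPerformanceRecorder.endQuery() || exception != null) {
        EngineMetrics.getInstance().logQueryProcessingResults(queryPerformanceRecorder, exception);
    }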
@Override @@ -77,28 +88,36 @@ public void merge( @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) { final SessionState session = sessionService.getCurrentSession(); - SessionState.ExportObject<PartitionedTable> partitionedTable = - ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); - - session.newExport(request.getResultId(), "resultId") - .require(partitionedTable) - .onError(responseObserver) - .submit(() -> { - final Table table = partitionedTable.get().table(); - authWiring.checkPermissionMerge(session.getAuthContext(), request, - Collections.singletonList(table)); - Table merged; - if (table.isRefreshing()) { - merged = table.getUpdateGraph().sharedLock().computeLocked(partitionedTable.get()::merge); - } else { - merged = partitionedTable.get().merge(); - } - merged = authorizationTransformation.transform(merged); - final ExportedTableCreationResponse response = - buildTableCreationResponse(request.getResultId(), merged); - safelyComplete(responseObserver, response); - return merged; - }); + final String description = "PartitionedTableService#merge(table=" + + ticketRouter.getLogNameFor(request.getPartitionedTable(), "partitionedTable") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject<PartitionedTable> partitionedTable = + ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); + + session.newExport(request.getResultId(), "resultId") + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(partitionedTable) + .onError(responseObserver) + .submit(() -> { + final Table table = partitionedTable.get().table(); + authWiring.checkPermissionMerge(session.getAuthContext(), request, + Collections.singletonList(table)); + Table merged; + if (table.isRefreshing()) { + merged = table.getUpdateGraph().sharedLock().computeLocked(partitionedTable.get()::merge); + } else { + merged = partitionedTable.get().merge(); + } + merged = authorizationTransformation.transform(merged); + final ExportedTableCreationResponse response = + buildTableCreationResponse(request.getResultId(), merged); + safelyComplete(responseObserver, response); + return merged; + }); + } }
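The merge logic above is unchanged in substance: a refreshing table must be merged under the update graph's shared lock so the merge sees a consistent update cycle, while a static table can be merged directly. The same guard in isolation (a sketch; pt stands in for the resolved PartitionedTable):

    final Table underlying = pt.table();
    final Table merged = underlying.isRefreshing()
            ? underlying.getUpdateGraph().sharedLock().computeLocked(pt::merge)
            : pt.merge();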
@Override @@ -107,61 +126,70 @@ public void getTable( @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) { final SessionState session = sessionService.getCurrentSession(); - SessionState.ExportObject<PartitionedTable> partitionedTable = - ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); - SessionState.ExportObject<Table> keys = - ticketRouter.resolve(session, request.getKeyTableTicket(), "keyTableTicket"); - - session.newExport(request.getResultId(), "resultId") - .require(partitionedTable, keys) - .onError(responseObserver) - .submit(() -> { - Table table; - Table keyTable = keys.get(); - authWiring.checkPermissionGetTable(session.getAuthContext(), request, - List.of(partitionedTable.get().table(), keyTable)); - if (!keyTable.isRefreshing()) { - long keyTableSize = keyTable.size(); - if (keyTableSize != 1) { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Provided key table does not have one row, instead has " + keyTableSize); - } - long row = keyTable.getRowSet().firstRowKey(); - Object[] values = - partitionedTable.get().keyColumnNames().stream() - .map(keyTable::getColumnSource) - .map(cs -> cs.get(row)) - .toArray(); - table = partitionedTable.get().constituentFor(values); - } else { - table = keyTable.getUpdateGraph().sharedLock().computeLocked(() -> { + final String description = "PartitionedTableService#getTable(table=" + + ticketRouter.getLogNameFor(request.getPartitionedTable(), "partitionedTable") + ", keyTable=" + + ticketRouter.getLogNameFor(request.getKeyTableTicket(), "keyTable") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject<PartitionedTable> partitionedTable = + ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); + final SessionState.ExportObject<Table>
keys = + ticketRouter.resolve(session, request.getKeyTableTicket(), "keyTable"); + + session.newExport(request.getResultId(), "resultId") + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(partitionedTable, keys) + .onError(responseObserver) + .submit(() -> { + Table table; + Table keyTable = keys.get(); + authWiring.checkPermissionGetTable(session.getAuthContext(), request, + List.of(partitionedTable.get().table(), keyTable)); + if (!keyTable.isRefreshing()) { long keyTableSize = keyTable.size(); if (keyTableSize != 1) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "Provided key table does not have one row, instead has " + keyTableSize); } - Table requestedRow = partitionedTable.get().table().whereIn(keyTable, - partitionedTable.get().keyColumnNames().toArray(String[]::new)); - if (requestedRow.size() != 1) { - if (requestedRow.isEmpty()) { - throw Exceptions.statusRuntimeException(Code.NOT_FOUND, - "Key matches zero rows in the partitioned table"); - } else { - throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, - "Key matches more than one entry in the partitioned table: " - + requestedRow.size()); + long row = keyTable.getRowSet().firstRowKey(); + Object[] values = + partitionedTable.get().keyColumnNames().stream() + .map(keyTable::getColumnSource) + .map(cs -> cs.get(row)) + .toArray(); + table = partitionedTable.get().constituentFor(values); + } else { + table = keyTable.getUpdateGraph().sharedLock().computeLocked(() -> { + long keyTableSize = keyTable.size(); + if (keyTableSize != 1) { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Provided key table does not have one row, instead has " + keyTableSize); } - } - return (Table) requestedRow - .getColumnSource(partitionedTable.get().constituentColumnName()) - .get(requestedRow.getRowSet().firstRowKey()); - }); - } - table = authorizationTransformation.transform(table); - final ExportedTableCreationResponse response = - buildTableCreationResponse(request.getResultId(), table); - safelyComplete(responseObserver, response); - return table; - }); + Table requestedRow = partitionedTable.get().table().whereIn(keyTable, + partitionedTable.get().keyColumnNames().toArray(String[]::new)); + if (requestedRow.size() != 1) { + if (requestedRow.isEmpty()) { + throw Exceptions.statusRuntimeException(Code.NOT_FOUND, + "Key matches zero rows in the partitioned table"); + } else { + throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, + "Key matches more than one entry in the partitioned table: " + + requestedRow.size()); + } + } + return (Table) requestedRow + .getColumnSource(partitionedTable.get().constituentColumnName()) + .get(requestedRow.getRowSet().firstRowKey()); + }); + } + table = authorizationTransformation.transform(table); + final ExportedTableCreationResponse response = + buildTableCreationResponse(request.getResultId(), table); + safelyComplete(responseObserver, response); + return table; + }); + } } } diff --git a/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java b/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java index fa3de22157b..a13a29da41e 100644 --- a/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java +++ b/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java @@ -8,7 +8,7 @@ import io.deephaven.engine.context.ExecutionContext; import io.deephaven.engine.liveness.LivenessScopeStack; import io.deephaven.engine.table.impl.OperationInitializationThreadPool; -import 
io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorderState; import io.deephaven.engine.table.impl.util.AsyncErrorLogger; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.engine.table.impl.util.ServerStateTracker; @@ -153,8 +153,8 @@ public DeephavenApiServer run() throws IOException, ClassNotFoundException, Time EngineMetrics.maybeStartStatsCollection(); log.info().append("Starting Performance Trackers...").endl(); - QueryPerformanceRecorder.installPoolAllocationRecorder(); - QueryPerformanceRecorder.installUpdateGraphLockInstrumentation(); + QueryPerformanceRecorderState.installPoolAllocationRecorder(); + QueryPerformanceRecorderState.installUpdateGraphLockInstrumentation(); ServerStateTracker.start(); AsyncErrorLogger.init(); diff --git a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java index cd4ce627a59..b355ba776d8 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java @@ -9,6 +9,8 @@ import io.deephaven.auth.AuthenticationException; import io.deephaven.csv.util.MutableObject; import io.deephaven.engine.liveness.LivenessScopeStack; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.extensions.barrage.util.GrpcUtil; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; @@ -165,16 +167,25 @@ public void exportFromTicket( return; } - final SessionState.ExportObject source = ticketRouter.resolve( - session, request.getSourceId(), "sourceId"); - session.newExport(request.getResultId(), "resultId") - .require(source) - .onError(responseObserver) - .submit(() -> { - final Object o = source.get(); - GrpcUtil.safelyComplete(responseObserver, ExportResponse.getDefaultInstance()); - return o; - }); + final String description = "SessionService#exportFromTicket(object=" + + ticketRouter.getLogNameFor(request.getSourceId(), "sourceId") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject source = + ticketRouter.resolve(session, request.getSourceId(), "sourceId"); + + session.newExport(request.getResultId(), "resultId") + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(source) + .onError(responseObserver) + .submit(() -> { + final Object o = source.get(); + GrpcUtil.safelyComplete(responseObserver, ExportResponse.getDefaultInstance()); + return o; + }); + } } @Override @@ -194,18 +205,26 @@ public void publishFromTicket( return; } - final SessionState.ExportObject source = ticketRouter.resolve( - session, request.getSourceId(), "sourceId"); - Ticket resultId = request.getResultId(); - - final SessionState.ExportBuilder publisher = ticketRouter.publish( - session, resultId, "resultId", () -> { - // when publish is complete, complete the gRPC request - GrpcUtil.safelyComplete(responseObserver, PublishResponse.getDefaultInstance()); - }); - publisher.require(source) - .onError(responseObserver) - .submit(source::get); + final String description = "SessionService#publishFromTicket(object=" 
+ + ticketRouter.getLogNameFor(request.getSourceId(), "sourceId") + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject source = + ticketRouter.resolve(session, request.getSourceId(), "sourceId"); + + Ticket resultId = request.getResultId(); + + ticketRouter.publish(session, resultId, "resultId", () -> { + // when publish is complete, complete the gRPC request + GrpcUtil.safelyComplete(responseObserver, PublishResponse.getDefaultInstance()); + }) + .queryPerformanceRecorder(queryPerformanceRecorder) + .require(source) + .onError(responseObserver) + .submit(source::get); + } } @Override diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 56b98bbc751..862cb131855 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -10,16 +10,13 @@ import dagger.assisted.AssistedInject; import io.deephaven.base.reference.WeakSimpleReference; import io.deephaven.base.verify.Assert; -import io.deephaven.base.verify.Require; import io.deephaven.engine.liveness.LivenessArtifact; import io.deephaven.engine.liveness.LivenessReferent; import io.deephaven.engine.liveness.LivenessScopeStack; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; +import io.deephaven.engine.table.impl.perf.QueryState; import io.deephaven.engine.table.impl.util.EngineMetrics; -import io.deephaven.engine.tablelogger.QueryOperationPerformanceLogLogger; -import io.deephaven.engine.tablelogger.QueryPerformanceLogLogger; import io.deephaven.engine.updategraph.DynamicNode; import io.deephaven.hash.KeyedIntObjectHash; import io.deephaven.hash.KeyedIntObjectHashMap; @@ -219,6 +216,13 @@ protected void updateExpiration(@NotNull final SessionService.TokenExpiration ex .append(MILLIS_FROM_EPOCH_FORMATTER, expiration.deadlineMillis).append(".").endl(); } + /** + * @return the session id + */ + public String getSessionId() { + return sessionId; + } + /** * @return the current expiration token for this session */ @@ -531,6 +535,9 @@ public final static class ExportObject extends LivenessArtifact { private final SessionService.ErrorTransformer errorTransformer; private final SessionState session; + /** used to keep track of performance details either for aggregation or for the async ticket resolution */ + private QueryPerformanceRecorder queryPerformanceRecorder; + /** final result of export */ private volatile T result; private volatile ExportNotification.State state = ExportNotification.State.UNKNOWN; @@ -620,6 +627,15 @@ private boolean isNonExport() { return exportId == NON_EXPORT_ID; } + private synchronized void setQueryPerformanceRecorder( + final QueryPerformanceRecorder queryPerformanceRecorder) { + if (this.queryPerformanceRecorder != null) { + throw new IllegalStateException( + "performance query recorder can only be set once on an exportable object"); + } + this.queryPerformanceRecorder = queryPerformanceRecorder; + } + /** * Sets the dependencies and tracks liveness dependencies. 
* @@ -662,6 +678,11 @@ private synchronized void setWork( throw new IllegalStateException("export object can only be defined once"); } hasHadWorkSet = true; + + if (queryPerformanceRecorder != null && queryPerformanceRecorder.getState() == QueryState.RUNNING) { + // transfer ownership of the qpr to the export before it can be resumed by the scheduler + queryPerformanceRecorder.suspendQuery(); + } this.requiresSerialQueue = requiresSerialQueue; if (isExportStateTerminal(state)) { @@ -963,68 +984,56 @@ private void doExport() { T localResult = null; boolean shouldLog = false; - int evaluationNumber = -1; - QueryProcessingResults queryProcessingResults = null; + final QueryPerformanceRecorder exportRecorder; try (final SafeCloseable ignored1 = session.executionContext.open(); final SafeCloseable ignored2 = LivenessScopeStack.open()) { - try { - queryProcessingResults = new QueryProcessingResults( - QueryPerformanceRecorder.getInstance()); - evaluationNumber = QueryPerformanceRecorder.getInstance() - .startQuery("session=" + session.sessionId + ",exportId=" + logIdentity); + final String queryId; + if (isNonExport()) { + queryId = "nonExport=" + logIdentity; + } else { + queryId = "exportId=" + logIdentity; + } + + final boolean isResume = queryPerformanceRecorder != null + && queryPerformanceRecorder.getState() == QueryState.SUSPENDED; + exportRecorder = Objects.requireNonNullElseGet(queryPerformanceRecorder, + () -> QueryPerformanceRecorder.newQuery("ExportObject#doWork(" + queryId + ")", + session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY)); + try (final SafeCloseable ignored3 = isResume + ? exportRecorder.resumeQuery() + : exportRecorder.startQuery()) { try { localResult = capturedExport.call(); - } finally { - shouldLog = QueryPerformanceRecorder.getInstance().endQuery(); + } catch (final Exception err) { + caughtException = err; } + shouldLog = exportRecorder.endQuery(); } catch (final Exception err) { - caughtException = err; + // end query will throw if the export runner left the QPR in a bad state + if (caughtException == null) { + caughtException = err; + } + } + + if (caughtException != null) { synchronized (this) { if (!isExportStateTerminal(state)) { maybeAssignErrorId(); if (!(caughtException instanceof StatusRuntimeException)) { - log.error().append("Internal Error '").append(errorId).append("' ").append(err).endl(); + log.error().append("Internal Error '").append(errorId).append("' ") + .append(caughtException).endl(); } setState(ExportNotification.State.FAILED); } } - } finally { - if (caughtException != null && queryProcessingResults != null) { - queryProcessingResults.setException(caughtException.toString()); - } - QueryPerformanceRecorder.resetInstance(); } - if ((shouldLog || caughtException != null) && queryProcessingResults != null) { - final EngineMetrics memLoggers = EngineMetrics.getInstance(); - final QueryPerformanceLogLogger qplLogger = memLoggers.getQplLogger(); - final QueryOperationPerformanceLogLogger qoplLogger = memLoggers.getQoplLogger(); - try { - final QueryPerformanceNugget nugget = Require.neqNull( - queryProcessingResults.getRecorder().getQueryLevelPerformanceData(), - "queryProcessingResults.getRecorder().getQueryLevelPerformanceData()"); - - // noinspection SynchronizationOnLocalVariableOrMethodParameter - synchronized (qplLogger) { - qplLogger.log(evaluationNumber, - queryProcessingResults, - nugget); - } - final List nuggets = - queryProcessingResults.getRecorder().getOperationLevelPerformanceData(); - // noinspection 
SynchronizationOnLocalVariableOrMethodParameter - synchronized (qoplLogger) { - int opNo = 0; - for (QueryPerformanceNugget n : nuggets) { - qoplLogger.log(opNo++, n); - } - } - } catch (final Exception e) { - log.error().append("Failed to log query performance data: ").append(e).endl(); - } + if (shouldLog || caughtException != null) { + EngineMetrics.getInstance().logQueryProcessingResults(exportRecorder, caughtException); } if (caughtException == null) { + // must set result after ending the query so that onSuccess may resume / finalize a parent query setResult(localResult); } } @@ -1310,6 +1319,18 @@ public class ExportBuilder { } } + /** + * Set the performance recorder to resume when running this export. + * + * @param queryPerformanceRecorder the performance recorder + * @return this builder + */ + public ExportBuilder queryPerformanceRecorder( + @NotNull final QueryPerformanceRecorder queryPerformanceRecorder) { + export.setQueryPerformanceRecorder(queryPerformanceRecorder); + return this; + } + /** * Some exports must happen serially w.r.t. other exports. For example, an export that acquires the exclusive * UGP lock. We enqueue these dependencies independently of the otherwise regularly concurrent exports. diff --git a/server/src/main/java/io/deephaven/server/session/TicketRouter.java b/server/src/main/java/io/deephaven/server/session/TicketRouter.java index fc8375e0411..792cad9a0c7 100644 --- a/server/src/main/java/io/deephaven/server/session/TicketRouter.java +++ b/server/src/main/java/io/deephaven/server/session/TicketRouter.java @@ -5,6 +5,7 @@ import com.google.rpc.Code; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.extensions.barrage.util.BarrageUtil; import io.deephaven.hash.KeyedIntObjectHashMap; import io.deephaven.hash.KeyedIntObjectKey; @@ -13,6 +14,7 @@ import io.deephaven.proto.backplane.grpc.Ticket; import io.deephaven.proto.util.Exceptions; import io.deephaven.server.auth.AuthorizationProvider; +import io.deephaven.util.SafeCloseable; import org.apache.arrow.flight.impl.Flight; import org.jetbrains.annotations.Nullable; @@ -65,7 +67,11 @@ public SessionState.ExportObject resolve( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "could not resolve '" + logId + "' it's an empty ticket"); } - return getResolver(ticket.get(ticket.position()), logId).resolve(session, ticket, logId); + final String ticketName = getLogNameFor(ticket, logId); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + return getResolver(ticket.get(ticket.position()), logId).resolve(session, ticket, logId); + } } /** @@ -113,7 +119,10 @@ public SessionState.ExportObject resolve( @Nullable final SessionState session, final Flight.FlightDescriptor descriptor, final String logId) { - return getResolver(descriptor, logId).resolve(session, descriptor, logId); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "resolveDescriptor:" + descriptor)) { + return getResolver(descriptor, logId).resolve(session, descriptor, logId); + } } /** @@ -134,9 +143,13 @@ public SessionState.ExportBuilder publish( final ByteBuffer ticket, final String logId, @Nullable final Runnable onPublish) { - final TicketResolver resolver = getResolver(ticket.get(ticket.position()), logId); - authorization.authorizePublishRequest(resolver, ticket); - return resolver.publish(session, ticket, logId, onPublish); + final String 
ticketName = getLogNameFor(ticket, logId); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "publishTicket:" + ticketName)) { + final TicketResolver resolver = getResolver(ticket.get(ticket.position()), logId); + authorization.authorizePublishRequest(resolver, ticket); + return resolver.publish(session, ticket, logId, onPublish); + } } /** @@ -201,9 +214,12 @@ public SessionState.ExportBuilder publish( final Flight.FlightDescriptor descriptor, final String logId, @Nullable final Runnable onPublish) { - final TicketResolver resolver = getResolver(descriptor, logId); - authorization.authorizePublishRequest(resolver, descriptor); - return resolver.publish(session, descriptor, logId, onPublish); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "publishDescriptor:" + descriptor)) { + final TicketResolver resolver = getResolver(descriptor, logId); + authorization.authorizePublishRequest(resolver, descriptor); + return resolver.publish(session, descriptor, logId, onPublish); + } } /** @@ -220,7 +236,10 @@ public SessionState.ExportObject flightInfoFor( @Nullable final SessionState session, final Flight.FlightDescriptor descriptor, final String logId) { - return getResolver(descriptor, logId).flightInfoFor(session, descriptor, logId); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "flightInfoForDescriptor:" + descriptor)) { + return getResolver(descriptor, logId).flightInfoFor(session, descriptor, logId); + } } /** diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java index 4a52597c444..673e39e35b1 100644 --- a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java @@ -8,6 +8,8 @@ import io.deephaven.engine.context.ExecutionContext; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.TableDefinition; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.util.config.MutableInputTable; import io.deephaven.extensions.barrage.util.GrpcUtil; import io.deephaven.internal.log.LoggerFactory; @@ -21,6 +23,7 @@ import io.deephaven.server.session.SessionService; import io.deephaven.server.session.SessionState; import io.deephaven.server.session.TicketRouter; +import io.deephaven.util.SafeCloseable; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; @@ -52,46 +55,56 @@ public void addTableToInputTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - SessionState.ExportObject
diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java
index 4a52597c444..673e39e35b1 100644
--- a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java
+++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java
@@ -8,6 +8,8 @@
 import io.deephaven.engine.context.ExecutionContext;
 import io.deephaven.engine.table.Table;
 import io.deephaven.engine.table.TableDefinition;
+import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget;
+import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
 import io.deephaven.engine.util.config.MutableInputTable;
 import io.deephaven.extensions.barrage.util.GrpcUtil;
 import io.deephaven.internal.log.LoggerFactory;
@@ -21,6 +23,7 @@
 import io.deephaven.server.session.SessionService;
 import io.deephaven.server.session.SessionState;
 import io.deephaven.server.session.TicketRouter;
+import io.deephaven.util.SafeCloseable;
 import io.grpc.stub.StreamObserver;
 import org.jetbrains.annotations.NotNull;

@@ -52,46 +55,56 @@ public void addTableToInputTable(
             @NotNull final StreamObserver<AddTableResponse> responseObserver) {
         final SessionState session = sessionService.getCurrentSession();

-        SessionState.ExportObject<Table> targetTable =
-                ticketRouter.resolve(session, request.getInputTable(), "inputTable");
-        SessionState.ExportObject<Table> tableToAddExport =
-                ticketRouter.resolve(session, request.getTableToAdd(), "tableToAdd");
-
-        session.nonExport()
-                .requiresSerialQueue()
-                .onError(responseObserver)
-                .require(targetTable, tableToAddExport)
-                .submit(() -> {
-                    Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
-                    if (!(inputTable instanceof MutableInputTable)) {
-                        throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
-                                "Table can't be used as an input table");
-                    }
-
-                    MutableInputTable mutableInputTable = (MutableInputTable) inputTable;
-                    Table tableToAdd = tableToAddExport.get();
-
-                    authWiring.checkPermissionAddTableToInputTable(
-                            ExecutionContext.getContext().getAuthContext(), request,
-                            List.of(targetTable.get(), tableToAdd));
-
-                    // validate that the columns are compatible
-                    try {
-                        mutableInputTable.validateAddOrModify(tableToAdd);
-                    } catch (TableDefinition.IncompatibleTableDefinitionException exception) {
-                        throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
-                                "Provided tables's columns are not compatible: " + exception.getMessage());
-                    }
-
-                    // actually add the tables contents
-                    try {
-                        mutableInputTable.add(tableToAdd);
-                        GrpcUtil.safelyComplete(responseObserver, AddTableResponse.getDefaultInstance());
-                    } catch (IOException ioException) {
-                        throw Exceptions.statusRuntimeException(Code.DATA_LOSS,
-                                "Error adding table to input table");
-                    }
-                });
+        final String description = "InputTableService#addTableToInputTable(inputTable=" +
+                ticketRouter.getLogNameFor(request.getInputTable(), "inputTable") + ", tableToAdd=" +
+                ticketRouter.getLogNameFor(request.getTableToAdd(), "tableToAdd") + ")";
+        final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery(
+                description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY);
+
+        try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) {
+            final SessionState.ExportObject<Table> targetTable =
+                    ticketRouter.resolve(session, request.getInputTable(), "inputTable");
+
+            final SessionState.ExportObject<Table> tableToAddExport =
+                    ticketRouter.resolve(session, request.getTableToAdd(), "tableToAdd");
+
+            session.nonExport()
+                    .queryPerformanceRecorder(queryPerformanceRecorder)
+                    .requiresSerialQueue()
+                    .onError(responseObserver)
+                    .require(targetTable, tableToAddExport)
+                    .submit(() -> {
+                        Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
+                        if (!(inputTable instanceof MutableInputTable)) {
+                            throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
+                                    "Table can't be used as an input table");
+                        }
+
+                        MutableInputTable mutableInputTable = (MutableInputTable) inputTable;
+                        Table tableToAdd = tableToAddExport.get();
+
+                        authWiring.checkPermissionAddTableToInputTable(
+                                ExecutionContext.getContext().getAuthContext(), request,
+                                List.of(targetTable.get(), tableToAdd));
+
+                        // validate that the columns are compatible
+                        try {
+                            mutableInputTable.validateAddOrModify(tableToAdd);
+                        } catch (TableDefinition.IncompatibleTableDefinitionException exception) {
+                            throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
+                                    "Provided table's columns are not compatible: " + exception.getMessage());
+                        }
+
+                        // actually add the table's contents
+                        try {
+                            mutableInputTable.add(tableToAdd);
+                            GrpcUtil.safelyComplete(responseObserver, AddTableResponse.getDefaultInstance());
+                        } catch (IOException ioException) {
+                            throw Exceptions.statusRuntimeException(Code.DATA_LOSS,
+                                    "Error adding table to input table");
+                        }
+                    });
+        }
     }

     @Override
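Note: addTableToInputTable above is the template the other instrumented RPCs in this PR follow: create a recorder with newQuery, open it with startQuery on the gRPC thread, and hand it to the export via the new ExportBuilder#queryPerformanceRecorder so the deferred work is recorded against the same query. A condensed sketch using only APIs introduced in this change (the class, method, and description names are illustrative):

import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget;
import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
import io.deephaven.server.session.SessionState;
import io.deephaven.util.SafeCloseable;

final class RpcRecorderExample {
    // Illustrative: record an RPC's deferred work against a single query, as the handlers above do.
    static void submitInstrumented(final SessionState session, final Runnable work) {
        final QueryPerformanceRecorder recorder = QueryPerformanceRecorder.newQuery(
                "ExampleService#exampleRpc()", session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY);
        try (final SafeCloseable ignored = recorder.startQuery()) {
            session.nonExport()
                    .queryPerformanceRecorder(recorder) // the export resumes this recorder when it runs
                    .submit(work);
        }
    }
}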
@@ -100,48 +113,58 @@ public void deleteTableFromInputTable(
             @NotNull final StreamObserver<DeleteTableResponse> responseObserver) {
         final SessionState session = sessionService.getCurrentSession();

-        SessionState.ExportObject<Table> targetTable =
-                ticketRouter.resolve(session, request.getInputTable(), "inputTable");
-        SessionState.ExportObject<Table> tableToDeleteExport =
-                ticketRouter.resolve(session, request.getTableToRemove(), "tableToDelete");
-
-        session.nonExport()
-                .requiresSerialQueue()
-                .onError(responseObserver)
-                .require(targetTable, tableToDeleteExport)
-                .submit(() -> {
-                    Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
-                    if (!(inputTable instanceof MutableInputTable)) {
-                        throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
-                                "Table can't be used as an input table");
-                    }
-
-                    MutableInputTable mutableInputTable = (MutableInputTable) inputTable;
-                    Table tableToDelete = tableToDeleteExport.get();
-
-                    authWiring.checkPermissionDeleteTableFromInputTable(
-                            ExecutionContext.getContext().getAuthContext(), request,
-                            List.of(targetTable.get(), tableToDelete));
-
-                    // validate that the columns are compatible
-                    try {
-                        mutableInputTable.validateDelete(tableToDelete);
-                    } catch (TableDefinition.IncompatibleTableDefinitionException exception) {
-                        throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
-                                "Provided tables's columns are not compatible: " + exception.getMessage());
-                    } catch (UnsupportedOperationException exception) {
-                        throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
-                                "Provided input table does not support delete.");
-                    }
-
-                    // actually delete the table's contents
-                    try {
-                        mutableInputTable.delete(tableToDelete);
-                        GrpcUtil.safelyComplete(responseObserver, DeleteTableResponse.getDefaultInstance());
-                    } catch (IOException ioException) {
-                        throw Exceptions.statusRuntimeException(Code.DATA_LOSS,
-                                "Error deleting table from inputtable");
-                    }
-                });
+        final String description = "InputTableService#deleteTableFromInputTable(inputTable=" +
+                ticketRouter.getLogNameFor(request.getInputTable(), "inputTable") + ", tableToRemove=" +
+                ticketRouter.getLogNameFor(request.getTableToRemove(), "tableToRemove") + ")";
+        final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery(
+                description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY);
+
+        try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) {
+            final SessionState.ExportObject<Table> targetTable =
+                    ticketRouter.resolve(session, request.getInputTable(), "inputTable");
+
+            final SessionState.ExportObject<Table> tableToRemoveExport =
+                    ticketRouter.resolve(session, request.getTableToRemove(), "tableToRemove");
+
+            session.nonExport()
+                    .queryPerformanceRecorder(queryPerformanceRecorder)
+                    .requiresSerialQueue()
+                    .onError(responseObserver)
+                    .require(targetTable, tableToRemoveExport)
+                    .submit(() -> {
+                        Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
+                        if (!(inputTable instanceof MutableInputTable)) {
+                            throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
+                                    "Table can't be used as an input table");
+                        }
+
+                        MutableInputTable mutableInputTable = (MutableInputTable) inputTable;
+                        Table tableToRemove = tableToRemoveExport.get();
+
+                        authWiring.checkPermissionDeleteTableFromInputTable(
+                                ExecutionContext.getContext().getAuthContext(), request,
+                                List.of(targetTable.get(), tableToRemove));
+
+                        // validate that the columns are compatible
+                        try {
+                            mutableInputTable.validateDelete(tableToRemove);
+                        } catch (TableDefinition.IncompatibleTableDefinitionException exception) {
+                            throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
+                                    "Provided table's columns are not compatible: " + exception.getMessage());
+                        } catch (UnsupportedOperationException exception) {
+                            throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
+                                    "Provided input table does not support delete.");
+                        }
+
+                        // actually delete the table's contents
+                        try {
+                            mutableInputTable.delete(tableToRemove);
+                            GrpcUtil.safelyComplete(responseObserver, DeleteTableResponse.getDefaultInstance());
+                        } catch (IOException ioException) {
+                            throw Exceptions.statusRuntimeException(Code.DATA_LOSS,
+                                    "Error deleting table from input table");
+                        }
+                    });
+        }
     }
 }
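Note: both handlers above gate on the same attribute check before mutating the input table. A standalone sketch of that flow (method and variable names are illustrative; getAttribute, validateAddOrModify, and add are the calls used above):

import io.deephaven.engine.table.Table;
import io.deephaven.engine.table.TableDefinition;
import io.deephaven.engine.util.config.MutableInputTable;

import java.io.IOException;

final class InputTableExample {
    // Illustrative: a table is only usable as an input table if it carries INPUT_TABLE_ATTRIBUTE.
    static void appendRows(final Table target, final Table rowsToAdd) throws IOException {
        final Object attr = target.getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
        if (!(attr instanceof MutableInputTable)) {
            throw new IllegalArgumentException("Table can't be used as an input table");
        }
        final MutableInputTable mit = (MutableInputTable) attr;
        try {
            mit.validateAddOrModify(rowsToAdd); // rejects incompatible column definitions
        } catch (TableDefinition.IncompatibleTableDefinitionException e) {
            throw new IllegalArgumentException("Provided table's columns are not compatible", e);
        }
        mit.add(rowsToAdd); // may throw IOException
    }
}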
diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java
index db19d235805..7574253f2c7 100644
--- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java
+++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java
@@ -4,9 +4,13 @@
 package io.deephaven.server.table.ops;

 import com.google.rpc.Code;
+import io.deephaven.base.verify.Assert;
 import io.deephaven.clientsupport.gotorow.SeekRow;
 import io.deephaven.auth.codegen.impl.TableServiceContextualAuthWiring;
 import io.deephaven.engine.table.Table;
+import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget;
+import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
+import io.deephaven.engine.table.impl.util.EngineMetrics;
 import io.deephaven.extensions.barrage.util.ExportUtil;
 import io.deephaven.internal.log.LoggerFactory;
 import io.deephaven.io.logger.Logger;
@@ -64,9 +68,11 @@
 import io.deephaven.server.session.TicketRouter;
 import io.deephaven.server.table.ExportedTableUpdateListener;
 import io.deephaven.time.DateTimeUtils;
+import io.deephaven.util.SafeCloseable;
 import io.grpc.StatusRuntimeException;
 import io.grpc.stub.ServerCallStreamObserver;
 import io.grpc.stub.StreamObserver;
+import org.apache.commons.lang3.mutable.MutableInt;
 import org.jetbrains.annotations.NotNull;

 import javax.inject.Inject;
@@ -313,62 +319,76 @@ public void exactJoinTables(
     }

     @Override
-    public void leftJoinTables(LeftJoinTablesRequest request,
-            StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void leftJoinTables(
+            @NotNull final LeftJoinTablesRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.LEFT_JOIN, request, responseObserver);
     }

     @Override
-    public void asOfJoinTables(AsOfJoinTablesRequest request,
-            StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void asOfJoinTables(
+            @NotNull final AsOfJoinTablesRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.AS_OF_JOIN, request, responseObserver);
     }

     @Override
-    public void ajTables(AjRajTablesRequest request, StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void ajTables(
+            @NotNull final AjRajTablesRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.AJ, request, responseObserver);
     }

     @Override
-    public void rajTables(AjRajTablesRequest request, StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void rajTables(
+            @NotNull final AjRajTablesRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.RAJ, request, responseObserver);
     }

     @Override
-    public void rangeJoinTables(RangeJoinTablesRequest request,
-            StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void rangeJoinTables(
+            @NotNull final RangeJoinTablesRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.RANGE_JOIN, request, responseObserver);
     }

     @Override
-    public void runChartDownsample(RunChartDownsampleRequest request,
-            StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void runChartDownsample(
+            @NotNull final RunChartDownsampleRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.RUN_CHART_DOWNSAMPLE, request, responseObserver);
     }

     @Override
-    public void fetchTable(FetchTableRequest request, StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void fetchTable(
+            @NotNull final FetchTableRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.FETCH_TABLE, request, responseObserver);
     }

     @Override
-    public void applyPreviewColumns(ApplyPreviewColumnsRequest request,
-            StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void applyPreviewColumns(
+            @NotNull final ApplyPreviewColumnsRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.APPLY_PREVIEW_COLUMNS, request, responseObserver);
     }

     @Override
-    public void createInputTable(CreateInputTableRequest request,
-            StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void createInputTable(
+            @NotNull final CreateInputTableRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.CREATE_INPUT_TABLE, request, responseObserver);
     }

     @Override
-    public void updateBy(UpdateByRequest request, StreamObserver<ExportedTableCreationResponse> responseObserver) {
+    public void updateBy(
+            @NotNull final UpdateByRequest request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.UPDATE_BY, request, responseObserver);
     }

-    private Object getSeekValue(Literal literal, Class<?> dataType) {
+    private Object getSeekValue(@NotNull final Literal literal, @NotNull final Class<?> dataType) {
         if (literal.hasStringValue()) {
             if (BigDecimal.class.isAssignableFrom(dataType)) {
                 return new BigDecimal(literal.getStringValue());
@@ -449,33 +469,43 @@ public void seekRow(
         if (sourceId.getTicket().isEmpty()) {
             throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No consoleId supplied");
         }
-        SessionState.ExportObject<Table> exportedTable =
-                ticketRouter.resolve(session, sourceId, "sourceId");
-        session.nonExport()
-                .require(exportedTable)
-                .onError(responseObserver)
-                .submit(() -> {
-                    final Table table = exportedTable.get();
-                    authWiring.checkPermissionSeekRow(session.getAuthContext(), request,
-                            Collections.singletonList(table));
-                    final String columnName = request.getColumnName();
-                    final Class<?> dataType = table.getDefinition().getColumn(columnName).getDataType();
-                    final Object seekValue = getSeekValue(request.getSeekValue(), dataType);
-                    final Long result = table.apply(new SeekRow(
-                            request.getStartingRow(),
-                            columnName,
-                            seekValue,
-                            request.getInsensitive(),
-                            request.getContains(),
-                            request.getIsBackward()));
-                    SeekRowResponse.Builder rowResponse = SeekRowResponse.newBuilder();
-                    safelyComplete(responseObserver, rowResponse.setResultRow(result).build());
-                });
-    }
-
-    @Override
-    public void computeColumnStatistics(ColumnStatisticsRequest request,
-            StreamObserver<ColumnStatisticsResponse> responseObserver) {
+        final String description = "TableService#seekRow(table=" +
+                ticketRouter.getLogNameFor(sourceId, "sourceId") + ")";
+        final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery(
+                description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY);
+
+        try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) {
+            final SessionState.ExportObject<Table> exportedTable =
+                    ticketRouter.resolve(session, sourceId, "sourceId");
+
+            session.nonExport()
+                    .queryPerformanceRecorder(queryPerformanceRecorder)
+                    .require(exportedTable)
+                    .onError(responseObserver)
+                    .submit(() -> {
+                        final Table table = exportedTable.get();
+                        authWiring.checkPermissionSeekRow(session.getAuthContext(), request,
+                                Collections.singletonList(table));
+                        final String columnName = request.getColumnName();
+                        final Class<?> dataType = table.getDefinition().getColumn(columnName).getDataType();
+                        final Object seekValue = getSeekValue(request.getSeekValue(), dataType);
+                        final Long result = table.apply(new SeekRow(
+                                request.getStartingRow(),
+                                columnName,
+                                seekValue,
+                                request.getInsensitive(),
+                                request.getContains(),
+                                request.getIsBackward()));
+                        SeekRowResponse.Builder rowResponse = SeekRowResponse.newBuilder();
+                        safelyComplete(responseObserver, rowResponse.setResultRow(result).build());
+                    });
+        }
+    }
+
+    @Override
+    public void computeColumnStatistics(
+            @NotNull final ColumnStatisticsRequest request,
+            @NotNull final StreamObserver<ColumnStatisticsResponse> responseObserver) {
         oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.COLUMN_STATISTICS, request, responseObserver);
     }

@@ -491,65 +521,85 @@ public void batch(
         }

         final SessionState session = sessionService.getCurrentSession();

-        // step 1: initialize exports
-        final List<BatchExportBuilder<?>> exportBuilders = request.getOpsList().stream()
-                .map(op -> createBatchExportBuilder(session, op))
-                .collect(Collectors.toList());
+        final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery(
+                "TableService#batch()", session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY);

-        // step 2: resolve dependencies
-        exportBuilders.forEach(export -> export.resolveDependencies(session, exportBuilders));
+        try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) {
+            // step 1: initialize exports
+            final MutableInt offset = new MutableInt(0);
+            final List<BatchExportBuilder<?>> exportBuilders = request.getOpsList().stream()
+                    .map(op -> createBatchExportBuilder(
+                            offset.getAndIncrement(), session, queryPerformanceRecorder, op))
+                    .collect(Collectors.toList());

-        // step 3: check for cyclical dependencies; this is our only opportunity to check non-export cycles
-        // TODO: check for cycles
+            // step 2: resolve dependencies
+            exportBuilders.forEach(export -> export.resolveDependencies(session, exportBuilders));

-        // step 4: submit the batched operations
-        final AtomicInteger remaining = new AtomicInteger(exportBuilders.size());
-        final AtomicReference<StatusRuntimeException> firstFailure = new AtomicReference<>();
+            // step 3: check for cyclical dependencies; this is our only opportunity to check non-export cycles
+            // TODO: check for cycles

-        final Runnable onOneResolved = () -> {
-            if (remaining.decrementAndGet() == 0) {
-                final StatusRuntimeException failure = firstFailure.get();
-                if (failure != null) {
-                    safelyError(responseObserver, failure);
-                } else {
-                    safelyComplete(responseObserver);
+            // step 4: submit the batched operations
+            final AtomicInteger remaining = new AtomicInteger(1 + exportBuilders.size());
+            final AtomicReference<StatusRuntimeException> firstFailure = new AtomicReference<>();
+
+            final Runnable onOneResolved = () -> {
+                int numRemaining = remaining.decrementAndGet();
+                Assert.geqZero(numRemaining, "numRemaining");
+                if (numRemaining > 0) {
+                    return;
                 }
-            }
-        };

-        for (int i = 0; i < exportBuilders.size(); ++i) {
-            final BatchExportBuilder<?> exportBuilder = exportBuilders.get(i);
-            final int exportId = exportBuilder.exportBuilder.getExportId();
+                try (final SafeCloseable ignored2 = queryPerformanceRecorder.resumeQuery()) {
+                    final StatusRuntimeException failure = firstFailure.get();
+                    if (failure != null) {
+                        safelyError(responseObserver, failure);
+                    } else {
+                        safelyComplete(responseObserver);
+                    }
+                    if (queryPerformanceRecorder.endQuery()) {
+                        EngineMetrics.getInstance().logQueryProcessingResults(queryPerformanceRecorder, failure);
+                    }
+                }
+            };

-            final TableReference resultId;
-            if (exportId == SessionState.NON_EXPORT_ID) {
-                resultId = TableReference.newBuilder().setBatchOffset(i).build();
-            } else {
-                resultId = ExportTicketHelper.tableReference(exportId);
-            }
+            for (int i = 0; i < exportBuilders.size(); ++i) {
+                final BatchExportBuilder<?> exportBuilder = exportBuilders.get(i);
+                final int exportId = exportBuilder.exportBuilder.getExportId();

-            exportBuilder.exportBuilder.onError((result, errorContext, cause, dependentId) -> {
-                String errorInfo = errorContext;
-                if (dependentId != null) {
-                    errorInfo += " dependency: " + dependentId;
-                }
-                if (cause instanceof StatusRuntimeException) {
-                    errorInfo += " cause: " + cause.getMessage();
-                    firstFailure.compareAndSet(null, (StatusRuntimeException) cause);
+                final TableReference resultId;
+                if (exportId == SessionState.NON_EXPORT_ID) {
+                    resultId = TableReference.newBuilder().setBatchOffset(i).build();
+                } else {
+                    resultId = ExportTicketHelper.tableReference(exportId);
                 }
-                final ExportedTableCreationResponse response = ExportedTableCreationResponse.newBuilder()
-                        .setResultId(resultId)
-                        .setSuccess(false)
-                        .setErrorInfo(errorInfo)
-                        .build();
-                safelyOnNext(responseObserver, response);
-                onOneResolved.run();
-            }).onSuccess(table -> {
-                final ExportedTableCreationResponse response =
-                        ExportUtil.buildTableCreationResponse(resultId, table);
-                safelyOnNext(responseObserver, response);
-                onOneResolved.run();
-            }).submit(exportBuilder::doExport);
+
+                exportBuilder.exportBuilder.onError((result, errorContext, cause, dependentId) -> {
+                    String errorInfo = errorContext;
+                    if (dependentId != null) {
+                        errorInfo += " dependency: " + dependentId;
+                    }
+                    if (cause instanceof StatusRuntimeException) {
+                        errorInfo += " cause: " + cause.getMessage();
+                        firstFailure.compareAndSet(null, (StatusRuntimeException) cause);
+                    }
+                    final ExportedTableCreationResponse response = ExportedTableCreationResponse.newBuilder()
+                            .setResultId(resultId)
+                            .setSuccess(false)
+                            .setErrorInfo(errorInfo)
+                            .build();
+                    safelyOnNext(responseObserver, response);
+                    onOneResolved.run();
+                }).onSuccess(table -> {
+                    final ExportedTableCreationResponse response =
+                            ExportUtil.buildTableCreationResponse(resultId, table);
+                    safelyOnNext(responseObserver, response);
+                    onOneResolved.run();
+                }).submit(exportBuilder::doExport);
+            }
+
+            // now that we've submitted everything we'll suspend the query and release our refcount
+            queryPerformanceRecorder.suspendQuery();
+            onOneResolved.run();
         }
     }
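Note: batch() above sizes `remaining` at 1 + exportBuilders.size(): every operation releases one count as it resolves, and the submitting thread releases the extra one only after the loop, so the terminal response (and endQuery) cannot fire while submission is still in flight. The counting scheme in isolation, as plain JDK code with illustrative names:

import java.util.List;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;

final class CompletionCountExample {
    // Illustrative reduction of batch()'s completion accounting: the extra "1" is the
    // submitter's own count, released only after every task has been enqueued.
    static void runAll(final List<Runnable> tasks, final Executor executor, final Runnable onAllDone) {
        final AtomicInteger remaining = new AtomicInteger(1 + tasks.size());
        final Runnable onOneResolved = () -> {
            if (remaining.decrementAndGet() == 0) {
                // batch() does its terminal work here: resumeQuery(), respond, endQuery()
                onAllDone.run();
            }
        };
        for (final Runnable task : tasks) {
            executor.execute(() -> {
                task.run();
                onOneResolved.run();
            });
        }
        onOneResolved.run(); // submission finished: release the submitter's count
    }
}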
@@ -575,25 +625,33 @@ public void getExportedTableCreationResponse(
             throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No request ticket supplied");
         }

-        final SessionState.ExportObject<?> export = ticketRouter.resolve(session, request, "request");
-
-        session.nonExport()
-                .require(export)
-                .onError(responseObserver)
-                .submit(() -> {
-                    final Object obj = export.get();
-                    if (!(obj instanceof Table)) {
-                        responseObserver.onError(
-                                Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION,
-                                        "Ticket is not a table"));
-                        return;
-                    }
-                    authWiring.checkPermissionGetExportedTableCreationResponse(
-                            session.getAuthContext(), request, Collections.singletonList((Table) obj));
-                    final ExportedTableCreationResponse response =
-                            ExportUtil.buildTableCreationResponse(request, (Table) obj);
-                    safelyComplete(responseObserver, response);
-                });
+        final String description = "TableService#getExportedTableCreationResponse(table=" +
+                ticketRouter.getLogNameFor(request, "request") + ")";
+        final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery(
+                description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY);
+
+        try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) {
+            final SessionState.ExportObject<?> export = ticketRouter.resolve(session, request, "request");
+
+            session.nonExport()
+                    .queryPerformanceRecorder(queryPerformanceRecorder)
+                    .require(export)
+                    .onError(responseObserver)
+                    .submit(() -> {
+                        final Object obj = export.get();
+                        if (!(obj instanceof Table)) {
+                            responseObserver.onError(
+                                    Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION,
+                                            "Ticket is not a table"));
+                            return;
+                        }
+                        authWiring.checkPermissionGetExportedTableCreationResponse(
+                                session.getAuthContext(), request, Collections.singletonList((Table) obj));
+                        final ExportedTableCreationResponse response =
+                                ExportUtil.buildTableCreationResponse(request, (Table) obj);
+                        safelyComplete(responseObserver, response);
+                    });
+        }
     }

     /**
@@ -606,44 +664,59 @@ public void getExportedTableCreationResponse(
      */
     private <T> void oneShotOperationWrapper(
             final BatchTableRequest.Operation.OpCase op,
-            final T request,
-            final StreamObserver<ExportedTableCreationResponse> responseObserver) {
+            @NotNull final T request,
+            @NotNull final StreamObserver<ExportedTableCreationResponse> responseObserver) {
         final SessionState session = sessionService.getCurrentSession();
         final GrpcTableOperation<T> operation = getOp(op);
-        operation.validateRequest(request);

         final Ticket resultId = operation.getResultTicket(request);
         if (resultId.getTicket().isEmpty()) {
             throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No result ticket supplied");
         }

-        final List<SessionState.ExportObject<Table>> dependencies = operation.getTableReferences(request).stream()
-                .map(ref -> resolveOneShotReference(session, ref))
-                .collect(Collectors.toList());
+        final String description = "TableService#" + op.name() + "(resultId=" +
+                ticketRouter.getLogNameFor(resultId, "TableService") + ")";

-        session.newExport(resultId, "resultId")
-                .require(dependencies)
-                .onError(responseObserver)
-                .submit(() -> {
-                    operation.checkPermission(request, dependencies);
-                    final Table result = operation.create(request, dependencies);
-                    final ExportedTableCreationResponse response =
-                            ExportUtil.buildTableCreationResponse(resultId, result);
-                    safelyComplete(responseObserver, response);
-                    return result;
-                });
+        final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery(
+                description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY);
+
+        try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) {
+            operation.validateRequest(request);
+
+            final List<SessionState.ExportObject<Table>> dependencies = operation.getTableReferences(request).stream()
+                    .map(ref -> resolveOneShotReference(session, ref))
+                    .collect(Collectors.toList());
+
+            session.newExport(resultId, "resultId")
+                    .require(dependencies)
+                    .onError(responseObserver)
+                    .queryPerformanceRecorder(queryPerformanceRecorder)
+                    .submit(() -> {
+                        operation.checkPermission(request, dependencies);
+                        final Table result = operation.create(request, dependencies);
+                        final ExportedTableCreationResponse response =
+                                ExportUtil.buildTableCreationResponse(resultId, result);
+                        safelyComplete(responseObserver, response);
+                        return result;
+                    });
+        }
     }

-    private SessionState.ExportObject<Table> resolveOneShotReference(SessionState session, TableReference ref) {
+    private SessionState.ExportObject<Table> resolveOneShotReference(
+            @NotNull final SessionState session,
+            @NotNull final TableReference ref) {
         if (!ref.hasTicket()) {
             throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION,
                     "One-shot operations must use ticket references");
         }
+
         return ticketRouter.resolve(session, ref.getTicket(), "sourceId");
     }

-    private SessionState.ExportObject<Table> resolveBatchReference(SessionState session,
-            List<BatchExportBuilder<?>> exportBuilders, TableReference ref) {
+    private SessionState.ExportObject<Table> resolveBatchReference(
+            @NotNull final SessionState session,
+            @NotNull final List<BatchExportBuilder<?>> exportBuilders,
+            @NotNull final TableReference ref) {
         switch (ref.getRefCase()) {
             case TICKET:
                 return ticketRouter.resolve(session, ref.getTicket(), "sourceId");
@@ -658,14 +731,28 @@ private SessionState.ExportObject<Table> resolveBatchReference(SessionState session,
         }
     }

-    private <T> BatchExportBuilder<T> createBatchExportBuilder(SessionState session, BatchTableRequest.Operation op) {
+    private <T> BatchExportBuilder<T> createBatchExportBuilder(
+            final int offset,
+            @NotNull final SessionState session,
+            @NotNull final QueryPerformanceRecorder batchQueryPerformanceRecorder,
+            final BatchTableRequest.Operation op) {
         final GrpcTableOperation<T> operation = getOp(op.getOpCase());
         final T request = operation.getRequestFromOperation(op);
         operation.validateRequest(request);

         final Ticket resultId = operation.getResultTicket(request);
+        final boolean hasResultId = !resultId.getTicket().isEmpty();
         final ExportBuilder<Table> exportBuilder =
-                resultId.getTicket().isEmpty() ? session.nonExport() : session.newExport(resultId, "resultId");
+                hasResultId ? session.newExport(resultId, "resultId") : session.nonExport();
+        final String resultDescription = hasResultId
+                ? "resultId=" + ticketRouter.getLogNameFor(resultId, "resultId") + ", "
+                : "";
+
+        final String description = "TableService#" + op.getOpCase().name() + "(" + resultDescription + "batchOffset=" +
+                offset + ")";
+        exportBuilder.queryPerformanceRecorder(QueryPerformanceRecorder.newSubQuery(
+                description, batchQueryPerformanceRecorder, QueryPerformanceNugget.DEFAULT_FACTORY));
+
         return new BatchExportBuilder<>(operation, request, exportBuilder);
     }

@@ -676,13 +763,18 @@ private class BatchExportBuilder<T> {

         List<SessionState.ExportObject<Table>> dependencies;

-        BatchExportBuilder(GrpcTableOperation<T> operation, T request, ExportBuilder<Table> exportBuilder) {
+        BatchExportBuilder(
+                @NotNull final GrpcTableOperation<T> operation,
+                @NotNull final T request,
+                @NotNull final ExportBuilder<Table> exportBuilder) {
             this.operation = Objects.requireNonNull(operation);
             this.request = Objects.requireNonNull(request);
             this.exportBuilder = Objects.requireNonNull(exportBuilder);
         }

-        void resolveDependencies(SessionState session, List<BatchExportBuilder<?>> exportBuilders) {
+        void resolveDependencies(
+                @NotNull final SessionState session,
+                @NotNull final List<BatchExportBuilder<?>> exportBuilders) {
             dependencies = operation.getTableReferences(request).stream()
                     .map(ref -> resolveBatchReference(session, exportBuilders, ref))
                     .collect(Collectors.toList());
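Note: taken together, the batch changes split one logical query across threads: the gRPC thread starts and then suspends the parent recorder, each operation runs under a sub-query created by createBatchExportBuilder above, and whichever thread completes last resumes and ends the parent. A compact sketch of that lifecycle under the APIs used in this PR (assumes a String session id, and that closing a suspended or ended recorder is a no-op, as the usage above implies):

import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget;
import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
import io.deephaven.util.SafeCloseable;

final class RecorderLifecycleExample {
    static void lifecycle(final String sessionId) {
        final QueryPerformanceRecorder parent = QueryPerformanceRecorder.newQuery(
                "ExampleService#batch()", sessionId, QueryPerformanceNugget.DEFAULT_FACTORY);
        try (final SafeCloseable ignored = parent.startQuery()) {
            // each batched operation gets a sub-query that accumulates into the parent,
            // mirroring createBatchExportBuilder above
            final QueryPerformanceRecorder child = QueryPerformanceRecorder.newSubQuery(
                    "ExampleService#op(batchOffset=0)", parent, QueryPerformanceNugget.DEFAULT_FACTORY);
            // ... hand `child` to the export that will run the operation ...
            parent.suspendQuery(); // RPC thread is done; async work continues elsewhere
        }
        // later, on whichever thread finishes last:
        try (final SafeCloseable ignored = parent.resumeQuery()) {
            if (parent.endQuery()) {
                // recorder complete; batch() logs it via EngineMetrics.logQueryProcessingResults(...)
            }
        }
    }
}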