sql: emit more tracing events from the stats cache #54035

Merged 2 commits on Sep 10, 2020
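The bulk of the change swaps the log.V / log.Infof idiom, which only writes to the verbose log, for log.VEventf, which additionally records the message as an event on the tracing span carried by the context, making the stats cache's activity visible in statement traces. A minimal before/after sketch (the example package, function, and int64 tableID are placeholders for illustration, not code from the PR):

// Sketch of the idiom change applied throughout this PR, assuming CockroachDB's
// util/log package.
package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

func logWaiting(ctx context.Context, tableID int64) {
	// Before: gated on verbosity, visible only in the log output.
	if log.V(1) {
		log.Infof(ctx, "waiting for statistics for table %d", tableID)
	}

	// After: same log output at verbosity 1, plus an event recorded on the
	// tracing span in ctx, so the message shows up in statement traces.
	log.VEventf(ctx, 1, "waiting for statistics for table %d", tableID)
}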
pkg/sql/plan_opt.go (3 additions, 0 deletions)

@@ -405,6 +405,7 @@ func (opc *optPlanningCtx) buildReusableMemo(ctx context.Context) (_ *memo.Memo,
 	// operators that can be constant folded, then fully optimize it now - it
 	// can be reused without further changes to build the execution tree.
 	if !f.Memo().HasPlaceholders() && !f.FoldingControl().PreventedStableFold() {
+		opc.log(ctx, "optimizing (no placeholders)")
 		if _, err := opc.optimizer.Optimize(); err != nil {
 			return nil, err
 		}
@@ -499,6 +500,8 @@ func (opc *optPlanningCtx) buildExecMemo(ctx context.Context) (_ *memo.Memo, _ e
 		}
 		opc.flags.Set(planFlagOptCacheMiss)
 		opc.log(ctx, "query cache miss")
+	} else {
+		opc.log(ctx, "not using query cache")
 	}

 	// We are executing a statement for which there is no reusable memo
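The added events in this file go through opc.log, the planning context's logging helper, whose implementation is not part of this diff. A minimal sketch under the assumption that it simply forwards to log.VEventf so the message lands on the statement's trace span (the example package and planningCtx type are placeholders, not the real pkg/sql code):

// Hypothetical sketch only; the real opc.log helper lives in pkg/sql and is
// not shown in this PR.
package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// planningCtx stands in for sql.optPlanningCtx for illustration.
type planningCtx struct{}

func (opc *planningCtx) log(ctx context.Context, msg string) {
	// Verbosity 1 to match the events added elsewhere in this PR.
	log.VEventf(ctx, 1, "%s", msg)
}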
pkg/sql/stats/stats_cache.go (15 additions, 18 deletions)

@@ -32,6 +32,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/util/log"
 	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
 	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
+	"github.com/cockroachdb/cockroach/pkg/util/tracing"
 	"github.com/cockroachdb/errors"
 )
@@ -188,11 +189,11 @@ func (sc *TableStatisticsCache) lookupStatsLocked(
 	if e.mustWait {
 		// We are in the process of grabbing stats for this table. Wait until
 		// that is complete, at which point e.stats will be populated.
-		if log.V(1) {
-			log.Infof(ctx, "waiting for statistics for table %d", tableID)
-		}
+		log.VEventf(ctx, 1, "waiting for statistics for table %d", tableID)
 		e.waitCond.Wait()
+		log.VEventf(ctx, 1, "finished waiting for statistics for table %d", tableID)
 	} else {
+		// This is the expected "fast" path; don't emit an event.
 		if log.V(2) {
 			log.Infof(ctx, "statistics for table %d found in cache", tableID)
 		}
@@ -211,10 +212,6 @@ func (sc *TableStatisticsCache) lookupStatsLocked(
 func (sc *TableStatisticsCache) addCacheEntryLocked(
 	ctx context.Context, tableID descpb.ID,
 ) (stats []*TableStatistic, err error) {
-	if log.V(1) {
-		log.Infof(ctx, "reading statistics for table %d", tableID)
-	}
-
 	// Add a cache entry that other queries can find and wait on until we have the
 	// stats.
 	e := &cacheEntry{
@@ -228,7 +225,9 @@ func (sc *TableStatisticsCache) addCacheEntryLocked(
 		sc.mu.Unlock()
 		defer sc.mu.Lock()

+		log.VEventf(ctx, 1, "reading statistics for table %d", tableID)
 		stats, err = sc.getTableStatsFromDB(ctx, tableID)
+		log.VEventf(ctx, 1, "finished reading statistics for table %d", tableID)
 	}()

 	e.mustWait = false
@@ -258,10 +257,6 @@ func (sc *TableStatisticsCache) refreshCacheEntry(ctx context.Context, tableID d
 	sc.mu.Lock()
 	defer sc.mu.Unlock()

-	if log.V(1) {
-		log.Infof(ctx, "reading statistics for table %d", tableID)
-	}
-
 	// If the stats don't already exist in the cache, don't bother performing
 	// the refresh. If e.err is not nil, the stats are in the process of being
 	// removed from the cache (see addCacheEntryLocked), so don't refresh in this
@@ -287,7 +282,9 @@ func (sc *TableStatisticsCache) refreshCacheEntry(ctx context.Context, tableID d
 			sc.mu.Unlock()
 			defer sc.mu.Lock()

+			log.VEventf(ctx, 1, "refreshing statistics for table %d", tableID)
 			stats, err = sc.getTableStatsFromDB(ctx, tableID)
+			log.VEventf(ctx, 1, "done refreshing statistics for table %d", tableID)
 		}()
 		if !e.mustRefreshAgain {
 			break
@@ -307,11 +304,13 @@ func (sc *TableStatisticsCache) refreshCacheEntry(ctx context.Context, tableID d
 // RefreshTableStats refreshes the cached statistics for the given table ID
 // by fetching the new stats from the database.
 func (sc *TableStatisticsCache) RefreshTableStats(ctx context.Context, tableID descpb.ID) {
-	if log.V(1) {
-		log.Infof(ctx, "refreshing statistics for table %d", tableID)
-	}
+	log.VEventf(ctx, 1, "refreshing statistics for table %d", tableID)
+	ctx, span := tracing.ForkCtxSpan(ctx, "refresh-table-stats")
 	// Perform an asynchronous refresh of the cache.
-	go sc.refreshCacheEntry(ctx, tableID)
+	go func() {
+		defer tracing.FinishSpan(span)
+		sc.refreshCacheEntry(ctx, tableID)
+	}()
 }

 // InvalidateTableStats invalidates the cached statistics for the given table ID.
@@ -320,9 +319,7 @@ func (sc *TableStatisticsCache) RefreshTableStats(ctx context.Context, tableID d
 // This function is used only when we want to guarantee that the next query
 // uses updated stats.
 func (sc *TableStatisticsCache) InvalidateTableStats(ctx context.Context, tableID descpb.ID) {
-	if log.V(1) {
-		log.Infof(ctx, "evicting statistics for table %d", tableID)
-	}
+	log.VEventf(ctx, 1, "evicting statistics for table %d", tableID)
 	sc.mu.Lock()
 	defer sc.mu.Unlock()
 	sc.mu.cache.Del(tableID)
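The RefreshTableStats change also ties the asynchronous refresh into tracing: the goroutine can outlive the caller's span, so the work runs under a span forked with tracing.ForkCtxSpan and the goroutine finishes that span itself. A minimal sketch of the same pattern, using only the calls that appear in the diff (the refreshAsync helper and its callback are placeholders for illustration):

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/tracing"
)

// refreshAsync hands work off to a goroutine while keeping it visible in the
// caller's trace. Placeholder helper for illustration.
func refreshAsync(ctx context.Context, refresh func(context.Context)) {
	// Note the hand-off in the caller's trace before forking.
	log.VEventf(ctx, 1, "kicking off asynchronous refresh")

	// Fork a span that may outlive the caller's; the returned ctx carries it.
	ctx, span := tracing.ForkCtxSpan(ctx, "refresh-table-stats")
	go func() {
		// The goroutine owns the forked span and must finish it.
		defer tracing.FinishSpan(span)
		refresh(ctx)
	}()
}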