Skip to content

Commit

Permalink
Update profile options and put in subdirectory by default
Browse files Browse the repository at this point in the history
  • Loading branch information
kburns committed Jan 31, 2024
1 parent a5671ff commit 09a3bc0
Show file tree
Hide file tree
Showing 3 changed files with 41 additions and 27 deletions.
44 changes: 22 additions & 22 deletions dedalus/core/solvers.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,20 +9,20 @@
import cProfile
import pstats
from math import prod

from collections import defaultdict
import shelve
import pickle

from . import subsystems
from . import timesteppers
from .evaluator import Evaluator
from ..libraries.matsolvers import matsolvers
from ..tools.config import config
from ..tools.array import scipy_sparse_eigs
from ..tools.parallel import ProfileWrapper
from ..tools.parallel import ProfileWrapper, parallel_mkdir

PROFILE_SCRIPT = config['profiling'].getboolean('PROFILE_SCRIPT')
PROFILE_MODE_DEFAULT = config['profiling'].get('PROFILE_MODE_DEFAULT')
PROFILE_DEFAULT = config['profiling'].getboolean('PROFILE_DEFAULT')
PARALLEL_PROFILE_DEFAULT = config['profiling'].getboolean('PARALLEL_PROFILE_DEFAULT')
PROFILE_DIRECTORY = pathlib.Path(config['profiling'].get('PROFILE_DIRECTORY'))

import logging
logger = logging.getLogger(__name__.split('.')[-1])
Expand Down Expand Up @@ -495,10 +495,9 @@ class InitialValueSolver(SolverBase):
warmup_iterations : int, optional
Number of warmup iterations to disregard when computing runtime statistics (default: 10).
profile : bool, optional
Save profiles with cProfile (default: False).
profile_mode : string, optional
Output joined files [summary] or also include per-core analysis [full] (default: full).
Save accumulated profiles with cProfile (default: False).
parallel_profile : bool, optional
Save per-process and accumulated profiles with cProfile (default: False).
**kw :
Other options passed to ProblemBase.
Expand All @@ -524,15 +523,16 @@ class InitialValueSolver(SolverBase):
matsolver_default = 'MATRIX_FACTORIZER'
matrices = ['M', 'L']

def __init__(self, problem, timestepper, enforce_real_cadence=100, warmup_iterations=10, profile=PROFILE_SCRIPT, profile_mode=PROFILE_MODE_DEFAULT, **kw):
def __init__(self, problem, timestepper, enforce_real_cadence=100, warmup_iterations=10, profile=PROFILE_DEFAULT, parallel_profile=PARALLEL_PROFILE_DEFAULT, **kw):
logger.debug('Beginning IVP instantiation')
# Setup timing and profiling
self.dist = problem.dist
self._bcast_array = np.zeros(1, dtype=float)
self.init_time = self.world_time
self.profile = profile
if profile:
self.profile_mode = profile_mode.lower()
if profile or parallel_profile:
parallel_mkdir(PROFILE_DIRECTORY, comm=self.dist.comm)
self.profile = True
self.parallel_profile = parallel_profile
self.setup_profiler = cProfile.Profile()
self.warmup_profiler = cProfile.Profile()
self.run_profiler = cProfile.Profile()
Expand Down Expand Up @@ -750,6 +750,7 @@ def log_stats(self, format=".4g"):
logger.info(f"Timings unavailable because warmup did not complete.")

def dump_profiles(self, profiler, name):
"Save profiling data to disk."
comm = self.dist.comm
# Disable and create stats on each process
profiler.create_stats()
Expand All @@ -759,19 +760,18 @@ def dump_profiles(self, profiler, name):
profiles = comm.gather(ProfileWrapper(p.stats), root=0)
# Sum stats on root process
if comm.rank == 0:
if self.profile_mode=='full':
profile_database = pathlib.Path(f"{name}_profiles")
stats = {'primcalls':defaultdict(list),'totcalls':defaultdict(list),'tottime':defaultdict(list),'cumtime':defaultdict(list)}
if self.parallel_profile:
stats = {'primcalls': defaultdict(list),
'totcalls': defaultdict(list),
'tottime': defaultdict(list),
'cumtime': defaultdict(list)}
for profile in profiles:
for func, (primcalls, totcalls, tottime, cumtime, callers) in profile.stats.items():
stats['primcalls'][func].append(primcalls)
stats['totcalls'][func].append(totcalls)
stats['tottime'][func].append(tottime)
stats['cumtime'][func].append(cumtime)
with shelve.open(str(profile_database), flag='n') as shelf:
for key in stats:
shelf[key] = stats[key]

# creation of joint_stats destroys profiles, so do this second
pickle.dump(stats, open(PROFILE_DIRECTORY / f"{name}_parallel.pickle", 'wb'))
# Creation of joint_stats destroys profiles, so do this second
joint_stats = pstats.Stats(*profiles)
joint_stats.dump_stats(f"{name}.prof")
joint_stats.dump_stats(PROFILE_DIRECTORY / f"{name}.prof")
14 changes: 9 additions & 5 deletions dedalus/dedalus.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -118,11 +118,15 @@
# This works around NFS caching issues
FILEHANDLER_TOUCH_TMPFILE = False


[profiling]

# Profile script using cProfile
PROFILE_SCRIPT = False
# Default profile setting for solvers
# This saves accumulated profiling data using cProfile
PROFILE_DEFAULT = False

# Default parallel profile setting for solvers
# This saves per-process and accumulated profiling data using cProfile
PARALLEL_PROFILE_DEFAULT = False

# Level of profiling to conduct (summary, full)
PROFILE_MODE_DEFAULT = full
# Profile directory base (will be expanded to <PROFILE_DIRECTORY>/runtime.prof, etc)
PROFILE_DIRECTORY = profiles
10 changes: 10 additions & 0 deletions dedalus/tools/parallel.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
"""

import pathlib
from mpi4py import MPI


Expand Down Expand Up @@ -67,3 +68,12 @@ def __init__(self, stats):
def create_stats(self):
pass


def parallel_mkdir(path, comm=MPI.COMM_WORLD):
    """
    Create a directory from the root process of a communicator.

    Parameters
    ----------
    path : str or pathlib.Path
        Directory path to create. Missing parent directories are created too.
    comm : MPI communicator, optional
        Communicator whose root (rank 0) performs the creation
        (default: COMM_WORLD).

    Notes
    -----
    All ranks block in the Sync barrier on exit, so the directory is
    guaranteed to exist on return for every process in the communicator.
    """
    path = pathlib.Path(path)
    with Sync(comm=comm, enter=False, exit=True) as sync:
        if sync.comm.rank == 0:
            # parents/exist_ok make this race-free and idempotent, unlike a
            # separate exists() check followed by mkdir() (TOCTOU), and allow
            # nested profile directories like "profiles/run1".
            path.mkdir(parents=True, exist_ok=True)

0 comments on commit 09a3bc0

Please sign in to comment.