Issue #78: Formatted docstrings to better utilize sphinx-apidoc #94

Closed
wants to merge 2 commits into from
35 changes: 34 additions & 1 deletion .ruff.toml
@@ -1 +1,34 @@
ignore-init-module-imports = true
ignore-init-module-imports = true
exclude = [
".txt",
".bzr",
".direnv",
".eggs",
".git",
".git-rewrite",
".hg",
".mypy_cache",
".nox",
".pants.d",
".pytype",
".ruff_cache",
".svn",
".tox",
".venv",
"__pypackages__",
"__pycache__",
"_build",
"buck-out",
"build",
"docs",
"dist",
"node_modules",
"venv",
]

select = [
"D205", # Blank line after summary
"D103", # Public function needs docstring
"D417", # Document all arguments
"D419", # Docstring is empty
]
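
For reference, a docstring in the following shape satisfies the selected rules (D205 blank line after the summary, D103 docstring present, D417 all arguments documented, D419 non-empty); the function and its parameters here are hypothetical and used only to illustrate the format:

def scale_reward(reward, weight=1.0):
    """Scale a raw reward by a constant weight.

    :param reward: The raw reward returned by the environment
    :param weight: The multiplicative weight applied to the reward
    :return: The weighted reward
    """
    return reward * weight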
12 changes: 7 additions & 5 deletions src/bsk_rl/agents/genetic_algorithm.py
@@ -12,7 +12,9 @@

"""
This file is designed to provide generic interfaces to a DEAP-based genetic algorithm
for solving arbitrary gymnasium environments. It does a few things:
for solving arbitrary gymnasium environments.

It does a few things:
1. Includes the ability to cast arbitrary gymnasium environments with a max_length
parameter as many-input, single-output optimization problems
2. Allows a genetic algorithm to be called to optimize said environments in a parallel
@@ -27,6 +29,7 @@ def mutUniformIntList(individual, num_samples=2, low=0, up=5, indpb=0.3):
"""
This function is designed to mutate a list of integers, rather than a single
integer.

:param individual: The individual to be mutated
:param num_samples: The number of samples to be mutated
:param low: The lower bound for the mutation
@@ -68,9 +71,7 @@ def __init__(self, env_name, initial_conditions=None):
self.action_space = tmp_env.action_space

def evaluate(self, action_set):
"""
Evaluates a full run of the environment given a list of actions.
"""
"""Evaluates a full run of the environment given a list of actions."""
total_reward = 0

self.env = gym.make(self.env_name)
@@ -266,7 +267,8 @@ def optimize(self, seed=datetime.now(), checkpoint_freq=1, checkpoint=None):
def plot_results(self):
"""
Generates plots of GA convergence, performance, and final population vector
behavior
behavior.

:param checkpoint_names: list of checkpoint names, assumed to be pickle files.
:return:
"""
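
To make the evaluation pattern above concrete (casting a fixed action list as a single-output fitness), here is a hedged sketch using the standard gymnasium API; the helper name and return convention are illustrative and not part of this PR:

import gymnasium as gym

def evaluate_action_list(env_name, action_set):
    """Run one episode with a fixed action list and return its total reward.

    :param env_name: Registered gymnasium environment id
    :param action_set: Sequence of actions to apply, one per step
    :return: Tuple containing the summed reward (DEAP fitnesses are tuples)
    """
    env = gym.make(env_name)
    env.reset()
    total_reward = 0.0
    for action in action_set:
        _, reward, terminated, truncated, _ = env.step(action)
        total_reward += reward
        if terminated or truncated:
            break
    return (total_reward,)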
10 changes: 5 additions & 5 deletions src/bsk_rl/agents/mcts.py
@@ -76,7 +76,7 @@ def __init__(

# Define the environment
def setEnv(self, envType, initial_conditions, max_steps=30, max_length=90):
"""Sets the environment and initial conditions MCTS will step through"""
"""Sets the environment and initial conditions MCTS will step through."""
# Create the environment
self.envType = envType
self.env = gym.make(envType)
@@ -91,7 +91,7 @@ def setEnv(self, envType, initial_conditions, max_steps=30, max_length=90):

# @profiler.profile
def selectAction(self, s, d, actHist):
"""Selects the next action for the true environment to step through"""
"""Selects the next action for the true environment to step through."""
# We make a tuple out of s so it can be used as a dictionary key
s_tuple = tuple(s.reshape(1, -1)[0])

@@ -144,7 +144,7 @@ def selectAction(self, s, d, actHist):
return max(self.Q[s_tuple], key=self.Q[s_tuple].get)

def simulate(self, s, d):
"""Simulates a trajectory through the environment and updates Q_search"""
"""Simulates a trajectory through the environment and updates Q_search."""
# We make a tuple out of s so it can be used as a dictionary key
try:
s_tuple = tuple(s.reshape(1, -1)[0])
@@ -211,7 +211,7 @@ def simulate(self, s, d):
return q

def rollout(self, s, d):
"""Executes a rollout to the desired depth or end of the environment"""
"""Executes a rollout to the desired depth or end of the environment."""
# If we have reached max depth, just return 0
if d == 0:
return 0.0
@@ -231,7 +231,7 @@
return reward + self.rollout(sp, d - 1)

def backup_tree(self):
"""Backs up the value along the main tree once the sim has terminated"""
"""Backs up the value along the main tree once the sim has terminated."""
# 'Anti-Sum' is added to at each node to subtract from r_sum at last node
r_anti_sum = 0
# Grab the total reward from the last node
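
As context for the rollout docstring above, a simplified sketch of the depth-limited recursion it describes, assuming a random rollout policy; the real class rolls out on its own environment copy and is not reproduced here:

def rollout(env, d):
    """Accumulate reward from random actions until depth d or episode end.

    :param env: A gymnasium environment already stepped to the current state
    :param d: Remaining rollout depth
    :return: Sum of rewards collected along the rollout
    """
    if d == 0:
        return 0.0
    action = env.action_space.sample()
    _, reward, terminated, truncated, _ = env.step(action)
    if terminated or truncated:
        return reward
    return reward + rollout(env, d - 1)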
10 changes: 9 additions & 1 deletion src/bsk_rl/agents/state_machine.py
@@ -5,6 +5,7 @@ class StateMachine:
def loadTransferConditions(self, strategy_file):
"""
Load the transfer conditions into the dict machine_strat from a given .adv file

:param adv_file:
:return:
"""
@@ -24,6 +25,7 @@ def loadTransferConditions(self, strategy_file):
def selectAction(self, discrete_state):
"""
Select an action based on the discretized state

:param discrete_state:
:return action:
"""
@@ -34,6 +36,7 @@ def SimpleEOSDiscretizer(self, obs):
def SimpleEOSDiscretizer(self, obs):
"""
Discretizes the simplEOS states into 16 bins

:param SimpleEOS environment, obs:
:return system_state:
"""
@@ -127,6 +130,7 @@ def SimpleEOSDiscretizer(self, obs):
def earthObsEnvDiscretizer(self, obs):
"""
Discretizes the MultiSensorEOS environment states into 8 bins

:param MultiSensorEOS, obs:
:return system_state:
"""
@@ -186,6 +190,7 @@ def earthObsEnvDiscretizer(self, obs):
def AgileEOSEnvDiscretizer(self, obs):
"""
Discretizes the AgileEOS states into 16 bins

:param AgileEOS environment, obs:
:return system_state:
"""
@@ -279,6 +284,7 @@ def AgileEOSEnvDiscretizer(self, obs):
def smallBodyScienceEnvDiscretizer(self, obs):
"""
Discretizes the SmallBodyScience environment into bins

:param SmallBodyScience, obs:
:return system_state:
"""
@@ -521,6 +527,7 @@ def smallBodyScienceAct(
):
"""
Called to determine the next action to take in the small body environment

:param discretized_state: The discretized state of the environment
:param phi_c: The current latitude of the spacecraft
:param lambda_c: The current longitude of the spacecraft
@@ -529,7 +536,8 @@
:param requiredWaypointTime: The time required to reach the current waypoint
:param obs: The current observation
:param target_hist: The history of the waypoints
:return action: The next action to take"""
:return action: The next action to take.
"""
started_tour = False
on_tour = False
finished_tour = False
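
To make the state-machine flow concrete, a hedged sketch of the discretize-then-look-up pattern these docstrings describe; the bins, thresholds, and strategy table below are invented for illustration and do not match the real .adv files:

def discretize(obs):
    """Map an observation to one of four illustrative bins.

    :param obs: Observation vector; obs[0] battery fraction, obs[1] stored data
    :return: Integer bin index in [0, 3]
    """
    battery_low = obs[0] < 0.2
    has_data = obs[1] > 0.0
    return 2 * int(battery_low) + int(has_data)

# Placeholder strategy table mapping each bin to an action label
machine_strat = {0: "image", 1: "downlink", 2: "charge", 3: "charge"}

def select_action(obs):
    """Return the strategy-table action for the discretized observation."""
    return machine_strat[discretize(obs)]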
34 changes: 19 additions & 15 deletions src/bsk_rl/envs/agile_eos/bsk_sim.py
@@ -76,9 +76,7 @@ def __init__(
max_length=270.0,
target_tuple_size=4,
):
"""
Creates the simulation, but does not initialize the initial conditions.
"""
"""Creates the simulation, but does not initialize the initial conditions."""
self.initialized = False
self.dynRate = dynRate
self.fswRate = fswRate
@@ -320,8 +318,8 @@ def set_ICs(self):
def set_dynamics(self):
"""
Sets up the dynamics modules for the sim.

By default, parameters are set to those for a 6U cubesat.

:return:
"""
sc_number = 0
@@ -1008,7 +1006,9 @@ def init_obs(self):

def set_fsw(self):
"""
Sets up the attitude guidance stack for the simulation. This simulator runs:
Sets up the attitude guidance stack for the simulation.

This simulator runs:
inertial3Dpoint - Sets the attitude guidance objective to point the main panel
at the sun.
hillPointTask: Sets the attitude guidance objective to point a "camera" angle
@@ -1369,16 +1369,17 @@ def setup_viz(self):
self.vizInterface.settings.showLocationLabels = 1

def setupGatewayMsgs(self):
"""create C-wrapped gateway messages such that different modules can write to
this message and provide a common input msg for down-stream modules"""
"""Create C-wrapped gateway messages such that different modules can write to
this message and provide a common input msg for down-stream modules.
"""

self.attRefMsg = cMsgPy.AttRefMsg_C()
self.attGuidMsg = cMsgPy.AttGuidMsg_C()

self.zeroGateWayMsgs()

def zeroGateWayMsgs(self):
"""Zero all the FSW gateway message payloads"""
"""Zero all the FSW gateway message payloads."""
self.attRefMsg.write(messaging.AttRefMsgPayload())
self.attGuidMsg.write(messaging.AttGuidMsgPayload())

@@ -1429,6 +1430,7 @@ def set_logging(self):
def run_sim(self, action, return_obs=True):
"""
Executes the sim for a specified duration given a mode command.

:param action:
:param duration:
:return:
@@ -1848,7 +1850,8 @@ def get_obs(self):

def close_gracefully(self):
"""
makes sure spice gets shut down right when we close.
Makes sure spice gets shut down right when we close.

:return:
"""
self.gravFactory.unloadSpiceKernels()
@@ -1863,7 +1866,7 @@ def compute_image_tuples(self, r_BN_N, v_BN_N):
4: Imaged?
5: Downlinked?
:return: image state tuples (in a single np array) - normalized and
non-normalized
non-normalized.
"""
# Initialize the image tuple array
image_tuples = np.zeros(self.target_tuple_size * self.n_target_buffer)
@@ -1909,7 +1912,8 @@ def update_imaged_targets(self):
def update_imaged_targets(self):
"""
Updates which targets have been imaged and which have been downlinked
:return: downlinked

:return: downlinked.
"""

# Initialize list of targets that were just downlinked or imaged, helpful for
@@ -1951,8 +1955,9 @@ def check_image_update(self, idx, storedData):
def check_image_update(self, idx, storedData):
"""
Checks the storageUnitLog to see if data was added or not

:param idx:
:return: 1 if data was added, 0 otherwise
:return: 1 if data was added, 0 otherwise.
"""
if storedData is not None:
if storedData[-1, idx]:
@@ -1966,6 +1971,7 @@ def check_downlink_update(self, idx, storedData):
def check_downlink_update(self, idx, storedData):
"""
Checks the storageUnitLog to see if an image was downlinked

:param idx: The specific buffer to look at
:return:
"""
@@ -1983,9 +1989,7 @@
return 0

def check_target_switch(self):
"""
Grabs the index(s) of the next upcoming target(s)
"""
"""Grabs the index(s) of the next upcoming target(s)."""
times = self.initial_conditions.get("target_times")
idx = 0
upcoming_tgts = []
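
As a reading aid for check_target_switch above, a hedged sketch of the upcoming-target scan it describes; the helper name and the cap on returned targets are assumptions, not the simulator's actual code:

def upcoming_target_indices(target_times, current_time, max_targets=3):
    """Return the indices of the next imaging targets after current_time.

    :param target_times: Ordered imaging window times from the initial conditions
    :param current_time: Current simulation time in the same units
    :param max_targets: Maximum number of upcoming targets to return
    :return: List of target indices whose windows have not yet passed
    """
    upcoming = [idx for idx, t in enumerate(target_times) if t > current_time]
    return upcoming[:max_targets]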
11 changes: 8 additions & 3 deletions src/bsk_rl/envs/agile_eos/gym_env.py
@@ -10,7 +10,9 @@ class AgileEOS(gym.Env):
"""
This Gymnasium environment is designed to simulate an agile EOS scheduling problem
in which a satellite in low-Earth orbit attempts to maximize the number of targets
imaged and downlinked while avoiding resource constraint violations. Resource
imaged and downlinked while avoiding resource constraint violations.

Resource
constraints include:

1. Power: The spacecraft must keep its battery charge above zero
@@ -108,6 +110,7 @@ def __init__(self, failure_penalty=1, image_component=0.1, downlink_component=0.
def step(self, action):
"""
The agent takes a step in the environment.

Parameters
----------
action : int
@@ -228,6 +231,7 @@ def step(self, action):
def _take_action(self, action):
"""
Interfaces with the simulator to

:param action:
:return:
"""
@@ -247,7 +251,7 @@
def _get_reward(self, downlinked, imaged):
"""
Reward is based on the total number of imaged and downlinked targets, failure i
f it occurs
f it occurs.
"""
reward = 0
if self.failure:
@@ -275,6 +279,7 @@ def _get_reward(self, downlinked, imaged):
def reset(self, seed=None, options=None):
"""
Reset the state of the environment and returns an initial observation.

Returns
-------
observation (object): the initial observation of the space.
@@ -316,7 +321,7 @@ def _render(self, mode="human", close=False):
return

def _get_state(self):
"""Return the non-normalized observation to the environment"""
"""Return the non-normalized observation to the environment."""

return self.simulator.obs

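
Finally, a minimal usage sketch of the reset/step loop these docstrings describe; the environment id "AgileEOS-v0" is assumed here and may be registered differently in the package:

import gymnasium as gym

env = gym.make("AgileEOS-v0")  # assumed id; check the package's registration
observation, info = env.reset()
terminated = truncated = False
while not (terminated or truncated):
    action = env.action_space.sample()  # stand-in for a trained policy
    observation, reward, terminated, truncated, info = env.step(action)
env.close()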