black version 23.1 format updates (facebookresearch#1110)
aclegg3 authored Feb 1, 2023
1 parent f91b2b3 commit 862cc43
Showing 39 changed files with 9 additions and 53 deletions.
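
Nearly all of the 53 deletions below are the same mechanical change: the blank line that sat immediately after a def signature (or another block header such as with/for/else) and before the first statement of the body is removed. A minimal before/after sketch with a hypothetical function, not taken from any file in this commit:

# Hypothetical illustration of the blank-line removal applied throughout this diff.
def scale_batch_old(batch, is_depth=False):

    if is_depth:
        return batch / 10.0
    return batch


# After the black 23.1 update, the body starts directly under the signature.
def scale_batch_new(batch, is_depth=False):
    if is_depth:
        return batch / 10.0
    return batch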
@@ -75,7 +75,6 @@ def construct_envs(
for i in range(num_environments):
proc_config = config.copy()
with read_write(proc_config):

task_config = proc_config.habitat
task_config.seed = task_config.seed + i
if len(scenes) > 0:
@@ -785,7 +785,6 @@ def calculate_zfactor(
def forward(
self, batch: torch.Tensor, is_depth: bool = False
) -> torch.Tensor:

# Depth conversion for input tensors
if is_depth and self.input_zfactor is not None:
input_b = batch.size()[0] // self.input_len
@@ -931,7 +930,6 @@ def transform_observation_space(
def forward(
self, observations: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:

for i, target_sensor_uuid in enumerate(self.target_uuids):
# number of input and input sensor uuids
in_len = self.converter.input_len
@@ -1185,7 +1183,6 @@ def __init__(

@classmethod
def from_config(cls, config):

if hasattr(config, "target_uuids"):
# Optional Config Value to specify target uuid
target_uuids = config.target_uuids
@@ -72,7 +72,6 @@ def __init__(
numsteps + 1, num_envs, *action_shape
)
if discrete_actions:

assert isinstance(self.buffers["actions"], torch.Tensor)
assert isinstance(self.buffers["prev_actions"], torch.Tensor)
self.buffers["actions"] = self.buffers["actions"].long()
1 change: 0 additions & 1 deletion habitat-baselines/habitat_baselines/il/data/data.py
@@ -111,7 +111,6 @@ def __init__(
self.scene_episode_dict[scene],
desc="Saving episode frames for each scene",
):

if self.only_vqa_task:
pos_queue = episode.shortest_paths[0][
-self.num_frames : # noqa: E203
@@ -98,7 +98,6 @@ def save_frames(self, pos_queue: List[ShortestPathPoint]) -> None:
"""

for pos in pos_queue:

observation = self.env.sim.get_observations_at(
pos.position, pos.rotation
)
1 change: 0 additions & 1 deletion habitat-baselines/habitat_baselines/il/data/nav_data.py
@@ -280,7 +280,6 @@ def preprocess_actions(self) -> None:
0 - NULL; 1 - START; 2 - FWD; 3 - LEFT; 4 - RIGHT; 5 - STOP;
"""
for ep in self.episodes:

ep.actions = [x.action + 2 for x in ep.shortest_paths[0]]
ep.action_length = len(ep.actions)
(
2 changes: 0 additions & 2 deletions habitat-baselines/habitat_baselines/il/metrics.py
@@ -64,7 +64,6 @@ def update(self, values: List) -> None:
self.stats.append(copy.deepcopy(current_stats))

def get_stat_string(self, mode: int = 1) -> str:

stat_string = ""

for k, v in self.info.items():
@@ -88,7 +87,6 @@ def get_stats(self, mode: int = 1) -> List[float]:
return stats

def dump_log(self) -> bool:

if self.log_json is None:
return False

5 changes: 0 additions & 5 deletions habitat-baselines/habitat_baselines/il/models/models.py
@@ -325,7 +325,6 @@ def __init__(
def forward(
self, images: Tensor, questions: Tensor
) -> Tuple[Tensor, Tensor]:

N, T, _, _, _ = images.size()
# bs x 5 x 3 x 256 x 256
img_feats = self.cnn(
@@ -446,7 +445,6 @@ def forward(
controller_actions_in: Tensor,
controller_action_lengths: Tensor,
) -> Tuple[Tensor, Tensor, Tensor]:

N_p, T_p, _ = planner_img_feats.size()

planner_img_feats = self.cnn_fc_layer(planner_img_feats)
@@ -528,7 +526,6 @@ def planner_step(
def controller_step(
self, img_feats: Tensor, actions_in: Tensor, hidden_in: Tensor
) -> Tensor:

img_feats = self.cnn_fc_layer(img_feats)
actions_embed = self.planner_nav_rnn.action_embed(actions_in)

@@ -647,7 +644,6 @@ def forward(
action_lengths: Tensor,
hidden: bool = False,
) -> Union[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor, Tensor]]:

T: Union[int, bool] = False
if self.image_input is True:
N, T, _ = img_feats.size()
@@ -695,7 +691,6 @@ def step_forward(
actions_in: Tensor,
hidden: HiddenState,
) -> Tuple[Tensor, Tensor]:

T: Union[bool, int] = False
if self.image_input is True:
N, T, _ = img_feats.size()
@@ -268,7 +268,6 @@ def _eval_checkpoint(
% config.habitat_baselines.il.eval_save_results_interval
== 0
):

result_id = "ckpt_{}_{}".format(
checkpoint_index, idx[0].item()
)
@@ -64,7 +64,6 @@ def _save_nav_results(
writer: TensorboardWriter,
video_option: list,
) -> None:

r"""For saving NAV-PACMAN eval results.
Args:
ckpt_path: path of checkpoint being evaluated
@@ -444,7 +443,6 @@ def _eval_checkpoint(
)

for step in range(planner_actions_in.size(0)):

(
planner_scores,
planner_hidden,
@@ -62,7 +62,6 @@ def _save_vqa_results(
q_vocab_dict: VocabDict,
ans_vocab_dict: VocabDict,
) -> None:

r"""For saving VQA results.
Args:
ckpt_idx: idx of checkpoint being evaluated
@@ -395,7 +394,6 @@ def _eval_checkpoint(
% config.habitat_baselines.il.eval_save_results_interval
== 0
):

self._save_vqa_results(
checkpoint_index,
episode_ids,
1 change: 1 addition & 0 deletions habitat-baselines/habitat_baselines/rl/ddppo/algo/ddppo.py
@@ -98,6 +98,7 @@ def init_distributed(self, find_unused_params: bool = True) -> None:
forward pass, otherwise the gradient reduction
will not work correctly.
"""

# NB: Used to hide the hooks from the nn.Module,
# so they don't show up in the state_dict
class Guard: # noqa: SIM119
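
A few of the 9 additions go the other way: a blank line is inserted between a docstring and the commented definition that follows it, as in this hunk (before the nested Guard class) and in the BoxActionEmbedding hunk further down. A hypothetical sketch of the same before/after shape, not code from the repository:

# Hypothetical illustration of the blank line added after a docstring.
class EmbeddingOld:
    """Docstring followed directly by a commented definition."""
    # sinusoidal embedding helper
    def __init__(self, dim_per_action: int = 32):
        self.dim_per_action = dim_per_action


class EmbeddingNew:
    """With the update, a blank line separates the docstring from what follows."""

    # sinusoidal embedding helper
    def __init__(self, dim_per_action: int = 32):
        self.dim_per_action = dim_per_action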
@@ -172,7 +172,6 @@ def act(
masks,
deterministic=False,
):

self._high_level_policy.apply_mask(masks) # type: ignore[attr-defined]

should_terminate: torch.BoolTensor = torch.zeros(
@@ -40,7 +40,6 @@ def _is_skill_done(
def _is_skill_done(
self, observations, rnn_hidden_states, prev_actions, masks, batch_idx
) -> torch.BoolTensor:

cur_resting_pos = observations[RelativeRestingPositionSensor.cls_uuid]

did_leave_start_zone = (
@@ -24,6 +24,7 @@ class BoxActionEmbedding(nn.Module):
x = (a - low) / (high - low). This assumes that the bounds
in the action space are tight
"""

# NeRF style sinusoidal embedding for continuous actions
def __init__(self, action_space: gym.spaces.Box, dim_per_action: int = 32):
super().__init__()
@@ -113,7 +114,6 @@ def __init__(self, action_space: ActionSpace, dim_per_action: int = 32):
)
self.embedding_slices.append(slice(0, 1))
else:

ptr = 0
for space in iterate_action_space_recursively(action_space):
if isinstance(space, gym.spaces.Box):
2 changes: 0 additions & 2 deletions habitat-baselines/habitat_baselines/rl/ppo/ppo.py
@@ -62,7 +62,6 @@ def __init__(
entropy_target_factor: float = 0.0,
use_adaptive_entropy_pen: bool = False,
) -> None:

super().__init__()

self.actor_critic = actor_critic
@@ -159,7 +158,6 @@ def update(
self,
rollouts: RolloutStorage,
) -> Dict[str, float]:

advantages = self.get_advantages(rollouts)

learner_metrics = collections.defaultdict(list)
1 change: 0 additions & 1 deletion habitat-baselines/habitat_baselines/rl/ver/queue.py
@@ -50,7 +50,6 @@ def get_many(
return msgs

def put_many(self, xs, block=True, timeout=10.0):

t_start = time.perf_counter()
n_put = 0
for x in xs:
1 change: 0 additions & 1 deletion habitat-baselines/habitat_baselines/rl/ver/ver_trainer.py
@@ -383,7 +383,6 @@ def _update_agent(self):
ppo_cfg = self.config.habitat_baselines.rl.ppo

with self.timer.avg_time("learn"):

t_compute_returns = time.perf_counter()

with self.timer.avg_time("compute returns"), inference_mode():
1 change: 0 additions & 1 deletion habitat-lab/habitat/core/dataset.py
@@ -194,7 +194,6 @@ def get_episode_iterator(self, *args: Any, **kwargs: Any) -> Iterator[T]:
return EpisodeIterator(self.episodes, *args, **kwargs)

def to_json(self) -> str:

result = DatasetJSONEncoder().encode(self)
return result

1 change: 0 additions & 1 deletion habitat-lab/habitat/core/embodied_task.py
@@ -261,7 +261,6 @@ def __init__(
self._is_episode_active = False

def _init_entities(self, entities_configs, register_func) -> OrderedDict:

entities = OrderedDict()
for entity_name, entity_cfg in entities_configs.items():
entity_cfg = OmegaConf.create(entity_cfg)
1 change: 0 additions & 1 deletion habitat-lab/habitat/core/utils.py
@@ -143,7 +143,6 @@ class DatasetFloatJSONEncoder(DatasetJSONEncoder):
# Overriding method to inject own `_repr` function for floats with needed
# precision.
def iterencode(self, o, _one_shot=False):

markers: Optional[Dict] = {} if self.check_circular else None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
1 change: 0 additions & 1 deletion habitat-lab/habitat/datasets/vln/r2r_vln_dataset.py
@@ -54,7 +54,6 @@ def __init__(self, config: Optional["DictConfig"] = None) -> None:
def from_json(
self, json_str: str, scenes_dir: Optional[str] = None
) -> None:

deserialized = json.loads(json_str)
self.instruction_vocab = VocabDict(
word_list=deserialized["instruction_vocab"]["word_list"]
3 changes: 2 additions & 1 deletion habitat-lab/habitat/robots/manipulator.py
@@ -181,7 +181,8 @@ def update(self) -> None:

def reset(self) -> None:
"""Reset the joints on the existing robot.
NOTE: only arm and gripper joint motors (not gains) are reset by default, derived class should handle any other changes."""
NOTE: only arm and gripper joint motors (not gains) are reset by default, derived class should handle any other changes.
"""
self.sim_obj.clear_joint_states()
self.arm_joint_pos = self.params.arm_init_params
self._fix_joint_values = None
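
This hunk and the matching ones in mobile_manipulator.py and static_manipulator.py below apply another formatting rule from the update: the closing quotes of a multi-line docstring are moved onto their own line. A short sketch with a hypothetical class, not copied from the commit:

# Hypothetical illustration of the docstring closing-quote change.
class FakeRobot:
    def reset_old(self) -> None:
        """Reset the joints on the robot.
        NOTE: only joint motors are reset by default."""

    def reset_new(self) -> None:
        """Reset the joints on the robot.
        NOTE: only joint motors are reset by default.
        """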
3 changes: 2 additions & 1 deletion habitat-lab/habitat/robots/mobile_manipulator.py
@@ -152,6 +152,7 @@ def update(self) -> None:

def reset(self) -> None:
"""Reset the joints on the existing robot.
NOTE: only arm and gripper joint motors (not gains) are reset by default, derived class should handle any other changes."""
NOTE: only arm and gripper joint motors (not gains) are reset by default, derived class should handle any other changes.
"""
Manipulator.reset(self)
RobotBase.reset(self)
1 change: 0 additions & 1 deletion habitat-lab/habitat/robots/robot_base.py
@@ -226,7 +226,6 @@ def is_base_link(self, link_id: int) -> bool:
)

def update_base(self, rigid_state, target_rigid_state):

end_pos = self._sim.step_filter(
rigid_state.translation, target_rigid_state.translation
)
3 changes: 2 additions & 1 deletion habitat-lab/habitat/robots/static_manipulator.py
@@ -96,5 +96,6 @@ def update(self) -> None:

def reset(self) -> None:
"""Reset the joints on the existing robot.
NOTE: only arm and gripper joint motors (not gains) are reset by default, derived class should handle any other changes."""
NOTE: only arm and gripper joint motors (not gains) are reset by default, derived class should handle any other changes.
"""
Manipulator.reset(self)
2 changes: 0 additions & 2 deletions habitat-lab/habitat/tasks/nav/instance_image_nav_task.py
@@ -200,7 +200,6 @@ def get_observation(
episode: InstanceImageGoalNavEpisode,
**kwargs: Any,
) -> Optional[VisualObservation]:

if len(episode.goals) == 0:
logger.error(
f"No goal specified for episode {episode.episode_id}."
@@ -243,7 +242,6 @@ def _get_sensor_type(self, *args: Any, **kwargs: Any):
def get_observation(
self, *args: Any, episode: InstanceImageGoalNavEpisode, **kwargs: Any
) -> np.ndarray:

if len(episode.goals) == 0:
logger.error(
f"No goal specified for episode {episode.episode_id}."
1 change: 0 additions & 1 deletion habitat-lab/habitat/tasks/nav/nav.py
@@ -752,7 +752,6 @@ def _draw_goals_view_points(self, episode):

def _draw_goals_positions(self, episode):
if self._config.draw_goal_positions:

for goal in episode.goals:
if self._is_on_same_floor(goal.position[1]):
try:
1 change: 0 additions & 1 deletion habitat-lab/habitat/tasks/nav/object_nav_task.py
@@ -152,7 +152,6 @@ def get_observation(
episode: ObjectGoalNavEpisode,
**kwargs: Any,
) -> Optional[np.ndarray]:

if len(episode.goals) == 0:
logger.error(
f"No goal specified for episode {episode.episode_id}."
1 change: 0 additions & 1 deletion habitat-lab/habitat/tasks/nav/shortest_path_follower.py
@@ -44,7 +44,6 @@ def __init__(
return_one_hot: bool = True,
stop_on_error: bool = True,
):

self._return_one_hot = return_one_hot
self._sim = sim
self._goal_radius = goal_radius
1 change: 0 additions & 1 deletion habitat-lab/habitat/tasks/rearrange/actions/actions.py
@@ -329,7 +329,6 @@ def _set_robot_state(self, set_dat):
self.cur_robot.sim_obj.joint_forces = set_dat["pos"]

def update_base(self):

ctrl_freq = self._sim.ctrl_freq

before_trans_state = self._capture_robot_state()
@@ -193,7 +193,6 @@ def init_task(
should_reset: bool = True,
add_task_kwargs: Optional[Dict[str, Any]] = None,
) -> RearrangeTask:

rearrange_logger.debug(
f"Loading task {self._task_info.task} with definition {self._task_info.task_def}"
)
@@ -83,7 +83,6 @@ def _generate_nav_start_goal(self, episode, force_idx=None) -> NavToInfo:
# Select an object at random and navigate to that object.
all_pos = self._sim.get_target_objs_start()
if force_idx is None:

nav_to_pos = all_pos[np.random.randint(0, len(all_pos))]
else:
nav_to_pos = all_pos[force_idx]
1 change: 0 additions & 1 deletion test/test_baselines_hydra.py
@@ -24,7 +24,6 @@ def my_app_compose_api() -> "DictConfig":
# This is needed for apps that cannot have
# a standard @hydra.main() entry point
with initialize(version_base=None):

cfg = compose(
overrides=[
"+habitat_baselines=habitat_baselines_rl_config_base",
1 change: 0 additions & 1 deletion test/test_habitat_sim.py
@@ -29,7 +29,6 @@ def test_sim_trajectory():
with open("test/data/habitat-sim_trajectory_data.json", "r") as f:
test_trajectory = json.load(f)
with init_sim() as sim:

sim.reset()
sim.set_agent_state(
position=test_trajectory["positions"][0],
1 change: 0 additions & 1 deletion test/test_mp3d_eqa.py
@@ -103,7 +103,6 @@ def test_mp3d_eqa_dataset():

@pytest.mark.parametrize("split", ["train", "val"])
def test_dataset_splitting(split):

dataset_config = get_config(CFG_TEST).habitat.dataset
with habitat.config.read_write(dataset_config):
dataset_config.split = split
1 change: 0 additions & 1 deletion test/test_pyrobot.py
@@ -53,7 +53,6 @@ def test_pyrobot(mocker):
_try_register_pyrobot()

with make_sim("PyRobot-v0", config=PyrobotConfig()) as reality:

_ = reality.reset()
_ = reality.step(
"go_to_relative",