Skip to content

Commit

Permalink
Use ruff also as formatter
Browse files Browse the repository at this point in the history
  • Loading branch information
larsevj committed Apr 2, 2024
1 parent f4433dc commit c801375
Show file tree
Hide file tree
Showing 21 changed files with 37 additions and 61 deletions.
10 changes: 3 additions & 7 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,18 +9,14 @@ repos:
exclude: test-data/eclipse/parse/ERROR.PRT # exact format is needed for testing

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.1.14
rev: v0.3.5
hooks:
- id: ruff
args: [ --fix ]

- repo: https://github.com/psf/black
rev: 24.1.0
hooks:
- id: black
- id: ruff-format

- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v17.0.6
rev: v18.1.1
hooks:
- id: clang-format
args: [ --style=file, --Werror]
Expand Down
3 changes: 0 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -160,9 +160,6 @@ log_cli = "false"
asyncio_mode = "auto"
timeout = 360

[tool.black]
include = '(\.pyi?|\.ipynb|\.py\.j2)$'

[tool.setuptools_scm]
write_to = "src/ert/shared/version.py"

Expand Down
10 changes: 5 additions & 5 deletions script/build
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
#!/usr/bin/env python3
import argparse
import os
import re
import shutil
import subprocess
import sys
import re
from typing import Optional, Union
from pathlib import Path
from typing import List, Optional, Union

SOURCE_ROOT = Path(__file__).parent.parent
VERBOSE = False
Expand Down Expand Up @@ -140,7 +140,7 @@ def install_build_requires() -> None:
"""
extra_req: List[str] = []
try:
import importlib.metadata
import importlib.metadata # noqa F401
except ImportError:
# Add 'importlib_metadata' to dependencies so that we can later detect
# the runtime dependencies of this project.
Expand Down Expand Up @@ -233,7 +233,7 @@ def main() -> None:
sys.exit("This script must be run from a virtualenv (override with -f).")

if args.verbose:
global VERBOSE
global VERBOSE # noqa PLW0603
VERBOSE = True

setup_args: List[str] = []
Expand Down Expand Up @@ -273,7 +273,7 @@ def main() -> None:
*cmake_args,
verbose=True,
)
except subprocess.CalledProcessError as exc:
except subprocess.CalledProcessError:
print_error("Build failed. If the output makes no sense, try these steps:")
print_error("1. Delete '_skbuild' directory")
print_error(f"2. Delete conan directory: {Path.home() / '.conan'}")
Expand Down
2 changes: 1 addition & 1 deletion src/_ert_job_runner/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
""" _ert_job_runner is called by ert to run jobs in the runpath.
"""_ert_job_runner is called by ert to run jobs in the runpath.
Its is split into its own toplevel package for performance reasons,
simply importing ert can take several seconds, which is not ideal when
Expand Down
4 changes: 3 additions & 1 deletion src/ert/analysis/_es_update.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,9 @@ def _get_obs_and_measure_data(
response = ensemble.load_responses(group, tuple(iens_active_index))
if "time" in observation.coords:
response = response.reindex(
time=observation.time, method="nearest", tolerance="1s" # type: ignore
time=observation.time,
method="nearest",
tolerance="1s", # type: ignore
)
try:
filtered_response = observation.merge(response, join="left")
Expand Down
1 change: 0 additions & 1 deletion src/ert/cli/model_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@
from ert.validation import ActiveRange

if TYPE_CHECKING:

import numpy.typing as npt

from ert.namespace import Namespace
Expand Down
2 changes: 1 addition & 1 deletion src/ert/config/parsing/lark_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,7 @@ def substitute_arglist_tuple(
return tuple([key, substituted_value])

def substitute_arg(
arg: Union[FileContextToken, List[Tuple[FileContextToken]]]
arg: Union[FileContextToken, List[Tuple[FileContextToken]]],
) -> Union[FileContextToken, List[Tuple[FileContextToken]]]:
if isinstance(arg, FileContextToken):
return _substitute_token(defines, arg, constraints.expand_envvar)
Expand Down
6 changes: 4 additions & 2 deletions src/ert/config/parsing/observations_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,9 @@ def parse(filename: str) -> ConfContent:
)


def _parse_content(content: str, filename: str) -> List[
def _parse_content(
content: str, filename: str
) -> List[
Union[
SimpleHistoryDeclaration,
Tuple[ObservationType, FileContextToken, Dict[FileContextToken, Any]],
Expand Down Expand Up @@ -278,7 +280,7 @@ def _validate_conf_content(


def _validate_unique_names(
conf_content: Sequence[Tuple[FileContextToken, Any]]
conf_content: Sequence[Tuple[FileContextToken, Any]],
) -> None:
names_counter = Counter(n for n, _ in conf_content)
duplicate_names = [n for n, c in names_counter.items() if c > 1]
Expand Down
2 changes: 1 addition & 1 deletion src/ert/dark_storage/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,7 @@ def get_observation_keys_for_response(


def _prepare_x_axis(
x_axis: Sequence[Union[int, float, str, pd.Timestamp]]
x_axis: Sequence[Union[int, float, str, pd.Timestamp]],
) -> List[str]:
"""Converts the elements of x_axis of an observation to a string suitable
for json. If the elements are timestamps, convert to ISO-8601 format.
Expand Down
2 changes: 1 addition & 1 deletion src/ert/ensemble_evaluator/snapshot.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ class UnsupportedOperationException(ValueError):


def convert_iso8601_to_datetime(
timestamp: Union[datetime.datetime, str]
timestamp: Union[datetime.datetime, str],
) -> datetime.datetime:
if isinstance(timestamp, datetime.datetime):
return timestamp
Expand Down
2 changes: 1 addition & 1 deletion src/ert/gui/model/snapshot.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ def __init__(self, parent=None) -> None:

@staticmethod
def prerender(
snapshot: Union[Snapshot, PartialSnapshot]
snapshot: Union[Snapshot, PartialSnapshot],
) -> Optional[Union[Snapshot, PartialSnapshot]]:
"""Pre-render some data that is required by this model. Ideally, this
is called outside the GUI thread. This is a requirement of the model,
Expand Down
1 change: 0 additions & 1 deletion src/ert/gui/simulation/view/update.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,6 @@ def begin(self, _: RunModelUpdateBeginEvent) -> None:

@Slot(RunModelUpdateEndEvent)
def end(self, event: RunModelUpdateEndEvent) -> None:

self._progress_msg.setText(
f"Update completed ({humanize.precisedelta(time.perf_counter() - self._start_time)})"
)
Expand Down
1 change: 0 additions & 1 deletion src/ert/job_queue/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@
# the use of an LSF_HOME variable is something invented with ERT,
# and not standard LSF approach.


import os
import os.path
import warnings
Expand Down
11 changes: 4 additions & 7 deletions tests/unit_tests/config/parsing/test_observations_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,16 +124,13 @@ def test_that_unexpected_character_gives_observation_config_error():


def test_that_double_comments_are_handled():
assert (
_parse_content(
"""
assert _parse_content(
"""
SUMMARY_OBSERVATION -- foo -- bar -- baz
FOPR;
""",
"",
)
== [(ObservationType.SUMMARY, "FOPR")]
)
"",
) == [(ObservationType.SUMMARY, "FOPR")]


@pytest.mark.usefixtures("use_tmpdir")
Expand Down
2 changes: 0 additions & 2 deletions tests/unit_tests/config/test_ert_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -617,7 +617,6 @@ def test_queue_config_max_running_invalid_values(max_running_value, expected_err
def test_num_cpu_vs_torque_queue_cpu_configuration(
num_cpu_int, num_nodes_int, num_cpus_per_node_int
):

# Only strictly positive ints are valid configuration values,
# zero values are used to represent the "not configured" scenario:
num_cpu = str(num_cpu_int) if num_cpu_int > 0 else ""
Expand Down Expand Up @@ -1525,7 +1524,6 @@ def test_that_context_types_are_json_serializable():

@pytest.mark.usefixtures("copy_snake_oil_case")
def test_no_timemap_or_refcase_provides_clear_error():

with fileinput.input("snake_oil.ert", inplace=True) as fin:
for line in fin:
if line.startswith("REFCASE"):
Expand Down
18 changes: 4 additions & 14 deletions tests/unit_tests/config/test_forward_model_data_to_json.py
Original file line number Diff line number Diff line change
Expand Up @@ -612,9 +612,7 @@ def test_forward_model_job(job, forward_model, expected_args):
"",
0,
0,
)["jobList"][
0
]["argList"]
)["jobList"][0]["argList"]
== expected_args
)

Expand Down Expand Up @@ -644,11 +642,7 @@ def test_that_config_path_is_the_directory_of_the_main_ert_config():
"",
0,
0,
)[
"jobList"
][0][
"argList"
] == [os.getcwd()]
)["jobList"][0]["argList"] == [os.getcwd()]


@pytest.mark.usefixtures("use_tmpdir")
Expand Down Expand Up @@ -718,9 +712,7 @@ def test_simulation_job(job, forward_model, expected_args):
"",
0,
0,
)[
"jobList"
][0]
)["jobList"][0]
assert job_data["argList"] == expected_args


Expand Down Expand Up @@ -828,8 +820,6 @@ def test_that_environment_variables_are_set_in_forward_model(
"",
0,
0,
)["jobList"][
0
]["argList"]
)["jobList"][0]["argList"]
== expected_args
)
9 changes: 5 additions & 4 deletions tests/unit_tests/config/test_queue_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,10 +132,11 @@ def test_that_overwriting_QUEUE_OPTIONS_warns(
ErtConfig.from_file(filename)
assert (
f"Overwriting QUEUE_OPTION {queue_system} {queue_system_option}: \n Old value:"
" test_0 \n New value: test_1" in caplog.text
and f"Overwriting QUEUE_OPTION {queue_system} MAX_RUNNING: \n Old value:"
" 10 \n New value: 10" not in caplog.text
)
" test_0 \n New value: test_1"
) in caplog.text and (
f"Overwriting QUEUE_OPTION {queue_system} MAX_RUNNING: \n Old value:"
" 10 \n New value: 10"
) not in caplog.text


@pytest.mark.usefixtures("use_tmpdir", "set_site_config")
Expand Down
3 changes: 2 additions & 1 deletion tests/unit_tests/gui/simulation/test_run_dialog.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,7 +239,8 @@ def test_large_snapshot(
index="0",
status=state.FORWARD_MODEL_STATE_FINISHED,
name="job_0",
).build(["1"], status=state.REALIZATION_STATE_RUNNING)
)
.build(["1"], status=state.REALIZATION_STATE_RUNNING)
),
phase_name="Foo",
current_phase=0,
Expand Down
4 changes: 1 addition & 3 deletions tests/unit_tests/job_queue/test_ert_qstat_proxy.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,7 @@

MOCKED_QSTAT_BACKEND = (
# NB: This mock does not support the job id as an argument.
'import time; time.sleep(0.5); print("""'
+ EXAMPLE_QSTAT_CONTENT
+ '""")'
'import time; time.sleep(0.5); print("""' + EXAMPLE_QSTAT_CONTENT + '""")'
)
MOCKED_QSTAT_BACKEND_FAILS = "import sys; sys.exit(1)"
MOCKED_QSTAT_BACKEND_LOGGING = (
Expand Down
4 changes: 1 addition & 3 deletions tests/unit_tests/job_runner/test_job_dispatch.py
Original file line number Diff line number Diff line change
Expand Up @@ -270,9 +270,7 @@ def test_job_dispatch_kills_itself_after_unsuccessful_job(unused_tcp_port):
"_ert_job_runner.cli.os.getpgid"
) as mock_getpgid, patch(
"_ert_job_runner.cli.open", new=mock_open(read_data=jobs_json)
), patch(
"_ert_job_runner.cli.JobRunner"
) as mock_runner:
), patch("_ert_job_runner.cli.JobRunner") as mock_runner:
mock_runner.return_value.run.return_value = [
Init([], 0, 0),
Finish().with_error("overall bad run"),
Expand Down
1 change: 0 additions & 1 deletion tests/unit_tests/scheduler/test_lsf_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,6 @@ def capturing_bsub(monkeypatch, tmp_path):

@given(st.lists(st.sampled_from(JobState.__args__)))
async def test_events_produced_from_jobstate_updates(jobstate_sequence: List[str]):

started = any(
state in jobstate_sequence
for state in RunningJob.model_json_schema()["properties"]["job_state"]["enum"]
Expand Down

0 comments on commit c801375

Please sign in to comment.