Merge pull request #343 from incf-nidash/mktmps
Write test temp files to temp directories
yarikoptic authored May 3, 2023
2 parents 5cdc778 + 9cfd0b7 commit a4c1ba2
Showing 12 changed files with 476 additions and 706 deletions.
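
The recurring change in this commit is a switch to pytest's built-in tmp_path fixture: instead of writing scratch files such as test.ttl into the current working directory, each test receives a unique temporary directory that pytest creates and later cleans up. A minimal illustrative sketch of the pattern (standard pytest; the file name and content below are placeholders, not taken from the test suite):

    from pathlib import Path


    def test_writes_scratch_file(tmp_path: Path) -> None:
        # pytest injects tmp_path by argument name: a fresh, empty
        # pathlib.Path unique to this test invocation.
        out = tmp_path / "test.ttl"
        out.write_text("@prefix ex: <http://example.org/> .\n")
        # Nothing is left behind in the repository or the working directory.
        assert out.read_text().startswith("@prefix")
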
24 changes: 12 additions & 12 deletions tests/core/test_provone.py
@@ -23,7 +23,7 @@ def doc():
return d1


def test_ispartof(doc):
def test_ispartof(doc, tmp_path) -> None:
workflow_1ex1 = doc.processExec(
"dcterms:identifier:wf1_ex1",
"2013-08-21 13:37:54",
@@ -37,11 +37,11 @@ def test_ispartof(doc):
doc.isPartOf(pe1, workflow_1ex1)

# save a turtle file
with open("test.ttl", "w") as f:
with open(tmp_path / "test.ttl", "w") as f:
f.write(doc.serialize(format="rdf", rdf_format="ttl"))


def test_used(doc):
def test_used(doc, tmp_path) -> None:
pe1 = doc.processExec(
"dcterms:identifier:e1_ex1", "2013-08-21 13:37:53", "2013-08-21 13:37:53"
)
@@ -56,11 +56,11 @@ def test_used(doc):
doc.used(pe1, dt1)

# save a turtle file
with open("test.ttl", "w") as f:
with open(tmp_path / "test.ttl", "w") as f:
f.write(doc.serialize(format="rdf", rdf_format="ttl"))


def test_wasderivedfrom(doc):
def test_wasderivedfrom(doc, tmp_path) -> None:
dt1 = doc.data(
"dcterms:identifier:defparam1",
{
@@ -73,27 +73,27 @@ def test_wasderivedfrom(doc):
doc.wasDerivedFrom(dt1, dt2)

# save a turtle file
with open("test.ttl", "w") as f:
with open(tmp_path / "test.ttl", "w") as f:
f.write(doc.serialize(format="rdf", rdf_format="ttl"))


def test_dataonlink(doc):
def test_dataonlink(doc, tmp_path) -> None:
dt2 = doc.data("dcterms:identifier:defparam2", {"rdfs:label": "filename"})
dl1 = doc.dataLink("dcterms:identifier:e1_e2DL")
# save a turtle file
with open("test.ttl", "w") as f:
with open(tmp_path / "test.ttl", "w") as f:
f.write(doc.serialize(format="rdf", rdf_format="ttl"))
doc.dataOnLink(dt2, dl1)


def test_wasgeneratedby(doc):
def test_wasgeneratedby(doc, tmp_path) -> None:
dt2 = doc.data("dcterms:identifier:defparam2", {"rdfs:label": "filename"})
pe1 = doc.processExec(
"dcterms:identifier:e1_ex1", "2013-08-21 13:37:53", "2013-08-21 13:37:53"
)
doc.wasGeneratedBy(dt2, pe1)
# save a turtle file
with open("test.ttl", "w") as f:
with open(tmp_path / "test.ttl", "w") as f:
f.write(doc.serialize(format="rdf", rdf_format="ttl"))


@@ -135,9 +135,9 @@ def test_dltoinport(doc):
doc.DLToInPort(dl1, i1)


def test_documentserialize(doc):
def test_documentserialize(doc, tmp_path) -> None:
# save a turtle file
with open("test.ttl", "w") as f:
with open(tmp_path / "test.ttl", "w") as f:
f.write(doc.serialize(format="rdf", rdf_format="ttl"))


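The provone tests above only check that serialization to Turtle completes without raising. A slightly stronger check would parse the written file back in; this is a hypothetical extension, not part of this commit, assuming rdflib (which the test suite already imports elsewhere):

    from pathlib import Path

    import rdflib


    def assert_valid_turtle(path: Path) -> None:
        # A parse failure raises, and an empty graph would mean nothing
        # was actually serialized.
        graph = rdflib.Graph()
        graph.parse(str(path), format="turtle")
        assert len(graph) > 0
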
66 changes: 66 additions & 0 deletions tests/experiment/conftest.py
@@ -0,0 +1,66 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
import urllib.request
import pytest
from nidm.experiment.tools.rest import RestParser

# We will test example NIDM files downloaded from
# the GitHub dbkeator/simple2_NIDM_examples repo
#
# DBK: this is a bit unsafe, as the TTL files in the GitHub repo above can change,
# and the UUIDs will change since they are randomly generated at this point.
# It is probably more robust, for the time being, to create these files explicitly
# and to set the UUID explicitly in the test file:
# For example: kwargs={Constants.NIDM_PROJECT_NAME:"FBIRN_PhaseIII",Constants.NIDM_PROJECT_IDENTIFIER:1200,Constants.NIDM_PROJECT_DESCRIPTION:"Test investigation2"}
# project = Project(uuid="_654321",attributes=kwargs)


@pytest.fixture(scope="session")
def brain_vol_files(tmp_path_factory: pytest.TempPathFactory) -> list[str]:
tmp_path = tmp_path_factory.mktemp("brain_vol_files")
urllib.request.urlretrieve(
"https://raw.githubusercontent.com/dbkeator/simple2_NIDM_examples/master/datasets.datalad.org/abide/RawDataBIDS/CMU_a/nidm.ttl",
tmp_path / "cmu_a.nidm.ttl",
)
urllib.request.urlretrieve(
"https://raw.githubusercontent.com/dbkeator/simple2_NIDM_examples/master/datasets.datalad.org/abide/RawDataBIDS/Caltech/nidm.ttl",
tmp_path / "caltech.nidm.ttl",
)
return [
str(tmp_path / "cmu_a.nidm.ttl"),
str(tmp_path / "caltech.nidm.ttl"),
]


@dataclass
class BrainVol:
files: list[str]
restParser: RestParser
cmu_test_project_uuid: str
cmu_test_subject_uuid: str


@pytest.fixture(scope="session")
def brain_vol(brain_vol_files: list[str]) -> BrainVol:
restParser = RestParser(output_format=RestParser.OBJECT_FORMAT)
projects = restParser.run(brain_vol_files, "/projects")
cmu_test_project_uuid: Optional[str] = None
for p in projects:
proj_info = restParser.run(brain_vol_files, f"/projects/{p}")
if (
isinstance(proj_info, dict)
and proj_info.get("dctypes:title") == "ABIDE - CMU_a"
):
cmu_test_project_uuid = p
break
assert cmu_test_project_uuid is not None
subjects = restParser.run(
brain_vol_files, f"/projects/{cmu_test_project_uuid}/subjects"
)
cmu_test_subject_uuid = subjects["uuid"][0]
return BrainVol(
brain_vol_files,
restParser,
cmu_test_project_uuid,
cmu_test_subject_uuid,
)
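
The comment at the top of this new conftest.py notes that downloading the TTL files is fragile and suggests generating them locally with fixed UUIDs instead. A rough sketch of that direction, using the Project and Constants API shown elsewhere in this commit (hypothetical fixture name, and not a drop-in replacement: the REST queries above also expect subject and session data in the files):

    import pytest
    from nidm.core import Constants
    from nidm.experiment import Project


    @pytest.fixture(scope="session")
    def local_nidm_file(tmp_path_factory: pytest.TempPathFactory) -> str:
        # Fixed UUID so tests can refer to the project deterministically,
        # as the comment above suggests.
        kwargs = {
            Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseIII",
            Constants.NIDM_PROJECT_IDENTIFIER: 1200,
            Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation2",
        }
        project = Project(uuid="_654321", attributes=kwargs)
        out = tmp_path_factory.mktemp("local_nidm") / "local.nidm.ttl"
        out.write_text(project.serializeTurtle())
        return str(out)
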
5 changes: 4 additions & 1 deletion tests/experiment/test_experiment.py
@@ -1,3 +1,5 @@
from pathlib import Path
import pytest
from nidm.core import Constants
from nidm.experiment import (
AssessmentAcquisition,
@@ -102,5 +104,6 @@ def main():


# very simple test, just checking that main() does not raise any error
def test_main():
def test_main(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
monkeypatch.chdir(tmp_path)
main()
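
Here main() writes wherever the current working directory happens to be, so the test redirects the working directory itself with monkeypatch.chdir rather than changing main(). The pattern in isolation (standard pytest fixtures; run() below is a stand-in for code that writes relative paths):

    from pathlib import Path

    import pytest


    def run() -> None:
        # Stand-in for code under test that writes into the CWD.
        Path("output.ttl").write_text("...")


    def test_run(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
        monkeypatch.chdir(tmp_path)  # restored automatically after the test
        run()
        assert (tmp_path / "output.ttl").exists()
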
33 changes: 10 additions & 23 deletions tests/experiment/test_experiment_basic.py
@@ -2,39 +2,31 @@
import json
from pathlib import Path
import prov
import pytest
import rdflib
from nidm.core import Constants
from nidm.experiment import Project, Session


def test_1(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
monkeypatch.chdir(tmp_path)

def test_1(tmp_path: Path) -> None:
project = Project()

# save a turtle file
with open("test.ttl", "w") as f:
with open(tmp_path / "test.ttl", "w") as f:
f.write(project.serializeTurtle())


def test_2(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
monkeypatch.chdir(tmp_path)

def test_2(tmp_path: Path) -> None:
kwargs = {
Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
Constants.NIDM_PROJECT_IDENTIFIER: 9610,
Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation",
}
project = Project(attributes=kwargs)

with open("test.ttl", "w") as f:
with open(tmp_path / "test.ttl", "w") as f:
f.write(project.serializeTurtle())


def test_sessions_1(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
monkeypatch.chdir(tmp_path)

def test_sessions_1() -> None:
project = Project()
assert project.sessions == []

@@ -48,19 +40,15 @@ def test_sessions_1(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
assert session2.label == project.sessions[1].label


def test_sessions_2(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
monkeypatch.chdir(tmp_path)

def test_sessions_2() -> None:
project = Project()
assert project.sessions == []

session1 = Session(project)
assert project.sessions[0].label == session1.label


def test_sessions_3(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
monkeypatch.chdir(tmp_path)

def test_sessions_3() -> None:
project1 = Project()
project2 = Project()

@@ -188,7 +176,7 @@ def test_session_noparameters():
assert len(proj.graph.get_records()) == 2


def test_jsonld_exports():
def test_jsonld_exports(tmp_path: Path) -> None:
kwargs = {
Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
Constants.NIDM_PROJECT_IDENTIFIER: 9610,
Expand All @@ -197,16 +185,15 @@ def test_jsonld_exports():
project = Project(uuid="_123456", attributes=kwargs)

# save a JSON-LD file
with open("test.json", "w") as f:
with open(tmp_path / "test.json", "w") as f:
f.write(project.serializeJSONLD())

# load in JSON file
with open("test.json") as json_file:
with open(tmp_path / "test.json") as json_file:
data = json.load(json_file)

assert data["Identifier"]["@value"] == "9610"
# WIP Read back in json-ld file and check that we have the project info
# remove("test.json")


def test_project_trig_serialization():
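
The test_jsonld_exports change above already reads the exported file back in; the trailing "WIP" note asks for a fuller check of the project info. A hypothetical sketch of that check, reusing the identifier assertion already present in the test:

    import json
    from pathlib import Path


    def check_project_in_jsonld(path: Path) -> None:
        # Hypothetical completion of the WIP note: confirm the project
        # identifier survives the JSON-LD round trip.
        with open(path) as fp:
            data = json.load(fp)
        assert data["Identifier"]["@value"] == "9610"
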
52 changes: 29 additions & 23 deletions tests/experiment/test_map_vars_to_terms.py
@@ -1,15 +1,20 @@
from dataclasses import dataclass
import json
from os.path import join
import tempfile
from pathlib import Path
import pandas as pd
import pytest
from nidm.experiment.Utils import map_variables_to_terms


@pytest.fixture(scope="module", autouse="True")
def setup():
global DATA, REPROSCHEMA_JSON_MAP, BIDS_SIDECAR
@dataclass
class Setup:
data: pd.DataFrame
reproschema_json_map: dict
bids_sidecar: dict


@pytest.fixture(scope="module")
def setup() -> Setup:
temp = {
"participant_id": [
"100",
@@ -27,9 +32,9 @@ def setup():
"sex": ["m", "m", "f", "m", "f", "f", "f", "f", "m", "m"],
}

DATA = pd.DataFrame(temp)
data = pd.DataFrame(temp)

REPROSCHEMA_JSON_MAP = json.loads(
reproschema_json_map = json.loads(
"""
{
"DD(source='participants.tsv', variable='participant_id')": {
@@ -89,7 +94,7 @@ def setup():
}"""
)

BIDS_SIDECAR = json.loads(
bids_sidecar = json.loads(
"""
{
"age": {
@@ -128,23 +133,26 @@ def setup():
]
}
}
"""
)

return Setup(
data=data,
reproschema_json_map=reproschema_json_map,
bids_sidecar=bids_sidecar,
)


def test_map_vars_to_terms_BIDS():
def test_map_vars_to_terms_BIDS(setup: Setup, tmp_path: Path) -> None:
"""
This function will test the Utils.py "map_vars_to_terms" function with a BIDS-formatted
JSON sidecar file
"""

global DATA, BIDS_SIDECAR

column_to_terms, cde = map_variables_to_terms(
df=DATA,
json_source=BIDS_SIDECAR,
directory=tempfile.gettempdir(),
df=setup.data,
json_source=setup.bids_sidecar,
directory=str(tmp_path),
assessment_name="test",
bids=True,
)
@@ -197,7 +205,7 @@ def test_map_vars_to_terms_BIDS():
)

# now check the JSON sidecar file created by map_variables_to_terms which should match BIDS format
with open(join(tempfile.gettempdir(), "nidm_annotations.json")) as fp:
with open(tmp_path / "nidm_annotations.json") as fp:
bids_sidecar = json.load(fp)

assert "age" in bids_sidecar.keys()
@@ -237,18 +245,16 @@ def test_map_vars_to_terms_BIDS():
assert len(results) == 20


def test_map_vars_to_terms_reproschema():
def test_map_vars_to_terms_reproschema(setup: Setup, tmp_path: Path) -> None:
"""
This function will test the Utils.py "map_vars_to_terms" function with a reproschema-formatted
JSON sidecar file
"""

global DATA, REPROSCHEMA_JSON_MAP

column_to_terms, cde = map_variables_to_terms(
df=DATA,
json_source=REPROSCHEMA_JSON_MAP,
directory=tempfile.gettempdir(),
df=setup.data,
json_source=setup.reproschema_json_map,
directory=str(tmp_path),
assessment_name="test",
)

@@ -300,7 +306,7 @@ def test_map_vars_to_terms_reproschema():
)

# now check the JSON mapping file created by map_variables_to_terms which should match Reproschema format
with open(join(tempfile.gettempdir(), "nidm_annotations.json")) as fp:
with open(tmp_path / "nidm_annotations_annotations.json") as fp:
json.load(fp)

assert "DD(source='test', variable='age')" in column_to_terms.keys()
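
The last file shown replaces module-level globals mutated by an autouse fixture with a small dataclass returned by an ordinary module-scoped fixture, so each test now states its dependency explicitly in its signature. The shape of that pattern on its own (generic illustration; the names below are not from the codebase):

    from dataclasses import dataclass

    import pytest


    @dataclass
    class Config:
        values: list[int]


    @pytest.fixture(scope="module")
    def config() -> Config:
        # Built once per module and handed to tests as a return value,
        # instead of being stashed in globals by an autouse fixture.
        return Config(values=[1, 2, 3])


    def test_uses_config(config: Config) -> None:
        assert sum(config.values) == 6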