Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (#1554)

* Update to new tract list (#1554)

* Adjust for tests (#1848)

* Add tests for cdc_places (#1848)

* Add EJScreen tests (#1848)

* Add tests for HUD housing (#1848)

* Add tests for GeoCorr (#1848)

* Add persistent poverty tests (#1848)

* Update for sources without zips, for new validation (#1848)

* Update tests for new multi-CSV bug (#1848)

Lucas updated the CDC life expectancy ETL to handle a bug where two
states (Maine and Wisconsin) are missing from the US Overall download.
Since virtually none of our other ETL classes download multiple CSVs
directly like this, it required a pretty invasive new mocking strategy,
sketched below.
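
A sketch of the kind of mocking this involves (hypothetical test code, not
the PR's: fixture names are made up, and it assumes the ETL's download
helper goes through data_pipeline.utils.download_file_from_url):

# Hypothetical pytest fixture: patch the download helper so each of the
# three CSVs requested in extract() is served from its own local fixture
# file, instead of one canned response for every download.
from pathlib import Path
from unittest import mock

import pytest

from data_pipeline.etl.sources.cdc_life_expectancy.etl import CDCLifeExpectancy


@pytest.fixture
def mock_multi_csv_download():
    fixtures_dir = Path(__file__).parent / "data"
    fixtures = {
        CDCLifeExpectancy.USA_FILE_URL: "US_A.CSV",
        CDCLifeExpectancy.MAINE_FILE_URL: "maine.csv",
        CDCLifeExpectancy.WISCONSIN_FILE_URL: "wisconsin.csv",
    }

    def fake_download(file_url: str, download_file_name: Path, **kwargs):
        # Choose the canned CSV that matches the requested URL.
        download_file_name.parent.mkdir(parents=True, exist_ok=True)
        download_file_name.write_bytes(
            (fixtures_dir / fixtures[file_url]).read_bytes()
        )
        return download_file_name

    with mock.patch(
        "data_pipeline.etl.sources.cdc_life_expectancy.etl.download_file_from_url",
        side_effect=fake_download,
    ) as patched:
        yield patched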

* Add basic tests for nature deprived (#1848)

* Add wildfire tests (#1848)

* Add flood risk tests (#1848)

* Add DOT travel tests (#1848)

* Add historic redlining tests (#1848)

* Add tests for ME and WI (#1848)

* Update now that validation exists (#1848)

* Adjust for validation (#1848)

* Add health insurance back to cdc places (#1848)

Oops.

* Update tests with new field (#1848)

* Test for blank tract removal (#1848)

* Add tracts for clipping behavior

* Test clipping and zfill behavior (#1848)

* Fix bad test assumption (#1848)

* Simplify class, add test for tract padding (#1848)

* Fix percentage inversion, update tests (#1848)

Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0 and 100 from 1 instead of from 100,
and so were ending up with some surprising results (toy numbers below).
Confirmed with lucasmbrown-usds.
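
Toy numbers (made up) showing the failure mode:

insured_share = 85.0  # a 0-100 percentage, e.g. "% with health insurance"

buggy = 1 - insured_share    # -84.0: treats the value as a 0-1 fraction
fixed = 100 - insured_share  # 15.0: the uninsured share we actually want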

* Add note about first street data (#1848)
mattbowen-usds authored Sep 19, 2022
1 parent 4d02525 commit 876655d
Showing 88 changed files with 2,032 additions and 178 deletions.
81 changes: 42 additions & 39 deletions data/data-pipeline/data_pipeline/etl/base.py
@@ -115,56 +115,59 @@ class ExtractTransformLoad:
     # periods. https://github.com/usds/justice40-tool/issues/964
     EXPECTED_MAX_CENSUS_TRACTS: int = 74160
 
+    # Should this dataset load its configuration from
+    # the YAML files?
+    LOAD_YAML_CONFIG: bool = False
+
     # We use output_df as the final dataframe to use to write to the CSV
     # It is used on the "load" base class method
     output_df: pd.DataFrame = None
 
     def __init_subclass__(cls) -> None:
-        cls.DATASET_CONFIG = cls.yaml_config_load()
+        if cls.LOAD_YAML_CONFIG:
+            cls.DATASET_CONFIG = cls.yaml_config_load()
 
     @classmethod
-    def yaml_config_load(cls) -> Optional[dict]:
+    def yaml_config_load(cls) -> dict:
         """Generate config dictionary and set instance variables from YAML dataset."""
-        if cls.NAME is not None:
-            # check if the class instance has score YAML definitions
-            datasets_config = load_yaml_dict_from_file(
-                cls.DATASET_CONFIG_PATH / "datasets.yml",
-                DatasetsConfig,
-            )
+        # check if the class instance has score YAML definitions
+        datasets_config = load_yaml_dict_from_file(
+            cls.DATASET_CONFIG_PATH / "datasets.yml",
+            DatasetsConfig,
+        )
 
-            # get the config for this dataset
-            try:
-                dataset_config = next(
-                    item
-                    for item in datasets_config.get("datasets")
-                    if item["module_name"] == cls.NAME
-                )
-            except StopIteration:
-                # Note: it'd be nice to log the name of the dataframe, but that's not accessible in this scope.
-                logger.error(
-                    f"Exception encountered while extracting dataset config for dataset {cls.NAME}"
-                )
-                sys.exit()
+        # get the config for this dataset
+        try:
+            dataset_config = next(
+                item
+                for item in datasets_config.get("datasets")
+                if item["module_name"] == cls.NAME
+            )
+        except StopIteration:
+            # Note: it'd be nice to log the name of the dataframe, but that's not accessible in this scope.
+            logger.error(
+                f"Exception encountered while extracting dataset config for dataset {cls.NAME}"
+            )
+            sys.exit()
 
-            # set some of the basic fields
-            if "input_geoid_tract_field_name" in dataset_config:
-                cls.INPUT_GEOID_TRACT_FIELD_NAME = dataset_config[
-                    "input_geoid_tract_field_name"
-                ]
+        # set some of the basic fields
+        if "input_geoid_tract_field_name" in dataset_config:
+            cls.INPUT_GEOID_TRACT_FIELD_NAME = dataset_config[
+                "input_geoid_tract_field_name"
+            ]
 
-            # get the columns to write on the CSV
-            # and set the constants
-            cls.COLUMNS_TO_KEEP = [
-                cls.GEOID_TRACT_FIELD_NAME,  # always index with geoid tract id
-            ]
-            for field in dataset_config["load_fields"]:
-                cls.COLUMNS_TO_KEEP.append(field["long_name"])
-
-                # set the constants for the class
-                setattr(cls, field["df_field_name"], field["long_name"])
-            return dataset_config
-        return None
+        # get the columns to write on the CSV
+        # and set the constants
+        cls.COLUMNS_TO_KEEP = [
+            cls.GEOID_TRACT_FIELD_NAME,  # always index with geoid tract id
+        ]
+        for field in dataset_config["load_fields"]:
+            cls.COLUMNS_TO_KEEP.append(field["long_name"])
+
+            # set the constants for the class
+            setattr(cls, field["df_field_name"], field["long_name"])
+        return dataset_config
 
     # This is a classmethod so it can be used by `get_data_frame` without
     # needing to create an instance of the class. This is a use case in `etl_score`.
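
For context, a minimal sketch of how a source class opts in to the new
YAML-driven configuration (ExampleETL and its dataset entry are
hypothetical):

from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel


class ExampleETL(ExtractTransformLoad):
    NAME = "example_dataset"  # must match a module_name in datasets.yml
    GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
    LOAD_YAML_CONFIG = True  # opt in: __init_subclass__ calls yaml_config_load()

    # Declared only for linting/code completion; yaml_config_load() assigns
    # the real value (the field's long_name) via setattr at class-definition time.
    EXAMPLE_FIELD_NAME: str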
16 changes: 15 additions & 1 deletion data/data-pipeline/data_pipeline/etl/score/config/datasets.yml
@@ -289,4 +289,18 @@ datasets:
         field_type: percentage
         include_in_tiles: true
         include_in_downloadable_files: true
-        create_percentile: true
+        create_percentile: true
+  - long_name: "CDC Life Expectancy"
+    short_name: "cdc_life_expectancy"
+    module_name: "cdc_life_expectancy"
+    input_geoid_tract_field_name: "Tract ID"
+    load_fields:
+      - short_name: "LLEF"
+        df_field_name: "LIFE_EXPECTANCY_FIELD_NAME"
+        long_name: "Life expectancy (years)"
+        field_type: float
+        include_in_tiles: false
+        include_in_downloadable_files: true
+        create_percentile: false
+        create_reverse_percentile: true
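
Roughly, this is the class state yaml_config_load() derives from an entry
like the one above, for a class that opts in (illustrative; the real
assignments happen dynamically via setattr):

from data_pipeline.etl.base import ExtractTransformLoad

# df_field_name becomes a class attribute whose value is the long_name...
LIFE_EXPECTANCY_FIELD_NAME = "Life expectancy (years)"

# ...and each long_name joins COLUMNS_TO_KEEP after the tract ID column.
COLUMNS_TO_KEEP = [
    ExtractTransformLoad.GEOID_TRACT_FIELD_NAME,
    "Life expectancy (years)",
]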

data/data-pipeline/data_pipeline/etl/sources/cdc_life_expectancy/etl.py
@@ -16,7 +16,12 @@ class CDCLifeExpectancy(ExtractTransformLoad):
     GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
     PUERTO_RICO_EXPECTED_IN_DATA = False
 
+    NAME = "cdc_life_expectancy"
+
     USA_FILE_URL: str = "https://ftp.cdc.gov/pub/Health_Statistics/NCHS/Datasets/NVSS/USALEEP/CSV/US_A.CSV"
+    LOAD_YAML_CONFIG: bool = False
+    LIFE_EXPECTANCY_FIELD_NAME = "Life expectancy (years)"
+    INPUT_GEOID_TRACT_FIELD_NAME = "Tract ID"
 
     STATES_MISSING_FROM_USA_FILE = ["23", "55"]
 
@@ -69,8 +74,7 @@ def extract(self) -> None:
         all_usa_raw_df = self._download_and_prep_data(
             file_url=self.USA_FILE_URL,
             download_file_name=self.get_tmp_path()
-            / "cdc_life_expectancy"
-            / "usa.csv",
+            / "US_A.CSV",
         )
 
         # Check which states are missing
@@ -91,15 +95,13 @@ def extract(self) -> None:
         maine_raw_df = self._download_and_prep_data(
             file_url=self.MAINE_FILE_URL,
             download_file_name=self.get_tmp_path()
-            / "cdc_life_expectancy"
             / "maine.csv",
         )
 
         logger.info("Downloading data for Wisconsin")
         wisconsin_raw_df = self._download_and_prep_data(
             file_url=self.WISCONSIN_FILE_URL,
             download_file_name=self.get_tmp_path()
-            / "cdc_life_expectancy"
             / "wisconsin.csv",
         )

38 changes: 21 additions & 17 deletions data/data-pipeline/data_pipeline/etl/sources/cdc_places/etl.py
@@ -1,30 +1,43 @@
 import typing
 import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
 from data_pipeline.utils import get_module_logger, download_file_from_url
 from data_pipeline.score import field_names
 
 logger = get_module_logger(__name__)
 
 
 class CDCPlacesETL(ExtractTransformLoad):
+    NAME = "cdc_places"
+    GEO_LEVEL: ValidGeoLevel = ValidGeoLevel.CENSUS_TRACT
+    PUERTO_RICO_EXPECTED_IN_DATA = False
+
+    CDC_GEOID_FIELD_NAME = "LocationID"
+    CDC_VALUE_FIELD_NAME = "Data_Value"
+    CDC_MEASURE_FIELD_NAME = "Measure"
+
     def __init__(self):
         self.OUTPUT_PATH = self.DATA_PATH / "dataset" / "cdc_places"
 
         self.CDC_PLACES_URL = "https://chronicdata.cdc.gov/api/views/cwsq-ngmh/rows.csv?accessType=DOWNLOAD"
-        self.CDC_GEOID_FIELD_NAME = "LocationID"
-        self.CDC_VALUE_FIELD_NAME = "Data_Value"
-        self.CDC_MEASURE_FIELD_NAME = "Measure"
+        self.COLUMNS_TO_KEEP: typing.List[str] = [
+            self.GEOID_TRACT_FIELD_NAME,
+            field_names.DIABETES_FIELD,
+            field_names.ASTHMA_FIELD,
+            field_names.HEART_DISEASE_FIELD,
+            field_names.CANCER_FIELD,
+            field_names.HEALTH_INSURANCE_FIELD,
+            field_names.PHYS_HEALTH_NOT_GOOD_FIELD,
+        ]
 
         self.df: pd.DataFrame
 
     def extract(self) -> None:
         logger.info("Starting to download 520MB CDC Places file.")
         file_path = download_file_from_url(
             file_url=self.CDC_PLACES_URL,
-            download_file_name=self.get_tmp_path()
-            / "cdc_places"
-            / "census_tract.csv",
+            download_file_name=self.get_tmp_path() / "census_tract.csv",
         )
 
         self.df = pd.read_csv(
@@ -42,7 +55,6 @@ def transform(self) -> None:
             inplace=True,
             errors="raise",
         )
-
         # Note: Puerto Rico not included.
         self.df = self.df.pivot(
             index=self.GEOID_TRACT_FIELD_NAME,
@@ -65,12 +77,4 @@ def transform(self) -> None:
         )
 
         # Make the index (the census tract ID) a column, not the index.
-        self.df.reset_index(inplace=True)
-
-    def load(self) -> None:
-        logger.info("Saving CDC Places Data")
-
-        # mkdir census
-        self.OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
-
-        self.df.to_csv(path_or_buf=self.OUTPUT_PATH / "usa.csv", index=False)
+        self.output_df = self.df.reset_index()
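
The pivot in transform() reshapes CDC Places from one row per
(tract, measure) pair to one row per tract with a column per measure; a
self-contained toy version (data made up):

import pandas as pd

long_df = pd.DataFrame(
    {
        "GEOID10_TRACT": ["01001020100", "01001020100"],
        "Measure": ["Current asthma", "Diagnosed diabetes"],
        "Data_Value": [9.9, 11.2],
    }
)

# One row per tract, one column per measure; the tract ID becomes the index.
wide_df = long_df.pivot(
    index="GEOID10_TRACT", columns="Measure", values="Data_Value"
)

# reset_index() turns the tract ID back into a regular column, which is the
# shape the base class load() expects in output_df.
wide_df = wide_df.reset_index()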
data/data-pipeline/data_pipeline/etl/sources/child_opportunity_index/etl.py
@@ -25,6 +25,7 @@ class ChildOpportunityIndex(ExtractTransformLoad):
     # Metadata for the baseclass
     NAME = "child_opportunity_index"
     GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
+    LOAD_YAML_CONFIG: bool = True
 
     # Define these for easy code completion
     EXTREME_HEAT_FIELD: str
data/data-pipeline/data_pipeline/etl/sources/doe_energy_burden/etl.py
@@ -15,6 +15,7 @@ class DOEEnergyBurden(ExtractTransformLoad):
         + "/DOE_LEAD_AMI_TRACT_2018_ALL.csv.zip"
     )
     GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
+    LOAD_YAML_CONFIG: bool = True
 
     REVISED_ENERGY_BURDEN_FIELD_NAME: str
 
@@ -56,8 +57,3 @@ def transform(self) -> None:
         )
 
         self.output_df = output_df
-
-    def load(self) -> None:
-        logger.info("Saving DOE Energy Burden CSV")
-
-        super().load()
data/data-pipeline/data_pipeline/etl/sources/dot_travel_composite/etl.py
@@ -17,6 +17,7 @@ class TravelCompositeETL(ExtractTransformLoad):
     SOURCE_URL = "https://www.transportation.gov/sites/dot.gov/files/Shapefile_and_Metadata.zip"
     GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
     PUERTO_RICO_EXPECTED_IN_DATA = False
+    LOAD_YAML_CONFIG: bool = True
 
     # Output score variables (values set on datasets.yml) for linting purposes
     TRAVEL_BURDEN_FIELD_NAME: str
1 change: 1 addition & 0 deletions data/data-pipeline/data_pipeline/etl/sources/eamlis/etl.py
@@ -19,6 +19,7 @@ class AbandonedMineETL(ExtractTransformLoad):
     NAME = "eamlis"
     GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
     AML_BOOLEAN: str
+    LOAD_YAML_CONFIG: bool = True
 
     PUERTO_RICO_EXPECTED_IN_DATA = False
     EXPECTED_MISSING_STATES = [
21 changes: 8 additions & 13 deletions data/data-pipeline/data_pipeline/etl/sources/ejscreen/etl.py
@@ -1,6 +1,6 @@
 import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
 from data_pipeline.score import field_names
 from data_pipeline.utils import get_module_logger
 
@@ -10,6 +10,10 @@
 class EJSCREENETL(ExtractTransformLoad):
     """Load updated EJSCREEN data."""
 
+    NAME = "ejscreen"
+    GEO_LEVEL: ValidGeoLevel = ValidGeoLevel.CENSUS_TRACT
+    INPUT_GEOID_TRACT_FIELD_NAME: str = "ID"
+
     def __init__(self):
         self.EJSCREEN_FTP_URL = "https://gaftp.epa.gov/EJSCREEN/2021/EJSCREEN_2021_USPR_Tracts.csv.zip"
         self.EJSCREEN_CSV = (
@@ -52,16 +56,16 @@ def transform(self) -> None:
         logger.info("Transforming EJScreen Data")
         self.df = pd.read_csv(
             self.EJSCREEN_CSV,
-            dtype={"ID": str},
+            dtype={self.INPUT_GEOID_TRACT_FIELD_NAME: str},
             # EJSCREEN writes the word "None" for NA data.
             na_values=["None"],
             low_memory=False,
         )
 
         # rename ID to Tract ID
-        self.df.rename(
+        self.output_df = self.df.rename(
             columns={
-                "ID": self.GEOID_TRACT_FIELD_NAME,
+                self.INPUT_GEOID_TRACT_FIELD_NAME: self.GEOID_TRACT_FIELD_NAME,
                 "ACSTOTPOP": field_names.TOTAL_POP_FIELD,
                 "CANCER": field_names.AIR_TOXICS_CANCER_RISK_FIELD,
                 "RESP": field_names.RESPIRATORY_HAZARD_FIELD,
@@ -80,13 +84,4 @@ def transform(self) -> None:
                 "PRE1960PCT": field_names.LEAD_PAINT_FIELD,
                 "UST": field_names.UST_FIELD,  # added for 2021 update
             },
-            inplace=True,
         )
-
-    def load(self) -> None:
-        logger.info("Saving EJScreen CSV")
-        # write nationwide csv
-        self.CSV_PATH.mkdir(parents=True, exist_ok=True)
-        self.df[self.COLUMNS_TO_KEEP].to_csv(
-            self.CSV_PATH / "usa.csv", index=False
-        )
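
One reason dtype={...: str} matters here (toy data): tract GEOIDs begin
with a two-digit state FIPS code, and letting pandas infer a numeric dtype
silently drops leading zeros.

import io

import pandas as pd

csv_text = "ID,ACSTOTPOP\n01001020100,1912\n"

inferred = pd.read_csv(io.StringIO(csv_text))
print(inferred["ID"].iloc[0])  # 1001020100 -- parsed as int64, leading zero gone

preserved = pd.read_csv(io.StringIO(csv_text), dtype={"ID": str})
print(preserved["ID"].iloc[0])  # "01001020100" -- 11-digit tract ID intact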
data/data-pipeline/data_pipeline/etl/sources/fsf_flood_risk/etl.py
@@ -14,8 +14,11 @@ class FloodRiskETL(ExtractTransformLoad):
     """ETL class for the First Street Foundation flood risk dataset"""
 
     NAME = "fsf_flood_risk"
+    # These data were emailed to the J40 team while First Street got
+    # their official data sharing channels set up.
     SOURCE_URL = settings.AWS_JUSTICE40_DATASOURCES_URL + "/fsf_flood.zip"
     GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
+    LOAD_YAML_CONFIG: bool = True
 
     # Output score variables (values set on datasets.yml) for linting purposes
     COUNT_PROPERTIES: str
data/data-pipeline/data_pipeline/etl/sources/fsf_wildfire_risk/etl.py
@@ -14,9 +14,12 @@ class WildfireRiskETL(ExtractTransformLoad):
     """ETL class for the First Street Foundation wildfire risk dataset"""
 
     NAME = "fsf_wildfire_risk"
+    # These data were emailed to the J40 team while First Street got
+    # their official data sharing channels set up.
     SOURCE_URL = settings.AWS_JUSTICE40_DATASOURCES_URL + "/fsf_fire.zip"
     GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
     PUERTO_RICO_EXPECTED_IN_DATA = False
+    LOAD_YAML_CONFIG: bool = True
     ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False
 
     # Output score variables (values set on datasets.yml) for linting purposes
