lint & format weights/*.py part 7 (#655)
* lint&format raster.py

* lint&format test__contW_lists.py

* lint&format test_adjlist.py

* lint&format test_distance.py

* lint&format test_weights_IO.py

* lint&format test_raster.py

* lint&format test_spatial_lag.py

* clean up forgotten

* revert raster change

* forgot to add noqa
jGaboardi authored Nov 16, 2023
1 parent c655e99 commit cf6b05e
Showing 10 changed files with 130 additions and 116 deletions.
22 changes: 12 additions & 10 deletions libpysal/weights/raster.py
@@ -1,21 +1,23 @@
# ruff: noqa: B006, N802

from .util import lat2SW
from .weights import WSP, W
import numpy as np
from warnings import warn
import os
import sys
from warnings import warn

import numpy as np
from scipy import sparse

from .util import lat2SW
from .weights import WSP, W

if os.path.basename(sys.argv[0]) in ("pytest", "py.test"):

def jit(*dec_args, **dec_kwargs):
def jit(*dec_args, **dec_kwargs): # noqa ARG001
"""
decorator mimicking numba.jit
"""

def intercepted_function(f, *f_args, **f_kwargs):
def intercepted_function(f, *f_args, **f_kwargs): # noqa ARG001
return f

return intercepted_function
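
> Note: the hunk above regroups the imports isort-style (stdlib, then third-party, then local) and marks the test-time `jit` stub with `noqa` for ruff's unused-argument rule (ARG001). A minimal sketch of that pass-through pattern, with a hypothetical `row_sum` kernel standing in for a real numba-decorated function:

```python
# Pass-through stand-in for numba.jit: decorator arguments are accepted
# but ignored, and the wrapped function is returned unchanged.
def jit(*dec_args, **dec_kwargs):  # noqa: ARG001
    def intercepted_function(f, *f_args, **f_kwargs):  # noqa: ARG001
        return f

    return intercepted_function


@jit(nopython=True)  # same call shape as the real numba.jit
def row_sum(values):  # hypothetical kernel, for illustration only
    total = 0.0
    for v in values:
        total += v
    return total


assert row_sum([1.0, 2.0, 3.0]) == 6.0  # runs as plain, undecorated Python
```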
@@ -241,7 +243,7 @@ def da2WSP(
n = len(ids)

try:
import numba
import numba # noqa F401
except (ModuleNotFoundError, ImportError):
warn(
"numba cannot be imported, parallel processing "
@@ -261,7 +263,7 @@ def da2WSP(

if n_jobs != 1:
try:
import joblib
import joblib # noqa F401
except (ModuleNotFoundError, ImportError):
warn(
f"Parallel processing is requested (n_jobs={n_jobs}),"
@@ -296,7 +298,7 @@ def da2WSP(
# then eliminate zeros from the data. This changes the
# sparcity of the csr_matrix !!
if k > 1 and not include_nodata:
sw = sum(map(lambda x: sw**x, range(1, k + 1)))
sw = sum(sw**x for x in range(1, k + 1))
sw.setdiag(0)
sw.eliminate_zeros()
sw.data[:] = np.ones_like(sw.data, dtype=np.int8)
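
> Note: the rewrite above swaps map-with-lambda for a generator expression (ruff's C417) when accumulating powers of the sparse weights matrix up to order `k`; the two forms are numerically identical. A self-contained check on a toy contiguity matrix:

```python
import numpy as np
from scipy import sparse

# Toy 4-node path graph: symmetric first-order contiguity.
sw = sparse.csr_matrix(
    np.array(
        [
            [0, 1, 0, 0],
            [1, 0, 1, 0],
            [0, 1, 0, 1],
            [0, 0, 1, 0],
        ],
        dtype=np.int8,
    )
)
k = 2

# Both forms accumulate sw**1 + ... + sw**k, linking every pair reachable
# within k steps; the generator expression is the C417-preferred spelling.
old = sum(map(lambda x: sw**x, range(1, k + 1)))  # noqa: C417
new = sum(sw**x for x in range(1, k + 1))
assert (old != new).nnz == 0  # identical sparse results

# Post-processing as in the diff: drop self-links, prune explicit zeros,
# then binarize the surviving entries.
new.setdiag(0)
new.eliminate_zeros()
new.data[:] = np.ones_like(new.data, dtype=np.int8)
```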
@@ -563,7 +565,7 @@ def _index2da(data, index, attrs, coords):
else:
min_data = np.min(data)
fill_value = min_data - 1 if min_data < 0 else -1
attrs["nodatavals"] = tuple([fill_value])
attrs["nodatavals"] = tuple([fill_value]) # noqa C409
data_complete = np.full(shape, fill_value, data.dtype)
else:
data_complete = np.empty(shape, data.dtype)
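
> Note: the surrounding logic picks a nodata sentinel guaranteed to be absent from the data, and keeps `tuple([fill_value])` behind a `noqa` rather than taking ruff's C409 rewrite to a tuple literal. A sketch of that fill-value rule on illustrative data:

```python
import numpy as np

# Illustrative data containing negatives, so the sentinel must sit below
# the observed minimum; without negatives, -1 is a safe nodata marker.
data = np.array([-3, 0, 7])
min_data = np.min(data)
fill_value = min_data - 1 if min_data < 0 else -1  # -4 here

attrs = {}
# C409 flags tuple([x]) as an unnecessary list inside tuple(); the diff
# silences the rule instead of rewriting to the (x,) literal form.
attrs["nodatavals"] = tuple([fill_value])  # noqa: C409
assert attrs["nodatavals"] == (-4,)

data_complete = np.full(data.shape, fill_value, data.dtype)
```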
89 changes: 48 additions & 41 deletions libpysal/weights/tests/test__contW_lists.py
@@ -1,19 +1,21 @@
# ruff: noqa: N999

import os

import geopandas as gpd

from ... import examples as pysal_examples
from ...io.fileio import FileIO as ps_open
from ...io.fileio import FileIO
from .._contW_lists import QUEEN, ROOK, ContiguityWeightsLists
from ..weights import W


class TestContiguityWeights:
def setup_method(self):
"""Setup the binning contiguity weights"""
shpObj = ps_open(pysal_examples.get_path("virginia.shp"), "r")
self.binningW = ContiguityWeightsLists(shpObj, QUEEN)
shpObj.close()
shp_obj = FileIO(pysal_examples.get_path("virginia.shp"), "r")
self.binningW = ContiguityWeightsLists(shp_obj, QUEEN)
shp_obj.close()

def test_w_type(self):
assert isinstance(self.binningW, ContiguityWeightsLists)
@@ -31,29 +33,31 @@ def test_contiguity_weights_lists(self):

def test_nested_polygons(self):
# load queen gal file created using Open Geoda.
geodaW = ps_open(pysal_examples.get_path("virginia.gal"), "r").read()
geoda_w = FileIO(pysal_examples.get_path("virginia.gal"), "r").read()
# build matching W with pysal
pysalWb = self.build_W(
pysal_wb = self.build_w(
pysal_examples.get_path("virginia.shp"), QUEEN, "POLY_ID"
)
# compare output.
for key in geodaW.neighbors:
geoda_neighbors = list(map(int, geodaW.neighbors[key]))
pysalb_neighbors = pysalWb.neighbors[int(key)]
for key in geoda_w.neighbors:
geoda_neighbors = list(map(int, geoda_w.neighbors[key]))
pysalb_neighbors = pysal_wb.neighbors[int(key)]
geoda_neighbors.sort()
pysalb_neighbors.sort()
assert geoda_neighbors == pysalb_neighbors

def test_true_rook(self):
# load queen gal file created using Open Geoda.
geodaW = ps_open(pysal_examples.get_path("rook31.gal"), "r").read()
geoda_w = FileIO(pysal_examples.get_path("rook31.gal"), "r").read()
# build matching W with pysal
# pysalW = pysal.rook_from_shapefile(pysal_examples.get_path('rook31.shp'),','POLY_ID')
pysalWb = self.build_W(pysal_examples.get_path("rook31.shp"), ROOK, "POLY_ID")
# pysalW = pysal.rook_from_shapefile(
# pysal_examples.get_path('rook31.shp'),','POLY_ID'
# )
pysal_wb = self.build_w(pysal_examples.get_path("rook31.shp"), ROOK, "POLY_ID")
# compare output.
for key in geodaW.neighbors:
geoda_neighbors = list(map(int, geodaW.neighbors[key]))
pysalb_neighbors = pysalWb.neighbors[int(key)]
for key in geoda_w.neighbors:
geoda_neighbors = list(map(int, geoda_w.neighbors[key]))
pysalb_neighbors = pysal_wb.neighbors[int(key)]
geoda_neighbors.sort()
pysalb_neighbors.sort()
assert geoda_neighbors == pysalb_neighbors
@@ -62,64 +66,67 @@ def test_true_rook2(self):
# load queen gal file created using Open Geoda.

stl = pysal_examples.load_example("stl")
gal_file = test_file = stl.get_path("stl_hom_rook.gal")
geodaW = ps_open(gal_file, "r").read()
gal_file = stl.get_path("stl_hom_rook.gal")
geoda_w = FileIO(gal_file, "r").read()
# build matching W with pysal
pysalWb = self.build_W(stl.get_path("stl_hom.shp"), ROOK, "POLY_ID_OG")
pysal_wb = self.build_w(stl.get_path("stl_hom.shp"), ROOK, "POLY_ID_OG")
# compare output.
for key in geodaW.neighbors:
geoda_neighbors = list(map(int, geodaW.neighbors[key]))
pysalb_neighbors = pysalWb.neighbors[int(key)]
for key in geoda_w.neighbors:
geoda_neighbors = list(map(int, geoda_w.neighbors[key]))
pysalb_neighbors = pysal_wb.neighbors[int(key)]
geoda_neighbors.sort()
pysalb_neighbors.sort()
assert geoda_neighbors == pysalb_neighbors

def test_true_rook3(self):
# load queen gal file created using Open Geoda.
geodaW = ps_open(pysal_examples.get_path("virginia_rook.gal"), "r").read()
geoda_w = FileIO(pysal_examples.get_path("virginia_rook.gal"), "r").read()
# build matching W with pysal
pysalWb = self.build_W(pysal_examples.get_path("virginia.shp"), ROOK, "POLY_ID")
pysal_wb = self.build_w(
pysal_examples.get_path("virginia.shp"), ROOK, "POLY_ID"
)
# compare output.
for key in geodaW.neighbors:
geoda_neighbors = list(map(int, geodaW.neighbors[key]))
pysalb_neighbors = pysalWb.neighbors[int(key)]
for key in geoda_w.neighbors:
geoda_neighbors = list(map(int, geoda_w.neighbors[key]))
pysalb_neighbors = pysal_wb.neighbors[int(key)]
geoda_neighbors.sort()
pysalb_neighbors.sort()
assert geoda_neighbors == pysalb_neighbors

def test_shapely(self):
pysalneighbs = ContiguityWeightsLists(
ps_open(pysal_examples.get_path("virginia.shp")), ROOK
FileIO(pysal_examples.get_path("virginia.shp")), ROOK
)
gdf = gpd.read_file(pysal_examples.get_path("virginia.shp"))
shplyneighbs = ContiguityWeightsLists(gdf.geometry.tolist(), ROOK)
assert pysalneighbs.w == shplyneighbs.w
pysalneighbs = ContiguityWeightsLists(
ps_open(pysal_examples.get_path("virginia.shp")), QUEEN
FileIO(pysal_examples.get_path("virginia.shp")), QUEEN
)
shplyneighbs = ContiguityWeightsLists(gdf.geometry.tolist(), QUEEN)
assert pysalneighbs.w == shplyneighbs.w

def build_W(self, shapefile, type, idVariable=None):
"""Building 2 W's the hard way. We need to do this so we can test both rtree and binning"""
def build_w(self, shapefile, type_, idVariable=None): # noqa N803
"""Building 2 W's the hard way. We need to do this so we
can test both rtree and binning
"""
dbname = os.path.splitext(shapefile)[0] + ".dbf"
db = ps_open(dbname)
shpObj = ps_open(shapefile)
neighbor_data = ContiguityWeightsLists(shpObj, type).w
db = FileIO(dbname)
shp_obj = FileIO(shapefile)
neighbor_data = ContiguityWeightsLists(shp_obj, type_).w
neighbors = {}
weights = {}
if idVariable:
ids = db.by_col[idVariable]
assert len(ids) == len(set(ids))
for key in neighbor_data:
id = ids[key]
if id not in neighbors:
neighbors[id] = set()
neighbors[id].update([ids[x] for x in neighbor_data[key]])
id_ = ids[key]
if id_ not in neighbors:
neighbors[id_] = set()
neighbors[id_].update([ids[x] for x in neighbor_data[key]])
for key in neighbors:
neighbors[key] = list(neighbors[key])
binningW = W(neighbors, id_order=ids)
binning_w = W(neighbors, id_order=ids)
else:
neighbors[key] = list(neighbors[key])
binningW = W(neighbors)
return binningW
binning_w = W(neighbors)
return binning_w
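
> Note: the `W` objects returned by `build_w` need only a plain id-to-neighbors mapping; a minimal sketch of that constructor usage with a toy mapping (assuming libpysal is installed):

```python
from libpysal.weights import W

# A plain id -> neighbor-ids mapping is all W requires; id_order pins the
# iteration order, matching binning_w = W(neighbors, id_order=ids) above.
neighbors = {"a": ["b"], "b": ["a", "c"], "c": ["b"]}
w = W(neighbors, id_order=["a", "b", "c"])

assert w.n == 3
assert sorted(w.neighbors["b"]) == ["a", "c"]
```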
52 changes: 26 additions & 26 deletions libpysal/weights/tests/test_adjlist.py
@@ -8,7 +8,7 @@
from ..util import lat2W


class Test_Adjlist:
class TestAdjlist:
def setup_method(self):
self.knownW = io.open(examples.get_path("columbus.gal")).read()

@@ -60,14 +60,14 @@ def test_filter(self):
alist_neighbors[idx] = []
badgrid = weights.W(alist_neighbors)
np.testing.assert_allclose(badgrid.sparse.toarray(), grid.sparse.toarray())
tuples = set([tuple(t) for t in alist[["focal", "neighbor"]].values])
tuples = {tuple(t) for t in alist[["focal", "neighbor"]].values}
full_alist = grid.to_adjlist(drop_islands=True)
all_possible = set([tuple(t) for t in full_alist[["focal", "neighbor"]].values])
all_possible = {tuple(t) for t in full_alist[["focal", "neighbor"]].values}
assert tuples.issubset(all_possible), (
"the de-duped adjlist has links " "not in the duplicated adjlist."
)
complements = all_possible.difference(tuples)
reversed_complements = set([t[::-1] for t in complements])
reversed_complements = {t[::-1] for t in complements}
assert reversed_complements == tuples, (
"the remaining links in the duplicated"
" adjlist are not the reverse of the links"
@@ -79,12 +79,12 @@ def apply_and_compare_columbus(self, col):
import geopandas

df = geopandas.read_file(examples.get_path("columbus.dbf")).head()
W = weights.Queen.from_dataframe(df)
alist = adj.adjlist_apply(df[col], W=W, to_adjlist_kws=dict(drop_islands=True))
w = weights.Queen.from_dataframe(df)
alist = adj.adjlist_apply(df[col], W=w, to_adjlist_kws={"drop_islands": True})
right_hovals = alist.groupby("focal").att_focal.unique()
assert (right_hovals == df[col]).all()
allpairs = np.subtract.outer(df[col].values, df[col].values)
flat_diffs = allpairs[W.sparse.toarray().astype(bool)]
flat_diffs = allpairs[w.sparse.toarray().astype(bool)]
np.testing.assert_allclose(flat_diffs, alist["subtract"].values)
return flat_diffs

@@ -95,15 +95,15 @@ def test_mvapply(self):
import geopandas

df = geopandas.read_file(examples.get_path("columbus.dbf")).head()
W = weights.Queen.from_dataframe(df)
w = weights.Queen.from_dataframe(df)

ssq = lambda x_y: np.sum((x_y[0] - x_y[1]) ** 2).item()
ssq = lambda x_y: np.sum((x_y[0] - x_y[1]) ** 2).item() # noqa E731
ssq.__name__ = "sum_of_squares"
alist = adj.adjlist_apply(
df[["HOVAL", "CRIME", "INC"]],
W=W,
W=w,
func=ssq,
to_adjlist_kws=dict(drop_islands=True),
to_adjlist_kws={"drop_islands": True},
)
known_ssq = [
1301.1639302990804,
@@ -128,10 +128,10 @@ def test_mvapply(self):
def test_map(self):
atts = ["HOVAL", "CRIME", "INC"]
df = geopandas.read_file(examples.get_path("columbus.dbf")).head()
W = weights.Queen.from_dataframe(df)
w = weights.Queen.from_dataframe(df)
hoval, crime, inc = list(map(self.apply_and_compare_columbus, atts))
mapped = adj.adjlist_map(df[atts], W=W, to_adjlist_kws=dict(drop_islands=True))
for name, data in zip(atts, (hoval, crime, inc)):
mapped = adj.adjlist_map(df[atts], W=w, to_adjlist_kws={"drop_islands": True})
for name, data in zip(atts, (hoval, crime, inc), strict=True):
np.testing.assert_allclose(
data, mapped["_".join(("subtract", name))].values
)
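
> Note: the `zip` call above gains `strict=True` (ruff's B905, available on Python 3.10+), which raises instead of silently truncating when the iterables differ in length. A short demonstration with illustrative values:

```python
atts = ["HOVAL", "CRIME", "INC"]
values = [27.5, 15.7, 19.5]  # illustrative numbers, not from the tests

# With strict=True the pairing is validated: unequal lengths raise
# ValueError instead of silently dropping the unmatched tail.
for name, value in zip(atts, values, strict=True):
    print(name, value)

try:
    list(zip(atts, values[:2], strict=True))
except ValueError as err:
    print(err)  # "zip() argument 2 is shorter than argument 1"
```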
@@ -154,28 +154,28 @@ def test_sort(self):
def test_ids(self):
df = geopandas.read_file(examples.get_path("columbus.dbf")).head()
df["my_id"] = range(3, len(df) + 3)
W = weights.Queen.from_dataframe(df, ids="my_id")
W_adj = W.to_adjlist(drop_islands=True)
w = weights.Queen.from_dataframe(df, ids="my_id")
w_adj = w.to_adjlist(drop_islands=True)
for i in range(3, 8):
assert i in W_adj.focal
assert i in W_adj.neighbor
for i in W_adj.focal:
assert i in w_adj.focal
assert i in w_adj.neighbor
for i in w_adj.focal:
assert i in list(range(3, len(df) + 3))
for i in W_adj.neighbor:
for i in w_adj.neighbor:
assert i in list(range(3, len(df) + 3))

def test_str_ids(self):
df = geopandas.read_file(examples.get_path("columbus.dbf")).head()
snakes = ["mamba", "boa", "python", "rattlesnake", "cobra"]
df["my_str_id"] = snakes
W = weights.Queen.from_dataframe(df, ids="my_str_id")
W_adj = W.to_adjlist(drop_islands=True)
w = weights.Queen.from_dataframe(df, ids="my_str_id")
w_adj = w.to_adjlist(drop_islands=True)
for i in snakes:
(W_adj.focal == i).any()
(W_adj.neighbor == i).any()
for i in W_adj.focal:
(w_adj.focal == i).any()
(w_adj.neighbor == i).any()
for i in w_adj.focal:
assert i in snakes
for i in W_adj.neighbor:
for i in w_adj.neighbor:
assert i in snakes

def test_lat2w(self):
6 changes: 3 additions & 3 deletions libpysal/weights/tests/test_contiguity.py
@@ -19,14 +19,14 @@ class ContiguityMixin:
f.seek(0) # go back to head of file
cls = object # class constructor
known_wi = None # index of known w entry to compare
known_w = dict() # actual w entry
known_w = {} # actual w entry
known_name = known_wi
known_namedw = known_w
idVariable = None # id variable from file or column
known_wspi_da = None
known_wsp_da = dict()
known_wsp_da = {}
known_wi_da = None
known_w_da = dict()
known_w_da = {}
try:
from .. import raster
