Skip to content

Commit

Permalink
Type-hinting: modernize: remove some legacy typing imports (#1267)
Browse files Browse the repository at this point in the history
  • Loading branch information
jayaddison authored Oct 2, 2024
1 parent a708b83 commit 4408721
Show file tree
Hide file tree
Showing 8 changed files with 30 additions and 35 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ repos:
rev: v3.15.1
hooks:
- id: pyupgrade
args: [--py38-plus]
args: [--py39-plus]
- repo: https://github.com/sphinx-contrib/sphinx-lint
rev: v0.9.1
hooks:
Expand Down
5 changes: 2 additions & 3 deletions recipe_scrapers/_abstract.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import inspect
from collections import OrderedDict
from typing import List
from urllib.parse import urljoin

from bs4 import BeautifulSoup
Expand Down Expand Up @@ -98,15 +97,15 @@ def ingredients(self):
"""Ingredients of the recipe."""
raise NotImplementedError("This should be implemented.")

def ingredient_groups(self) -> List[IngredientGroup]:
def ingredient_groups(self) -> list[IngredientGroup]:
"""List of ingredient groups."""
return [IngredientGroup(purpose=None, ingredients=self.ingredients())]

def instructions(self) -> str:
"""Instructions to prepare the recipe."""
raise NotImplementedError("This should be implemented.")

def instructions_list(self) -> List[str]:
def instructions_list(self) -> list[str]:
"""Instructions to prepare the recipe as a list."""
return [
instruction
Expand Down
21 changes: 11 additions & 10 deletions recipe_scrapers/_grouping_utils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from __future__ import annotations

from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional

from bs4 import BeautifulSoup

Expand All @@ -9,8 +10,8 @@

@dataclass
class IngredientGroup:
ingredients: List[str]
purpose: Optional[str] = (
ingredients: list[str]
purpose: str | None = (
None # this group of ingredients is {purpose} (e.g. "For the dressing")
)

Expand Down Expand Up @@ -50,7 +51,7 @@ def score_sentence_similarity(first: str, second: str) -> float:
return 2 * intersection / (len(first_bigrams) + len(second_bigrams))


def best_match(test_string: str, target_strings: List[str]) -> str:
def best_match(test_string: str, target_strings: list[str]) -> str:
"""Find the best match for a given test string within a list of target strings.
This function utilizes the score_sentence_similarity function to compare the test string
Expand All @@ -61,7 +62,7 @@ def best_match(test_string: str, target_strings: List[str]) -> str:
----------
test_string : str
The string to find the best match for.
target_strings : List[str]
target_strings : list[str]
A list of strings to compare against the test string.
Returns
Expand All @@ -78,11 +79,11 @@ def best_match(test_string: str, target_strings: List[str]) -> str:


def group_ingredients(
ingredients_list: List[str],
ingredients_list: list[str],
soup: BeautifulSoup,
group_heading: str,
group_element: str,
) -> List[IngredientGroup]:
) -> list[IngredientGroup]:
"""
Group ingredients into sublists according to the heading in the recipe.
Expand All @@ -93,7 +94,7 @@ def group_ingredients(
Parameters
----------
ingredients_list : List[str]
ingredients_list : list[str]
Ingredients extracted by the scraper.
soup : BeautifulSoup
Parsed HTML of the recipe page.
Expand All @@ -104,7 +105,7 @@ def group_ingredients(
Returns
-------
List[IngredientGroup]
list[IngredientGroup]
groupings of ingredients categorized by their purpose or heading.
Raises
Expand All @@ -119,7 +120,7 @@ def group_ingredients(
f"Found {len(found_ingredients)} grouped ingredients but was expecting to find {len(ingredients_list)}."
)

groupings: Dict[Optional[str], List[str]] = defaultdict(list)
groupings: dict[str | None, list[str]] = defaultdict(list)
current_heading = None

elements = soup.select(f"{group_heading}, {group_element}")
Expand Down
7 changes: 3 additions & 4 deletions recipe_scrapers/kitchenaidaustralia.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import re
from typing import List

from ._abstract import AbstractScraper
from ._grouping_utils import IngredientGroup
Expand Down Expand Up @@ -36,7 +35,7 @@ def ingredients(self):
elements = self._parse_list(ingredients)
return elements

def ingredient_groups(self) -> List[IngredientGroup]:
def ingredient_groups(self) -> list[IngredientGroup]:
recipe = self._get_recipe()
ingredients = recipe.find("div", {"class": "leftPanel"})

Expand All @@ -54,7 +53,7 @@ def ingredient_groups(self) -> List[IngredientGroup]:
def instructions(self):
return "\n".join(self.instructions_list())

def instructions_list(self) -> List[str]:
def instructions_list(self) -> list[str]:
recipe = self._get_recipe()
method = recipe.find("div", {"class": "rightPanel"})

Expand Down Expand Up @@ -87,7 +86,7 @@ def _parse_summary_item(self, item):
"""
return item.find_next_sibling("p").text

def _parse_list(self, container) -> List[str]:
def _parse_list(self, container) -> list[str]:
"""
Get the text from each of the li elements contained by the given container.
"""
Expand Down
6 changes: 2 additions & 4 deletions recipe_scrapers/nihhealthyeating.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
from typing import List

from ._abstract import AbstractScraper
from ._exceptions import ElementNotFoundInHtml, StaticValueException
from ._grouping_utils import IngredientGroup
Expand Down Expand Up @@ -69,7 +67,7 @@ def image(self):

return image_relative_url

def ingredient_groups(self) -> List[IngredientGroup]:
def ingredient_groups(self) -> list[IngredientGroup]:
# This content must be present for recipes on this website.
ingredients_div = self.soup.find("div", {"id": "ingredients"})
section = []
Expand Down Expand Up @@ -123,7 +121,7 @@ def ingredient_groups(self) -> List[IngredientGroup]:

return [IngredientGroup(ingredients_list)]

def ingredients(self) -> List[str]:
def ingredients(self) -> list[str]:
results = []
for ingredient_group in self.ingredient_groups():
results.extend(ingredient_group.ingredients)
Expand Down
2 changes: 1 addition & 1 deletion recipe_scrapers/plugins/_interface.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
from typing import Iterable
from collections.abc import Iterable


class PluginInterface(ABC):
Expand Down
4 changes: 1 addition & 3 deletions recipe_scrapers/streetkitchen.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
from typing import List

from ._abstract import AbstractScraper
from ._grouping_utils import IngredientGroup, group_ingredients
from ._utils import get_minutes, get_yields, normalize_string
Expand Down Expand Up @@ -61,7 +59,7 @@ def author(self):
self.soup.find("a", {"rel": "author"}).find("img")["alt"]
)

def ingredient_groups(self) -> List[IngredientGroup]:
def ingredient_groups(self) -> list[IngredientGroup]:
return group_ingredients(
self.ingredients(),
self.soup,
Expand Down
18 changes: 9 additions & 9 deletions tests/library/test_readme.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,14 +11,14 @@
# only available in importlib.metadata from py3.10 onwards
from importlib_metadata import PackageNotFoundError, metadata

from typing import Dict, List, Optional, Tuple
from typing import Optional

from recipe_scrapers import SCRAPERS, AbstractScraper

START_LIST = "-----------------------"
END_LIST = "(*) offline saved files only"

ScraperIndex = Dict[str, Tuple[AbstractScraper, List[str]]]
ScraperIndex = dict[str, tuple[AbstractScraper, list[str]]]


def get_scraper_domains():
Expand Down Expand Up @@ -56,7 +56,7 @@ def get_scraper_index() -> ScraperIndex:
return scraper_index


def get_shared_prefix(domains: List[str]) -> str:
def get_shared_prefix(domains: list[str]) -> str:
"""
Find the longest-common-prefix of the domains
"""
Expand All @@ -78,12 +78,12 @@ def get_shared_prefix(domains: List[str]) -> str:

def get_secondary_domains(
scraper_index: ScraperIndex, primary_domain: str
) -> List[str]:
) -> list[str]:
_, suffixes = scraper_index[primary_domain]
return [suffix for suffix in suffixes if not primary_domain.endswith(suffix)]


def parse_primary_line(line: str) -> Optional[Tuple[str, str]]:
def parse_primary_line(line: str) -> Optional[tuple[str, str]]:
match = re.search(
r"^- `https?://(?:www\.)?([^/\s]+)[^<]*<https?://(?:www\.)?([^/\s]*)[^>]*>`_(?: \(\*\))?$",
line,
Expand All @@ -95,17 +95,17 @@ def parse_primary_line(line: str) -> Optional[Tuple[str, str]]:
return None


def parse_secondary_line(line: str) -> List[Tuple[str, str]]:
def parse_secondary_line(line: str) -> list[tuple[str, str]]:
return re.findall(r"`(\.[^\s]+)\s<https?://(?:www\.)?([^/>]+)[^>]*>`_", line)


def get_package_description() -> List[str]:
def get_package_description() -> list[str]:
pkg_metadata = metadata("recipe_scrapers")
return pkg_metadata["Description"].splitlines()


def get_list_lines() -> List[str]:
list_lines: List[str] = []
def get_list_lines() -> list[str]:
list_lines: list[str] = []
started_list = False
for line in get_package_description():
stripped_line = line.strip()
Expand Down

0 comments on commit 4408721

Please sign in to comment.