Support csv with single column #72

Merged · 2 commits · Jan 25, 2024

examples/utils.py — 1 addition, 2 deletions
@@ -27,11 +27,10 @@ def pretty_print_df(
     df: DataFrame,
     *,
     sample_size: Optional[int] = 10,
-    sort_key: Optional[str] = "id",
     inferred_schema: Optional[ShmessySchema] = None
 ) -> None:
     df = df[:sample_size]
-    df = df.sort_values(by=[sort_key])
+    df = df.sort_values(df.columns[0])
     df = df.rename(columns=add_data_types_to_column_names(df, inferred_schema))
     print(tabulate(df, headers="keys", tablefmt="rounded_outline", showindex=False))

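Note on the sort change above: pretty_print_df previously sorted by a hardcoded "id" key, which raises a KeyError for frames that have no such column (exactly the single-column case this PR adds), while sorting by the first column works for any frame. A minimal sketch with hypothetical sample data:

import pandas as pd

# Hypothetical single-column frame, similar to what data_7.csv would produce.
df = pd.DataFrame({"header_name": ["this", "is", "csv"]})

# Old behaviour: a fixed sort key fails when that column is missing.
# df.sort_values(by=["id"])  # raises KeyError: 'id'

# New behaviour: sort by whichever column comes first.
df = df.sort_values(df.columns[0])
print(df)
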
src/shmessy/__init__.py — 22 additions, 20 deletions
@@ -92,27 +92,29 @@ def read_csv(
         fix_column_names: Optional[bool] = False,
     ) -> DataFrame:
         try:
+            dialect = None
+
             if use_sniffer:
-                dialect = csv.Sniffer().sniff(
-                    sample=_get_sample_from_csv(
-                        filepath_or_buffer=filepath_or_buffer,
-                        sample_size=self.__sample_size,
-                        encoding=self.__reader_encoding,
-                    ),
-                    delimiters="".join([",", "\t", ";", " ", ":"]),
-                )
-                df = pd.read_csv(
-                    filepath_or_buffer=filepath_or_buffer,
-                    dialect=dialect(),
-                    low_memory=False,
-                    encoding=self.__reader_encoding,
-                )
-            else:
-                df = pd.read_csv(
-                    filepath_or_buffer=filepath_or_buffer,
-                    low_memory=False,
-                    encoding=self.__reader_encoding,
-                )
+                try:
+                    dialect = csv.Sniffer().sniff(
+                        sample=_get_sample_from_csv(
+                            filepath_or_buffer=filepath_or_buffer,
+                            sample_size=self.__sample_size,
+                            encoding=self.__reader_encoding,
+                        ),
+                        delimiters="".join([",", "\t", ";", " ", ":"]),
+                    )
+                except Exception as e:  # noqa
+                    logger.debug(
+                        f"Could not use python sniffer to infer csv schema, Using pandas default settings: {e}"
+                    )
+
+            df = pd.read_csv(
+                filepath_or_buffer=filepath_or_buffer,
+                dialect=dialect() if dialect else None,
+                low_memory=False,
+                encoding=self.__reader_encoding,
+            )

             if fixed_schema is None:
                 fixed_schema = self.infer_schema(df)
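
Note on the sniffer change above: csv.Sniffer typically cannot determine a delimiter from a single-column sample and raises csv.Error, which read_csv previously surfaced as CouldNotDetermineDelimiterException; the new code logs the failure at debug level and falls back to pandas defaults, so that now-unused exception is removed below. A minimal standalone sketch of the behaviour (the inline sample string is hypothetical, not part of the PR):

import csv
from io import StringIO

import pandas as pd

sample = "header_name\nthis\nis\ncsv\n"  # single column: nothing for the sniffer to detect

dialect = None
try:
    dialect = csv.Sniffer().sniff(sample, delimiters=",\t; :")
except csv.Error as exc:
    # The sniffer raises "Could not determine delimiter" here; mirror the PR by
    # noting the failure and continuing with pandas defaults.
    print(f"sniffer failed, falling back to pandas defaults: {exc}")

df = pd.read_csv(StringIO(sample), dialect=dialect() if dialect else None, low_memory=False)
print(df.dtypes)  # header_name is inferred as object dtype
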
src/shmessy/exceptions.py — 0 additions, 11 deletions
@@ -4,9 +4,6 @@
 def exception_router(exception: Exception):
     error_message = str(exception)

-    if "Could not determine delimiter" in error_message:
-        raise CouldNotDetermineDelimiterException()
-
     match = re.match(
         r"(.*)'(.*)' codec can't decode byte (.*) in position (.*):(.*)", error_message
     )
@@ -64,11 +61,3 @@ def __init__(self, expected_encoding: str):
         super().__init__(
             f"The given file cannot be read using {expected_encoding} encoding."
         )
-
-
-class CouldNotDetermineDelimiterException(ShmessyException):
-    def __init__(self):
-        super().__init__(
-            "Could not determine delimiter. "
-            "Make sure a delimiter is shown the same number of times on every row in the file."
-        )
tests/data/data_7.csv — 9 additions, 0 deletions (new file)
@@ -0,0 +1,9 @@
+header_name
+this
+is
+csv
+file
+with
+single
+column
+only
tests/intg/test_read_csv.py — 33 additions, 26 deletions
@@ -1,55 +1,62 @@
-from numpy import dtypes
+import numpy as np

 from shmessy import Shmessy


 def test_read_csv(files_folder):
     df = Shmessy().read_csv(files_folder.as_posix() + "/data_1.csv")

-    assert isinstance(df["created_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["modified_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["deleted_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["celebrated_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["joined_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["laughed_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["loled_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["fooled_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["emerged_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["processed_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["isolated_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["scheduled_at"].dtype, dtypes.DateTime64DType)
-    assert isinstance(df["unixed_at"].dtype, dtypes.DateTime64DType)
+    assert df["created_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["modified_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["deleted_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["celebrated_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["joined_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["laughed_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["loled_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["fooled_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["emerged_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["processed_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["isolated_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["scheduled_at"].dtype == np.dtype("datetime64[ns]")
+    assert df["unixed_at"].dtype == np.dtype("datetime64[ns]")


 def test_read_csv_colon_as_delimiter(files_folder):
     df = Shmessy().read_csv(files_folder.as_posix() + "/data_3.csv")
-    assert isinstance(df["id"].dtype, dtypes.Int64DType)
-    assert isinstance(df["name"].dtype, dtypes.ObjectDType)
-    assert isinstance(df["value"].dtype, dtypes.Int64DType)
+    assert df["id"].dtype == np.dtype("int64")
+    assert df["name"].dtype == np.dtype("O")
+    assert df["value"].dtype == np.dtype("int64")


 def test_read_csv_semicolon_as_delimiter(files_folder):
     df = Shmessy().read_csv(files_folder.as_posix() + "/data_4.csv")
-    assert isinstance(df["id"].dtype, dtypes.Int64DType)
-    assert isinstance(df["name"].dtype, dtypes.ObjectDType)
-    assert isinstance(df["value"].dtype, dtypes.Int64DType)
+    assert df["id"].dtype == np.dtype("int64")
+    assert df["name"].dtype == np.dtype("O")
+    assert df["value"].dtype == np.dtype("int64")


 def test_buffer_as_read_csv_input(files_folder):
     path = files_folder.as_posix() + "/data_4.csv"
     with open(path, mode="rt") as file_input:
         df = Shmessy().read_csv(file_input)

-    assert isinstance(df["id"].dtype, dtypes.Int64DType)
-    assert isinstance(df["name"].dtype, dtypes.ObjectDType)
-    assert isinstance(df["value"].dtype, dtypes.Int64DType)
+    assert df["id"].dtype == np.dtype("int64")
+    assert df["name"].dtype == np.dtype("O")
+    assert df["value"].dtype == np.dtype("int64")


 def test_binary_buffer_as_read_csv_input(files_folder):
     path = files_folder.as_posix() + "/data_4.csv"
     with open(path, mode="rb") as file_input:
         df = Shmessy().read_csv(file_input)

-    assert isinstance(df["id"].dtype, dtypes.Int64DType)
-    assert isinstance(df["name"].dtype, dtypes.ObjectDType)
-    assert isinstance(df["value"].dtype, dtypes.Int64DType)
+    assert df["id"].dtype == np.dtype("int64")
+    assert df["name"].dtype == np.dtype("O")
+    assert df["value"].dtype == np.dtype("int64")
+
+
+def test_read_csv_file_with_single_column(files_folder):
+    path = files_folder.as_posix() + "/data_7.csv"
+    with open(path, mode="rb") as file_input:
+        df = Shmessy().read_csv(file_input)
+        assert df["header_name"].dtype == np.dtype("O")