diff --git a/README.rst b/README.rst
index cdf37d696..6a485ec81 100644
--- a/README.rst
+++ b/README.rst
@@ -1,4 +1,4 @@
-|PyPI| |Python Version| |Codecov| |Tests| |License|
+|PyPI| |Python Version| |Codecov| |Tests|

 .. |PyPI| image:: https://img.shields.io/pypi/v/datachain.svg
    :target: https://pypi.org/project/datachain/
@@ -12,257 +12,310 @@
 .. |Tests| image:: https://github.com/iterative/dvcx/workflows/Tests/badge.svg
    :target: https://github.com/iterative/dvcx/actions?workflow=Tests
    :alt: Tests
-.. |License| image:: https://img.shields.io/pypi/l/datachain
-   :target: https://opensource.org/licenses/Apache-2.0
-   :alt: License

 AI 🔗 DataChain
----------------

 DataChain is an open-source Python data processing library for wrangling unstructured AI data at scale.

-It enables batch LLM API calls and local language and vision AI model inferences to run in parallel over many samples as chained operations resolving to table-like datasets. These datasets can be saved, versioned, and sent directly to PyTorch and TensorFlow for training. DataChain employs rigorous `Pydantic`_ data structures, promoting better data processing practices and enabling vectorized analytical operations normally found in databases.
+DataChain enables multimodal API calls and local AI inferences to run in parallel over many samples as chained operations. The resulting datasets can be saved, versioned, and sent directly to PyTorch and TensorFlow for training. DataChain can persist the features of Python objects returned by AI models and enables vectorized analytical operations over them.

-The DataChain fills the gap between dataframe libraries, data warehouses, and Python-based multimodal AI applications. Our primary use cases include massive data curation, LLM analytics and validation, batch image segmentation and pose detection, GenAI data alignment, etc.
+The typical use cases are data curation, LLM analytics and validation, image segmentation, pose detection, and GenAI alignment. DataChain is especially helpful when batch operations can be optimized – for instance, when synchronous API calls can be parallelized or when an LLM API offers batch processing.

 .. code:: console

   $ pip install datachain
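+A quick sanity check that the install worked, assuming the top-level exports defined in ``src/datachain/__init__.py``:
+
+.. code:: console
+
+   $ python -c "import datachain; print(datachain.DataChain)"
+   <class 'datachain.lib.dc.DataChain'>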
-Basic operation
----------------
+Operation basics
+----------------

 DataChain is built by composing wrangling operations.

-For example, it can be instructed to read files from the cloud, map them onto a modern AI service returning a Python object, parallelize API calls, save the result as a dataset, and export a column:
+For example, let us consider a dataset from the Karlsruhe Institute of Technology detailing dialogs between users and customer service chatbots. We can use a chain to read the data from the cloud, map it onto parallel API calls for LLM evaluation, and organize the output into a dataset:

 .. code:: py

-    import os
-    import datachain as dc
-
-    from anthropic.types.message import Message
-    ClaudeModel = dc.pydantic_to_feature(Message)
-    PROMPT = "summarize this book in less than 200 words"
-    service = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
-    source = "gs://datachain-demo/mybooks/"
-
-    chain = dc.DataChain(source) \
-        .filter(File.name.glob("*.txt")) \
-        .settings(parallel=4) \
-        .map( \
-            claude = lambda file: \
-                ClaudeModel(**service.messages.create( \
-                    model="claude-3-haiku-20240307", \
-                    system=PROMPT, \
-                    messages=[{"role": "user", \
-                               "content": file.get_value()}] \
-                ), \
-            ).model_dump() \
-        ) \
-        .save("mydataset")
-
-    dc.DataChain("mydataset").export("./", "claude.response")  # export summaries
-
-Dataset persistence
--------------------
-
-In the example above, the chain resolves to a saved dataset “mydataset”. DataChain datasets are immutable and versioned. A saved dataset version can be used as a data source:
-
-.. code:: py
-
-    ds = dc.DataChain("mydataset", version = 1)
-
-Note that DataChain represents file samples as pointers into their respective storage locations. This means a newly created dataset version does not duplicate files in storage, and storage remains the single source of truth for the original samples
+    # pip install mistralai
+    # This example requires a free Mistral API key; get yours at https://console.mistral.ai
+    # Add the key to your shell environment: $ export MISTRAL_API_KEY=<your key>
+
+    import os
+
+    from mistralai.client import MistralClient
+    from mistralai.models.chat_completion import ChatMessage
+
+    from datachain.lib.dc import DataChain, Column
+
+    PROMPT = "Was this bot dialog successful? Describe the 'result' as 'Yes' or 'No' in a short JSON"
+
+    model = "mistral-large-latest"
+    api_key = os.environ["MISTRAL_API_KEY"]
+
+    chain = (
+        DataChain.from_storage("gs://datachain-demo/chatbot-KiT/")
+        .limit(5)
+        .settings(cache=True, parallel=5)
+        .map(
+            mistral_response=lambda file: MistralClient(api_key=api_key)
+            .chat(
+                model=model,
+                response_format={"type": "json_object"},
+                messages=[
+                    ChatMessage(role="user", content=f"{PROMPT}: {file.get_value()}")
+                ],
+            )
+            .choices[0]
+            .message.content,
+        )
+        .save()
+    )
+
+    try:
+        print(chain.select("mistral_response").results())
+    except Exception as e:
+        print(f"Do you have the right Mistral API key? {e}")
+
+.. code:: shell
+
+    [('{"result": "Yes"}',), ('{"result": "No"}',), ... , ('{"result": "Yes"}',)]
+
+Now we have parallel-processed an LLM API-based query over cloud data and persisted the results.
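+As a quick usage note, the results above are raw JSON strings; here is a minimal sketch that reuses ``chain`` from the example above and the stdlib ``json`` module to unpack the verdicts:
+
+.. code:: py
+
+    import json
+
+    # each row is a one-element tuple holding the model's JSON reply
+    for (raw,) in chain.select("mistral_response").results():
+        print(json.loads(raw)["result"])  # "Yes" / "No"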
- print(f"API class success rate: {100*rate:.2f}%") - >> 74.68% + failed_dialogs = chain.filter(Column("mistral_response") == '{"result": "No"}') + success_rate = failed_dialogs.count() / chain.count() + print(f"Chatbot dialog success rate: {100*success_rate:.2f}%") - price_input = 0.25 - price_output = 1.25 - price=(ds.sum(C.claude.usage.input_tokens)*price_input \ - + ds.sum(C.claude.usage.output_tokens)*price_output)/1_000_000 - print(f"Cost of API calls: ${price:.2f}") - >> Cost of API calls: $1.42 +.. code:: shell -Importing metadata ------------------------- + "40.00%" -It is common for AI data to come together with metadata (annotations, classes, etc). -DataChain understands many metadata formats, and can connect data samples in storage with external metadata (e.g. CSV columns) to form a single dataset: +Note that DataChain represents file samples as pointers into their respective storage locations. This means a newly created dataset version does not duplicate files in storage, and storage remains the single source of truth for the original samples + +Handling Python objects +----------------------- +In addition to storing primitive Python data types, chain is also capable of using data models. + +For example, instead of collecting just a text response from Mistral API, we might be interested in more fields of the Mistral response object. For this task, we can define a Pydantic-like model and populate it from the API replies: .. code:: py - from dc import parse_csv + import os - files = dc.DataChain("gs://datachain-demo/myimages/") - metadata = dc.DataChain("gs://datachain-demo/myimagesmetadata.csv") \ - .gen(meta=parse_csv) # TBD, also dependent on dropping file - dataset = chain1.merge(chain2, on = "file.name", right_on="name"]) + from mistralai.client import MistralClient + from mistralai.models.chat_completion import ChatMessage - print(dataset.select("file.name", "class", "prob").limit(5).to_pandas()) - .... - .... - .... - .... - .... + from datachain.lib.dc import DataChain + from datachain.lib.feature import Feature -Nested annotations (like JSON) can be unrolled into rows and columns in the way that best fits the application. For example, the MS COCO dataset includes JSON annotations detailing segmentations. To build a dataset consisting of all segmented objects in all COCO images: -.. code:: py + PROMPT = ( + "Was this dialog successful? Describe the 'result' as 'Yes' or 'No' in a short JSON" + ) - image_files = dc.DataChain("gs://datachain-demo/coco/images/") - image_meta = dc.DataChain("gs://datachain-demo/coco.json") \ - .gen(meta=parse_json, key="images") # list of images - images = image_files.merge(image_meta, on = "file.name", right_on="file_name") - objects_meta = dc.DataChain("gs://datachain-demo/coco.json") \ - .gen(meta=parse_json, key="annotations") # annotated objects + model = "mistral-large-latest" + api_key = os.environ["MISTRAL_API_KEY"] - objects = image.full_merge(objects_meta, on = "id", right_on = "image_id") -Generating metadata ---------------------- + ## define the data model ### + class Usage(Feature): + prompt_tokens: int = 0 + completion_tokens: int = 0 -A typical step in data curation is to create features from data samples for future selection. DataChain represents the newly created metadata as columns, which makes it easy to create new features and filter on them: -.. 
+.. code:: py
+
+    import os
+
+    from mistralai.client import MistralClient
+    from mistralai.models.chat_completion import ChatMessage
+
+    from datachain.lib.dc import DataChain
+    from datachain.lib.feature import Feature
+
+    PROMPT = (
+        "Was this dialog successful? Describe the 'result' as 'Yes' or 'No' in a short JSON"
+    )
+
+    model = "mistral-large-latest"
+    api_key = os.environ["MISTRAL_API_KEY"]
+
+    ### Define the data model ###
+    class Usage(Feature):
+        prompt_tokens: int = 0
+        completion_tokens: int = 0
+
+    class MyChatMessage(Feature):
+        role: str = ""
+        content: str = ""
+
+    class CompletionResponseChoice(Feature):
+        message: MyChatMessage = MyChatMessage()
+
+    class MistralModel(Feature):
+        id: str = ""
+        choices: list[CompletionResponseChoice]
+        usage: Usage = Usage()
+
+    ### Populate the model instances ###
+    chain = (
+        DataChain.from_storage("gs://datachain-demo/chatbot-KiT/")
+        .limit(5)
+        .settings(cache=True, parallel=5)
+        .map(
+            mistral_response=lambda file: MistralModel(
+                **MistralClient(api_key=api_key)
+                .chat(
+                    model=model,
+                    response_format={"type": "json_object"},
+                    messages=[
+                        ChatMessage(role="user", content=f"{PROMPT}: {file.get_value()}")
+                    ],
+                )
+                .dict()
+            ),
+            output=MistralModel,
+        )
+        .save("dialog-eval")
+    )
+
+After the chain execution, we can collect the objects:
+
+.. code:: py
+
+    # stream the signal values back as Python objects
+    # (assumed API; the collection method may differ between datachain versions)
+    responses = chain.collect("mistral_response")
+
+    for obj in responses:
+        assert isinstance(obj, MistralModel)
+        print(obj.dict())
+
+.. code:: shell
+
+    {'choices': [{'message': {'role': 'assistant', 'content': '{"result": "Yes"}'}}], 'usage': {'prompt_tokens': 610, 'completion_tokens': 6}}
+    {'choices': [{'message': {'role': 'assistant', 'content': '{"result": "No"}'}}], 'usage': {'prompt_tokens': 3983, 'completion_tokens': 6}}
+    {'choices': [{'message': {'role': 'assistant', 'content': '{"result": "Yes"}'}}], 'usage': {'prompt_tokens': 706, 'completion_tokens': 6}}
+    {'choices': [{'message': {'role': 'assistant', 'content': '{"result": "No"}'}}], 'usage': {'prompt_tokens': 1250, 'completion_tokens': 6}}
+    {'choices': [{'message': {'role': 'assistant', 'content': '{"result": "Yes"}'}}], 'usage': {'prompt_tokens': 1217, 'completion_tokens': 6}}
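+Since the usage fields are now structured data, simple aggregations need no extra API. A minimal sketch in plain Python, re-collecting the objects via the same assumed ``collect()`` call used above:
+
+.. code:: py
+
+    # tally the token usage Mistral reported across all five dialogs
+    responses = list(chain.collect("mistral_response"))
+    total = sum(r.usage.prompt_tokens + r.usage.completion_tokens for r in responses)
+    print(f"tokens billed: {total}")  # 7796 for the sample replies above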
-Delta updates
--------------
-
-DataChain is capable of “delta updates” – that is, batch-processing only the newly added data samples. For example, let us copy some images into a local folder and run a chain to generate captions with a locally served captioning model from HuggingFace:
-
-.. code:: console
-
-    > mkdir demo-images/
-    > datachain cp gs://datachain-demo/images/ /tmp/demo-images
-
-.. code:: py
-
-    import torch
-
-    from datachain.lib.hf_image_to_text import LLaVAdescribe
-    from datachain.query import C, DatasetQuery
-
-    source = "/tmp/demo-images"
-
-    if torch.cuda.is_available():
-        device = "cuda"
-    else:
-        device = "cpu"
-
-    if __name__ == "__main__":
-        results = (
-            DatasetQuery(
-                source,
-                anon=True,
-            )
-            .filter(C.name.glob("*.jpg"))
-            .add_signals(
-                LLaVAdescribe(
-                    device=device,
-                    model=model,
-                ),
-                parallel=False,
-            )
-            .save("annotated-images")
-        )
-
-Now let us add a few more images to the same folder:
-
-.. code:: console
-
-    > datachain cp gs://datachain-demo/extra-images/ /tmp/demo-images
-
-and calculate updates only for the delta:
-
-.. code:: py
-
-    processed = dc.DataChain("annotated-images")
-    delta = dc.dataChain("/tmp/demo-images").subtract(processed)
+Dataset persistence
+-------------------
+
+The “save” operation makes the chain's dataset persistent in the current (working) directory of the query. A hidden folder ``.datachain/`` holds the records. A persistent dataset can be accessed later to start a derivative chain:
+
+.. code:: py
+
+    DataChain.from_dataset("dialog-eval").limit(2).save("dialog-eval")
+
+Persistent datasets are immutable and automatically versioned. Versions can be listed from the shell:
+
+.. code:: shell
+
+    $ datachain ls-datasets
+
+    dialog-eval (v1)
+    dialog-eval (v2)
+
+By default, loading a persistent dataset fetches the latest version, but another version can be requested:
+
+.. code:: py
+
+    ds = DataChain.from_dataset("dialog-eval", version=1)
+
+Chain optimization and execution
+--------------------------------
+
+DataChain avoids redundant operations. Execution is triggered only when a downstream operation requests the processed results. However, it would be inefficient to re-run, say, the LLM queries every time you merely want to collect a few objects.
+
+The “save” operation pins the execution results and automatically refers back to them whenever downstream functions ask for data. Saving without an explicit name generates an auto-named dataset, which serves the same purpose.
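+To see the deferred execution in action, here is a minimal sketch reusing the dataset saved above (the ``dialog-sample`` name is arbitrary):
+
+.. code:: py
+
+    # building a chain is cheap: nothing is read or computed yet
+    lazy = DataChain.from_dataset("dialog-eval").limit(2)
+
+    # the first downstream request triggers execution...
+    print(lazy.count())
+
+    # ...and an explicit save() materializes the results for later reuse
+    saved = lazy.save("dialog-sample")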
+Matching data with metadata
+---------------------------
+
+It is common for AI data to come with pre-computed metadata (annotations, classes, etc.).
+
+The DataChain library understands common metadata formats (JSON, CSV, and Parquet), and can unite data samples from storage with side-loaded metadata. The schema for the metadata can be set explicitly or inferred.
+
+Here is an example of reading a CSV file, where the schema is heuristically derived from the header:
+
+.. code:: py
+
+    from datachain.lib.dc import DataChain
+
+    csv_dataset = DataChain.from_csv("gs://datachain-demo/chatbot-csv/")
+
+    print(csv_dataset.to_pandas())
+
+Reading metadata in JSON format is a more complicated scenario, because a JSON-annotated dataset typically references its data samples (e.g. images) in annotation arrays somewhere within the JSON files.
+
+Here is an example from the MS COCO “captions” JSON, which employs separate sections for image metadata and captions:
+
+.. code:: json
+
+    {
+      "images": [
+        {
+          "license": 4,
+          "file_name": "000000397133.jpg",
+          "coco_url": "http://images.cocodataset.org/val2017/000000397133.jpg",
+          "height": 427,
+          "width": 640,
+          "date_captured": "2013-11-14 17:02:52",
+          "flickr_url": "http://farm7.staticflickr.com/6116/6255196340_da26cf2c9e_z.jpg",
+          "id": 397133
+        },
+        ...
+      ],
+      "annotations": [
+        {
+          "image_id": "179765",
+          "id": 38,
+          "caption": "A black Honda motorcycle parked in front of a garage."
+        },
+        ...
+      ],
+      ...
+    }
+
+To deal with this layout, we can take the following steps:
+
+1. Generate a dataset of raw image files from storage
+2. Generate a meta-information dataset from the JSON section “images”
+3. Join these datasets via the matching id keys
+
+.. code:: py
+
+    from datachain.lib.dc import DataChain
+
+    images = DataChain.from_storage("gs://datachain-demo/coco2017/images/val/")
+    meta = DataChain.from_json("gs://datachain-demo/coco2017/annotations_captions", jmespath="images")
+
+    images_with_meta = images.merge(meta, on="file.name", right_on="images.file_name")
+
+    print(images_with_meta.limit(1).results())
+
+.. code:: shell
+
+    Processed: 5000 rows [00:00, 15481.66 rows/s]
+    Processed: 1 rows [00:00, 1291.75 rows/s]
+    Processed: 1 rows [00:00, 4.70 rows/s]
+    Generated: 5000 rows [00:00, 27128.67 rows/s]
+    [(1, 2336066478558845549, '', 0, 'coco2017/images/val', '000000000139.jpg', 'CNvXoemj8IYDEAE=', '1719096046021595', 1, datetime.datetime(2024, 6, 22, 22, 40, 46, 70000, tzinfo=datetime.timezone.utc), 161811, '', '', None, 'gs://datachain-demo', 'gs://datachain-demo', 'coco2017/images/val', '000000000139.jpg', 161811, '1719096046021595', 'CNvXoemj8IYDEAE=', 1, datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), None, '', 4146, 6967063844996569113, 2, '000000000139.jpg', 'http://images.cocodataset.org/val2017/000000000139.jpg', 426, 640, '2013-11-21 01:34:01', 'http://farm9.staticflickr.com/8035/8024364858_9c41dc1666_z.jpg', 139)]

 Passing data to training
 ------------------------

-Datasets can be exported to CSV or webdataset formats. However, a much better way to pass data to training which avoids data copies and re-sharding is to wrap a DataChain dataset into a PyTorch class, and let the library take care of file downloads and caching under the hood:
+Chain results can be exported or passed directly to a PyTorch data loader. For example, if we are interested in passing three columns to training, the following PyTorch code will do it:

 .. code:: py

-    ds = dc.DataChain("gs://datachain-demo/name-labeled/images/")
-        .filter(C.name.glob("*.jpg"))
-        .map(lambda name: (name[:3],), output={"label": str}, parallel=4)
-    )
-
-    train_loader = DataLoader(
-        ds.to_pytorch(
-            ImageReader(),
-            LabelReader("label", classes=CLASSES),
-            transform=transform,
-        ),
-        batch_size=16,
-        parallel=2,
-    )
+    import torch
+    from torch.utils.data import DataLoader
+
+    # `train` (a chain), `preprocess`, `clip`, `model`, and the `train()` loop
+    # are assumed to be defined in the surrounding training script
+    ds = train.select("file", "caption_choices", "label_ind").to_pytorch(
+        transform=preprocess,
+        tokenizer=clip.tokenize,
+    )
+
+    loader = DataLoader(ds, batch_size=2)
+    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
+    train(loader, model, optimizer)

 Tutorials
------------------

-* `Computer Vision `_ (try in `Colab `__)
 * `Multimodal `_ (try in `Colab `__)

-💻  More examples
-------------------
-
-* Curating images to train a custom CLIP model without re-sharding the Webdataset files
-* Batch-transforming and indexing images to create a searchable merchandise catalog
-* Evaluating an LLM application at scale
-* Ranking the LLM retrieval strategies
-* Delta updates in batch processing
-
 Contributions
--------------------
diff --git a/src/datachain/__init__.py b/src/datachain/__init__.py
index e69de29bb..0242cdbdd 100644
--- a/src/datachain/__init__.py
+++ b/src/datachain/__init__.py
@@ -0,0 +1,34 @@
+from datachain.lib.dc import C, DataChain
+from datachain.lib.feature import Feature
+from datachain.lib.feature_utils import pydantic_to_feature
+from datachain.lib.file import File, FileError, FileFeature, IndexedFile, TarVFile
+from datachain.lib.image import ImageFile, convert_images
+from datachain.lib.text import convert_text
+from datachain.lib.udf import Aggregator, Generator, Mapper
+from datachain.lib.utils import AbstractUDF, DataChainError
+from datachain.query.dataset import UDF as BaseUDF  # noqa: N811
+from 
datachain.query.schema import Column +from datachain.query.session import Session + +__all__ = [ + "AbstractUDF", + "Aggregator", + "BaseUDF", + "C", + "Column", + "DataChain", + "DataChainError", + "Feature", + "File", + "FileError", + "FileFeature", + "Generator", + "ImageFile", + "IndexedFile", + "Mapper", + "Session", + "TarVFile", + "convert_images", + "convert_text", + "pydantic_to_feature", +] diff --git a/src/datachain/lib/feature.py b/src/datachain/lib/feature.py index aec5a5778..ca208655d 100644 --- a/src/datachain/lib/feature.py +++ b/src/datachain/lib/feature.py @@ -4,6 +4,7 @@ import warnings from collections.abc import Iterable, Sequence from datetime import datetime +from enum import Enum from functools import lru_cache from types import GenericAlias from typing import ( @@ -63,6 +64,7 @@ str: String, Literal: String, LiteralEx: String, + Enum: String, float: Float, bool: Boolean, datetime: DateTime, # Note, list of datetime is not supported yet @@ -364,8 +366,11 @@ def _resolve(cls, name, field_info, prefix: list[str]): def convert_type_to_datachain(typ): # noqa: PLR0911 - if inspect.isclass(typ) and issubclass(typ, SQLType): - return typ + if inspect.isclass(typ): + if issubclass(typ, SQLType): + return typ + if issubclass(typ, Enum): + return str res = TYPE_TO_DATACHAIN.get(typ) if res: diff --git a/src/datachain/lib/feature_utils.py b/src/datachain/lib/feature_utils.py index e2ee4e2bb..15d716c72 100644 --- a/src/datachain/lib/feature_utils.py +++ b/src/datachain/lib/feature_utils.py @@ -1,5 +1,7 @@ +import inspect import string from collections.abc import Sequence +from enum import Enum from typing import Any, Union, get_args, get_origin from pydantic import BaseModel, create_model @@ -35,23 +37,7 @@ def pydantic_to_feature(data_cls: type[BaseModel]) -> type[Feature]: for name, field_info in data_cls.model_fields.items(): anno = field_info.annotation if anno not in TYPE_TO_DATACHAIN: - orig = get_origin(anno) - if orig is list: - anno = get_args(anno) # type: ignore[assignment] - if isinstance(anno, Sequence): - anno = anno[0] # type: ignore[unreachable] - is_list = True - else: - is_list = False - - try: - convert_type_to_datachain(anno) - except TypeError: - if not Feature.is_feature(anno): # type: ignore[arg-type] - anno = pydantic_to_feature(anno) # type: ignore[arg-type] - - if is_list: - anno = list[anno] # type: ignore[valid-type] + anno = _to_feature_type(anno) fields[name] = (anno, field_info.default) cls = create_model( @@ -63,6 +49,38 @@ def pydantic_to_feature(data_cls: type[BaseModel]) -> type[Feature]: return cls +def _to_feature_type(anno): + if inspect.isclass(anno) and issubclass(anno, Enum): + return str + + orig = get_origin(anno) + if orig is list: + anno = get_args(anno) # type: ignore[assignment] + if isinstance(anno, Sequence): + anno = anno[0] # type: ignore[unreachable] + is_list = True + else: + is_list = False + + try: + convert_type_to_datachain(anno) + except TypeError: + if not Feature.is_feature(anno): # type: ignore[arg-type] + orig = get_origin(anno) + if orig in TYPE_TO_DATACHAIN: + anno = _to_feature_type(anno) + else: + if orig == Union: + args = get_args(anno) + if len(args) == 2 and (type(None) in args): + return _to_feature_type(args[0]) + + anno = pydantic_to_feature(anno) # type: ignore[arg-type] + if is_list: + anno = list[anno] # type: ignore[valid-type] + return anno + + def dict_to_feature(name: str, data_dict: dict[str, FeatureType]) -> type[Feature]: fields = {name: (anno, ...) 
for name, anno in data_dict.items()} return create_model( # type: ignore[call-overload] diff --git a/tests/conftest.py b/tests/conftest.py index 87ccfde12..c79000ddd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -42,6 +42,14 @@ def monkeypatch_session() -> Generator[MonkeyPatch, None, None]: mpatch.undo() +@pytest.fixture(autouse=True) +def clean_session() -> None: + """ + Make sure we clean leftover session before each test case + """ + Session.cleanup_for_tests() + + @pytest.fixture(scope="session", autouse=True) def clean_environment( monkeypatch_session: MonkeyPatch, diff --git a/tests/examples/wds_data.py b/tests/examples/wds_data.py index 90a7ceca1..fe9e46c00 100644 --- a/tests/examples/wds_data.py +++ b/tests/examples/wds_data.py @@ -7,7 +7,7 @@ "url": "https://i.imgur.com/mXQrfNs.png", "key": "000000000000", "status": "success", - "error_message": None, + "error_message": "", "width": 512, "height": 270, "original_width": 1704, @@ -29,7 +29,7 @@ "url": "http://i.ytimg.com/vi/if2V1iszwuA/default.jpg", "key": "000000000001", "status": "success", - "error_message": None, + "error_message": "", "width": 120, "height": 90, "original_width": 120, @@ -44,7 +44,7 @@ "url": "http://t0.gstatic.com/images?q=tbn:ANd9GcScIHR33LnMpupkxbZRqnj1YMvOXsc9uUTj8Wa2v8bhSjWTxTRo1w", "key": "000000000002", "status": "success", - "error_message": None, + "error_message": "", "width": 275, "height": 183, "original_width": 275, @@ -59,7 +59,7 @@ "url": "http://thumbs.ebaystatic.com/images/g/5kAAAOSwc1FXcDFI/s-l225.jpg", "key": "000000000003", "status": "success", - "error_message": None, + "error_message": "", "width": 80, "height": 80, "original_width": 80, @@ -74,7 +74,7 @@ "url": "https://www.dhresource.com/600x600/f2/albu/g8/M00/78/73/rBVaV150TlWAcLR4AAHizzfChbU318.jpg", "key": "000000000004", "status": "success", - "error_message": None, + "error_message": "", "width": 512, "height": 384, "original_width": 600, @@ -86,6 +86,8 @@ # data that represents metadata and goes to webdataset parquet file of webdataset +# TODO change float values to something other than 0.5 to test if double precision +# works as expected when https://github.com/iterative/datachain/issues/12 is done WDS_META = { "uid": { "0": "d142ae70686e14ccc379c01a571501b5", @@ -123,22 +125,22 @@ "4": 450, }, "clip_b32_similarity_score": { - "0": 0.2734375, - "1": 0.3813476562, - "2": 0.3312988281, - "3": 0.2091064453, - "4": 0.2038574219, + "0": 0.5, + "1": 0.5, + "2": 0.5, + "3": 0.5, + "4": 0.5, }, "clip_l14_similarity_score": { - "0": 0.2553710938, - "1": 0.3391113281, - "2": 0.2318115234, - "3": 0.1966552734, - "4": 0.1300048828, + "0": 0.5, + "1": 0.5, + "2": 0.5, + "3": 0.5, + "4": 0.5, }, "face_bboxes": { "0": [], - "1": [[0.5005972981, 0.1360414922, 0.8109994531, 0.7247588038]], + "1": [[0.5, 0.5, 0.5, 0.5]], "2": [], "3": [], "4": [], diff --git a/tests/unit/lib/test_datachain_bootstrap.py b/tests/unit/lib/test_datachain_bootstrap.py index 349f71634..6c7088fc5 100644 --- a/tests/unit/lib/test_datachain_bootstrap.py +++ b/tests/unit/lib/test_datachain_bootstrap.py @@ -24,7 +24,7 @@ def teardown(self): self.value = MyMapper.TEARDOWN_VALUE -def test_udf(catalog): +def test_udf(): vals = ["a", "b", "c", "d", "e", "f"] chain = DataChain.from_features(key=vals) @@ -36,7 +36,7 @@ def test_udf(catalog): @pytest.mark.skip(reason="Skip until tests module will be importer for unit-tests") -def test_udf_parallel(catalog): +def test_udf_parallel(): vals = ["a", "b", "c", "d", "e", "f"] chain = DataChain.from_features(key=vals) @@ 
-45,7 +45,7 @@ def test_udf_parallel(catalog): assert res == [MyMapper.BOOTSTRAP_VALUE] * len(vals) -def test_no_bootstrap_for_callable(catalog): +def test_no_bootstrap_for_callable(): class MyMapper: def __init__(self): self._had_bootstrap = False diff --git a/tests/unit/lib/test_feature_utils.py b/tests/unit/lib/test_feature_utils.py index 4577847e5..b87a4e2d0 100644 --- a/tests/unit/lib/test_feature_utils.py +++ b/tests/unit/lib/test_feature_utils.py @@ -1,9 +1,16 @@ +from enum import Enum from typing import get_args, get_origin import pytest +from pydantic import BaseModel from datachain.lib.dc import DataChain -from datachain.lib.feature_utils import FeatureToTupleError, features_to_tuples +from datachain.lib.feature import Feature +from datachain.lib.feature_utils import ( + FeatureToTupleError, + features_to_tuples, + pydantic_to_feature, +) from datachain.query.schema import Column @@ -104,3 +111,32 @@ def test_resolve_column(): def test_resolve_column_attr(): signal = Column.hello.world.again assert signal.name == "hello__world__again" + + +def test_to_feature_list_of_lists(): + class MyName1(BaseModel): + id: int + name: str + + class Mytest2(BaseModel): + loc: str + identity: list[list[MyName1]] + + cls = pydantic_to_feature(Mytest2) + + assert issubclass(cls, Feature) + + +def test_to_feature_function(): + class MyEnum(str, Enum): + func = "function" + + class MyCall(BaseModel): + id: str + type: MyEnum + + cls = pydantic_to_feature(MyCall) + assert issubclass(cls, Feature) + + type_ = cls.model_fields["type"].annotation + assert type_ is str diff --git a/tests/unit/test_module_exports.py b/tests/unit/test_module_exports.py new file mode 100644 index 000000000..77aec6dea --- /dev/null +++ b/tests/unit/test_module_exports.py @@ -0,0 +1,31 @@ +# flake8: noqa: F401 + +import pytest + + +def test_module_exports(): + try: + from datachain import ( + AbstractUDF, + Aggregator, + BaseUDF, + C, + Column, + DataChain, + DataChainError, + Feature, + File, + FileError, + FileFeature, + Generator, + ImageFile, + IndexedFile, + Mapper, + Session, + TarVFile, + convert_images, + convert_text, + pydantic_to_feature, + ) + except Exception as e: # noqa: BLE001 + pytest.fail(f"Importing raised an exception: {e}")