diff --git a/.changeset/shaggy-rats-draw.md b/.changeset/shaggy-rats-draw.md
new file mode 100644
index 000000000..d579556be
--- /dev/null
+++ b/.changeset/shaggy-rats-draw.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Bump the LlamaCloud library and fix breaking changes (Python).
diff --git a/helpers/python.ts b/helpers/python.ts
index 4cd8cb75c..5e08008b4 100644
--- a/helpers/python.ts
+++ b/helpers/python.ts
@@ -37,21 +37,21 @@ const getAdditionalDependencies = (
     case "mongo": {
      dependencies.push({
        name: "llama-index-vector-stores-mongodb",
-        version: "^0.3.1",
+        version: "^0.6.0",
      });
      break;
    }
    case "pg": {
      dependencies.push({
        name: "llama-index-vector-stores-postgres",
-        version: "^0.2.5",
+        version: "^0.3.2",
      });
      break;
    }
    case "pinecone": {
      dependencies.push({
        name: "llama-index-vector-stores-pinecone",
-        version: "^0.2.1",
+        version: "^0.4.1",
        constraints: {
          python: ">=3.11,<3.13",
        },
@@ -61,7 +61,7 @@ const getAdditionalDependencies = (
    case "milvus": {
      dependencies.push({
        name: "llama-index-vector-stores-milvus",
-        version: "^0.2.0",
+        version: "^0.3.0",
      });
      dependencies.push({
        name: "pymilvus",
@@ -72,14 +72,14 @@ const getAdditionalDependencies = (
    case "astra": {
      dependencies.push({
        name: "llama-index-vector-stores-astra-db",
-        version: "^0.2.0",
+        version: "^0.4.0",
      });
      break;
    }
    case "qdrant": {
      dependencies.push({
        name: "llama-index-vector-stores-qdrant",
-        version: "^0.3.0",
+        version: "^0.4.0",
        constraints: {
          python: ">=3.11,<3.13",
        },
@@ -89,21 +89,21 @@ const getAdditionalDependencies = (
    case "chroma": {
      dependencies.push({
        name: "llama-index-vector-stores-chroma",
-        version: "^0.2.0",
+        version: "^0.4.0",
      });
      break;
    }
    case "weaviate": {
      dependencies.push({
        name: "llama-index-vector-stores-weaviate",
-        version: "^1.1.1",
+        version: "^1.2.3",
      });
      break;
    }
    case "llamacloud":
      dependencies.push({
        name: "llama-index-indices-managed-llama-cloud",
-        version: "^0.6.0",
+        version: "^0.6.3",
      });
      break;
  }
@@ -122,13 +122,13 @@ const getAdditionalDependencies = (
    case "web":
      dependencies.push({
        name: "llama-index-readers-web",
-        version: "^0.2.2",
+        version: "^0.3.0",
      });
      break;
    case "db":
      dependencies.push({
        name: "llama-index-readers-database",
-        version: "^0.2.0",
+        version: "^0.3.0",
      });
      dependencies.push({
        name: "pymysql",
@@ -167,15 +167,15 @@ const getAdditionalDependencies = (
      if (templateType !== "multiagent") {
        dependencies.push({
          name: "llama-index-llms-openai",
-          version: "^0.2.0",
+          version: "^0.3.2",
        });
        dependencies.push({
          name: "llama-index-embeddings-openai",
-          version: "^0.2.3",
+          version: "^0.3.1",
        });
        dependencies.push({
          name: "llama-index-agent-openai",
-          version: "^0.3.0",
+          version: "^0.4.0",
        });
      }
      break;
@@ -524,7 +524,7 @@ export const installPythonTemplate = async ({
  if (observability === "llamatrace") {
    addOnDependencies.push({
      name: "llama-index-callbacks-arize-phoenix",
-      version: "^0.2.1",
+      version: "^0.3.0",
      constraints: {
        python: ">=3.11,<3.13",
      },
diff --git a/helpers/tools.ts b/helpers/tools.ts
index c3a34390c..33bd96e60 100644
--- a/helpers/tools.ts
+++ b/helpers/tools.ts
@@ -41,7 +41,7 @@ export const supportedTools: Tool[] = [
    dependencies: [
      {
        name: "llama-index-tools-google",
-        version: "^0.2.0",
+        version: "^0.3.0",
      },
    ],
    supportedFrameworks: ["fastapi"],
@@ -82,7 +82,7 @@ For better results, you can specify the region parameter to get results from a specific region.
    dependencies: [
      {
        name: "llama-index-tools-wikipedia",
-        version: "^0.2.0",
+        version: "^0.3.0",
      },
    ],
    supportedFrameworks: ["fastapi", "express", "nextjs"],
diff --git a/templates/components/vectordbs/python/llamacloud/generate.py b/templates/components/vectordbs/python/llamacloud/generate.py
index acd28777e..3932b86ab 100644
--- a/templates/components/vectordbs/python/llamacloud/generate.py
+++ b/templates/components/vectordbs/python/llamacloud/generate.py
@@ -1,5 +1,4 @@
 # flake8: noqa: E402
-import os
 
 from dotenv import load_dotenv
 
@@ -7,62 +6,24 @@
 import logging
 
-from app.engine.index import get_client, get_index
+from llama_index.core.readers import SimpleDirectoryReader
+from tqdm import tqdm
+
+from app.engine.index import get_index
 from app.engine.service import LLamaCloudFileService  # type: ignore
 from app.settings import init_settings
-from llama_cloud import PipelineType
-from llama_index.core.readers import SimpleDirectoryReader
-from llama_index.core.settings import Settings
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger()
 
 
-def ensure_index(index):
-    project_id = index._get_project_id()
-    client = get_client()
-    pipelines = client.pipelines.search_pipelines(
-        project_id=project_id,
-        pipeline_name=index.name,
-        pipeline_type=PipelineType.MANAGED.value,
-    )
-    if len(pipelines) == 0:
-        from llama_index.embeddings.openai import OpenAIEmbedding
-
-        if not isinstance(Settings.embed_model, OpenAIEmbedding):
-            raise ValueError(
-                "Creating a new pipeline with a non-OpenAI embedding model is not supported."
-            )
-        client.pipelines.upsert_pipeline(
-            project_id=project_id,
-            request={
-                "name": index.name,
-                "embedding_config": {
-                    "type": "OPENAI_EMBEDDING",
-                    "component": {
-                        "api_key": os.getenv("OPENAI_API_KEY"),  # editable
-                        "model_name": os.getenv("EMBEDDING_MODEL"),
-                    },
-                },
-                "transform_config": {
-                    "mode": "auto",
-                    "config": {
-                        "chunk_size": Settings.chunk_size,  # editable
-                        "chunk_overlap": Settings.chunk_overlap,  # editable
-                    },
-                },
-            },
-        )
-
-
 def generate_datasource():
     init_settings()
     logger.info("Generate index for the provided data")
 
-    index = get_index()
-    ensure_index(index)
-    project_id = index._get_project_id()
-    pipeline_id = index._get_pipeline_id()
+    index = get_index(create_if_missing=True)
+    if index is None:
+        raise ValueError("Index not found and could not be created")
 
     # use SimpleDirectoryReader to retrieve the files to process
     reader = SimpleDirectoryReader(
@@ -72,14 +33,30 @@ def generate_datasource():
     files_to_process = reader.input_files
 
     # add each file to the LlamaCloud pipeline
-    for input_file in files_to_process:
+    error_files = []
+    for input_file in tqdm(
+        files_to_process,
+        desc="Processing files",
+        unit="file",
+    ):
         with open(input_file, "rb") as f:
-            logger.info(
+            logger.debug(
                 f"Adding file {input_file} to pipeline {index.name} in project {index.project_name}"
             )
-            LLamaCloudFileService.add_file_to_pipeline(
-                project_id, pipeline_id, f, custom_metadata={}
-            )
+            try:
+                LLamaCloudFileService.add_file_to_pipeline(
+                    index.project.id,
+                    index.pipeline.id,
+                    f,
+                    custom_metadata={},
+                    wait_for_processing=False,
+                )
+            except Exception as e:
+                error_files.append(input_file)
+                logger.error(f"Error adding file {input_file}: {e}")
+
+    if error_files:
+        logger.error(f"Failed to add the following files: {error_files}")
 
     logger.info("Finished generating the index")
diff --git a/templates/components/vectordbs/python/llamacloud/index.py b/templates/components/vectordbs/python/llamacloud/index.py
index f6f7e4f67..97261900a 100644
--- a/templates/components/vectordbs/python/llamacloud/index.py
+++ b/templates/components/vectordbs/python/llamacloud/index.py
@@ -2,10 +2,12 @@
 import os
 from typing import Optional
 
+from llama_cloud import PipelineType
 from llama_index.core.callbacks import CallbackManager
 from llama_index.core.ingestion.api_utils import (
     get_client as llama_cloud_get_client,
 )
+from llama_index.core.settings import Settings
 from llama_index.indices.managed.llama_cloud import LlamaCloudIndex
 from pydantic import BaseModel, Field, field_validator
 
@@ -82,14 +84,63 @@ def to_index_kwargs(self) -> dict:
         }
 
 
-def get_index(config: IndexConfig = None):
+def get_index(
+    config: IndexConfig = None,
+    create_if_missing: bool = False,
+):
     if config is None:
         config = IndexConfig()
-    index = LlamaCloudIndex(**config.to_index_kwargs())
-
-    return index
+    # Check whether the index exists
+    try:
+        index = LlamaCloudIndex(**config.to_index_kwargs())
+        return index
+    except ValueError:
+        logger.warning("Index not found")
+        if create_if_missing:
+            logger.info("Creating index")
+            _create_index(config)
+            return LlamaCloudIndex(**config.to_index_kwargs())
+        return None
 
 
 def get_client():
     config = LlamaCloudConfig()
     return llama_cloud_get_client(**config.to_client_kwargs())
+
+
+def _create_index(
+    config: IndexConfig,
+):
+    client = get_client()
+    pipeline_name = config.llama_cloud_pipeline_config.pipeline
+
+    pipelines = client.pipelines.search_pipelines(
+        pipeline_name=pipeline_name,
+        pipeline_type=PipelineType.MANAGED.value,
+    )
+    if len(pipelines) == 0:
+        from llama_index.embeddings.openai import OpenAIEmbedding
+
+        if not isinstance(Settings.embed_model, OpenAIEmbedding):
+            raise ValueError(
+                "Creating a new pipeline with a non-OpenAI embedding model is not supported."
+            )
+        client.pipelines.upsert_pipeline(
+            request={
+                "name": pipeline_name,
+                "embedding_config": {
+                    "type": "OPENAI_EMBEDDING",
+                    "component": {
+                        "api_key": os.getenv("OPENAI_API_KEY"),  # editable
+                        "model_name": os.getenv("EMBEDDING_MODEL"),
+                    },
+                },
+                "transform_config": {
+                    "mode": "auto",
+                    "config": {
+                        "chunk_size": Settings.chunk_size,  # editable
+                        "chunk_overlap": Settings.chunk_overlap,  # editable
+                    },
+                },
+            },
+        )
diff --git a/templates/components/vectordbs/python/llamacloud/service.py b/templates/components/vectordbs/python/llamacloud/service.py
index 68216f98e..31b91365d 100644
--- a/templates/components/vectordbs/python/llamacloud/service.py
+++ b/templates/components/vectordbs/python/llamacloud/service.py
@@ -1,18 +1,18 @@
-from io import BytesIO
 import logging
 import os
 import time
-from typing import Any, Dict, List, Optional, Set, Tuple, Union
 import typing
+from io import BytesIO
+from typing import Any, Dict, List, Optional, Set, Tuple, Union
 
+import requests
 from fastapi import BackgroundTasks
 from llama_cloud import ManagedIngestionStatus, PipelineFileCreateCustomMetadataValue
+from llama_index.core.schema import NodeWithScore
 from pydantic import BaseModel
-import requests
+
 from app.api.routers.models import SourceNodes
 from app.engine.index import get_client
-from llama_index.core.schema import NodeWithScore
-
 
 logger = logging.getLogger("uvicorn")
@@ -64,27 +64,34 @@ def add_file_to_pipeline(
         pipeline_id: str,
         upload_file: Union[typing.IO, Tuple[str, BytesIO]],
         custom_metadata: Optional[Dict[str, PipelineFileCreateCustomMetadataValue]],
+        wait_for_processing: bool = True,
     ) -> str:
         client = get_client()
         file = client.files.upload_file(project_id=project_id, upload_file=upload_file)
+        file_id = file.id
         files = [
             {
-                "file_id": file.id,
-                "custom_metadata": {"file_id": file.id, **(custom_metadata or {})},
+                "file_id": file_id,
+                "custom_metadata": {"file_id": file_id, **(custom_metadata or {})},
             }
         ]
         files = client.pipelines.add_files_to_pipeline(pipeline_id, request=files)
 
+        if not wait_for_processing:
+            return file_id
+
         # Wait 2s for the file to be processed
         max_attempts = 20
         attempt = 0
         while attempt < max_attempts:
-            result = client.pipelines.get_pipeline_file_status(pipeline_id, file.id)
+            result = client.pipelines.get_pipeline_file_status(
+                file_id=file_id, pipeline_id=pipeline_id
+            )
             if result.status == ManagedIngestionStatus.ERROR:
                 raise Exception(f"File processing failed: {str(result)}")
             if result.status == ManagedIngestionStatus.SUCCESS:
                 # File is ingested - return the file id
-                return file.id
+                return file_id
             attempt += 1
             time.sleep(0.1)  # Sleep for 100ms
         raise Exception(
diff --git a/templates/types/extractor/fastapi/app/ui/components/upload.py b/templates/types/extractor/fastapi/app/ui/components/upload.py
index 64421feb9..e404840a1 100644
--- a/templates/types/extractor/fastapi/app/ui/components/upload.py
+++ b/templates/types/extractor/fastapi/app/ui/components/upload.py
@@ -2,6 +2,7 @@
 from typing import List
 
 import reflex as rx
+
 from app.engine.generate import generate_datasource
 
 
@@ -78,10 +79,10 @@ def upload_component() -> rx.Component:
             UploadedFilesState.uploaded_files,
             lambda file: rx.card(
                 rx.stack(
-                    rx.text(file.file_name, size="sm"),
+                    rx.text(file.file_name, size="2"),
                     rx.button(
                         "x",
-                        size="sm",
+                        size="2",
                         on_click=UploadedFilesState.remove_file(file.file_name),
                     ),
                     justify="between",
diff --git a/templates/types/extractor/fastapi/pyproject.toml b/templates/types/extractor/fastapi/pyproject.toml
index e9574a019..a9cad0f76 100644
--- a/templates/types/extractor/fastapi/pyproject.toml
+++ b/templates/types/extractor/fastapi/pyproject.toml
@@ -14,7 +14,7 @@ fastapi = "^0.109.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
 python-dotenv = "^1.0.0"
 pydantic = "<2.10"
-llama-index = "^0.11.1"
+llama-index = "^0.12.1"
 cachetools = "^5.3.3"
 reflex = "^0.6.2.post1"
diff --git a/templates/types/streaming/fastapi/app/services/file.py b/templates/types/streaming/fastapi/app/services/file.py
index 7aa6696c3..3fc1a64f1 100644
--- a/templates/types/streaming/fastapi/app/services/file.py
+++ b/templates/types/streaming/fastapi/app/services/file.py
@@ -249,6 +249,7 @@ def _add_file_to_llama_cloud_index(
         index.pipeline.id,
         upload_file,
         custom_metadata={},
+        wait_for_processing=True,
     )
     return doc_id
diff --git a/templates/types/streaming/fastapi/pyproject.toml b/templates/types/streaming/fastapi/pyproject.toml
index 4d181fa6b..46672bc51 100644
--- a/templates/types/streaming/fastapi/pyproject.toml
+++ b/templates/types/streaming/fastapi/pyproject.toml
@@ -19,7 +19,7 @@ python-dotenv = "^1.0.0"
 pydantic = "<2.10"
 aiostream = "^0.5.2"
 cachetools = "^5.3.3"
-llama-index = "^0.11.17"
+llama-index = "^0.12.1"
 rich = "^13.9.4"
 
 [tool.poetry.group.dev.dependencies]
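For reference, a minimal usage sketch of the reworked Python surface introduced by this patch, assuming the template's `app.engine.index`, `app.engine.service`, and `app.settings` modules as defined above; the data file path is illustrative, not part of the patch:

```python
# Sketch only: exercises get_index(create_if_missing=...) and the new
# wait_for_processing flag on add_file_to_pipeline, as added in this patch.
from app.engine.index import get_index
from app.engine.service import LLamaCloudFileService
from app.settings import init_settings

init_settings()

# get_index() now returns None when the LlamaCloud pipeline is missing,
# unless create_if_missing=True is passed.
index = get_index(create_if_missing=True)
if index is None:
    raise RuntimeError("LlamaCloud index not found and could not be created")

# With wait_for_processing=False the call returns the uploaded file id
# immediately instead of polling the pipeline's ingestion status.
with open("data/example.pdf", "rb") as f:  # illustrative path
    file_id = LLamaCloudFileService.add_file_to_pipeline(
        index.project.id,
        index.pipeline.id,
        f,
        custom_metadata={},
        wait_for_processing=False,
    )
```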