diff --git a/.refactory.tags.cache.v3/cache.db b/.refactory.tags.cache.v3/cache.db new file mode 100644 index 0000000..b61236b Binary files /dev/null and b/.refactory.tags.cache.v3/cache.db differ diff --git a/.refactory.tags.cache.v3/cache.db-shm b/.refactory.tags.cache.v3/cache.db-shm new file mode 100644 index 0000000..fc17058 Binary files /dev/null and b/.refactory.tags.cache.v3/cache.db-shm differ diff --git a/.refactory.tags.cache.v3/cache.db-wal b/.refactory.tags.cache.v3/cache.db-wal new file mode 100644 index 0000000..4d040a1 Binary files /dev/null and b/.refactory.tags.cache.v3/cache.db-wal differ diff --git "a/0_\360\237\224\214API_KEY.py" "b/0_\360\237\224\214API_KEY.py" index 112f3af..f86917e 100644 --- "a/0_\360\237\224\214API_KEY.py" +++ "b/0_\360\237\224\214API_KEY.py" @@ -1,35 +1,109 @@ -import streamlit as st -from streamlit_extras.switch_page_button import switch_page -import os -import time -import tempfile -import openai -from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, set_global_service_context -from llama_index.llms.openai import OpenAI -from functions import sidebar_stuff1 - - -st.set_page_config(page_title="Talk to PDF", page_icon=":robot_face:", layout="wide") -st.title("Talk to your PDF 🤖 📑️") - - -st.write("#### Enter your OpenAI api key below :") -api_key = st.text_input("Enter your OpenAI API key (https://platform.openai.com/account/api-keys)", type="password") -st.session_state['api_key'] = api_key - -if not api_key : - st.sidebar.warning("⚠️ Please enter OpenAI API key") -else: - openai.api_key = api_key - -submit = st.button("Submit",use_container_width=True) -if submit: - st.sidebar.success("✅ API key entered successfully") - time.sleep(1.5) - switch_page('upload pdf') -sidebar_stuff1() - - - - - +""" +API Key Configuration Module for Talk to PDF Application + +This module handles the OpenAI API key configuration and validation for the Talk to PDF application. +It provides a user interface for API key input and manages the transition to the PDF upload page. +""" + +import os +import time +from typing import Optional + +import openai +import streamlit as st +from streamlit_extras.switch_page_button import switch_page +from llama_index.core import ( + VectorStoreIndex, + SimpleDirectoryReader, + ServiceContext, + set_global_service_context +) +from llama_index.llms.openai import OpenAI + +from functions import sidebar_stuff1 + +# Constants +TITLE = "Talk to your PDF 🤖 📑️" +PAGE_ICON = ":robot_face:" +API_KEY_PLACEHOLDER = "Enter your OpenAI API key (https://platform.openai.com/account/api-keys)" +SUCCESS_MESSAGE = "✅ API key entered successfully" +WARNING_MESSAGE = "⚠️ Please enter OpenAI API key" +NEXT_PAGE = "upload pdf" +TRANSITION_DELAY = 1.5 + +def initialize_page_config() -> None: + """Initialize Streamlit page configuration with title and layout settings.""" + st.set_page_config( + page_title="Talk to PDF", + page_icon=PAGE_ICON, + layout="wide" + ) + st.title(TITLE) + +def validate_api_key(api_key: str) -> bool: + """ + Validate the provided OpenAI API key. 
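+
+    Currently this is only a lightweight check (the key must be non-empty).
+    A stricter check (illustrative sketch, not wired in) could issue a
+    minimal request and treat an authentication failure as an invalid key::
+
+        client = openai.OpenAI(api_key=api_key)
+        client.models.list()  # raises openai.AuthenticationError on a bad key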
+
+    Args:
+        api_key (str): The API key to validate
+
+    Returns:
+        bool: True if the API key is valid, False otherwise
+    """
+    if not api_key:
+        return False
+
+    # TODO: Add actual API key validation by making a test request to OpenAI
+    return True
+
+def handle_api_key_submission() -> None:
+    """Handle the API key input and submission process."""
+    st.write("#### Enter your OpenAI API key below:")
+
+    # API key input with password protection
+    api_key = st.text_input(
+        API_KEY_PLACEHOLDER,
+        type="password",
+        key="api_key_input"
+    )
+
+    # Store API key in session state
+    st.session_state['api_key'] = api_key
+
+    # Display warning if API key is missing
+    if not api_key:
+        st.sidebar.warning(WARNING_MESSAGE)
+    else:
+        try:
+            openai.api_key = api_key
+        except Exception as e:
+            st.error(f"Error setting API key: {str(e)}")
+            return
+
+    # Handle submit button
+    if st.button("Submit", use_container_width=True):
+        if validate_api_key(api_key):
+            st.sidebar.success(SUCCESS_MESSAGE)
+            time.sleep(TRANSITION_DELAY)
+            switch_page(NEXT_PAGE)
+        else:
+            st.error("Invalid API key. Please check and try again.")
+
+def main():
+    """Main function to run the API key configuration interface."""
+    try:
+        # Initialize page configuration
+        initialize_page_config()
+
+        # Handle API key submission
+        handle_api_key_submission()
+
+        # Display sidebar content
+        sidebar_stuff1()
+
+    except Exception as e:
+        st.error(f"An error occurred: {str(e)}")
+        # TODO: Add proper error logging
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/LEARN.md b/LEARN.md
index f12fa99..e923b59 100644
--- a/LEARN.md
+++ b/LEARN.md
@@ -1,29 +1,115 @@
 # Talk to PDF 🤖 📑️
-This is the README file for the "Talk to PDF" code. The code is written in Python and uses the Streamlit library to create an interactive web application. The application allows users to ask questions about the content of a PDF file using natural language and receive instant answers powered by an AI question-answering system.
+[![Python 3.11](https://img.shields.io/badge/python-3.11-blue.svg)](https://www.python.org/downloads/)
+[![Streamlit](https://img.shields.io/badge/Streamlit-1.38.0-FF4B4B.svg)](https://streamlit.io)
+[![OpenAI](https://img.shields.io/badge/OpenAI-1.43.0-412991.svg)](https://openai.com)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
-## Usage
+An intelligent document interaction system that enables natural language conversations with PDF documents. Powered by OpenAI's language models and built with Streamlit, this application allows users to extract information and insights from PDFs through simple questions and answers.
-1. **Step 1: Enter your OpenAI API key**
-   - Open the application and find the "Step 1: Enter your OpenAI API key" section.
-   - Obtain an OpenAI API key from the OpenAI platform if you don't have one.
-   - Enter your API key in the text input field.
-   - Click the "Submit" button to set the OpenAI API key.
+## Features
-2. **Step 2: Upload your PDF**
-   - In the "Step 2: Upload your PDF" section, click the "Browse Files" button.
-   - Select a PDF file from your device to upload.
-   - Wait for the PDF to finish uploading.
+- 📚 Natural language interaction with PDF documents
+- 🔍 Context-aware question answering
+- 🚀 Real-time response streaming
+- 🎯 Customizable AI model parameters
+- 🔒 Secure API key management
+- 📱 Responsive web interface
-3. **Ask a question**
-   - Once the PDF is uploaded, you will see an input box labeled "Ask a question."
- - Enter your question about the PDF content in the input box. +## Installation -4. **Get the answer** - - Click the "Ask" button to submit your question. - - The application will use the uploaded PDF and the question to generate a response using the AI question-answering system. - - The response will be displayed on the screen. +```bash +# Clone the repository +git clone https://github.com/yourusername/talk-to-pdf.git +cd talk-to-pdf -Feel free to reach out to the author, [@Obelisk_1531](https://twitter.com/Obelisk_1531), for any questions or feedback. +# Install dependencies +pip install -r requirements.txt +``` -Enjoy interacting with your PDFs using natural language! 🚀📄 +## Requirements + +- Python 3.11 or higher +- OpenAI API key +- Dependencies listed in `requirements.txt` + +## Usage Guide + +### 1. API Key Configuration + +- Launch the application +- Navigate to the API key configuration section +- Obtain an OpenAI API key from [OpenAI Platform](https://platform.openai.com/account/api-keys) +- Enter your API key in the secure input field +- Click "Submit" to save your configuration + +### 2. Document Upload + +- Go to the PDF upload section +- Click "Browse Files" to select your PDF +- Support for multiple PDF uploads +- Wait for document processing to complete + +### 3. Interactive Chat + +- Type your questions in the chat input box +- Get AI-powered responses based on document content +- Maintain context throughout the conversation +- Adjust model parameters for different response styles + +### 4. Advanced Options + +- Choose between different OpenAI models (GPT-3.5-Turbo or GPT-4) +- Adjust temperature settings for response creativity +- Configure chunk size for document processing +- Customize response formatting + +## Configuration + +The application supports various configuration options: + +```python +# Model Configuration +model_name = "gpt-3.5-turbo" # or "gpt-4" +temperature = 0.5 # Range: 0.1 - 1.0 +``` + +## Architecture + +- Built on Streamlit framework +- Uses LlamaIndex for document processing +- OpenAI API integration for natural language understanding +- Vector-based document indexing for efficient retrieval +- Stream-based response generation + +## Contributing + +Contributions are welcome! Please feel free to submit pull requests or create issues for bugs and feature requests. + +1. Fork the repository +2. Create your feature branch (`git checkout -b feature/AmazingFeature`) +3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) +4. Push to the branch (`git push origin feature/AmazingFeature`) +5. Open a Pull Request + +## Support + +For support and questions, please reach out to: +- Author: [Kaushal](https://twitter.com/holy_kau) +- GitHub Issues: [Project Issues](https://github.com/yourusername/talk-to-pdf/issues) + +## License + +This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. + +## Acknowledgments + +- OpenAI for providing the language models +- Streamlit team for the excellent web framework +- LlamaIndex for document processing capabilities + +--- + +Made with ❤️ by [Kaushal](https://twitter.com/holy_kau) + +Enjoy interacting with your PDFs using natural language! 
🚀📄 \ No newline at end of file diff --git a/README.md b/README.md index c73ae80..1758105 100644 --- a/README.md +++ b/README.md @@ -1,146 +1,86 @@ # Talk to PDF 🤖📑 +[![Streamlit App](https://static.streamlit.io/badges/streamlit_badge_black_white.svg)](https://talk-to-pdf.streamlit.app/) +[![Python 3.11](https://img.shields.io/badge/python-3.11-blue.svg)](https://www.python.org/downloads/) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +> An AI-powered application that enables natural language interaction with PDF documents through an intuitive chat interface. -[![Streamlit App](https://static.streamlit.io/badges/streamlit_badge_black_white.svg)](https://talk-to-pdf.streamlit.app/) +## Table of Contents +- [Overview](#overview) +- [Features](#features) +- [Prerequisites](#prerequisites) +- [Quick Start](#quick-start) +- [Architecture](#architecture) +- [Technical Implementation](#technical-implementation) +- [Benefits](#benefits) +- [Development](#development) +- [License](#license) +- [Contact](#contact) -## Project Summary +## Overview -Talk to PDF is an innovative web application that enables natural language interaction with PDF documents through an AI-powered interface. The project leverages cutting-edge technologies including OpenAI's language models and LlamaIndex for document processing to create a seamless question-answering system for PDF content. +Talk to PDF is a modern web application that transforms how users interact with PDF documents. By leveraging OpenAI's language models and LlamaIndex for document processing, it creates a seamless question-answering system that allows users to extract information through natural conversation rather than manual searching. -The application follows a multi-page architecture built on Streamlit, with three primary components: +### Core Components 1. API Configuration Interface 2. Document Upload and Processing System 3. Interactive Chat Interface -The system processes uploaded PDFs through a sophisticated pipeline that includes document indexing, vector storage creation, and context-aware response generation. This architecture enables users to extract information from PDFs through natural conversation rather than manual searching. - -### Technical Architecture - -The application is structured using a modular approach with clear separation of concerns: - -```python -talk_to_pdf/ -├── 0_🔌API_KEY.py # API configuration entry point -├── functions.py # Shared utility functions -└── pages/ - ├── 1_📑UPLOAD_PDF.py # Document processing - └── 2_💬CHAT_WITH_PDF.py # Chat interface -``` - -The system utilizes Streamlit's session state management for maintaining application state and LlamaIndex for document processing and retrieval operations. This architecture ensures efficient document handling and responsive user interactions. - -## Key Features - -### 1. Intelligent PDF Processing - -The application implements advanced PDF processing capabilities using LlamaIndex and OpenAI's embedding models. The document processing pipeline includes: - -```python -def load_document(uploaded_files): - temp_dir = tempfile.TemporaryDirectory() - for file in uploaded_files: - temp_filepath = os.path.join(temp_dir.name, file.name) - with open(temp_filepath, "wb") as f: - f.write(file.getvalue()) - reader = SimpleDirectoryReader(input_dir=temp_dir.name) - docs = reader.load_data() - return docs -``` - -This implementation ensures efficient document handling while maintaining document integrity and security. 
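+
+The three core components hand off state through Streamlit's session store. A
+minimal sketch of that flow (illustrative only; the real pages wrap each step
+in UI feedback and error handling, and the key and paths here are placeholders):
+
+```python
+import streamlit as st
+from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
+
+# 1. The API configuration page stores the user's key
+st.session_state['api_key'] = "sk-..."  # placeholder
+
+# 2. The upload page indexes the loaded documents and stores the engine
+docs = SimpleDirectoryReader(input_dir="documents").load_data()
+index = VectorStoreIndex.from_documents(docs)
+st.session_state['index'] = index
+st.session_state['query_engine'] = index.as_query_engine()
+
+# 3. The chat page reads the engine back and answers questions
+response = st.session_state['query_engine'].query("What is this document about?")
+```
+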
+## Features -### 2. Context-Aware Question Answering +- 🔍 **Intelligent PDF Processing** + - Advanced document indexing + - Vector-based content storage + - Efficient retrieval system -The system employs a sophisticated chat engine that maintains conversation context and generates accurate responses: +- 💬 **Context-Aware Chat** + - Natural language interaction + - Conversation history tracking + - Contextual response generation -```python -custom_prompt = PromptTemplate("""\ -Given a conversation (between Human and Assistant) and a follow up message from Human, \ -rewrite the message to be a standalone question that captures all relevant context \ -from the conversation. -""") +- ⚡ **Real-Time Processing** + - Streaming responses + - Progressive document loading + - Instant feedback -chat_engine = CondenseQuestionChatEngine.from_defaults( - query_engine=query_engine, - condense_question_prompt=custom_prompt, - chat_history=custom_chat_history -) -``` +- 🎛️ **Customization Options** + - Multiple language models (GPT-3.5, GPT-4) + - Adjustable response parameters + - Temperature control for outputs -This feature enables natural conversation flow while maintaining context accuracy throughout the interaction. +## Prerequisites -### 3. Real-Time Response Streaming +- Python 3.11 or higher +- OpenAI API key +- Internet connection +- Modern web browser -The application implements streaming responses for improved user experience: +## Quick Start -```python -def conversational_chat(query): - streaming_response = chat_engine.stream_chat(query) - response_tokens = [] - for token in streaming_response.response_gen: - response_tokens.append(token) - return ''.join(response_tokens) -``` - -This implementation provides immediate feedback while processing complex queries. - -### 4. Flexible Model Selection - -Users can customize their experience by selecting different language models and adjusting response parameters: - -```python -model_name = st.selectbox("Select the model you want to use", - ("gpt-3.5-turbo","gpt-4")) -temperature = st.slider("Set temperature", 0.1, 1.0, 0.5, 0.1) +1. **Install Dependencies** +```bash +pip install -r requirements.txt ``` -## Benefits - -### 1. Enhanced Document Analysis Efficiency +2. **Configure API Key** +- Launch the application +- Enter your OpenAI API key when prompted +- Verify successful configuration -The application significantly reduces the time required to extract information from PDF documents through: -- Instant access to document content through natural language queries -- Context-aware responses that understand document structure -- Efficient document indexing for quick retrieval of relevant information +3. **Upload Document** +- Select PDF file(s) for processing +- Wait for indexing completion +- Confirm successful upload -### 2. User-Friendly Interface +4. **Start Chatting** +- Enter questions about your document +- Receive AI-powered responses +- Engage in natural conversation -The application provides several usability advantages: -- Progressive disclosure of functionality through a step-by-step interface -- Clear visual feedback for all operations -- Intuitive chat-based interaction model -- Customizable response parameters for different use cases +## Architecture -### 3. 
Technical Advantages - -The implementation offers several technical benefits: -- Scalable architecture supporting multiple document formats -- Efficient memory management through temporary file handling -- Secure document processing with proper cleanup -- Modular design enabling easy feature additions and modifications - -### 4. Integration Capabilities - -The application's architecture facilitates easy integration with existing systems through: -- Clear API-based communication -- Standardized document processing pipeline -- Modular component structure -- Session-based state management - -The Talk to PDF project represents a significant advancement in document interaction technology, combining sophisticated AI capabilities with a user-friendly interface to create a powerful tool for document analysis and information extraction. - -# Talk to PDF - Architectural Overview - -## System Architecture - -The Talk to PDF application implements a modern, modular architecture built on the Streamlit framework, leveraging OpenAI's language models and LlamaIndex for document processing. The system follows a three-tier architecture pattern with clear separation of concerns: - -1. **Presentation Layer**: Streamlit-based user interface -2. **Processing Layer**: Document indexing and query processing -3. **Integration Layer**: OpenAI API integration and vector storage - -### Architecture Diagram +### System Components ```mermaid graph TD @@ -153,121 +93,16 @@ graph TD G --> E ``` -## Core Components - -### 1. API Key Configuration Module - -The API key configuration module (`0_🔌API_KEY.py`) serves as the entry point for the application, implementing a secure way to handle OpenAI API credentials: - -```python -def handle_api_key(): - api_key = st.text_input("Enter your OpenAI API key", type="password") - st.session_state['api_key'] = api_key - if not api_key: - st.sidebar.warning("⚠️ Please enter OpenAI API key") - else: - openai.api_key = api_key -``` - -This component features: -- Secure password-masked input -- Session state persistence -- Automatic validation -- Seamless navigation flow - -### 2. Document Processing Engine - -The document processing engine (`pages/1_📑UPLOAD_PDF.py`) handles PDF upload and indexing operations. It utilizes LlamaIndex for efficient document processing: - -```python -def query_engine(docs, model_name, temperature): - llm = OpenAI(model=model_name, temperature=temperature) - with st.spinner("Indexing document..."): - index = VectorStoreIndex.from_documents(docs, llm=llm) - with st.spinner("Creating query engine..."): - query_engine = index.as_query_engine() - return query_engine -``` - -Key features include: -- Multiple PDF file support -- Automatic document indexing -- Vector store creation -- Configurable model parameters - -### 3. Chat Interface System - -The chat interface (`pages/2_💬CHAT_WITH_PDF.py`) implements an interactive conversation system with context-aware responses: - -```python -custom_prompt = PromptTemplate("""\ -Given a conversation (between Human and Assistant) and a follow up message from Human, \ -rewrite the message to be a standalone question that captures all relevant context \ -from the conversation. - - -{chat_history} - - -{question} - - -""") -``` - -Notable features: -- Streaming responses -- Chat history management -- Context-aware question processing -- Custom prompt templates - -## Data Flow Architecture - -### 1. Input Processing Flow - -The application implements a sequential data flow pattern: - -1. 
**API Key Validation** - - User inputs API key - - System validates and stores in session state - - Enables access to document processing - -2. **Document Processing Pipeline** - - PDF upload triggers document reader - - Content extraction and preprocessing - - Vector index generation - - Storage in session state +### Technology Stack -3. **Query Processing Chain** - - User input captured - - Context integration - - Query reformation - - Response generation and streaming +- **Frontend**: Streamlit 1.38.0 +- **AI/ML**: OpenAI API 1.43.0 +- **Document Processing**: LlamaIndex 0.11.3 +- **PDF Handling**: PyPDF 4.3.1 -### 2. State Management +## Technical Implementation -The application utilizes Streamlit's session state for persistent data management: - -```python -if 'history' not in st.session_state: - st.session_state['history'] = [] -if 'generated' not in st.session_state: - st.session_state['generated'] = ["Hello! Ask me anything about the uploaded document 🤗"] -if 'past' not in st.session_state: - st.session_state['past'] = ["Hey! 👋"] -``` - -Key state components: -- API key storage -- Document index persistence -- Chat history management -- Query engine state - -## Technical Implementation Details - -### 1. Vector Store Implementation - -The system uses LlamaIndex's VectorStoreIndex for efficient document querying: +### Document Processing Pipeline ```python def load_document(uploaded_files): @@ -277,13 +112,10 @@ def load_document(uploaded_files): with open(temp_filepath, "wb") as f: f.write(file.getvalue()) reader = SimpleDirectoryReader(input_dir=temp_dir.name) - docs = reader.load_data() - return docs + return reader.load_data() ``` -### 2. Chat Engine Configuration - -The chat engine implements a custom configuration for context-aware responses: +### Chat Engine Configuration ```python chat_engine = CondenseQuestionChatEngine.from_defaults( @@ -293,298 +125,61 @@ chat_engine = CondenseQuestionChatEngine.from_defaults( ) ``` -## Architectural Decisions and Rationale - -### 1. Technology Choices - -- **Streamlit**: Selected for rapid development and interactive UI capabilities -- **LlamaIndex**: Chosen for efficient document processing and vector storage -- **OpenAI Integration**: Provides powerful language understanding capabilities - -### 2. Design Patterns - -The application implements several key design patterns: - -1. **Modular Architecture** - - Separate pages for distinct functionality - - Centralized utility functions - - Clear component boundaries - -2. **State Management Pattern** - - Session-based state persistence - - Centralized state management - - Clear state initialization - -3. **Stream Processing Pattern** - - Real-time response streaming - - Asynchronous document processing - - Progressive UI updates - -This architecture ensures scalability, maintainability, and a smooth user experience while maintaining robust security and performance characteristics. - -# Component Breakdown: Talk to PDF System - -## API Configuration Module - -The API Configuration module serves as the initial entry point for the Talk to PDF application, handling OpenAI API key validation and storage. This component is crucial for enabling the AI-powered functionality throughout the application. - -### Primary Functions - -1. 
**API Key Input and Validation** - - Provides a secure input interface for users to enter their OpenAI API key - - Validates the key format and stores it in the session state - - Manages the transition to the PDF upload page upon successful configuration - -### Implementation Details - -The module is implemented in `0_🔌API_KEY.py` and utilizes Streamlit's session state management for persistent storage. Key features include: - -```python -st.set_page_config(page_title="Talk to PDF", page_icon=":robot_face:", layout="wide") -st.title("Talk to your PDF 🤖 📑️") - -api_key = st.text_input("Enter your OpenAI API key", type="password") -st.session_state['api_key'] = api_key - -if not api_key: - st.sidebar.warning("⚠️ Please enter OpenAI API key") -else: - openai.api_key = api_key -``` - -The module implements secure key storage using password-masked input and provides immediate feedback through the sidebar. Upon successful key submission, it triggers a page transition: - -```python -submit = st.button("Submit", use_container_width=True) -if submit: - st.sidebar.success("✅ API key entered successfully") - time.sleep(1.5) - switch_page('upload pdf') -``` - -## Document Processing Module - -The Document Processing module handles PDF file uploads, document indexing, and vector store creation. This component transforms raw PDF documents into queryable knowledge bases. - -### Primary Functions - -1. **Document Upload Handling** - - Manages file uploads through Streamlit's file uploader - - Validates PDF file format - - Creates temporary storage for document processing - -2. **Document Indexing** - - Processes PDF content using LlamaIndex - - Creates vector embeddings for efficient querying - - Establishes the query engine for chat functionality - -### Implementation Details - -Located in `pages/1_📑UPLOAD_PDF.py`, the module implements sophisticated document processing: - -```python -def load_document(uploaded_files): - temp_dir = tempfile.TemporaryDirectory() - for file in uploaded_files: - temp_filepath = os.path.join(temp_dir.name, file.name) - with open(temp_filepath, "wb") as f: - f.write(file.getvalue()) - - reader = SimpleDirectoryReader(input_dir=temp_dir.name) - docs = reader.load_data() - return docs -``` - -The indexing process utilizes OpenAI's language models for creating searchable document representations: - -```python -def query_engine(docs, model_name, temperature): - llm = OpenAI(model=model_name, temperature=temperature) - Settings.llm = llm - with st.spinner("Indexing document..."): - index = VectorStoreIndex.from_documents(docs, llm=llm) - with st.spinner("Creating query engine..."): - query_engine = index.as_query_engine() - - st.session_state['index'] = index - st.session_state['query_engine'] = query_engine - return query_engine -``` - -## Chat Interface Module - -The Chat Interface module provides an interactive environment for users to query their PDF documents using natural language. This component handles the conversation flow and response generation. - -### Primary Functions - -1. **Chat Management** - - Maintains conversation history - - Handles user input processing - - Manages response streaming and display - -2. 
**Context-Aware Question Answering** - - Reformulates questions to maintain context - - Generates relevant responses using the query engine - - Streams responses for better user experience - -### Implementation Details - -Implemented in `pages/2_💬CHAT_WITH_PDF.py`, the module uses a custom prompt template for context-aware responses: - -```python -custom_prompt = PromptTemplate("""\ -Given a conversation (between Human and Assistant) and a follow up message from Human, \ -rewrite the message to be a standalone question that captures all relevant context \ -from the conversation. - - -{chat_history} - - -{question} - - -""") -``` - -The chat engine implementation includes streaming capabilities for real-time response generation: +### Response Streaming ```python def conversational_chat(query): streaming_response = chat_engine.stream_chat(query) - response_tokens = [] - for token in streaming_response.response_gen: - response_tokens.append(token) - return ''.join(response_tokens) -``` - -### Component Interactions - -The three modules work together in a sequential flow: - -1. The API Configuration module initializes the OpenAI client and enables AI functionality -2. The Document Processing module uses the configured API to create document indices -3. The Chat Interface module leverages both the API configuration and document indices to provide interactive question-answering capabilities - -This architecture ensures a smooth user experience while maintaining separation of concerns and modularity in the codebase. - -## Error Handling and State Management - -Each component implements robust error handling and state management: - -- API Configuration validates keys and provides clear feedback -- Document Processing includes upload validation and processing status indicators -- Chat Interface maintains conversation state and handles streaming errors gracefully - -The application uses Streamlit's session state for persistent storage across components, ensuring a seamless user experience throughout the interaction flow. - -# Technology Stack Documentation - Talk to PDF - -## Core Technologies - -### Python -- **Version**: 3.11 (specified in devcontainer.json) -- **Role**: Primary programming language for the application -- **Justification**: Python was chosen for its extensive machine learning and NLP libraries, excellent web framework support through Streamlit, and seamless integration with OpenAI's APIs. The language's readability and extensive package ecosystem make it ideal for rapid development of AI-powered applications. - -### Streamlit -- **Version**: 1.38.0 -- **Role**: Web application framework and user interface -- **Justification**: Streamlit provides a rapid development environment for data-focused applications with minimal frontend code. Its built-in components and session state management make it perfect for creating interactive AI applications. - -Example usage from `0_🔌API_KEY.py`: -```python -st.set_page_config(page_title="Talk to PDF", page_icon=":robot_face:", layout="wide") -st.title("Talk to your PDF 🤖 📑️") -api_key = st.text_input("Enter your OpenAI API key", type="password") -``` - -### OpenAI Integration -- **Version**: 1.43.0 -- **Role**: Natural language processing and question answering -- **Justification**: OpenAI's GPT models provide state-of-the-art natural language understanding and generation capabilities, essential for accurate PDF content analysis and question answering. 
- -Implementation example from `functions.py`: -```python -def query_engine(docs, model_name, temperature): - llm = OpenAI(model=model_name, temperature=temperature) - Settings.llm = llm - index = VectorStoreIndex.from_documents(docs, llm=llm) - query_engine = index.as_query_engine() - return query_engine + return ''.join(token for token in streaming_response.response_gen) ``` -## Document Processing Stack - -### LlamaIndex -- **Version**: 0.11.3 -- **Role**: Document indexing and retrieval system -- **Justification**: LlamaIndex provides sophisticated document processing capabilities with built-in support for various document types and vector storage systems. It seamlessly integrates with OpenAI's embeddings for efficient document querying. - -Key components: -- `VectorStoreIndex`: Document indexing and retrieval -- `SimpleDirectoryReader`: PDF file processing -- `CondenseQuestionChatEngine`: Context-aware question answering - -Example implementation: -```python -from llama_index.core import VectorStoreIndex, SimpleDirectoryReader -reader = SimpleDirectoryReader(input_dir=temp_dir.name) -docs = reader.load_data() -index = VectorStoreIndex.from_documents(docs, llm=llm) -``` - -### PyPDF -- **Version**: 4.3.1 -- **Role**: PDF file processing and text extraction -- **Justification**: Provides robust PDF parsing capabilities with support for various PDF formats and structures. - -## UI Components and Extensions - -### Streamlit Extensions -- **streamlit-chat**: Version 0.1.1 - Provides chat interface components -- **streamlit-extras**: Version 0.4.7 - Additional UI utilities -- **streamlit-faker**: Version 0.0.3 - Test data generation -- **markdownlit**: Version 0.0.7 - Enhanced markdown rendering +## Benefits -These extensions enhance the base Streamlit functionality with specialized components for chat interfaces and improved user experience. +### Enhanced Efficiency +- Instant document content access +- Natural language querying +- Context-aware responses -## Dependency Management +### User Experience +- Intuitive chat interface +- Real-time feedback +- Customizable responses -### Requirements Management -The project uses `pip` and `requirements.txt` for dependency management. The requirements file is compiled using `pip-compile`, ensuring reproducible builds across environments. +### Technical Advantages +- Scalable architecture +- Efficient memory management +- Secure document processing +- Modular design -Key dependencies are organized into categories: -- Core dependencies (Python packages) -- UI components (Streamlit and extensions) -- AI/ML libraries (OpenAI, LlamaIndex) -- Utility packages (typing, packaging, etc.) +## Development -Installation process: -```bash -pip install -r requirements.txt +### Project Structure ``` - -### Version Control -Dependencies are strictly versioned to ensure consistency: -```txt -streamlit==1.38.0 -openai==1.43.0 -llama-index==0.11.3 -pypdf==4.3.1 +talk_to_pdf/ +├── 0_🔌API_KEY.py # API configuration +├── functions.py # Utility functions +└── pages/ + ├── 1_📑UPLOAD_PDF.py # Document processing + └── 2_💬CHAT_WITH_PDF.py # Chat interface ``` +### Contributing +1. Fork the repository +2. Create your feature branch (`git checkout -b feature/AmazingFeature`) +3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) +4. Push to the branch (`git push origin feature/AmazingFeature`) +5. Open a Pull Request -## 📝 License +## License -This project is [MIT](https://choosealicense.com/licenses/mit/) licensed. 
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. -## 📬 Contact +## Contact -Author - [Kaushal](https://www.linkedin.com/in/kaushal-powar-a52b1a159/) +Author: [Kaushal](https://twitter.com/holy_kau) Project Link: [https://github.com/yourusername/talk-to-pdf](https://github.com/yourusername/talk-to-pdf) --- -Enjoy interacting with your PDFs using natural language! 🚀📄 +Made with ❤️ by Kaushal | [Support my work](https://www.buymeacoffee.com/kaushal.ai) \ No newline at end of file diff --git a/functions.py b/functions.py index 771efe9..0e4383f 100644 --- a/functions.py +++ b/functions.py @@ -1,202 +1,227 @@ +""" +Utility functions for the Talk to PDF application. +Handles UI components, document processing, and query engine setup. +""" + +from dataclasses import dataclass +from pathlib import Path +from typing import List, Optional +import logging +import tempfile +import os + import streamlit as st from streamlit.components.v1 import html from streamlit_extras.switch_page_button import switch_page import openai -import os - -from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings, set_global_service_context +from llama_index.core import ( + VectorStoreIndex, + SimpleDirectoryReader, + Settings, + Document +) from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core.text_splitter import TokenTextSplitter from llama_index.core.indices.prompt_helper import PromptHelper -import tempfile - -def sidebar_stuff1(): - html_temp = """ -
- -
- """ - - - button = """ - """ +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Constants +DOCUMENTS_FOLDER = "documents" +BUTTON_SCRIPT = """ + +""" + +IFRAME_STYLE = """ + +""" + +@dataclass +class SidebarContent: + """Configuration for sidebar content.""" + title: str + description: str + instructions: List[str] + +def create_sidebar_base(content: SidebarContent) -> None: + """ + Creates a sidebar with consistent styling and content structure. + + Args: + content: SidebarContent object containing sidebar configuration + """ with st.sidebar: - st.markdown(""" - # ● About - "Talk to PDF" is an app that allows users to ask questions about the content of a PDF file using Natural Language Processing. + if content.title: + st.markdown(f"# ● {content.title}") - The app uses a question-answering system powered by OpenAI's GPT 🔥 to provide accurate and relevant answers to the your queries. """) - - st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True) - st.markdown(""" - # ● Get started - ・Paste your OpenAI API key. (click on the link to get your API key) - - """) - st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True) - - - st.markdown(""" - Made by [Kaushal](https://twitter.com/holy_kau) - """) - html(button, height=70, width=220) - st.markdown( - """ - - """, - unsafe_allow_html=True, - ) - - -def sidebar_stuff2(): - html_temp = """ -
- -
- """ - - - button = """ - """ - with st.sidebar: - - st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True) - st.markdown(""" - - ・Choose your model (gpt-3.5-turbo or gpt-4) + if content.description: + st.markdown(content.description) + + st.markdown("""
""", + unsafe_allow_html=True) - ・Adjust the temperature according to your needs - + for instruction in content.instructions: + st.markdown(instruction) + + st.markdown("""Made by [Kaushal](https://twitter.com/holy_kau)""") + html(BUTTON_SCRIPT, height=70, width=220) + st.markdown(IFRAME_STYLE, unsafe_allow_html=True) + +def sidebar_stuff1() -> None: + """Renders the first sidebar variant with API key instructions.""" + content = SidebarContent( + title="About", + description=""""Talk to PDF" is an app that allows users to ask questions about the content + of a PDF file using Natural Language Processing.\n\nThe app uses a question-answering system + powered by OpenAI's GPT 🔥 to provide accurate and relevant answers to your queries.""", + instructions=["# ● Get started", "・Paste your OpenAI API key. (click on the link to get your API key)"] + ) + create_sidebar_base(content) + +def sidebar_stuff2() -> None: + """Renders the second sidebar variant with model selection instructions.""" + content = SidebarContent( + title="", + description="", + instructions=[ + "・Choose your model (gpt-3.5-turbo or gpt-4)", + "・Adjust the temperature according to your needs\n\n(It controls the randomness of the model's output. " + "A higher temperature (e.g., 1.0) makes the output more diverse and random, while a lower temperature " + "(e.g., 0.5) makes the output more focused and deterministic.)", + "・Upload a PDF file and ask questions about its content" + ] + ) + create_sidebar_base(content) + +def sidebar_stuff3() -> None: + """Renders the third sidebar variant with usage instructions.""" + content = SidebarContent( + title="", + description="", + instructions=[ + "・Ask questions about your documents content", + "・Get instant answers to your questions" + ] + ) + create_sidebar_base(content) + +def save_file(doc: st.uploaded_file_widget) -> str: + """ + Saves an uploaded file to the documents folder. + + Args: + doc: Streamlit uploaded file object + + Returns: + str: Name of the saved file + + Raises: + OSError: If file operations fail + """ + try: + file_name = os.path.basename(doc.name) + documents_path = Path(DOCUMENTS_FOLDER) - (It controls the randomness of the model's output. A higher temperature (e.g., 1.0) makes the output more diverse and random, while a lower temperature (e.g., 0.5) makes the output more focused and deterministic.) - - ・Upload a PDF file and ask questions about its content - - """) - st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True) - - - st.markdown(""" - Made by [Kaushal](https://twitter.com/holy_kau) - """) - html(button, height=70, width=220) - st.markdown( - """ - - """, - unsafe_allow_html=True, - ) - - -def sidebar_stuff3(): - html_temp = """ -
- -
- """ - - - button = """ - """ - with st.sidebar: - - st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True) - st.markdown(""" - - ・Ask questions about your documents content + # Create documents directory if it doesn't exist + documents_path.mkdir(exist_ok=True) - ・Get instant answers to your questions - - """) - st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True) - - - st.markdown(""" - Made by [Kaushal](https://twitter.com/holy_kau) - """) - html(button, height=70, width=220) - st.markdown( - """ - - """, - unsafe_allow_html=True, - ) - - -def save_file(doc): - fn = os.path.basename(doc.name) - # check if documents_folder exists in the directory - if not os.path.exists(documents_folder): - # if documents_folder does not exist then making the directory - os.makedirs(documents_folder) - # open read and write the file into the server - open(documents_folder + '/' + fn, 'wb').write(doc.read()) - # Check for the current filename, If new filename - # clear the previous cached vectors and update the filename - # with current name - if st.session_state.get('file_name'): - if st.session_state.file_name != fn: + # Save file + with open(documents_path / file_name, 'wb') as f: + f.write(doc.read()) + + # Update session state + if st.session_state.get('file_name') != file_name: st.cache_resource.clear() - st.session_state['file_name'] = fn - else: - st.session_state['file_name'] = fn - - return fn - - -def remove_file(file_path): - # Remove the file from the Document folder once - # vectors are created - if os.path.isfile(documents_folder + '/' + file_path): - os.remove(documents_folder + '/' + file_path) - - - -def query_engine(docs, model_name, temperature): - llm = OpenAI(model=model_name, temperature=temperature) - #file_name = st.session_state["tmp_file"] - #service_context = ServiceContext.from_defaults(llm=llm) - Settings.llm = llm - with st.spinner("Indexing document..."): - index = VectorStoreIndex.from_documents(docs, llm=llm) - print("index created : ", index) - with st.spinner("Creating query engine..."): - query_engine = index.as_query_engine() - print("query engine created ") - - st.session_state['index'] = index - st.session_state['query_engine'] = query_engine - switch_page('chat with pdf') - return query_engine - -def load_document(uploaded_files): - temp_dir = tempfile.TemporaryDirectory() - for file in uploaded_files: - temp_filepath = os.path.join(temp_dir.name, file.name) - with open(temp_filepath, "wb") as f: - f.write(file.getvalue()) + st.session_state['file_name'] = file_name + + return file_name + + except Exception as e: + logger.error(f"Error saving file: {e}") + raise - reader = SimpleDirectoryReader(input_dir=temp_dir.name) - docs = reader.load_data() - print(docs) - return docs +def remove_file(file_path: str) -> None: + """ + Removes a file from the documents folder. + + Args: + file_path: Path to the file to remove + """ + try: + full_path = Path(DOCUMENTS_FOLDER) / file_path + if full_path.is_file(): + full_path.unlink() + except Exception as e: + logger.error(f"Error removing file: {e}") + +def load_document(uploaded_files: List[st.uploaded_file_widget]) -> List[Document]: + """ + Loads documents from uploaded files into memory. 
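+
+    Files are staged in a temporary directory so SimpleDirectoryReader can
+    ingest them; the directory is removed automatically when the enclosing
+    context manager exits.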
+ + Args: + uploaded_files: List of Streamlit uploaded file objects + + Returns: + List[Document]: Loaded documents + """ + with tempfile.TemporaryDirectory() as temp_dir: + # Save uploaded files to temporary directory + for file in uploaded_files: + temp_path = Path(temp_dir) / file.name + with open(temp_path, "wb") as f: + f.write(file.getvalue()) + + # Load documents using SimpleDirectoryReader + reader = SimpleDirectoryReader(input_dir=temp_dir) + docs = reader.load_data() + logger.info(f"Loaded {len(docs)} documents") + return docs + +def query_engine(docs: List[Document], model_name: str, temperature: float): + """ + Creates a query engine for document processing. + + Args: + docs: List of documents to process + model_name: Name of the OpenAI model to use + temperature: Temperature parameter for text generation + + Returns: + QueryEngine: Configured query engine + """ + try: + llm = OpenAI(model=model_name, temperature=temperature) + Settings.llm = llm + + with st.spinner("Indexing document..."): + index = VectorStoreIndex.from_documents(docs, llm=llm) + logger.info("Document indexing completed") + + with st.spinner("Creating query engine..."): + query_engine = index.as_query_engine() + logger.info("Query engine created") + + # Store in session state + st.session_state['index'] = index + st.session_state['query_engine'] = query_engine + + # Navigate to chat page + switch_page('chat with pdf') + return query_engine + + except Exception as e: + logger.error(f"Error creating query engine: {e}") + st.error("Failed to create query engine. Please try again.") + raise \ No newline at end of file diff --git "a/pages/1_\360\237\223\221UPLOAD_PDF.py" "b/pages/1_\360\237\223\221UPLOAD_PDF.py" index b7fe680..40f862f 100644 --- "a/pages/1_\360\237\223\221UPLOAD_PDF.py" +++ "b/pages/1_\360\237\223\221UPLOAD_PDF.py" @@ -1,20 +1,104 @@ +""" +PDF Upload and Processing Page + +This module handles the PDF document upload and processing functionality using +Streamlit interface. It allows users to select AI models, configure parameters, +and process PDF documents for analysis. +""" + import os -from functions import sidebar_stuff2, query_engine, save_file, remove_file, load_document +from typing import List, Optional import tempfile import streamlit as st from streamlit_extras.switch_page_button import switch_page -sidebar_stuff2() +from functions import ( + sidebar_stuff2, + query_engine, + save_file, + remove_file, + load_document +) + +# Constants +SUPPORTED_MODELS = ["gpt-3.5-turbo", "gpt-4"] +DEFAULT_TEMPERATURE = 0.5 +TEMPERATURE_MIN = 0.1 +TEMPERATURE_MAX = 1.0 +TEMPERATURE_STEP = 0.1 + +def initialize_page() -> None: + """Initialize the page layout and sidebar.""" + sidebar_stuff2() + +def configure_model_settings(): + """Configure AI model settings and parameters.""" + model_name = st.selectbox( + "Select the model you want to use", + options=SUPPORTED_MODELS, + help="Choose the AI model for processing your documents" + ) + + temperature = st.slider( + "Set temperature", + min_value=TEMPERATURE_MIN, + max_value=TEMPERATURE_MAX, + value=DEFAULT_TEMPERATURE, + step=TEMPERATURE_STEP, + help="Controls randomness in model responses. Higher values increase creativity." 
+ ) + + return model_name, temperature + +def handle_file_upload() -> Optional[List]: + """Handle PDF file upload functionality.""" + return st.file_uploader( + "Unleash the power of AI to have a conversation with your PDFs " + "and uncover new insights, all with a single upload⬇️", + type=['pdf'], + accept_multiple_files=True, + help="Upload one or more PDF files to analyze" + ) +def process_uploaded_files( + pdf_files: List, + model_name: str, + temperature: float +) -> None: + """ + Process uploaded PDF files and initialize query engine. + + Args: + pdf_files: List of uploaded PDF files + model_name: Name of the selected AI model + temperature: Temperature parameter for model responses + """ + try: + reader = load_document(uploaded_files=pdf_files) + engine = query_engine(reader, model_name, temperature) + + # Show success message + st.success("Documents processed successfully! You can now proceed to chat.") + + except Exception as e: + st.error(f"Error processing documents: {str(e)}") + st.info("Please try uploading your documents again.") -model_name = st.selectbox("Select the model you want to use",("gpt-3.5-turbo","gpt-4")) -temperature = st.slider("Set temperature", 0.1, 1.0, 0.5,0.1) -pdf_file = st.file_uploader( - "Unleash the power of AI to have a conversation with your PDFs and uncover new insights, all with a single upload⬇️ ",type=['pdf'], accept_multiple_files=True) +def main(): + """Main function to run the PDF upload page.""" + initialize_page() + + # Configure model settings + model_name, temperature = configure_model_settings() + + # Handle file upload + pdf_files = handle_file_upload() + + if pdf_files: + process_uploaded_files(pdf_files, model_name, temperature) + else: + st.info("Please upload a PDF file to begin") -if pdf_file : - reader = load_document(uploaded_files=pdf_file) - query_engine = query_engine(reader, model_name, temperature) -else: - st.error("Please upload a PDF file") +if __name__ == "__main__": + main() \ No newline at end of file diff --git "a/pages/2_\360\237\222\254CHAT_WITH_PDF.py" "b/pages/2_\360\237\222\254CHAT_WITH_PDF.py" index ca556f9..4e554d7 100644 --- "a/pages/2_\360\237\222\254CHAT_WITH_PDF.py" +++ "b/pages/2_\360\237\222\254CHAT_WITH_PDF.py" @@ -1,88 +1,158 @@ +""" +Chat Interface Module for Talk to PDF application. +Implements an interactive chat interface using Streamlit and LlamaIndex for document Q&A. +""" + +import logging +from typing import List, Optional + import streamlit as st from streamlit_chat import message -from functions import sidebar_stuff3 -from llama_index.core.prompts import PromptTemplate +from llama_index.core.prompts import PromptTemplate from llama_index.core.chat_engine.condense_question import CondenseQuestionChatEngine from llama_index.core.llms import ChatMessage, MessageRole -sidebar_stuff3() - -query_engine=st.session_state['query_engine'] -index = st.session_state['index'] - -#from llama_index.memory import ChatMemoryBuffer - -#memory = ChatMemoryBuffer.from_defaults(token_limit=1500) -custom_prompt = PromptTemplate("""\ -Given a conversation (between Human and Assistant) and a follow up message from Human, \ -rewrite the message to be a standalone question that captures all relevant context \ -from the conversation. - - -{chat_history} - - -{question} - - -""") - -custom_chat_history = [ - ChatMessage( - role=MessageRole.USER, - content='Hello assistant, given is a document. Please answer the question by understanding the context and information of the document. 
Use your own knowledge and understanding to answer the question.' - ), - ChatMessage( - role=MessageRole.ASSISTANT, - content='Okay, sounds good.' - ) -] - -query_engine = st.session_state['query_engine'] -chat_engine = CondenseQuestionChatEngine.from_defaults( - query_engine=query_engine, - condense_question_prompt=custom_prompt, - chat_history=custom_chat_history -) - -response = chat_engine.chat("Hello!") -def conversational_chat(query): - streaming_response = chat_engine.stream_chat(query) - response_tokens = [] - for token in streaming_response.response_gen: - response_tokens.append(token) - return ''.join(response_tokens) - -# Initialize session state variables -if 'history' not in st.session_state: - st.session_state['history'] = [] - -if 'generated' not in st.session_state: - st.session_state['generated'] = ["Hello! Ask me anything about the uploaded document 🤗"] - -if 'past' not in st.session_state: - st.session_state['past'] = ["Hey! 👋"] - -# Containers for chat history and user input -response_container = st.container() -container = st.container() - -# User input form -with container: - with st.form(key='my_form', clear_on_submit=True): - user_input = st.text_input("Query:", placeholder="What is this document about?", key='input') - submit_button = st.form_submit_button(label='Send') +from functions import sidebar_stuff3 + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Constants +DEFAULT_GREETING = "Hello! Ask me anything about the uploaded document 🤗" +DEFAULT_USER_GREETING = "Hey! 👋" +QUERY_PLACEHOLDER = "What is this document about?" + +class ChatInterface: + """Manages the chat interface and conversation logic.""" + + def __init__(self): + """Initialize chat interface with query engine and index from session state.""" + self.query_engine = st.session_state['query_engine'] + self.index = st.session_state['index'] + self.chat_engine = self._initialize_chat_engine() + self._initialize_session_state() + + @staticmethod + def _get_custom_prompt() -> PromptTemplate: + """Returns the custom prompt template for question reformulation.""" + return PromptTemplate(""" + Given a conversation (between Human and Assistant) and a follow up message from Human, \ + rewrite the message to be a standalone question that captures all relevant context \ + from the conversation. + + + {chat_history} + + + {question} + + + """) + + @staticmethod + def _get_initial_chat_history() -> List[ChatMessage]: + """Returns the initial chat history messages.""" + return [ + ChatMessage( + role=MessageRole.USER, + content='Hello assistant, given is a document. Please answer the question by understanding the context and information of the document. Use your own knowledge and understanding to answer the question.' + ), + ChatMessage( + role=MessageRole.ASSISTANT, + content='Okay, sounds good.' + ) + ] + + def _initialize_chat_engine(self) -> CondenseQuestionChatEngine: + """Initialize and configure the chat engine.""" + try: + chat_engine = CondenseQuestionChatEngine.from_defaults( + query_engine=self.query_engine, + condense_question_prompt=self._get_custom_prompt(), + chat_history=self._get_initial_chat_history() + ) + # Test the chat engine + chat_engine.chat("Hello!") + return chat_engine + except Exception as e: + logger.error(f"Failed to initialize chat engine: {str(e)}") + st.error("Failed to initialize chat system. 
Please try refreshing the page.") + raise + + @staticmethod + def _initialize_session_state(): + """Initialize session state variables for chat history.""" + if 'history' not in st.session_state: + st.session_state['history'] = [] + if 'generated' not in st.session_state: + st.session_state['generated'] = [DEFAULT_GREETING] + if 'past' not in st.session_state: + st.session_state['past'] = [DEFAULT_USER_GREETING] + + def conversational_chat(self, query: str) -> str: + """ + Process user query and generate response using streaming. - # Handle user input and generate response - if submit_button and user_input: - output = conversational_chat(user_input) - st.session_state['past'].append(user_input) - st.session_state['generated'].append(output) - -# Display chat history -if st.session_state['generated']: - with response_container: - for i in range(len(st.session_state['generated'])): - message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="big-ears",seed="missy") - message(st.session_state["generated"][i], key=str(i)) -#st.markdown(response) + Args: + query: User input query string + + Returns: + str: Generated response from the chat engine + """ + try: + streaming_response = self.chat_engine.stream_chat(query) + response_tokens = [] + for token in streaming_response.response_gen: + response_tokens.append(token) + return ''.join(response_tokens) + except Exception as e: + logger.error(f"Error in chat response generation: {str(e)}") + return "I apologize, but I encountered an error processing your request. Please try again." + + def render_chat_interface(self): + """Render the chat interface components and handle user interaction.""" + # Create containers for chat layout + response_container = st.container() + input_container = st.container() + + # User input form + with input_container: + with st.form(key='chat_form', clear_on_submit=True): + user_input = st.text_input( + "Query:", + placeholder=QUERY_PLACEHOLDER, + key='input' + ) + submit_button = st.form_submit_button(label='Send') + + if submit_button and user_input: + output = self.conversational_chat(user_input) + st.session_state['past'].append(user_input) + st.session_state['generated'].append(output) + + # Display chat history + if st.session_state['generated']: + with response_container: + for i in range(len(st.session_state['generated'])): + message( + st.session_state["past"][i], + is_user=True, + key=f"{i}_user", + avatar_style="big-ears", + seed="missy" + ) + message(st.session_state["generated"][i], key=str(i)) + +def main(): + """Main entry point for the chat interface.""" + try: + sidebar_stuff3() + chat_interface = ChatInterface() + chat_interface.render_chat_interface() + except Exception as e: + logger.error(f"Application error: {str(e)}") + st.error("An error occurred while loading the chat interface. Please refresh the page and try again.") + +if __name__ == "__main__": + main() \ No newline at end of file