From ce0657020b32065176e78fbe3f02cb9c34f152b7 Mon Sep 17 00:00:00 2001 From: XXXJumpingFrogXXX Date: Wed, 9 Oct 2024 15:03:11 +0800 Subject: [PATCH 1/8] FastAPI Framework setup --- chat/router.py | 110 +++++++++++++++++++++++++++++++++++++++++++++++ main.py | 49 ++++++++++----------- original_main.py | 27 ++++++++++++ 3 files changed, 159 insertions(+), 27 deletions(-) create mode 100644 chat/router.py create mode 100644 original_main.py diff --git a/chat/router.py b/chat/router.py new file mode 100644 index 0000000..e2f001e --- /dev/null +++ b/chat/router.py @@ -0,0 +1,110 @@ +from fastapi import APIRouter +from pydantic import BaseModel + +from unsloth import FastLanguageModel +import torch + +max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally! +dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ +load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False. + +alpaca_prompt = """Below is an instruction that describes a task, along with an input that provides additional context. Write a response that appropriately completes the request. + +### Instruction: +{} + +### Input: +{} + +### Response: +{}""" + +class Question(BaseModel): + query: str + +@router.post("/generate_answer") +def generate_answer(value: Question): + try: + llama_model, llama_tokenizer = FastLanguageModel.from_pretrained( + model_name = "Antonio27/llama3-8b-4-bit-for-sugar", + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + ) + + gemma_model, gemma_tokenizer = FastLanguageModel.from_pretrained( + model_name = "unsloth/gemma-2-9b-it-bnb-4bit", + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + ) + + FastLanguageModel.for_inference(llama_model) + llama_tokenizer.pad_token = llama_tokenizer.eos_token + llama_tokenizer.add_eos_token = True + + inputs = llama_tokenizer( + [ + alpaca_prompt.format( + f''' + Your task is to answer children's questions using simple language. + Explain any difficult words in a way a 3-year-old can understand. + Keep responses under 60 words. + \n\nQuestion: {value.query} + ''', # instruction + "", # input + "", # output - leave this blank for generation! + ) + ], return_tensors="pt").to("cuda") + + outputs = llama_model.generate(**inputs, max_new_tokens=256, temperature=0.6) + decoded_outputs = llama_tokenizer.batch_decode(outputs) + + response_text = decoded_outputs[0] + + match = re.search(r"### Response:(.*?)(?=\n###|$)", response_text, re.DOTALL) + if match: + initial_response = match.group(1).strip() + else: + initial_response = "" + + FastLanguageModel.for_inference(gemma_model) + gemma_tokenizer.pad_token = gemma_tokenizer.eos_token + gemma_tokenizer.add_eos_token = True + + inputs = gemma_tokenizer( + [ + alpaca_prompt.format( + f''' + Modify the given content for a 5-year-old. + Use simple words and phrases. + Remove any repetitive information. + Keep responses under 50 words. + \n\nGiven Content: {initial_response} + ''', # instruction + "", # input + "", # output - leave this blank for generation! 
+ ) + ], return_tensors="pt").to("cuda") + + outputs = gemma_model.generate(**inputs, max_new_tokens=256, temperature=0.6) + decoded_outputs = gemma_tokenizer.batch_decode(outputs) + + response_text = decoded_outputs[0] + + match = re.search(r"### Response:(.*?)(?=\n###|$)", response_text, re.DOTALL) + if match: + adjusted_response = match.group(1).strip() + else: + adjusted_response = "" + + return { + 'success': True, + 'response': { + "result": adjusted_response + } + } + + except Exception as e: + return {'success': False, 'response': str(e)} + \ No newline at end of file diff --git a/main.py b/main.py index 35f0a27..d1e736e 100644 --- a/main.py +++ b/main.py @@ -1,27 +1,22 @@ - -from transformers import GPT2Tokenizer, GPT2LMHeadModel - - -# We should rename this -class AI_Test: - def __init__(self): - pass - - def generate_bot_response(self, question): - tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") - model = GPT2LMHeadModel.from_pretrained("distilgpt2") - - prompt = ''' - Your task is to answer children's questions using simple language. - Explain any difficult words in a way a 3-year-old can understand. - Keep responses under 60 words. - \n\nQuestion: - ''' - - input_text = prompt + question - - inputs = tokenizer.encode(input_text, return_tensors='pt') - outputs = model.generate(inputs, max_length=150, num_return_sequences=1) - answer = tokenizer.decode(outputs[0], skip_special_tokens=True) - - return answer +import os +import uvicorn +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware + +from chat.router import router as chat_router +# from piggy.router import router as piggy_router + +app = FastAPI( + docs_url="/sugar-ai/docs", +) + +app.include_router(chat_router, prefix="/sugar-ai/chat") +# app.include_router(piggy_router, prefix="/sugar-ai/piggy") + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) \ No newline at end of file diff --git a/original_main.py b/original_main.py new file mode 100644 index 0000000..35f0a27 --- /dev/null +++ b/original_main.py @@ -0,0 +1,27 @@ + +from transformers import GPT2Tokenizer, GPT2LMHeadModel + + +# We should rename this +class AI_Test: + def __init__(self): + pass + + def generate_bot_response(self, question): + tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") + model = GPT2LMHeadModel.from_pretrained("distilgpt2") + + prompt = ''' + Your task is to answer children's questions using simple language. + Explain any difficult words in a way a 3-year-old can understand. + Keep responses under 60 words. + \n\nQuestion: + ''' + + input_text = prompt + question + + inputs = tokenizer.encode(input_text, return_tensors='pt') + outputs = model.generate(inputs, max_length=150, num_return_sequences=1) + answer = tokenizer.decode(outputs[0], skip_special_tokens=True) + + return answer From 0999a4569911144dd7d52cf008caa79dcee5487e Mon Sep 17 00:00:00 2001 From: XXXJumpingFrogXXX Date: Wed, 9 Oct 2024 15:03:11 +0800 Subject: [PATCH 2/8] This commit includes the following changes: (1) Created the latest main.py file and completed some basic FastAPI settings in it. (2) Renamed the original main.py file to original_main.py. (3) Kept the existing piggy directory and created a chat directory to establish separate routers and APIs for each project. 
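For reference, a client call to the new chat endpoint added here would look roughly like the sketch below. This is illustrative rather than part of the patch: it assumes the app is served locally on uvicorn's default port 8000 (no host or port is configured anywhere in this series) and that the requests package is available on the client side.

    import requests

    # POST a question to the chat router; the URL prefix comes from main.py,
    # the request and response shapes from chat/router.py in this patch.
    # http://localhost:8000 is an assumed local address, not set by the series.
    resp = requests.post(
        "http://localhost:8000/sugar-ai/chat/generate_answer",
        json={"query": "Why is the sky blue?"},
    )
    data = resp.json()
    if data["success"]:
        print(data["response"]["result"])   # child-friendly answer from the llama + gemma pipeline
    else:
        print("Error:", data["response"])   # exception text returned by the endpoint
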
--- chat/router.py | 110 +++++++++++++++++++++++++++++++++++++++++++++++ main.py | 49 ++++++++++----------- original_main.py | 27 ++++++++++++ 3 files changed, 159 insertions(+), 27 deletions(-) create mode 100644 chat/router.py create mode 100644 original_main.py diff --git a/chat/router.py b/chat/router.py new file mode 100644 index 0000000..e2f001e --- /dev/null +++ b/chat/router.py @@ -0,0 +1,110 @@ +from fastapi import APIRouter +from pydantic import BaseModel + +from unsloth import FastLanguageModel +import torch + +max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally! +dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ +load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False. + +alpaca_prompt = """Below is an instruction that describes a task, along with an input that provides additional context. Write a response that appropriately completes the request. + +### Instruction: +{} + +### Input: +{} + +### Response: +{}""" + +class Question(BaseModel): + query: str + +@router.post("/generate_answer") +def generate_answer(value: Question): + try: + llama_model, llama_tokenizer = FastLanguageModel.from_pretrained( + model_name = "Antonio27/llama3-8b-4-bit-for-sugar", + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + ) + + gemma_model, gemma_tokenizer = FastLanguageModel.from_pretrained( + model_name = "unsloth/gemma-2-9b-it-bnb-4bit", + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + ) + + FastLanguageModel.for_inference(llama_model) + llama_tokenizer.pad_token = llama_tokenizer.eos_token + llama_tokenizer.add_eos_token = True + + inputs = llama_tokenizer( + [ + alpaca_prompt.format( + f''' + Your task is to answer children's questions using simple language. + Explain any difficult words in a way a 3-year-old can understand. + Keep responses under 60 words. + \n\nQuestion: {value.query} + ''', # instruction + "", # input + "", # output - leave this blank for generation! + ) + ], return_tensors="pt").to("cuda") + + outputs = llama_model.generate(**inputs, max_new_tokens=256, temperature=0.6) + decoded_outputs = llama_tokenizer.batch_decode(outputs) + + response_text = decoded_outputs[0] + + match = re.search(r"### Response:(.*?)(?=\n###|$)", response_text, re.DOTALL) + if match: + initial_response = match.group(1).strip() + else: + initial_response = "" + + FastLanguageModel.for_inference(gemma_model) + gemma_tokenizer.pad_token = gemma_tokenizer.eos_token + gemma_tokenizer.add_eos_token = True + + inputs = gemma_tokenizer( + [ + alpaca_prompt.format( + f''' + Modify the given content for a 5-year-old. + Use simple words and phrases. + Remove any repetitive information. + Keep responses under 50 words. + \n\nGiven Content: {initial_response} + ''', # instruction + "", # input + "", # output - leave this blank for generation! 
+ ) + ], return_tensors="pt").to("cuda") + + outputs = gemma_model.generate(**inputs, max_new_tokens=256, temperature=0.6) + decoded_outputs = gemma_tokenizer.batch_decode(outputs) + + response_text = decoded_outputs[0] + + match = re.search(r"### Response:(.*?)(?=\n###|$)", response_text, re.DOTALL) + if match: + adjusted_response = match.group(1).strip() + else: + adjusted_response = "" + + return { + 'success': True, + 'response': { + "result": adjusted_response + } + } + + except Exception as e: + return {'success': False, 'response': str(e)} + \ No newline at end of file diff --git a/main.py b/main.py index 35f0a27..d1e736e 100644 --- a/main.py +++ b/main.py @@ -1,27 +1,22 @@ - -from transformers import GPT2Tokenizer, GPT2LMHeadModel - - -# We should rename this -class AI_Test: - def __init__(self): - pass - - def generate_bot_response(self, question): - tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") - model = GPT2LMHeadModel.from_pretrained("distilgpt2") - - prompt = ''' - Your task is to answer children's questions using simple language. - Explain any difficult words in a way a 3-year-old can understand. - Keep responses under 60 words. - \n\nQuestion: - ''' - - input_text = prompt + question - - inputs = tokenizer.encode(input_text, return_tensors='pt') - outputs = model.generate(inputs, max_length=150, num_return_sequences=1) - answer = tokenizer.decode(outputs[0], skip_special_tokens=True) - - return answer +import os +import uvicorn +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware + +from chat.router import router as chat_router +# from piggy.router import router as piggy_router + +app = FastAPI( + docs_url="/sugar-ai/docs", +) + +app.include_router(chat_router, prefix="/sugar-ai/chat") +# app.include_router(piggy_router, prefix="/sugar-ai/piggy") + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) \ No newline at end of file diff --git a/original_main.py b/original_main.py new file mode 100644 index 0000000..35f0a27 --- /dev/null +++ b/original_main.py @@ -0,0 +1,27 @@ + +from transformers import GPT2Tokenizer, GPT2LMHeadModel + + +# We should rename this +class AI_Test: + def __init__(self): + pass + + def generate_bot_response(self, question): + tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") + model = GPT2LMHeadModel.from_pretrained("distilgpt2") + + prompt = ''' + Your task is to answer children's questions using simple language. + Explain any difficult words in a way a 3-year-old can understand. + Keep responses under 60 words. + \n\nQuestion: + ''' + + input_text = prompt + question + + inputs = tokenizer.encode(input_text, return_tensors='pt') + outputs = model.generate(inputs, max_length=150, num_return_sequences=1) + answer = tokenizer.decode(outputs[0], skip_special_tokens=True) + + return answer From c24a43fc1b4db262330bfe67fa144bbe4cfa462f Mon Sep 17 00:00:00 2001 From: XXXJumpingFrogXXX Date: Wed, 16 Oct 2024 15:46:50 +0800 Subject: [PATCH 3/8] This commit includes the following changes: (1)Created a new main.py file: Established basic FastAPI settings to enhance application structure and scalability. This setup includes initial configurations and middleware setup, laying the groundwork for future development. (2)Renamed the original main.py: Changed to original_main.py to preserve the previous version and provide a reference for legacy code, facilitating a smooth transition and ensuring no loss of important historical context. 
(3)Refactored project structure: Maintained the existing piggy directory and introduced a chat directory. This separation of routers and APIs improves modularity, making it easier to manage and extend each project independently. These changes aim to improve code organization and prepare the project for scalable development with FastAPI. --- chat/router.py | 31 +++++++++++++++++++++++-------- main.py | 12 ++++++++---- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/chat/router.py b/chat/router.py index e2f001e..ab00cb9 100644 --- a/chat/router.py +++ b/chat/router.py @@ -22,27 +22,33 @@ class Question(BaseModel): query: str +router = APIRouter() + @router.post("/generate_answer") def generate_answer(value: Question): try: + # Load the llama model and tokenizer from the pretrained model llama_model, llama_tokenizer = FastLanguageModel.from_pretrained( - model_name = "Antonio27/llama3-8b-4-bit-for-sugar", - max_seq_length = max_seq_length, - dtype = dtype, - load_in_4bit = load_in_4bit, + model_name="Antonio27/llama3-8b-4-bit-for-sugar", + max_seq_length=max_seq_length, + dtype=dtype, + load_in_4bit=load_in_4bit, ) + # Load the gemma model and tokenizer from the pretrained model gemma_model, gemma_tokenizer = FastLanguageModel.from_pretrained( - model_name = "unsloth/gemma-2-9b-it-bnb-4bit", - max_seq_length = max_seq_length, - dtype = dtype, - load_in_4bit = load_in_4bit, + model_name="unsloth/gemma-2-9b-it-bnb-4bit", + max_seq_length=max_seq_length, + dtype=dtype, + load_in_4bit=load_in_4bit, ) + # Prepare llama model for inference FastLanguageModel.for_inference(llama_model) llama_tokenizer.pad_token = llama_tokenizer.eos_token llama_tokenizer.add_eos_token = True + # Tokenize the input question for the llama model inputs = llama_tokenizer( [ alpaca_prompt.format( @@ -57,21 +63,26 @@ def generate_answer(value: Question): ) ], return_tensors="pt").to("cuda") + # Generate output using the llama model outputs = llama_model.generate(**inputs, max_new_tokens=256, temperature=0.6) decoded_outputs = llama_tokenizer.batch_decode(outputs) + # Extract the response text response_text = decoded_outputs[0] + # Use regex to find the response section in the output match = re.search(r"### Response:(.*?)(?=\n###|$)", response_text, re.DOTALL) if match: initial_response = match.group(1).strip() else: initial_response = "" + # Prepare gemma model for inference FastLanguageModel.for_inference(gemma_model) gemma_tokenizer.pad_token = gemma_tokenizer.eos_token gemma_tokenizer.add_eos_token = True + # Tokenize the initial response for the gemma model inputs = gemma_tokenizer( [ alpaca_prompt.format( @@ -87,17 +98,21 @@ def generate_answer(value: Question): ) ], return_tensors="pt").to("cuda") + # Generate adjusted output using the gemma model outputs = gemma_model.generate(**inputs, max_new_tokens=256, temperature=0.6) decoded_outputs = gemma_tokenizer.batch_decode(outputs) + # Extract the adjusted response text response_text = decoded_outputs[0] + # Use regex to find the response section in the output match = re.search(r"### Response:(.*?)(?=\n###|$)", response_text, re.DOTALL) if match: adjusted_response = match.group(1).strip() else: adjusted_response = "" + # Return the final adjusted response in a success dictionary return { 'success': True, 'response': { diff --git a/main.py b/main.py index d1e736e..a685474 100644 --- a/main.py +++ b/main.py @@ -6,17 +6,21 @@ from chat.router import router as chat_router # from piggy.router import router as piggy_router +# Create a FastAPI application 
instance with custom documentation URL app = FastAPI( docs_url="/sugar-ai/docs", ) +# Include the chat router with a specified prefix for endpoint paths app.include_router(chat_router, prefix="/sugar-ai/chat") +# Include the piggy router with a specified prefix for endpoint paths (currently commented out) # app.include_router(piggy_router, prefix="/sugar-ai/piggy") +# Add CORS middleware to allow cross-origin requests from any origin app.add_middleware( CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], + allow_origins=["*"], # Allow requests from any origin + allow_credentials=True, # Allow sending of credentials (e.g., cookies) + allow_methods=["*"], # Allow all HTTP methods + allow_headers=["*"], # Allow all headers ) \ No newline at end of file From a1908c79d94be98ca6bd6381be1c3b36913d649f Mon Sep 17 00:00:00 2001 From: Chihurumnaya Ibiam Date: Thu, 31 Oct 2024 15:44:38 +0100 Subject: [PATCH 4/8] More dpeendecy changes Signed-off-by: Chihurumnaya Ibiam --- requirements.txt | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index c2c561c..f834bc7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -135,7 +135,7 @@ toml==0.10.2 tomli==2.0.1 tomlkit==0.13.2 toolz==0.12.1 -torch @ https://download.pytorch.org/whl/cu121_full/torch-2.4.0%2Bcu +torch==2.4.1 triton==3.0.0 trl==0.10.1 tweepy==4.14.0 @@ -176,12 +176,15 @@ yfinance==0.2.43 zict==3.0.0 zipp==3.20.1 -# Rag agent requirments +# Rag agent requirements langchain langchain-ollama streamlit langchain_experimental -faiss-cpu -pymupdf -sentence-transformers +faiss-cpu==1.9.0 +Pymupdf +sentence-transformers==3.1.1 langchain_community +ollama==0.3.3 +huggingface-hub==0.25.2 +transformers==4.45.2 From 98de784940978c42338c2f645a6f4ace802c4486 Mon Sep 17 00:00:00 2001 From: XXXJumpingFrogXXX Date: Sun, 15 Dec 2024 14:00:39 +0800 Subject: [PATCH 5/8] Add router file for Piggy activity --- Pippy/router.py | 102 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 Pippy/router.py diff --git a/Pippy/router.py b/Pippy/router.py new file mode 100644 index 0000000..148fea3 --- /dev/null +++ b/Pippy/router.py @@ -0,0 +1,102 @@ +from fastapi import APIRouter +from pydantic import BaseModel +import os +import warnings +from langchain_community.vectorstores import FAISS +from langchain_community.embeddings import HuggingFaceEmbeddings +from langchain_community.document_loaders import PyMuPDFLoader, TextLoader +from langchain.chains import RetrievalQA +from langchain_core.prompts import ChatPromptTemplate +from langchain_ollama.llms import OllamaLLM + +# Suppress warnings +warnings.filterwarnings("ignore", category=FutureWarning) +warnings.filterwarnings("ignore", category=DeprecationWarning) + +# Define document paths +document_paths = [ + '/home/kshitij/Downloads/AI-model/Pygame Documentation.pdf', + '/home/kshitij/Downloads/AI-model/AI-model(Streamlitfree)/Python GTK+3 Documentation.pdf', +] + +# Define the Pydantic model for input +class Question(BaseModel): + query: str + +router = APIRouter() + +# Helper function to set up the vector store +def setup_vectorstore(file_paths): + try: + all_documents = [] + for file_path in file_paths: + if os.path.exists(file_path): + print(f"Loading document from: {file_path}") + if file_path.endswith(".pdf"): + loader = PyMuPDFLoader(file_path) + else: + loader = TextLoader(file_path) + + documents = loader.load() + print(f"Loaded 
{len(documents)} documents from {file_path}.") + all_documents.extend(documents) + else: + print(f"File not found: {file_path}") + + embeddings = HuggingFaceEmbeddings() + vector_store = FAISS.from_documents(all_documents, embeddings) + return vector_store.as_retriever() + + except Exception as e: + print(f"Failed to set up the retriever: {e}") + return None + +# System prompt definition +system_prompt = """ +You are a highly intelligent Python coding assistant with access to both general knowledge and specific Pygame documentation. +1. You only have to answer Python and GTK based coding queries. +2. Prioritize answers based on the documentation when the query is related to it. However make sure you are not biased towards documentation provided to you. +3. Make sure that you don't mention words like context or documentation stating what has been provided to you. +4. Provide step-by-step explanations wherever applicable. +5. If the documentation does not contain relevant information, use your general knowledge. +6. Always be clear, concise, and provide examples where necessary. +""" + +template = f"""{system_prompt} +Question: {{question}} +Answer: Let's think step by step. +""" +prompt = ChatPromptTemplate.from_template(template) +model = OllamaLLM(model="llama3.1") + +retriever = setup_vectorstore(document_paths) + +if retriever: + rag_chain = RetrievalQA.from_chain_type(llm=model, chain_type="stuff", retriever=retriever) +else: + raise RuntimeError("Unable to initialize retriever. Check document paths.") + +@router.post("/generate_answer") +def generate_answer(question: Question): + try: + # Retrieve relevant documents + results = retriever.get_relevant_documents(question.query) + if results: + print("Relevant document found. Using document-specific response...") + response = rag_chain({"query": question.query}) + return { + "success": True, + "response": response.get("result", "No result found.") + } + else: + print("No relevant document found. 
Using general knowledge response...") + response = model.invoke(question.query) + return { + "success": True, + "response": response + } + except Exception as e: + return { + "success": False, + "error": str(e) + } \ No newline at end of file From c632bfbee69c9503e41c8c19dee91b1c4ebdb42f Mon Sep 17 00:00:00 2001 From: XXXJumpingFrogXXX Date: Tue, 31 Dec 2024 06:57:52 +0800 Subject: [PATCH 6/8] Incorporate Piggy Activity Code --- main.py | 4 ++-- {Pippy => piggy}/router.py | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename {Pippy => piggy}/router.py (100%) diff --git a/main.py b/main.py index d1e736e..a3e447d 100644 --- a/main.py +++ b/main.py @@ -4,14 +4,14 @@ from fastapi.middleware.cors import CORSMiddleware from chat.router import router as chat_router -# from piggy.router import router as piggy_router +from Piggy.router import router as piggy_router app = FastAPI( docs_url="/sugar-ai/docs", ) app.include_router(chat_router, prefix="/sugar-ai/chat") -# app.include_router(piggy_router, prefix="/sugar-ai/piggy") +app.include_router(piggy_router, prefix="/sugar-ai/piggy") app.add_middleware( CORSMiddleware, diff --git a/Pippy/router.py b/piggy/router.py similarity index 100% rename from Pippy/router.py rename to piggy/router.py From b5fc916b7a5df7c639fa2e5950b9f2fb9a5cd328 Mon Sep 17 00:00:00 2001 From: XXXJumpingFrogXXX Date: Sun, 12 Jan 2025 06:11:45 +0800 Subject: [PATCH 7/8] Rename the directory --- main.py | 2 +- {piggy => pippy}/router.py | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename {piggy => pippy}/router.py (100%) diff --git a/main.py b/main.py index a3e447d..a8d24a4 100644 --- a/main.py +++ b/main.py @@ -4,7 +4,7 @@ from fastapi.middleware.cors import CORSMiddleware from chat.router import router as chat_router -from Piggy.router import router as piggy_router +from pippy.router import router as piggy_router app = FastAPI( docs_url="/sugar-ai/docs", diff --git a/piggy/router.py b/pippy/router.py similarity index 100% rename from piggy/router.py rename to pippy/router.py From be94c9595876f5a2a56c6515db01327aa5233ca3 Mon Sep 17 00:00:00 2001 From: XXXJumpingFrogXXX Date: Sun, 12 Jan 2025 06:12:58 +0800 Subject: [PATCH 8/8] Rename the directory --- main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/main.py b/main.py index a8d24a4..4a3d2d2 100644 --- a/main.py +++ b/main.py @@ -4,14 +4,14 @@ from fastapi.middleware.cors import CORSMiddleware from chat.router import router as chat_router -from pippy.router import router as piggy_router +from pippy.router import router as pippy_router app = FastAPI( docs_url="/sugar-ai/docs", ) app.include_router(chat_router, prefix="/sugar-ai/chat") -app.include_router(piggy_router, prefix="/sugar-ai/piggy") +app.include_router(pippy_router, prefix="/sugar-ai/piggy") app.add_middleware( CORSMiddleware,
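With the final rename in place, the application exposes the chat router under /sugar-ai/chat and the Pippy router under /sugar-ai/piggy. A minimal smoke test of the Pippy endpoint is sketched below; it is not part of the series and assumes the server has been started separately (for example with: uvicorn main:app --port 8000), that the hard-coded documentation paths in pippy/router.py exist on disk, and that a local Ollama server provides the llama3.1 model.

    import requests

    # Query the Pippy coding-assistant endpoint added in PATCH 5-8.
    # localhost:8000 is an assumed local address.
    resp = requests.post(
        "http://localhost:8000/sugar-ai/piggy/generate_answer",
        json={"query": "How do I draw a circle in Pygame?"},
    )
    data = resp.json()
    # Unlike the chat router, the Pippy router returns the answer directly
    # under "response" and reports failures under "error".
    print(data["response"] if data.get("success") else data.get("error"))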