Merge pull request #74 from arjbingly/project-BasicRAG
Added badge push to Jenkins
arjbingly authored Apr 12, 2024
2 parents 398a5b6 + 6f983e0 commit 6877f5f
Showing 3 changed files with 48 additions and 16 deletions.
22 changes: 22 additions & 0 deletions ci/modify_test_status.py
@@ -0,0 +1,22 @@
import argparse

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--fail', action='store_true')
    args = parser.parse_args()
    readme_path = 'README.md'  # Path to your README file

    with open(readme_path, 'r') as file:
        lines = file.readlines()

    # Flip the shields.io test badge between passing and failing based on the --fail flag.
    for i, line in enumerate(lines):
        if 'img.shields.io' in line and ('passing' in line or 'failing' in line):
            if args.fail:  # argparse exposes '-f'/'--fail' as args.fail
                lines[i] = line.replace('passing', 'failing').replace('darggreen', 'red')
            else:
                lines[i] = line.replace('failing', 'passing').replace('red', 'darggreen')
            print('README file has been updated.')

    with open(readme_path, 'w') as file:
        file.writelines(lines)
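For context, a minimal sketch of the kind of shields.io badge line this script rewrites. The actual README badge is not part of this diff, so the badge text and the Jenkins invocation below are assumed examples rather than repository content.

# Hypothetical README badge line; the real one is not shown in this diff.
badge = '![Tests](https://img.shields.io/badge/tests-passing-darggreen)'

# What the script does to such a line when CI runs it with the failure flag,
# e.g. `python ci/modify_test_status.py --fail` (assumed invocation):
print(badge.replace('passing', 'failing').replace('darggreen', 'red'))
# -> ![Tests](https://img.shields.io/badge/tests-failing-red)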
10 changes: 9 additions & 1 deletion cookbook/Basic-RAG/BasicRAG_ingest.py
@@ -1,15 +1,23 @@
"""A cookbook demonstrating how to ingest pdf files for use with Basic RAG."""

import asyncio
from pathlib import Path

from grag.components.multivec_retriever import Retriever
from grag.components.vectordb.deeplake_client import DeepLakeClient

# from grag.components.vectordb.chroma_client import ChromaClient

SYNC = True # Run synchronously (slow)
ASYNC = True # Run asynchronously

client = DeepLakeClient(collection_name="ci_test")
# client = ChromaClient(collection_name="ci_test")
retriever = Retriever(vectordb=client)

dir_path = Path(__file__).parents[2] / "data/test/pdfs/new_papers"
retriever.ingest(dir_path)

if SYNC:
    retriever.ingest(dir_path)
elif ASYNC:
    asyncio.run(retriever.aingest(dir_path))
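As a possible extension of the asynchronous path, a sketch of ingesting several folders concurrently. It assumes Retriever.aingest is a coroutine, as the asyncio.run call above suggests; the helper name and directory paths are hypothetical.

# Hypothetical: ingest multiple directories concurrently via the async API.
import asyncio
from pathlib import Path

async def ingest_many(retriever, dirs):
    # Assumes retriever.aingest is awaitable, as used in the script above.
    await asyncio.gather(*(retriever.aingest(Path(d)) for d in dirs))

# Usage sketch (placeholder paths):
# asyncio.run(ingest_many(retriever, ["data/pdfs/batch_1", "data/pdfs/batch_2"]))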
32 changes: 17 additions & 15 deletions src/grag/components/llm.py
@@ -2,6 +2,7 @@

import os
from pathlib import Path
from typing import Optional, Union

import torch
from langchain.callbacks.manager import CallbackManager
@@ -38,18 +39,18 @@ class LLM:

    def __init__(
        self,
        model_name=llm_conf["model_name"],
        device_map=llm_conf["device_map"],
        task=llm_conf["task"],
        max_new_tokens=llm_conf["max_new_tokens"],
        temperature=llm_conf["temperature"],
        n_batch=llm_conf["n_batch_gpu_cpp"],
        n_ctx=llm_conf["n_ctx_cpp"],
        n_gpu_layers=llm_conf["n_gpu_layers_cpp"],
        std_out=llm_conf["std_out"],
        base_dir=llm_conf["base_dir"],
        quantization=llm_conf["quantization"],
        pipeline=llm_conf["pipeline"],
        model_name: str = llm_conf["model_name"],
        device_map: str = llm_conf["device_map"],
        task: str = llm_conf["task"],
        max_new_tokens: str = llm_conf["max_new_tokens"],
        temperature: str = llm_conf["temperature"],
        n_batch: str = llm_conf["n_batch_gpu_cpp"],
        n_ctx: str = llm_conf["n_ctx_cpp"],
        n_gpu_layers: str = llm_conf["n_gpu_layers_cpp"],
        std_out: Union[bool, str] = llm_conf["std_out"],
        base_dir: str = llm_conf["base_dir"],
        quantization: str = llm_conf["quantization"],
        pipeline: str = llm_conf["pipeline"],
    ):
        """Initialize the LLM class using the given parameters."""
        self.base_dir = Path(base_dir)
@@ -66,7 +67,7 @@ def __init__(
        if std_out:
            self.callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
        else:
            self.callback_manager = None
            self.callback_manager = None  # type: ignore

    @property
    def model_name(self):
@@ -85,7 +86,7 @@ def model_path(self):
            self.base_dir / self.model_name / f"ggml-model-{self.quantization}.gguf"
        )

    def hf_pipeline(self, is_local=False):
    def hf_pipeline(self, is_local: Optional[bool] = False):
        """Loads the model using Hugging Face transformers.
        Args:
@@ -161,7 +162,8 @@ def llama_cpp(self):
        return llm

    def load_model(
        self, model_name=None, pipeline=None, quantization=None, is_local=None
        self, model_name: Optional[str] = None, pipeline: Optional[str] = None, quantization: Optional[str] = None,
        is_local: Optional[bool] = None
    ):
        """Loads the model based on the specified pipeline and model name.
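For orientation, a minimal usage sketch of the class touched by this diff. It relies only on the __init__ and load_model signatures visible above and assumes the llm_conf defaults are valid for the local environment.

# Minimal sketch, assuming llm_conf supplies usable defaults on this machine.
from grag.components.llm import LLM

llm = LLM()                    # every argument falls back to llm_conf
llm.load_model(is_local=True)  # loads via the configured pipeline (hf_pipeline or llama_cpp)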
