Merge pull request #125 from sank8-2/main
Added Github Actions
LarFii authored Oct 26, 2024
2 parents e099019 + 5e3ab98 commit f192041
Showing 11 changed files with 203 additions and 93 deletions.
30 changes: 30 additions & 0 deletions .github/workflows/linting.yaml
@@ -0,0 +1,30 @@
name: Linting and Formatting

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  lint-and-format:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pre-commit
      - name: Run pre-commit
        run: pre-commit run --all-files
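
Note: the final step runs `pre-commit run --all-files`, which requires a `.pre-commit-config.yaml` at the repository root. That config is not among the diffs loaded on this page, so here is a minimal sketch of what it might contain; the specific hooks and revisions below are illustrative assumptions, not taken from this PR.

```yaml
# Hypothetical .pre-commit-config.yaml (hook selection and revs are
# illustrative assumptions, not necessarily what this PR uses).
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    hooks:
      - id: trailing-whitespace  # strip trailing spaces
      - id: end-of-file-fixer    # ensure files end with a single newline
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.6.9
    hooks:
      - id: ruff                 # lint, with autofixes
        args: [--fix]
      - id: ruff-format          # apply the Ruff code formatter
```

Hooks along these lines would account for the changes visible in the diffs below: stripped trailing whitespace, end-of-file newlines, quote normalization, and line wrapping.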
2 changes: 1 addition & 1 deletion .gitignore
@@ -4,4 +4,4 @@ dickens/
book.txt
lightrag-dev/
.idea/
-dist/
+dist/
12 changes: 6 additions & 6 deletions README.md
@@ -58,8 +58,8 @@ from lightrag.llm import gpt_4o_mini_complete, gpt_4o_complete

#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
-# import nest_asyncio
-# nest_asyncio.apply()
+# import nest_asyncio
+# nest_asyncio.apply()
#########

WORKING_DIR = "./dickens"
@@ -157,7 +157,7 @@ rag = LightRAG(

<details>
<summary> Using Ollama Models </summary>

* If you want to use Ollama models, you only need to set LightRAG as follows:

```python
@@ -328,8 +328,8 @@ def main():
    SET e.entity_type = node.entity_type,
        e.description = node.description,
        e.source_id = node.source_id,
-        e.displayName = node.id
-    REMOVE e:Entity
+        e.displayName = node.id
+    REMOVE e:Entity
    WITH e, node
    CALL apoc.create.addLabels(e, [node.entity_type]) YIELD node AS labeledNode
    RETURN count(*)
@@ -382,7 +382,7 @@ def main():

    except Exception as e:
        print(f"Error occurred: {e}")

    finally:
        driver.close()

6 changes: 3 additions & 3 deletions examples/graph_visual_with_html.py
@@ -3,7 +3,7 @@
import random

# Load the GraphML file
-G = nx.read_graphml('./dickens/graph_chunk_entity_relation.graphml')
+G = nx.read_graphml("./dickens/graph_chunk_entity_relation.graphml")

# Create a Pyvis network
net = Network(notebook=True)
@@ -13,7 +13,7 @@

# Add colors to nodes
for node in net.nodes:
-    node['color'] = "#{:06x}".format(random.randint(0, 0xFFFFFF))
+    node["color"] = "#{:06x}".format(random.randint(0, 0xFFFFFF))

# Save and display the network
-net.show('knowledge_graph.html')
+net.show("knowledge_graph.html")
30 changes: 19 additions & 11 deletions examples/graph_visual_with_neo4j.py
@@ -13,6 +13,7 @@
NEO4J_USERNAME = "neo4j"
NEO4J_PASSWORD = "your_password"


def convert_xml_to_json(xml_path, output_path):
    """Converts XML file to JSON and saves the output."""
    if not os.path.exists(xml_path):
@@ -21,33 +22,35 @@ def convert_xml_to_json(xml_path, output_path):

    json_data = xml_to_json(xml_path)
    if json_data:
-        with open(output_path, 'w', encoding='utf-8') as f:
+        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(json_data, f, ensure_ascii=False, indent=2)
        print(f"JSON file created: {output_path}")
        return json_data
    else:
        print("Failed to create JSON data")
        return None


def process_in_batches(tx, query, data, batch_size):
    """Process data in batches and execute the given query."""
    for i in range(0, len(data), batch_size):
-        batch = data[i:i + batch_size]
+        batch = data[i : i + batch_size]
        tx.run(query, {"nodes": batch} if "nodes" in query else {"edges": batch})


def main():
    # Paths
-    xml_file = os.path.join(WORKING_DIR, 'graph_chunk_entity_relation.graphml')
-    json_file = os.path.join(WORKING_DIR, 'graph_data.json')
+    xml_file = os.path.join(WORKING_DIR, "graph_chunk_entity_relation.graphml")
+    json_file = os.path.join(WORKING_DIR, "graph_data.json")

    # Convert XML to JSON
    json_data = convert_xml_to_json(xml_file, json_file)
    if json_data is None:
        return

    # Load nodes and edges
-    nodes = json_data.get('nodes', [])
-    edges = json_data.get('edges', [])
+    nodes = json_data.get("nodes", [])
+    edges = json_data.get("edges", [])

    # Neo4j queries
    create_nodes_query = """
@@ -56,8 +59,8 @@ def main():
    SET e.entity_type = node.entity_type,
        e.description = node.description,
        e.source_id = node.source_id,
-        e.displayName = node.id
-    REMOVE e:Entity
+        e.displayName = node.id
+    REMOVE e:Entity
    WITH e, node
    CALL apoc.create.addLabels(e, [node.entity_type]) YIELD node AS labeledNode
    RETURN count(*)
@@ -100,19 +103,24 @@ def main():
        # Execute queries in batches
        with driver.session() as session:
            # Insert nodes in batches
-            session.execute_write(process_in_batches, create_nodes_query, nodes, BATCH_SIZE_NODES)
+            session.execute_write(
+                process_in_batches, create_nodes_query, nodes, BATCH_SIZE_NODES
+            )

            # Insert edges in batches
-            session.execute_write(process_in_batches, create_edges_query, edges, BATCH_SIZE_EDGES)
+            session.execute_write(
+                process_in_batches, create_edges_query, edges, BATCH_SIZE_EDGES
+            )

            # Set displayName and labels
            session.run(set_displayname_and_labels_query)

    except Exception as e:
        print(f"Error occurred: {e}")

    finally:
        driver.close()


if __name__ == "__main__":
    main()
27 changes: 20 additions & 7 deletions examples/lightrag_openai_compatible_demo.py
@@ -52,6 +52,7 @@ async def test_funcs():

# asyncio.run(test_funcs())


async def main():
    try:
        embedding_dimension = await get_embedding_dim()
@@ -61,35 +62,47 @@ async def main():
            working_dir=WORKING_DIR,
            llm_model_func=llm_model_func,
            embedding_func=EmbeddingFunc(
-                embedding_dim=embedding_dimension, max_token_size=8192, func=embedding_func
+                embedding_dim=embedding_dimension,
+                max_token_size=8192,
+                func=embedding_func,
            ),
        )

        with open("./book.txt", "r", encoding="utf-8") as f:
            rag.insert(f.read())

        # Perform naive search
        print(
-            rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+            rag.query(
+                "What are the top themes in this story?", param=QueryParam(mode="naive")
+            )
        )

        # Perform local search
        print(
-            rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+            rag.query(
+                "What are the top themes in this story?", param=QueryParam(mode="local")
+            )
        )

        # Perform global search
        print(
-            rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+            rag.query(
+                "What are the top themes in this story?",
+                param=QueryParam(mode="global"),
+            )
        )

        # Perform hybrid search
        print(
-            rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+            rag.query(
+                "What are the top themes in this story?",
+                param=QueryParam(mode="hybrid"),
+            )
        )
    except Exception as e:
        print(f"An error occurred: {e}")


if __name__ == "__main__":
-    asyncio.run(main())
+    asyncio.run(main())
2 changes: 1 addition & 1 deletion examples/lightrag_siliconcloud_demo.py
@@ -30,7 +30,7 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
        texts,
        model="netease-youdao/bce-embedding-base_v1",
        api_key=os.getenv("SILICONFLOW_API_KEY"),
-        max_token_size=512
+        max_token_size=512,
    )


36 changes: 29 additions & 7 deletions examples/vram_management_demo.py
@@ -27,49 +27,71 @@
# Read all .txt files from the TEXT_FILES_DIR directory
texts = []
for filename in os.listdir(TEXT_FILES_DIR):
-    if filename.endswith('.txt'):
+    if filename.endswith(".txt"):
        file_path = os.path.join(TEXT_FILES_DIR, filename)
-        with open(file_path, 'r', encoding='utf-8') as file:
+        with open(file_path, "r", encoding="utf-8") as file:
            texts.append(file.read())


# Batch insert texts into LightRAG with a retry mechanism
def insert_texts_with_retry(rag, texts, retries=3, delay=5):
    for _ in range(retries):
        try:
            rag.insert(texts)
            return
        except Exception as e:
-            print(f"Error occurred during insertion: {e}. Retrying in {delay} seconds...")
+            print(
+                f"Error occurred during insertion: {e}. Retrying in {delay} seconds..."
+            )
            time.sleep(delay)
    raise RuntimeError("Failed to insert texts after multiple retries.")


insert_texts_with_retry(rag, texts)

# Perform different types of queries and handle potential errors
try:
-    print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+    print(
+        rag.query(
+            "What are the top themes in this story?", param=QueryParam(mode="naive")
+        )
+    )
except Exception as e:
    print(f"Error performing naive search: {e}")

try:
-    print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+    print(
+        rag.query(
+            "What are the top themes in this story?", param=QueryParam(mode="local")
+        )
+    )
except Exception as e:
    print(f"Error performing local search: {e}")

try:
-    print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+    print(
+        rag.query(
+            "What are the top themes in this story?", param=QueryParam(mode="global")
+        )
+    )
except Exception as e:
    print(f"Error performing global search: {e}")

try:
-    print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
+    print(
+        rag.query(
+            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+        )
+    )
except Exception as e:
    print(f"Error performing hybrid search: {e}")


# Function to clear VRAM resources
def clear_vram():
    os.system("sudo nvidia-smi --gpu-reset")


# Regularly clear VRAM to prevent overflow
clear_vram_interval = 3600 # Clear once every hour
start_time = time.time()
(Diffs for the remaining three changed files were not loaded.)
