From 32091707dacad816e5a68330b39b1066a7b3dd49 Mon Sep 17 00:00:00 2001
From: g-despot <66276597+g-despot@users.noreply.github.com>
Date: Tue, 19 Apr 2022 11:38:04 +0200
Subject: [PATCH 1/3] Update bug_report.md
---
.github/ISSUE_TEMPLATE/bug_report.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 3a48f9d4..dce94a65 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -3,7 +3,7 @@ name: Bug report
about: Create a report to help us improve
title: "[BUG] "
labels: bug
-assignees: gitbuda, antonio2368
+assignees: g-despot, Josipmrden, BorisTasevski, katarinasupe, brunos252
---
**Memgraph version** Which version did you use?
From 0c7cf5b7c22912476fcb020e1d8281f8d5a39682 Mon Sep 17 00:00:00 2001
From: Katarina Supe <61758502+katarinasupe@users.noreply.github.com>
Date: Wed, 4 May 2022 15:54:28 +0200
Subject: [PATCH 2/3] [master < T0068-GA] Update README (#136)
* Update README
* Fix broken link
* Move emojis
* Add table to title
---
README.md | 217 ++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 163 insertions(+), 54 deletions(-)
diff --git a/README.md b/README.md
index 6ce7f0a7..59d12894 100644
--- a/README.md
+++ b/README.md
@@ -40,85 +40,194 @@ The project uses [Poetry](https://python-poetry.org/) to build the GQLAlchemy Py
Before starting the tests, make sure you have an active Memgraph instance running. Execute the following command:
`poetry run pytest .`
-## GQLAlchemy example
+## GQLAlchemy capabilities
-When working with the `gqlalchemy`, a Python developer can connect to the database and execute a `MATCH` Cypher query using the following syntax:
+
+🗺️ Object graph mapper
+
+
+Below you can see an example of how to create `User` and `Language` node classes, and a relationship class of type `SPEAKS`. Along with that, you can see how to create a new node and relationship and how to save them in the database. After that, you can load those nodes and relationships from the database.
+
+
+
+```python
+from gqlalchemy import Memgraph, Node, Relationship, Field
+from typing import Optional
+
+db = Memgraph()
+
+class User(Node, index=True, db=db):
+ id: str = Field(index=True, exist=True, unique=True, db=db)
+
+class Language(Node):
+ name: str = Field(unique=True, db=db)
+
+class Speaks(Relationship, type="SPEAKS"):
+ pass
+
+user = User(id="3", username="John").save(db)
+language = Language(name="en").save(db)
+speaks_rel = Speaks(
+ _start_node_id = user._id,
+ _end_node_id = language._id
+).save(db)
+
+loaded_user = User(id="3").load(db=db)
+print(loaded_user)
+loaded_speaks = Speaks(
+ _start_node_id=user._id,
+ _end_node_id=language._id
+ ).load(db)
+print(loaded_speaks)
+```
+
+
+
+🔨 Query builder
+
+When building a Cypher query, you can use a set of methods that are wrappers around Cypher clauses.
+
+
```python
-from gqlalchemy import Memgraph
-
-memgraph = Memgraph("127.0.0.1", 7687)
-memgraph.execute("CREATE (:Node)-[:Connection]->(:Node)")
-results = memgraph.execute_and_fetch("""
- MATCH (from:Node)-[:Connection]->(to:Node)
- RETURN from, to;
-""")
-
-for result in results:
- print(result['from'])
- print(result['to'])
+from gqlalchemy import create, match
+
+query_create = create()
+ .node(labels="Person", name="Leslie")
+ .to(edge_label="FRIENDS_WITH")
+ .node(labels="Person", name="Ron")
+ .execute()
+
+query_match = match()
+ .node(labels="Person", variable="p1")
+ .to()
+ .node(labels="Person", variable="p2")
+ .where(item="p1.name", operator="=", literal="Leslie")
+ .return_({"p1":"p1"})
+ .execute()
```
+
-## Query builder example
+
+🚰 Manage streams
+
-As we can see, the example above can be error-prone, because we do not have abstractions for creating a database connection and `MATCH` query.
+You can create and start Kafka or Pulsar stream using GQLAlchemy.
+
-Now, rewrite the exact same query by using the functionality of GQLAlchemy's query builder:
+**Pulsar stream**
+```python
+from gqlalchemy import MemgraphPulsarStream
+stream = MemgraphPulsarStream(name="ratings_stream", topics=["ratings"], transform="movielens.rating", service_url="localhost:6650")
+db.create_stream(stream)
+db.start_stream(stream)
+```
+
+**Kafka stream**
```python
-from gqlalchemy import match, Memgraph
+from gqlalchemy import MemgraphKafkaStream
+
+stream = MemgraphKafkaStream(name="ratings_stream", topics=["ratings"], transform="movielens.rating", bootstrap_servers="localhost:9093")
+db.create_stream(stream)
+db.start_stream(stream)
+```
+
+
+
+🗄️ Import table data from different sources
+
+
+**Import table data to a graph database**
+
+You can translate table data from a file to graph data and import it to Memgraph. Currently, we support reading CSV, Parquet, ORC and IPC/Feather/Arrow file formats via the PyArrow package.
+
+Read all about it in [table to graph importer how-to guide](https://memgraph.com/docs/gqlalchemy/how-to-guides/loaders/table-to-graph-importer).
-memgraph = Memgraph()
+**Make a custom file system importer**
-results = (
- match()
- .node("Node", variable="from")
- .to("Connection")
- .node("Node", variable="to")
- .return_()
- .execute()
+If you want to read from a file system not currently supported by GQLAlchemy, or use a file type currently not readable, you can implement your own by extending abstract classes `FileSystemHandler` and `DataLoader`, respectively.
+
+Read all about it in [custom file system importer how-to guide](https://memgraph.com/docs/gqlalchemy/how-to-guides/loaders/custom-file-system-importer).
+
+
+
+
+⚙️ Manage Memgraph instances
+
+
+You can start, stop, connect to and monitor Memgraph instances with GQLAlchemy.
+
+**Manage Memgraph Docker instance**
+
+```python
+from gqlalchemy.instance_runner import (
+ DockerImage,
+ MemgraphInstanceDocker
)
-for result in results:
- print(result["from"])
- print(result["to"])
+memgraph_instance = MemgraphInstanceDocker(
+ docker_image=DockerImage.MEMGRAPH, docker_image_tag="latest", host="0.0.0.0", port=7687
+)
+memgraph = memgraph_instance.start_and_connect(restart=False)
+
+list(memgraph.execute_and_fetch("RETURN 'Memgraph is running' AS result"))[0]["result"]
```
-An example using the `Node` and `Relationship` classes:
+**Manage Memgraph binary instance**
```python
-from gqlalchemy import Memgraph, Node, Relationship, match, Field
+from gqlalchemy.instance_runner import MemgraphInstanceBinary
+
+memgraph_instance = MemgraphInstanceBinary(
+ host="0.0.0.0", port=7698, binary_path="/usr/lib/memgraph/memgraph", user="memgraph"
+)
+memgraph = memgraph_instance.start_and_connect(restart=False)
-memgraph = Memgraph("127.0.0.1", 7687)
+list(memgraph.execute_and_fetch("RETURN 'Memgraph is running' AS result"))[0]["result"]
+```
+
+
+🔫 Manage database triggers
+
-class User(Node):
- id: int = Field(index=True, exist=True, unique=True, db=memgraph)
+Because Memgraph supports database triggers on `CREATE`, `UPDATE` and `DELETE` operations, GQLAlchemy also implements a simple interface for maintaining these triggers.
+```python
+from gqlalchemy import Memgraph, MemgraphTrigger
+from gqlalchemy.models import (
+ TriggerEventType,
+ TriggerEventObject,
+ TriggerExecutionPhase,
+)
-class Follows(Relationship, type="FOLLOWS"):
- pass
+db = Memgraph()
+trigger = MemgraphTrigger(
+ name="ratings_trigger",
+ event_type=TriggerEventType.CREATE,
+ event_object=TriggerEventObject.NODE,
+ execution_phase=TriggerExecutionPhase.AFTER,
+ statement="UNWIND createdVertices AS node SET node.created_at = LocalDateTime()",
+)
-u1 = User(id=1).save(memgraph)
-u2 = User(id=2).save(memgraph)
-r = Follows(_start_node_id=u1._id, _end_node_id=u2._id).save(memgraph)
-
-result = list(
- match(memgraph.new_connection())
- .node(variable="a")
- .to(variable="r")
- .node(variable="b")
- .where("a.id", "=", u1.id)
- .or_where("b.id", "=", u2.id)
- .return_()
- .execute()
-)[0]
-
-print(result["a"])
-print(result["b"])
-print(result["r"])
+db.create_trigger(trigger)
+triggers = db.get_triggers()
+print(triggers)
```
+
+
+
+💽 On-disk storage
+
+
+Since Memgraph is an in-memory graph database, the GQLAlchemy library provides an on-disk storage solution for large properties not used in graph algorithms. This is useful when nodes or relationships have metadata that doesn’t need to be used in any of the graph algorithms that need to be carried out in Memgraph, but can be fetched after. Learn all about it in the [on-disk storage how-to guide](https://memgraph.com/docs/gqlalchemy/how-to-guides/on-disk-storage).
+
+
+
+
+If you want to learn more about OGM, query builder, managing streams, importing data from different sources, managing Memgraph instances, managing database triggers and using on-disk storage, check out the GQLAlchemy [how-to guides](https://memgraph.com/docs/gqlalchemy/how-to-guides).
## Development (how to build)
```
From bde84c4b0b0b2abc33dc2cd0160918eb74c07b1d Mon Sep 17 00:00:00 2001
From: Bruno Sacaric
Date: Wed, 25 May 2022 15:04:22 +0200
Subject: [PATCH 3/3] changed string variables for Blob and S3 kwargs
---
gqlalchemy/loaders.py | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/gqlalchemy/loaders.py b/gqlalchemy/loaders.py
index 2b2ac70e..bf4ff137 100644
--- a/gqlalchemy/loaders.py
+++ b/gqlalchemy/loaders.py
@@ -52,15 +52,15 @@
FEATHER_EXTENSION = "feather"
ARROW_EXTENSION = "arrow"
-BLOB_ACCOUNT_NAME = "blob_account_name"
-BLOB_ACCOUNT_KEY = "blob_account_key"
-BLOB_SAS_TOKEN = "blob_sas_token"
+BLOB_ACCOUNT_NAME = "account_name"
+BLOB_ACCOUNT_KEY = "account_key"
+BLOB_SAS_TOKEN = "sas_token"
BLOB_CONTAINER_NAME_KEY = "container_name"
-S3_REGION = "s3_region"
-S3_ACCESS_KEY = "s3_access_key"
-S3_SECRET_KEY = "s3_secret_key"
-S3_SESSION_TOKEN = "s3_session_token"
+S3_REGION = "region"
+S3_ACCESS_KEY = "access_key"
+S3_SECRET_KEY = "secret_key"
+S3_SESSION_TOKEN = "session_token"
S3_BUCKET_NAME_KEY = "bucket_name"
LOCAL_STORAGE_PATH = "local_storage_path"
@@ -209,10 +209,10 @@ def __init__(self, bucket_name: str, **kwargs):
bucket_name: Name of the bucket on S3 from which to read the data
Kwargs:
- s3_access_key: S3 access key.
- s3_secret_key: S3 secret key.
- s3_region: S3 region.
- s3_session_token: S3 session token (Optional).
+ access_key: S3 access key.
+ secret_key: S3 secret key.
+ region: S3 region.
+ session_token: S3 session token (Optional).
Raises:
KeyError: kwargs doesn't contain necessary fields.
@@ -244,9 +244,9 @@ def __init__(self, container_name: str, **kwargs) -> None:
container_name: Name of the Blob container storing data.
Kwargs:
- blob_account_name: Account name from Azure Blob.
- blob_account_key: Account key for Azure Blob (Optional - if using sas_token).
- blob_sas_token: Shared access signature token for authentification (Optional).
+ account_name: Account name from Azure Blob.
+ account_key: Account key for Azure Blob (Optional - if using sas_token).
+ sas_token: Shared access signature token for authentication (Optional).
Raises:
KeyError: kwargs doesn't contain necessary fields.