diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..8ec94e21 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,64 @@ +FROM ubuntu:16.04 +MAINTAINER Subramanyam Makam (makam.subramanyam.code@gmail.com) + +RUN apt-get update -y && \ + apt-get install -y aptitude && \ + aptitude install -y build-essential checkinstall software-properties-common python-software-properties && \ + aptitude install -y wget git libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev && \ + mkdir ~/Downloads && \ + cd ~/Downloads/ && \ + wget https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz && \ + tar -xvf Python-2.7.13.tgz && \ + cd Python-2.7.13 && \ + ./configure && \ + make && \ + make install && \ + aptitude install -y nodejs nodejs-legacy npm && \ + npm install -g bower && \ + cd && \ + git config --global user.email "makam.subramanyam.code@gmail.com" && \ + git config --global user.name "bruce-wayne99" && \ + aptitude update && \ + aptitude install -y postgresql postgresql-contrib && \ + aptitude install -y python2.7-dev python-pip && \ + pip install virtualenv && \ + aptitude install -y openjdk-8-jre openjdk-8-jdk && \ + echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \ + add-apt-repository -y ppa:webupd8team/java && \ + aptitude update && \ + aptitude install -y oracle-java8-installer && \ + cd ~/Downloads && \ + wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.5.1.tar.gz && \ + tar xvzf elasticsearch-5.5.1.tar.gz && \ + rm -f elasticsearch-5.5.1.tar.gz && \ + mv elasticsearch-5.5.1 /elasticsearch && \ + aptitude install tcl && \ + wget http://download.redis.io/redis-stable.tar.gz && \ + tar xzvf redis-stable.tar.gz && \ + cd redis-stable && \ + make && \ + make install && \ + mkdir /redis && \ + cp redis.conf /redis/ && \ + aptitude update && \ + aptitude install -y zookeeperd && \ + cd / && \ + git clone -b comment-system 
https://github.com/bruce-wayne99/GraphSpace.git && \ + aptitude install -y python-psycopg2 libpq-dev && \ + chmod -R 777 /GraphSpace && \ + rm -r /var/cache/ + +USER postgres + +RUN /etc/init.d/postgresql start && \ + psql -c "CREATE DATABASE test;" && \ + psql -c "ALTER USER postgres with PASSWORD '987654321';" && \ + psql -d test -c "CREATE EXTENSION pg_trgm;" && \ + psql -d test -c "CREATE EXTENSION btree_gin;" + +USER root + +RUN useradd -ms /bin/bash elasticsearch && \ + chmod -R 777 /elasticsearch + +ENV JAVA_HOME /usr/lib/jvm/java-8-oracle diff --git a/applications/comments/__init__.py b/applications/comments/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/applications/comments/admin.py b/applications/comments/admin.py new file mode 100644 index 00000000..8c38f3f3 --- /dev/null +++ b/applications/comments/admin.py @@ -0,0 +1,3 @@ +from django.contrib import admin + +# Register your models here. diff --git a/applications/comments/apps.py b/applications/comments/apps.py new file mode 100644 index 00000000..ff01b775 --- /dev/null +++ b/applications/comments/apps.py @@ -0,0 +1,5 @@ +from django.apps import AppConfig + + +class CommentsConfig(AppConfig): + name = 'comments' diff --git a/applications/comments/controllers.py b/applications/comments/controllers.py new file mode 100644 index 00000000..beb937fe --- /dev/null +++ b/applications/comments/controllers.py @@ -0,0 +1,272 @@ +from sqlalchemy.exc import IntegrityError +import applications.comments.dal as db +import applications.graphs.dal as graphs_db +from graphspace.exceptions import ErrorCodes, BadRequest +from graphspace.wrappers import atomic_transaction + + +@atomic_transaction +def add_comment(request, text=None, graph_id=None, edges=None, nodes=None, is_closed=0, owner_email=None, + parent_comment_id=None, layout_id=None): + """ + + Parameters + ---------- + request: object + HTTP Request. + text: string + Comment message. + graph_id: Integer + Unique ID of each graph. 
+ edges: List + List of edge names on associated with the comment. + nodes: List + List of node names on associated with the comment. + is_closed: Integer + Integer indicating if the comment is closed or not. + owner_email: string + Email ID of user who comment on the graph. + parent_comment_id: Integer + Unique ID of parent comment. + layout_id: Integer + Unique ID of layout. + + Returns + ------- + comment: Object + Comment Object. + + """ + # Construct new comment to add to database + comment = db.add_comment(request.db_session, text=text, owner_email=owner_email, + is_closed=is_closed, parent_comment_id=parent_comment_id) + comment_to_graph = db.add_comment_to_graph(request.db_session, comment_id=comment.id, graph_id=graph_id, + layout_id=layout_id) + # Add comment edges + if edges != None: + for edge_id in edges: + db.add_comment_to_edge(request.db_session, comment_id=comment.id, edge_id=edge_id) + + # Add comment nodes + if nodes != None: + for node_id in nodes: + db.add_comment_to_node(request.db_session, comment_id=comment.id, node_id=node_id) + + db.send_comment(comment, comment_to_graph, event="insert") + return comment + + +def get_comment_by_graph_id(request, graph_id): + + """ + Parameters + ---------- + request: object + HTTP request. + graph_id: Integer + Unique ID of graph. + + Returns + ------- + return value: Object. + Comment Object. + + """ + return db.get_comment_by_graph_id(request.db_session, graph_id=graph_id) + + +def get_nodes_by_comment_id(request, comment_id): + """ + + Parameters + ---------- + request: object + HTTP request. + comment_id: Integer + Unique ID of comment. + + Returns + ------- + return value: List. + List of Node Objects. + + """ + return db.get_nodes_by_comment_id(request.db_session, comment_id=comment_id) + + +def get_edges_by_comment_id(request, comment_id): + """ + + Parameters + ---------- + request: object + HTTP request. + comment_id: Integer + Unique ID of comment. + + Returns + ------- + return value: List. 
+ List of Edge Objects. + + """ + return db.get_edges_by_comment_id(request.db_session, comment_id=comment_id) + + +def get_comment_by_id(request, comment_id): + """ + + Parameters + ---------- + request: object + HTTP request. + comment_id: Integer + Unique ID of comment. + + Returns + ------- + return value: object. + Comment Object. + + """ + return db.get_comment_by_id(request.db_session, id=comment_id) + + +def get_comment_to_graph(request, comment_id): + """ + + Parameters + ---------- + request: object + HTTP request. + comment_id: Integer + Unique ID of comment. + + Returns + ------- + return value: object. + Comment Object. + + """ + return db.get_comment_to_graph(request.db_session, id=comment_id) + + +@atomic_transaction +def edit_comment(request, comment_id=None, text=None, is_closed=None): + """ + + Parameters + ---------- + request: object + HTTP request. + comment_id: Integer + Unique ID of comment. + text: String + New comment message. + is_closed: Integer + Indicates if the comment is closed or not. + + Returns + ------- + return value: object + Edited Comment Object. + + """ + updated_comment = {} + + # Check if field is present or not. + if is_closed != None: + updated_comment['is_closed'] = is_closed + if text != None: + updated_comment['text'] = text + return db.edit_comment(request.db_session, id=comment_id, updated_comment=updated_comment) + + # # Only top comment in a comment thread can be resolved. + comment = db.get_comment_by_id(request.db_session, id=comment_id) + if comment.serialize()['is_closed'] == 0 and is_closed == 1 and comment.serialize()[ + 'parent_comment_id'] != None: + raise Exception('Reply comments can not be resolved') + + return db.update_comment(request.db_session, id=comment_id, updated_comment=updated_comment) + + +@atomic_transaction +def delete_comment(request, id=None): + """ + + Parameters + ---------- + request: object + HTTP request. + id: Integer + Unique ID of comment. 
+ + Returns + ------- + return value: object + Deleted Comment Object. + + """ + return db.delete_comment(request.db_session, id=id) + + +def is_user_authorized_to_update_comment(request, username, comment_id): + """ + + Parameters + ---------- + request: object + HTTP request. + username: string + Email ID of user. + comment_id: integer + Unique ID of comment. + + Returns + ------- + is_authorized: bool + Returns True if the user is authorized to update comment else False. + + """ + is_authorized = False + comment = db.get_comment_by_id(request.db_session, comment_id) + + if comment is not None: # Comment exists + if comment.owner_email == username: + is_authorized = True + + return is_authorized + + +def is_user_authorized_to_delete_comment(request, username, comment_id): + """ + + Parameters + ---------- + request: object + HTTP request. + username: string + Email ID of user. + comment_id: integer + Unique ID of comment. + + Returns + ------- + is_authorized: bool + Returns True if the user is authorized to delete comment else False. 
+ + """ + is_authorized = False + + comment = db.get_comment_by_id(request.db_session, comment_id) + if comment is not None: + comment_to_graph = db.get_comment_to_graph(request.db_session, comment_id) + graph = graphs_db.get_graph_by_id(request.db_session, comment_to_graph.graph_id) + + if comment is not None: # Comment exists + if comment.owner_email == username: + is_authorized = True + elif graph.owner_email == username: + is_authorized = True + + return is_authorized diff --git a/applications/comments/dal.py b/applications/comments/dal.py new file mode 100644 index 00000000..aaa8187b --- /dev/null +++ b/applications/comments/dal.py @@ -0,0 +1,375 @@ +from sqlalchemy import and_, or_, desc, asc, event +from sqlalchemy.orm import joinedload, subqueryload +from graphspace.wrappers import with_session +from applications.comments.models import * +from applications.graphs.dal import * +from applications.discussions.dal import * +from applications.users.dal import * +from applications.users.models import * +import graphspace.signals as socket +from graphspace.database import * + + +@with_session +def add_comment(db_session, text, owner_email=None, is_closed=0, parent_comment_id=None): + """ + + Parameters + ---------- + db_session: Object + Database session. + text: string + Comment message. + owner_email: string + Email ID of user who comment on the graph. + is_closed: Integer + Integer indicating if the comment is closed or not. + parent_comment_id:Integer + Unique ID of parent comment. + + Returns + ------- + comment: Object + Comment Object. + + """ + comment = Comment(text=text, owner_email=owner_email, is_closed=is_closed, parent_comment_id=parent_comment_id) + db_session.add(comment) + return comment + + +@with_session +def add_comment_to_graph(db_session, comment_id, graph_id, layout_id): + """ + + Parameters + ---------- + db_session: Object + Database session. + comment_id: Integer + Unique ID of comment. + graph_id: Integer + Unique ID of graph. 
+ layout_id: Integer + Unique ID of layout. + + Returns + ------- + comment_to_graph: Object. + CommentToGraph Object. + + """ + comment_to_graph = CommentToGraph(comment_id=comment_id, graph_id=graph_id, layout_id=layout_id) + db_session.add(comment_to_graph) + return comment_to_graph + + +@with_session +def add_comment_to_edge(db_session, comment_id, edge_id): + """ + + Parameters + ---------- + db_session: Object + Database session. + comment_id: Integer + Unique ID of comment. + edge_id: Integer + Unique ID of edge. + + Returns + ------- + comment_to_edge: Object. + CommentToEdge Object. + + """ + comment_to_edge = CommentToEdge(comment_id=comment_id, edge_id=edge_id) + db_session.add(comment_to_edge) + return comment_to_edge + + +@with_session +def add_comment_to_node(db_session, comment_id, node_id): + """ + + Parameters + ---------- + db_session: Object + Database session. + comment_id: Integer + Unique ID of comment. + node_id: Integer + Unique ID of node. + + Returns + ------- + comment_to_node: Object + CommentToNode Object + + """ + comment_to_node = CommentToNode(comment_id=comment_id, node_id=node_id) + db_session.add(comment_to_node) + return comment_to_node + + +@with_session +def get_comment_by_graph_id(db_session, graph_id): + """ + + Parameters + ---------- + db_session: Object + Database session. + graph_id: Integer + Unique ID of graph. + + Returns + ------- + return value: tuple + Count, List of comments associated with the graph. + + """ + query = db_session.query(Comment) + query = query.filter(CommentToGraph.graph_id == graph_id) + query = query.filter(Comment.id == CommentToGraph.comment_id) + query2 = db_session.query(Comment).filter(Comment.parent_comment_id == CommentToGraph.graph_id) + query3 = query.union(query2) + + return query3.count(), query3.all() + + +@with_session +def get_comment_by_id(db_session, id): + """ + + Parameters + ---------- + db_session: object + Database session. + id: Integer + Unique ID of comment. 
+ + Returns + ------- + comment: Object + Comment Object. + + """ + comment = db_session.query(Comment).filter(Comment.id == id).one_or_none() + return comment + + +@with_session +def get_comment_to_graph(db_session, id): + """ + + Parameters + ---------- + db_session: object + Database session. + id: Integer + Unique ID of comment. + + Returns + ------- + comment: Object + Comment Object. + + """ + comment_to_graph = db_session.query(CommentToGraph).filter(CommentToGraph.comment_id == id).one_or_none() + return comment_to_graph + + +@with_session +def get_user_emails_by_graph_id(db_session, graph_id): + """ + + Parameters + ---------- + db_session: object + Database session. + graph_id: Integer + Unique ID of the graph. + + Returns + ------- + return value: List + List of all email IDs who have permission to read the graph. + + """ + query = db_session.query(User, GroupToGraph, GroupToUser) + query = query.filter(GroupToGraph.graph_id == graph_id) + query = query.filter(GroupToUser.group_id == GroupToGraph.group_id) + query = query.filter(User.id == GroupToUser.user_id) + return query.all() + + +@with_session +def get_nodes_by_comment_id(db_session, comment_id): + """ + + Parameters + ---------- + db_session: Object + Database session. + comment_id: Integer + Unique ID of the comment. + + Returns + ------- + return value: List + List of all nodes associated with the given comment. + + """ + query = db_session.query(Comment, CommentToNode, Node) + query = query.filter(comment_id == CommentToNode.comment_id) + query = query.filter(CommentToNode.node_id == Node.id) + return query.all() + + +@with_session +def get_edges_by_comment_id(db_session, comment_id): + """ + + Parameters + ---------- + db_session: Object + Database session. + comment_id: Integer + Unique ID of the comment. + + Returns + ------- + return value: List + List of all edges associated with the given comment. 
+ + """ + query = db_session.query(Comment, CommentToEdge, Edge) + query = query.filter(comment_id == CommentToEdge.comment_id) + query = query.filter(CommentToEdge.edge_id == Edge.id) + return query.all() + + +@with_session +def get_owner_email_by_graph_id(db_session, graph_id): + """ + + Parameters + ---------- + db_session: Object + Database session. + graph_id: Integer + Unique ID of graph. + + Returns + ------- + return value: List + List of all User Objects. + + """ + query = db_session.query(User, Graph) + query = query.filter(User.email == Graph.owner_email) + return query.all() + + +@with_session +def update_comment(db_session, id, updated_comment): + """ + + Parameters + ---------- + db_session: Object + Database session. + id: Integer + Unique ID of comment. + updated_comment: dict + Dict containing key, value pairs of updated_comment. + + Returns + ------- + comment: Object + Updated Comment Object. + + """ + comment = db_session.query(Comment).filter(Comment.id == id).one_or_none() + if updated_comment['is_closed'] == 1: + query = db_session.query(Comment).filter(Comment.parent_comment_id == id).all() + for ele in query: + for (key, value) in updated_comment.items(): + setattr(ele, key, value) + elif updated_comment['is_closed'] == 0: + query = db_session.query(Comment).filter(Comment.parent_comment_id == id).all() + for ele in query: + for (key, value) in updated_comment.items(): + setattr(ele, key, value) + for (key, value) in updated_comment.items(): + setattr(comment, key, value) + comment_to_graph = get_comment_to_graph(Database().session(), comment.id) + send_comment(comment, comment_to_graph, event="update") + return comment + +@with_session +def edit_comment(db_session, id, updated_comment): + """ + + Parameters + ---------- + db_session: Object + Database session. + id: Integer + Unique ID of comment. + updated_comment: dict + Dict containing key, value pairs of updated_comment. + + Returns + ------- + comment: Object + Updated Comment Object. 
+ + """ + comment = db_session.query(Comment).filter(Comment.id == id).one_or_none() + for (key, value) in updated_comment.items(): + setattr(comment, key, value) + + comment_to_graph = get_comment_to_graph(Database().session(), comment.id) + send_comment(comment, comment_to_graph, event="update") + return comment + +@with_session +def delete_comment(db_session, id): + """ + + Parameters + ---------- + db_session: Object + Database session. + id: Integer + Unique ID of comment. + + Returns + ------- + comment: Object + Deleted Comment Object. + + """ + comment = db_session.query(Comment).filter(Comment.id == id).one_or_none() + for reaction in comment.reactions: + db_session.delete(reaction) + comment_to_graph = get_comment_to_graph(db_session, comment.id) + send_comment(comment, comment_to_graph, event="delete") + query = db_session.query(Comment).filter(Comment.parent_comment_id == id).all() + for ele in query: + for reaction in ele.reactions: + db_session.delete(reaction) + db_session.delete(ele) + db_session.delete(comment) + return comment + + +def send_comment(comment, comment_to_graph, event): + + users_list = get_user_emails_by_graph_id(Database().session(), comment_to_graph.graph_id) + users_list = [ele[0] for ele in users_list] + owner_list = get_owner_email_by_graph_id(Database().session(), comment_to_graph.graph_id) + owner_list = [ele[0] for ele in owner_list] + socket.send_comment(comment=comment, type="private", users=owner_list + users_list, event=event) diff --git a/applications/comments/migrations/__init__.py b/applications/comments/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/applications/comments/models.py b/applications/comments/models.py new file mode 100644 index 00000000..d67ed377 --- /dev/null +++ b/applications/comments/models.py @@ -0,0 +1,181 @@ +from __future__ import unicode_literals + +import json +from applications.users.models import * +from applications.graphs.models import * +from django.conf 
import settings +from graphspace.mixins import * +from sqlalchemy import ForeignKeyConstraint, text, Enum, Boolean +Base = settings.BASE + + +# ================== Table Definitions =================== # + +class CommentToGraph(TimeStampMixin, Base): + __tablename__ = 'comment_to_graph' + + comment_id = Column(Integer, ForeignKey('comment.id', ondelete="CASCADE", onupdate="CASCADE"), primary_key=True) + + graph_id = Column(Integer, ForeignKey('graph.id', ondelete="CASCADE", onupdate="CASCADE"), nullable=False) + graph = relationship("Graph", foreign_keys=[graph_id], back_populates="comments", uselist=False) + + layout_id = Column(Integer, ForeignKey('layout.id', ondelete="CASCADE", onupdate="CASCADE"),nullable=True) + layout = relationship("Layout", foreign_keys=[layout_id], back_populates="comments", uselist=False) + + indices = () + constraints = () + + @declared_attr + def __table_args__(cls): + args = cls.constraints + cls.indices + return args + + def serialize(cls, **kwargs): + return { + 'comment_id': cls.comment_id, + 'graph_id': cls.graph_id, + 'layout_id':cls.layout_id, + 'created_at': cls.created_at.isoformat(), + 'updated_at': cls.updated_at.isoformat() + } + +class Comment(IDMixin, TimeStampMixin, Base): + __tablename__ = 'comment' + + text = Column(String, nullable=False) + is_closed = Column(Integer, nullable=False, default=0) + + owner_email = Column(String, ForeignKey('user.email', ondelete="CASCADE", onupdate="CASCADE"), nullable=False) + owner = relationship("User", back_populates="owned_comments", uselist=False) + + parent_comment_id = Column(Integer, ForeignKey('comment.id', ondelete="CASCADE", onupdate="CASCADE"), nullable=True) + + nodes = association_proxy('associated_nodes', 'node') + edges = association_proxy('associated_edges', 'edge') + discussion = association_proxy('associated_discussion', 'discussion') + reactions = association_proxy('associated_reactions', 'reaction') + + constraints = () + indices = () + + @declared_attr + def 
__table_args__(cls): + args = cls.constraints + cls.indices + return args + + def serialize(cls, **kwargs): + return { + 'id': cls.id, + 'owner_email': cls.owner_email, + 'text': cls.text, + 'is_closed': cls.is_closed, + 'parent_comment_id': cls.parent_comment_id, + 'group_id': [discussion.group_id for discussion in cls.discussion], + 'discussion_id': [discussion.id for discussion in cls.discussion], + 'nodes': [node.name for node in cls.nodes], + 'edges': [edge.name for edge in cls.edges], + 'reaction_content': [reaction.content for reaction in cls.reactions], + 'reaction_owner': [reaction.owner_email for reaction in cls.reactions], + 'created_at': cls.created_at.isoformat(), + 'updated_at': cls.updated_at.isoformat() + } + +class CommentToNode(TimeStampMixin, Base): + __tablename__ = 'comment_to_node' + + comment_id = Column(Integer, ForeignKey('comment.id', ondelete="CASCADE", onupdate="CASCADE"), primary_key=True) + node_id = Column(Integer, ForeignKey('node.id', ondelete="CASCADE", onupdate="CASCADE"), primary_key=True) + + comment = relationship("Comment", backref=backref("associated_nodes", cascade="all, delete-orphan")) + node = relationship("Node", backref=backref("associated_comments", cascade="all, delete-orphan")) + + indices = (Index('comment2node_idx_comment_id_node_id', 'comment_id', 'node_id'),) + constraints = () + + @declared_attr + def __table_args__(cls): + args = cls.constraints + cls.indices + return args + + def serialize(cls, **kwargs): + return { + 'comment_id': cls.comment_id, + 'node_id': cls.node_id, + 'created_at': cls.created_at.isoformat(), + 'updated_at': cls.updated_at.isoformat() + } + +class CommentToEdge(TimeStampMixin, Base): + __tablename__ = 'comment_to_edge' + + comment_id = Column(Integer, ForeignKey('comment.id', ondelete="CASCADE", onupdate="CASCADE"), primary_key=True) + edge_id = Column(Integer, ForeignKey('edge.id', ondelete="CASCADE", onupdate="CASCADE"), primary_key=True) + + comment = relationship("Comment", 
backref=backref("associated_edges", cascade="all, delete-orphan")) + edge = relationship("Edge", backref=backref("associated_comments", cascade="all, delete-orphan")) + + indices = (Index('comment2edge_idx_comment_id_edge_id', 'comment_id', 'edge_id'),) + constraints = () + + @declared_attr + def __table_args__(cls): + args = cls.constraints + cls.indices + return args + + def serialize(cls, **kwargs): + return { + 'comment_id': cls.comment_id, + 'edge_id': cls.edge_id, + 'created_at': cls.created_at.isoformat(), + 'updated_at': cls.updated_at.isoformat() + } +class CommentToReaction(TimeStampMixin, Base): + __tablename__ = 'comment_to_reaction' + + comment_id = Column(Integer, ForeignKey('comment.id', ondelete="CASCADE", onupdate="CASCADE"), primary_key=True) + reaction_id = Column(Integer, ForeignKey('reaction.id', ondelete="CASCADE", onupdate="CASCADE"), primary_key=True) + + comment = relationship("Comment", backref=backref("associated_reactions", cascade="all, delete-orphan")) + reaction = relationship("Reaction", backref=backref("associated_comments", cascade="all, delete-orphan")) + + indices = (Index('comment2reaction_idx_comment_id_reaction_id', 'comment_id', 'reaction_id'),) + constraints = () + + @declared_attr + def __table_args__(cls): + args = cls.constraints + cls.indices + return args + + def serialize(cls, **kwargs): + return { + 'comment_id': cls.comment_id, + 'reaction_id': cls.reaction_id, + 'created_at': cls.created_at.isoformat(), + 'updated_at': cls.updated_at.isoformat() + } +class Reaction(IDMixin, TimeStampMixin, Base): + __tablename__ = 'reaction' + + content = Column(Integer, nullable=False) + + owner_email = Column(String, ForeignKey('user.email', ondelete="CASCADE", onupdate="CASCADE"), nullable=False) + owner = relationship("User", back_populates="owned_reactions", uselist=False) + + comments = association_proxy('associated_comments', 'comment') + + constraints = () + indices = () + + @declared_attr + def __table_args__(cls): + args = 
cls.constraints + cls.indices + return args + + def serialize(cls, **kwargs): + return { + 'id': cls.id, + 'owner_email': cls.owner_email, + 'content': cls.content, + 'created_at': cls.created_at.isoformat(), + 'updated_at': cls.updated_at.isoformat() + } \ No newline at end of file diff --git a/applications/discussions/__init__.py b/applications/discussions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/applications/discussions/admin.py b/applications/discussions/admin.py new file mode 100644 index 00000000..8c38f3f3 --- /dev/null +++ b/applications/discussions/admin.py @@ -0,0 +1,3 @@ +from django.contrib import admin + +# Register your models here. diff --git a/applications/discussions/apps.py b/applications/discussions/apps.py new file mode 100644 index 00000000..bce76095 --- /dev/null +++ b/applications/discussions/apps.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals + +from django.apps import AppConfig + + +class DiscussionsConfig(AppConfig): + name = 'discussions' diff --git a/applications/discussions/controllers.py b/applications/discussions/controllers.py new file mode 100644 index 00000000..d2c3dafd --- /dev/null +++ b/applications/discussions/controllers.py @@ -0,0 +1,115 @@ +from sqlalchemy.exc import IntegrityError +import applications.discussions.dal as db +from graphspace.exceptions import ErrorCodes, BadRequest +from graphspace.wrappers import atomic_transaction + + +@atomic_transaction +def add_discussion(request, topic=None, description=None, owner_email=None, group_id=None, is_closed=0): + # Construct new discussion to add to database + discussion = db.add_discussion(request.db_session, topic=topic, description=description, + owner_email=owner_email, + group_id=group_id, is_closed=is_closed) + return discussion + + +def get_discussions(request, group_id=None, keyword=None, limit=20, offset=0, order='desc', sort='created_at'): + if group_id is None: + raise Exception("Atleast one group id is required.") + + if 
sort == 'topic': + sort_attr = db.Discussion.topic + else: + sort_attr = db.Discussion.created_at + + if order == 'desc': + orber_by = db.asc(sort_attr) + else: + orber_by = db.desc(sort_attr) + + total, discussions = db.get_discussions(request.db_session, + group_id=group_id, + keyword=keyword, + limit=limit, + offset=offset, + order_by=orber_by) + + return total, discussions + + +def update_discussion(request, discussion_id, is_closed): + discussion = {} + if is_closed is not None: + if is_closed == u'1': + discussion['is_closed'] = is_closed + return db.close_discussion(request.db_session, id=discussion_id, updated_discussion=discussion) + if is_closed == u'0': + discussion['is_closed'] = is_closed + return db.reopen_discussion(request.db_session, id=discussion_id, updated_discussion=discussion) + + +def delete_discussion_by_id(request, discussion_id): + db.delete_discussion(request.db_session, id=discussion_id) + return + + +def get_discussion_by_id(request, discussion_id): + return db.get_discussion_by_id(request.db_session, id=discussion_id) + + +def is_user_authorized_to_delete_discussion(request, username, discussion_id): + is_authorized = False + + discussion = db.get_discussion_by_id(request.db_session, discussion_id) + + if discussion is not None: + if discussion.owner_email == username: + is_authorized = True + + return is_authorized + + +def add_discussion_comment(request, discussion_id=None, text=None, owner_email=None, is_closed=0): + # Construct new comment to add to database + comment = db.add_discussion_comment(request.db_session, text=text, owner_email=owner_email, is_closed=is_closed) + comment_to_discussion = db.add_comment_to_discussion(request.db_session, discussion_id=discussion_id, + comment_id=comment.id) + db.send_comment(comment, event="insert_comment") + return comment + + +def search_comments_by_discussion_id(request, group_id=None, discussion_id=None): + if group_id is None: + raise Exception("Atleast one group id is required.") + if 
discussion_id is None: + raise Exception("Atleast one discussion id is required.") + return db.get_comments_by_discussion_id(request.db_session, group_id=group_id, discussion_id=discussion_id) + + +def update_comment(request, text, comment_id): + comment = {} + if text is not None: + comment['text'] = text + return db.update_comment(request.db_session, id=comment_id, updated_comment=comment) + + +def delete_comment_by_id(request, comment_id): + db.delete_comment(request.db_session, id=comment_id) + return + + +def add_comment_reaction(request, comment_id=None, content=None, owner_email=None): + # Construct new reaction to add to database + reaction = db.add_comment_reaction(request.db_session, content=content, owner_email=owner_email) + comment_to_reaction = db.add_comment_to_reaction(request.db_session, comment_id=comment_id, reaction_id=reaction.id) + return reaction + + +def delete_comment_reaction(request, comment_id=None, content=None, owner_email=None): + reaction = db.delete_comment_reaction(request.db_session, comment_id=comment_id, content=content, + owner_email=owner_email) + + +def get_comment_reactions(request, comment_id=None, content=None): + total, reactions = db.get_comment_reactions(request.db_session, comment_id=comment_id, content=content) + return total, reactions diff --git a/applications/discussions/dal.py b/applications/discussions/dal.py new file mode 100644 index 00000000..41457e64 --- /dev/null +++ b/applications/discussions/dal.py @@ -0,0 +1,281 @@ +from sqlalchemy import and_, or_, desc, asc, event +from sqlalchemy.orm import joinedload, subqueryload +from graphspace.wrappers import with_session +from applications.discussions.models import * +from applications.users.models import * +from applications.comments.models import * +from applications.users.dal import * +import graphspace.signals as socket +from graphspace.database import * + + +@with_session +def add_discussion(db_session, topic, description, owner_email, group_id, 
is_closed=0): + """ + Update discussion row entry. + :param db_session: Database session. + :param topic: Discussion topic + :param description: Discussion description + :param owner_email: Discussion owner_email + :param group_id: Unique ID of the discussion + :param is_closed: value for discussion status + :return: Discussion if id exists else None + """ + discussion = Discussion(topic=topic, description=description, owner_email=owner_email, group_id=group_id, + is_closed=is_closed) + group = get_group(db_session, group_id) + group.group_discussions.append(discussion) + db_session.add(discussion) + return discussion + + +@with_session +def get_discussions(db_session, group_id, keyword, limit, offset, order_by=desc(Discussion.created_at)): + """ + Update discussion row entry. + :param db_session: Database session. + :param keyword: Search keyword + :param group_id: Unique ID of the discussion + :return: All related discussions and its count + """ + query = db_session.query(Discussion).filter(Discussion.group_id == group_id) + if order_by is not None: + query = query.order_by(order_by) + if keyword is not None: + query1 = query.filter(Discussion.topic.ilike(keyword)) + query2 = query.filter(Discussion.owner_email.ilike(keyword)) + query3 = query.filter(Discussion.description.ilike(keyword)) + query4 = query1.union(query2) + query = query3.union(query4) + total = query.count() + + if offset is not None and limit is not None: + query = query.limit(limit).offset(offset) + + return total, query.all() + + +@with_session +def close_discussion(db_session, id, updated_discussion): + """ + Update discussion row entry. + :param db_session: Database session. 
@with_session
def close_discussion(db_session, id, updated_discussion):
    """Apply `updated_discussion` fields (e.g. is_closed=1) to a discussion
    and all of its comments, then notify the group.

    :param db_session: Database session.
    :param id: Unique ID of the discussion.
    :param updated_discussion: Dict of column -> new value.
    :return: Discussion if id exists else None.
    """
    return _apply_discussion_update(db_session, id, updated_discussion, event="close")


@with_session
def reopen_discussion(db_session, id, updated_discussion):
    """Apply `updated_discussion` fields (e.g. is_closed=0) to a discussion
    and all of its comments, then notify the group.

    :param db_session: Database session.
    :param id: Unique ID of the discussion.
    :param updated_discussion: Dict of column -> new value.
    :return: Discussion if id exists else None.
    """
    return _apply_discussion_update(db_session, id, updated_discussion, event="reopen")


def _apply_discussion_update(db_session, id, updated_discussion, event):
    """Shared body of close_discussion/reopen_discussion: copy the updated
    fields onto the discussion and every comment linked to it, then emit the
    given socket event to the group's members."""
    discussion = db_session.query(Discussion).filter(Discussion.id == id).one_or_none()
    if discussion is None:
        # Honour the documented "else None" contract instead of raising
        # AttributeError from setattr(None, ...).
        return None
    comments = db_session.query(Comment) \
        .options(joinedload('associated_discussion')) \
        .filter(Comment.discussion.any(CommentToDiscussion.discussion_id == id))
    for key, value in updated_discussion.items():
        setattr(discussion, key, value)
    for comment in comments:
        # Comments mirror the discussion's status fields (e.g. is_closed).
        for key, value in updated_discussion.items():
            setattr(comment, key, value)
    send_discussion(discussion, event=event)
    return discussion
@with_session
def delete_discussion(db_session, id):
    """Delete a discussion together with its comments and their reactions.

    :param db_session: Database session.
    :param id: Unique ID of the discussion.
    :return: Deleted Discussion row, or None when no such discussion exists.
    """
    discussion = db_session.query(Discussion).filter(Discussion.id == id).one_or_none()
    if discussion is None:
        # db_session.delete(None) would raise; nothing to delete.
        return None
    comments = db_session.query(Comment) \
        .options(joinedload('associated_discussion')) \
        .filter(Comment.discussion.any(CommentToDiscussion.discussion_id == id)).all()
    for comment in comments:
        for reaction in comment.reactions:
            db_session.delete(reaction)
        db_session.delete(comment)
    db_session.delete(discussion)
    # The delete is pending until flush/commit, so the row's attributes are
    # still readable for the notification payload.
    send_discussion(discussion, event="delete_discussion")
    return discussion


@with_session
def add_discussion_comment(db_session, text, owner_email, is_closed=0, parent_comment_id=None):
    """Insert a new comment row (linking it to a discussion is a separate step,
    see add_comment_to_discussion).

    :param db_session: Database session.
    :param text: Comment message.
    :param owner_email: Email of the comment owner.
    :param is_closed: 0 = open (default), non-zero = closed.
    :param parent_comment_id: Unique ID of the parent comment, or None.
    :return: Newly created Comment row.
    """
    comment = Comment(text=text, owner_email=owner_email, is_closed=is_closed,
                      parent_comment_id=parent_comment_id)
    db_session.add(comment)
    return comment


@with_session
def add_comment_to_discussion(db_session, discussion_id, comment_id):
    """Link a comment to a discussion via the comment_to_discussion table.

    :param db_session: Database session.
    :param discussion_id: Unique ID of the discussion.
    :param comment_id: Unique ID of the comment.
    :return: Newly created CommentToDiscussion link row.
    """
    link = CommentToDiscussion(discussion_id=discussion_id, comment_id=comment_id)
    db_session.add(link)
    return link
@with_session
def get_discussion_by_id(db_session, id):
    """Fetch a discussion by its primary key.

    :param db_session: Database session.
    :param id: Unique ID of the discussion.
    :return: Discussion if id exists else None.
    """
    return db_session.query(Discussion).filter(Discussion.id == id).one_or_none()


@with_session
def get_comments_by_discussion_id(db_session, group_id, discussion_id):
    """Get comments by discussion id.

    :param db_session: Database session.
    :param group_id: Unique ID of the group (currently unused; kept for
        interface compatibility with callers).
    :param discussion_id: Unique ID of the discussion.
    :return: (count, list of Comment rows)
    """
    query = db_session.query(Comment) \
        .options(joinedload('associated_discussion')) \
        .filter(Comment.discussion.any(CommentToDiscussion.discussion_id == discussion_id))
    return query.count(), query.all()


@with_session
def update_comment(db_session, id, updated_comment):
    """Update comment row entry.

    :param db_session: Database session.
    :param id: Unique ID of the comment.
    :param updated_comment: Dict of column -> new value.
    :return: Comment if id exists else None.
    """
    comment = db_session.query(Comment).filter(Comment.id == id).one_or_none()
    if comment is None:
        # Honour the documented "else None" contract instead of raising
        # AttributeError from setattr(None, ...).
        return None
    for key, value in updated_comment.items():
        setattr(comment, key, value)
    send_comment(comment, event="update_comment")
    return comment


@with_session
def delete_comment(db_session, id):
    """Delete a comment and all of its reactions.

    :param db_session: Database session.
    :param id: Unique ID of the comment.
    :return: Deleted Comment row, or None when no such comment exists.
    """
    comment = db_session.query(Comment).filter(Comment.id == id).one_or_none()
    if comment is None:
        # db_session.delete(None) would raise; nothing to delete.
        return None
    for reaction in comment.reactions:
        db_session.delete(reaction)
    db_session.delete(comment)
    # The delete is pending until flush, so the row is still readable here.
    send_comment(comment, event="delete_comment")
    return comment
@with_session
def add_comment_reaction(db_session, content, owner_email):
    """Insert a reaction row (linking it to a comment is a separate step,
    see add_comment_to_reaction).

    :param db_session: Database session.
    :param content: Reaction content.
    :param owner_email: Email of the reaction owner.
    :return: Newly created Reaction row.
    """
    reaction = Reaction(content=content, owner_email=owner_email)
    db_session.add(reaction)
    return reaction


@with_session
def add_comment_to_reaction(db_session, comment_id, reaction_id):
    """Link a reaction to a comment via the comment_to_reaction table.

    :param db_session: Database session.
    :param comment_id: Unique ID of the comment.
    :param reaction_id: Unique ID of the reaction.
    :return: Newly created CommentToReaction link row.
    """
    link = CommentToReaction(comment_id=comment_id, reaction_id=reaction_id)
    db_session.add(link)
    return link


@with_session
def delete_comment_reaction(db_session, comment_id, content, owner_email):
    """Delete the reaction on `comment_id` matching content and owner.

    :param db_session: Database session.
    :param comment_id: Unique ID of the comment.
    :param content: Content of the reaction.
    :param owner_email: Email of the reaction owner.
    :return: Deleted Reaction row, or None when no matching reaction exists.
    """
    reaction = db_session.query(Reaction) \
        .options(joinedload('associated_comments')) \
        .filter(Reaction.comments.any(CommentToReaction.comment_id == comment_id)) \
        .filter(Reaction.content == content, Reaction.owner_email == owner_email) \
        .one_or_none()
    if reaction is None:
        # db_session.delete(None) would raise; nothing to delete.
        return None
    db_session.delete(reaction)
    return reaction
@with_session
def get_comment_reactions(db_session, comment_id, content):
    """Return reactions of a given content on a comment.

    :param db_session: Database session.
    :param comment_id: Unique ID of the comment.
    :param content: Content of the reaction.
    :return: (count, list of Reaction rows)
    """
    query = db_session.query(Reaction) \
        .options(joinedload('associated_comments')) \
        .filter(Reaction.comments.any(CommentToReaction.comment_id == comment_id)) \
        .filter(Reaction.content == content)
    return query.count(), query.all()


def send_comment(comment, event):
    """Push a comment event to every member of the owning group.

    NOTE(review): assumes the comment is linked to at least one discussion;
    `associated_discussion[0]` raises IndexError otherwise — confirm callers.
    NOTE(review): `Database().session()` opens a fresh session per call and
    never closes it; consider reusing the request-scoped session.
    """
    group_id = comment.associated_discussion[0].discussion.group_id
    users_list = get_users_by_group(Database().session(), group_id)
    socket.send_discussion(discussion=comment, type="private", users=users_list, event=event)


def send_discussion(discussion, event):
    """Push a discussion event to every member of its group.

    NOTE(review): `Database().session()` opens a fresh session per call and
    never closes it; consider reusing the request-scoped session.
    """
    users_list = get_users_by_group(Database().session(), discussion.group_id)
    socket.send_discussion(discussion=discussion, type="private", users=users_list, event=event)
class CommentToDiscussion(TimeStampMixin, Base):
    """Association table linking comments to discussions (many-to-many)."""
    __tablename__ = 'comment_to_discussion'

    discussion_id = Column(Integer,
                           ForeignKey('discussion.id', ondelete="CASCADE", onupdate="CASCADE"),
                           primary_key=True)
    comment_id = Column(Integer,
                        ForeignKey('comment.id', ondelete="CASCADE", onupdate="CASCADE"),
                        primary_key=True)

    # Deleting either side removes the orphaned link rows.
    discussion = relationship("Discussion",
                              backref=backref("associated_comments", cascade="all, delete-orphan"))
    comment = relationship("Comment",
                           backref=backref("associated_discussion", cascade="all, delete-orphan"))

    indices = (Index('comment2discussion_idx_comment_id_discussion_id', 'comment_id', 'discussion_id'),)
    constraints = ()

    @declared_attr
    def __table_args__(cls):
        return cls.constraints + cls.indices

    def serialize(cls, **kwargs):
        # NOTE(review): first parameter is conventionally `self`; named `cls`
        # only to match the file's other serialize() signatures.
        return {
            'comment_id': cls.comment_id,
            'discussion_id': cls.discussion_id,
            'created_at': cls.created_at.isoformat(),
            'updated_at': cls.updated_at.isoformat()
        }
@with_session
def get_edge_by_name(db_session, graph_id, name):
    """Fetch a single edge by (graph_id, name).

    :param db_session: Database session.
    :param graph_id: Unique ID of the graph.
    :param name: Edge name (unique within a graph).
    :return: Edge if found else None.
    """
    return db_session.query(Edge) \
        .filter(Edge.graph_id == graph_id, Edge.name == name) \
        .one_or_none()


@with_session
def get_node_by_name(db_session, graph_id, name):
    """Fetch a single node by (graph_id, name).

    :param db_session: Database session.
    :param graph_id: Unique ID of the graph.
    :param name: Node name (unique within a graph per the model's constraint).
    :return: Node if found else None.
    """
    return db_session.query(Node) \
        .filter(Node.graph_id == graph_id, Node.name == name) \
        .one_or_none()
graphspace.mixins import * import json @@ -31,6 +32,7 @@ class Graph(IDMixin, TimeStampMixin, Base): cascade="all, delete-orphan") edges = relationship("Edge", back_populates="graph", cascade="all, delete-orphan") nodes = relationship("Node", back_populates="graph", cascade="all, delete-orphan") + comments = relationship("CommentToGraph", back_populates="graph", cascade="all, delete-orphan") groups = association_proxy('shared_with_groups', 'group') tags = association_proxy('graph_tags', 'tag') @@ -90,6 +92,7 @@ class Edge(IDMixin, TimeStampMixin, Base): graph = relationship("Graph", back_populates="edges", uselist=False) head_node = relationship("Node", foreign_keys=[head_node_id], back_populates="source_edges", uselist=False) tail_node = relationship("Node", foreign_keys=[tail_node_id], back_populates="target_edges", uselist=False) + comments = association_proxy('associated_comments', 'comment') constraints = ( UniqueConstraint('graph_id', 'head_node_id', 'tail_node_id', @@ -139,10 +142,10 @@ class Node(IDMixin, TimeStampMixin, Base): graph_id = Column(Integer, ForeignKey('graph.id', ondelete="CASCADE", onupdate="CASCADE"), nullable=False) graph = relationship("Graph", back_populates="nodes", uselist=False) - source_edges = relationship("Edge", foreign_keys="Edge.head_node_id", back_populates="head_node", - cascade="all, delete-orphan") - target_edges = relationship("Edge", foreign_keys="Edge.tail_node_id", back_populates="tail_node", - cascade="all, delete-orphan") + + source_edges = relationship("Edge", foreign_keys="Edge.head_node_id", back_populates="head_node", cascade="all, delete-orphan") + target_edges = relationship("Edge", foreign_keys="Edge.tail_node_id", back_populates="tail_node", cascade="all, delete-orphan") + comments = association_proxy('associated_comments', 'comment') constraints = ( UniqueConstraint('graph_id', 'name', name='_node_uc_graph_id_name'), @@ -210,6 +213,8 @@ class Layout(IDMixin, TimeStampMixin, Base): graph = relationship("Graph", 
foreign_keys=[graph_id], back_populates="layouts", uselist=False) owner = relationship("User", back_populates="owned_layouts", uselist=False) + comments = relationship("CommentToGraph", back_populates="layout", cascade="all, delete-orphan") + default_layout_graph = relationship("Graph", foreign_keys="Graph.default_layout_id", back_populates="default_layout", cascade="all, delete-orphan", diff --git a/applications/graphs/urls.py b/applications/graphs/urls.py index 297f92ab..6659a4af 100644 --- a/applications/graphs/urls.py +++ b/applications/graphs/urls.py @@ -6,6 +6,7 @@ url(r'^graphs/$', views.graphs_page, name='graphs'), url(r'^graphs/(?P[^/]+)$', views.graph_page, name='graph'), + url(r'^graphs/(?P[^/]+)/view_comments$', views.view_comments, name='view_comments'), url(r'^graphs/(?P[^/]+)/(?P[^/]+)$', views.graph_page_by_name, name='graph_by_name'), url(r'^upload$', views.upload_graph_page, name='upload_graph'), @@ -27,6 +28,8 @@ # Graph Layouts url(r'^ajax/graphs/(?P[^/]+)/layouts/$', views.graph_layouts_ajax_api, name='graph_layouts_ajax_api'), url(r'^ajax/graphs/(?P[^/]+)/layouts/(?P[^/]+)$', views.graph_layouts_ajax_api, name='graph_layouts_ajax_api'), + # Graph Comments + url(r'^ajax/graphs/(?P[^/]+)/comments/$', views.graph_comments_ajax_api, name='graph_comments_ajax_api'), # REST APIs Endpoints diff --git a/applications/graphs/views.py b/applications/graphs/views.py index d3fb5d88..8b8502dd 100644 --- a/applications/graphs/views.py +++ b/applications/graphs/views.py @@ -2,6 +2,8 @@ import applications.graphs.controllers as graphs import applications.users.controllers as users +import applications.comments.controllers as comments +import applications.discussions.controllers as discussions import graphspace.authorization as authorization import graphspace.utils as utils from django.conf import settings @@ -15,28 +17,28 @@ def upload_graph_page(request): - context = RequestContext(request, {}) - - if request.method == 'POST': - try: - graph = 
_add_graph(request, graph={ - 'name': request.POST.get('name', None), - 'owner_email': request.POST.get('owner_email', None), - 'is_public': request.POST.get('is_public', None), - 'graph_json': json.loads(request.FILES['graph_file'].read()), - 'style_json': json.loads(request.FILES['style_file'].read()) if 'style_file' in request.FILES else None - }) - context['Success'] = settings.URL_PATH + "graphs/" + str(graph['id']) - except Exception as e: - context['Error'] = str(e) - - return render(request, 'upload_graph/index.html', context) - else: - return render(request, 'upload_graph/index.html', context) + context = RequestContext(request, {}) + + if request.method == 'POST': + try: + graph = _add_graph(request, graph={ + 'name': request.POST.get('name', None), + 'owner_email': request.POST.get('owner_email', None), + 'is_public': request.POST.get('is_public', None), + 'graph_json': json.loads(request.FILES['graph_file'].read()), + 'style_json': json.loads(request.FILES['style_file'].read()) if 'style_file' in request.FILES else None + }) + context['Success'] = settings.URL_PATH + "graphs/" + str(graph['id']) + except Exception as e: + context['Error'] = str(e) + + return render(request, 'upload_graph/index.html', context) + else: + return render(request, 'upload_graph/index.html', context) def graphs_page(request): - """ + """ Wrapper view function for the following pages: /graphs/ @@ -56,17 +58,17 @@ def graphs_page(request): Notes ------ """ - if 'GET' == request.method: - context = RequestContext(request, { - "tags": request.GET.get('tags', '') - }) - return render(request, 'graphs/index.html', context) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like POST, PUT, UPDATE. 
+ if 'GET' == request.method: + context = RequestContext(request, { + "tags": request.GET.get('tags', '') + }) + return render(request, 'graphs/index.html', context) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like POST, PUT, UPDATE. def graph_page_by_name(request, email, graph_name): - """ + """ Redirects to the appropriate graph page. This is only for supporting older URLs. Parameters @@ -76,15 +78,15 @@ def graph_page_by_name(request, email, graph_name): graph_name : string Name of the Graph. """ - graph = graphs.get_graph_by_name(request, owner_email=email, name=graph_name) - if graph is not None: - return redirect('/graphs/' + str(graph.id)) - else: - return redirect('/') + graph = graphs.get_graph_by_name(request, owner_email=email, name=graph_name) + if graph is not None: + return redirect('/graphs/' + str(graph.id)) + else: + return redirect('/') def graph_page(request, graph_id): - """ + """ Wrapper view for the group page. /graphs/ :param request: HTTP GET Request. @@ -94,53 +96,53 @@ def graph_page(request, graph_id): graph_id : string Unique ID of the graph. Required """ - context = RequestContext(request, {}) - authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) - - uid = request.session['uid'] if 'uid' in request.session else None - - context.push({"graph": _get_graph(request, graph_id)}) - context.push({"is_posted_by_public_user": 'public_user' in context["graph"]["owner_email"]}) - context.push({"default_layout_id": str(context["graph"]['default_layout_id']) if context["graph"][ - 'default_layout_id'] else None}) - - default_layout = graphs.get_layout_by_id(request, context["graph"]['default_layout_id']) if context["graph"][ - 'default_layout_id'] is not None else None - - if default_layout is not None and (default_layout.is_shared == 1 or default_layout.owner_email == uid) and request.GET.get( - 'user_layout') is None and request.GET.get('auto_layout') is None: - if '?' 
in request.get_full_path(): - return redirect(request.get_full_path() + '&user_layout=' + context["default_layout_id"]) - else: - return redirect(request.get_full_path() + '?user_layout=' + context["default_layout_id"]) - - context['graph_json_string'] = json.dumps(context['graph']['graph_json']) - context['data'] = {k: json.dumps(v, encoding='ascii') for k,v in context['graph']['graph_json']['data'].items()} - context['style_json_string'] = json.dumps(context['graph']['style_json']) - context['description'] = context['graph']['graph_json']['data']['description'] if 'data' in context[ - 'graph']['graph_json'] and 'description' in context['graph']['graph_json']['data'] else '' - - if 'data' in context['graph']['graph_json'] and 'title' in context['graph']['graph_json']['data']: - context['title'] = context['graph']['graph_json']['data']['title'] - elif 'data' in context['graph']['graph_json'] and 'name' in context['graph']['graph_json']['data']: - context['title'] = context['graph']['graph_json']['data']['name'] - else: - context['title'] = '' - - if uid is not None: - context.push({ - "groups": [utils.serializer(group) for group in - users.get_groups_by_member_id(request, member_id=users.get_user(request, uid).id)], - "shared_groups": - _get_graph_groups(request, graph_id, query={'limit': None, 'offset': None, 'member_email': uid})[ - 'groups'] - }) - - shared_group_ids = [group['id'] for group in context["shared_groups"]] - for group in context['groups']: - group['is_shared'] = 1 if group['id'] in shared_group_ids else 0 - - return render(request, 'graph/index.html', context) + context = RequestContext(request, {}) + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + uid = request.session['uid'] if 'uid' in request.session else None + + context.push({"graph": _get_graph(request, graph_id)}) + context.push({"is_posted_by_public_user": 'public_user' in context["graph"]["owner_email"]}) + context.push({"default_layout_id": 
str(context["graph"]['default_layout_id']) if context["graph"][ + 'default_layout_id'] else None}) + + default_layout = graphs.get_layout_by_id(request, context["graph"]['default_layout_id']) if context["graph"][ + 'default_layout_id'] is not None else None + + if default_layout is not None and ( + default_layout.is_shared == 1 or default_layout.owner_email == uid) and request.GET.get( + 'user_layout') is None and request.GET.get('auto_layout') is None: + if '?' in request.get_full_path(): + return redirect(request.get_full_path() + '&user_layout=' + context["default_layout_id"]) + else: + return redirect(request.get_full_path() + '?user_layout=' + context["default_layout_id"]) + + context['graph_json_string'] = json.dumps(context['graph']['graph_json']) + context['data'] = {k: json.dumps(v, encoding='ascii') for k, v in context['graph']['graph_json']['data'].items()} + context['style_json_string'] = json.dumps(context['graph']['style_json']) + context['description'] = context['graph']['graph_json']['data']['description'] if 'data' in context[ + 'graph']['graph_json'] and 'description' in context['graph']['graph_json']['data'] else '' + + if 'data' in context['graph']['graph_json'] and 'title' in context['graph']['graph_json']['data']: + context['title'] = context['graph']['graph_json']['data']['title'] + elif 'data' in context['graph']['graph_json'] and 'name' in context['graph']['graph_json']['data']: + context['title'] = context['graph']['graph_json']['data']['name'] + else: + context['title'] = '' + + if uid is not None: + context.push({ + "groups": [utils.serializer(group) for group in + users.get_groups_by_member_id(request, member_id=users.get_user(request, uid).id)], + "shared_groups": + _get_graph_groups(request, graph_id, query={'limit': None, 'offset': None, 'member_email': uid})[ + 'groups'] + }) + + shared_group_ids = [group['id'] for group in context["shared_groups"]] + for group in context['groups']: + group['is_shared'] = 1 if group['id'] in 
shared_group_ids else 0 + + return render(request, 'graph/index.html', context) ''' @@ -151,7 +153,7 @@ def graph_page(request, graph_id): @csrf_exempt @is_authenticated() def graphs_rest_api(request, graph_id=None): - """ + """ Handles any request sent to following urls: /api/v1/graphs /api/v1/graphs/ @@ -165,11 +167,11 @@ def graphs_rest_api(request, graph_id=None): response : JSON Response """ - return _graphs_api(request, graph_id=graph_id) + return _graphs_api(request, graph_id=graph_id) def graphs_ajax_api(request, graph_id=None): - """ + """ Handles any request sent to following urls: /ajax/graphs /ajax/graphs/ @@ -183,12 +185,12 @@ def graphs_ajax_api(request, graph_id=None): response : JSON Response """ - return _graphs_api(request, graph_id=graph_id) + return _graphs_api(request, graph_id=graph_id) @csrf_exempt def graphs_advanced_search_ajax_api(request): - """ + """ Handles any request sent to following urls: /ajax/graphs @@ -201,51 +203,51 @@ def graphs_advanced_search_ajax_api(request): response : JSON Response """ - if request.META.get('HTTP_ACCEPT', None) == 'application/json': - if request.method == "POST": - querydict = QueryDict('', mutable=True) - querydict.update(request.GET) - queryparams = querydict - - # Validate search graphs API request - user_role = authorization.user_role(request) - if user_role == authorization.UserRole.LOGGED_IN: - if queryparams.get('owner_email', None) is None \ - and queryparams.get('member_email', None) is None \ - and queryparams.get('is_public', None) != '1': - raise BadRequest(request, error_code=ErrorCodes.Validation.IsPublicNotSet) - if queryparams.get('is_public', None) != '1': - if get_request_user(request) != queryparams.get('member_email', None) \ - and get_request_user(request) != queryparams.get('owner_email', None): - raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedGraphAccess, - args=queryparams.get('owner_email', None)) - - total, graphs_list = graphs.search_graphs1(request, - 
owner_email=queryparams.get('owner_email', None), - member_email=queryparams.get('member_email', None), - names=list(filter(None, queryparams.getlist('names[]', []))), - is_public=queryparams.get('is_public', None), - nodes=list(filter(None, queryparams.getlist('nodes[]', []))), - edges=list(filter(None, queryparams.getlist('edges[]', []))), - tags=list(filter(None, queryparams.getlist('tags[]', []))), - limit=queryparams.get('limit', 20), - offset=queryparams.get('offset', 0), - order=queryparams.get('order', 'desc'), - sort=queryparams.get('sort', 'name'), - query=json.loads(request.body)) - - return HttpResponse(json.dumps({ - 'total': total, - 'graphs': [utils.serializer(graph, summary=True) for graph in graphs_list] - }), content_type="application/json", status=200) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like GET, OPTIONS etc. - else: - raise BadRequest(request) + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "POST": + querydict = QueryDict('', mutable=True) + querydict.update(request.GET) + queryparams = querydict + + # Validate search graphs API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if queryparams.get('owner_email', None) is None \ + and queryparams.get('member_email', None) is None \ + and queryparams.get('is_public', None) != '1': + raise BadRequest(request, error_code=ErrorCodes.Validation.IsPublicNotSet) + if queryparams.get('is_public', None) != '1': + if get_request_user(request) != queryparams.get('member_email', None) \ + and get_request_user(request) != queryparams.get('owner_email', None): + raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedGraphAccess, + args=queryparams.get('owner_email', None)) + + total, graphs_list = graphs.search_graphs1(request, + owner_email=queryparams.get('owner_email', None), + member_email=queryparams.get('member_email', None), + 
names=list(filter(None, queryparams.getlist('names[]', []))), + is_public=queryparams.get('is_public', None), + nodes=list(filter(None, queryparams.getlist('nodes[]', []))), + edges=list(filter(None, queryparams.getlist('edges[]', []))), + tags=list(filter(None, queryparams.getlist('tags[]', []))), + limit=queryparams.get('limit', 20), + offset=queryparams.get('offset', 0), + order=queryparams.get('order', 'desc'), + sort=queryparams.get('sort', 'name'), + query=json.loads(request.body)) + + return HttpResponse(json.dumps({ + 'total': total, + 'graphs': [utils.serializer(graph, summary=True) for graph in graphs_list] + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like GET, OPTIONS etc. + else: + raise BadRequest(request) def _graphs_api(request, graph_id=None): - """ + """ Handles any request sent to following urls: /graphs /graphs/ @@ -264,32 +266,32 @@ def _graphs_api(request, graph_id=None): BadRequest: If HTTP_ACCEPT header is not set to application/json. 
""" - if request.META.get('HTTP_ACCEPT', None) == 'application/json': - if request.method == "GET" and graph_id is None: - return HttpResponse(json.dumps(_get_graphs(request, query=request.GET)), content_type="application/json") - elif request.method == "GET" and graph_id is not None: - return HttpResponse(json.dumps(_get_graph(request, graph_id)), content_type="application/json", - status=200) - elif request.method == "POST" and graph_id is None: - return HttpResponse(json.dumps(_add_graph(request, graph=json.loads(request.body))), - content_type="application/json", status=201) - elif request.method == "PUT" and graph_id is not None: - return HttpResponse(json.dumps(_update_graph(request, graph_id, graph=json.loads(request.body))), - content_type="application/json", - status=200) - elif request.method == "DELETE" and graph_id is not None: - _delete_graph(request, graph_id) - return HttpResponse(json.dumps({ - "message": "Successfully deleted graph with id=%s" % graph_id - }), content_type="application/json", status=200) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
- else: - raise BadRequest(request) + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "GET" and graph_id is None: + return HttpResponse(json.dumps(_get_graphs(request, query=request.GET)), content_type="application/json") + elif request.method == "GET" and graph_id is not None: + return HttpResponse(json.dumps(_get_graph(request, graph_id)), content_type="application/json", + status=200) + elif request.method == "POST" and graph_id is None: + return HttpResponse(json.dumps(_add_graph(request, graph=json.loads(request.body))), + content_type="application/json", status=201) + elif request.method == "PUT" and graph_id is not None: + return HttpResponse(json.dumps(_update_graph(request, graph_id, graph=json.loads(request.body))), + content_type="application/json", + status=200) + elif request.method == "DELETE" and graph_id is not None: + _delete_graph(request, graph_id) + return HttpResponse(json.dumps({ + "message": "Successfully deleted graph with id=%s" % graph_id + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
+ else: + raise BadRequest(request) def _get_graphs(request, query=dict()): - """ + """ Query Parameters ---------- owner_email : string @@ -343,44 +345,44 @@ def _get_graphs(request, query=dict()): ------ """ - querydict = QueryDict('', mutable=True) - querydict.update(query) - query = querydict - - # Validate search graphs API request - user_role = authorization.user_role(request) - if user_role == authorization.UserRole.LOGGED_IN: - if query.get('owner_email', None) is None \ - and query.get('member_email', None) is None \ - and query.get('is_public', None) != '1': - raise BadRequest(request, error_code=ErrorCodes.Validation.IsPublicNotSet) - if query.get('is_public', None) != '1': - if get_request_user(request) != query.get('member_email', None) \ - and get_request_user(request) != query.get('owner_email', None): - raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedGraphAccess, - args=query.get('owner_email', None)) - - total, graphs_list = graphs.search_graphs(request, - owner_email=query.get('owner_email', None), - member_email=query.get('member_email', None), - names=list(filter(None, query.getlist('names[]', []))), - is_public=query.get('is_public', None), - nodes=list(filter(None, query.getlist('nodes[]', []))), - edges=list(filter(None, query.getlist('edges[]', []))), - tags=list(filter(None, query.getlist('tags[]', []))), - limit=query.get('limit', 20), - offset=query.get('offset', 0), - order=query.get('order', 'desc'), - sort=query.get('sort', 'name')) - - return { - 'total': total, - 'graphs': [utils.serializer(graph, summary=True) for graph in graphs_list] - } + querydict = QueryDict('', mutable=True) + querydict.update(query) + query = querydict + + # Validate search graphs API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if query.get('owner_email', None) is None \ + and query.get('member_email', None) is None \ + and query.get('is_public', None) != '1': + raise 
BadRequest(request, error_code=ErrorCodes.Validation.IsPublicNotSet) + if query.get('is_public', None) != '1': + if get_request_user(request) != query.get('member_email', None) \ + and get_request_user(request) != query.get('owner_email', None): + raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedGraphAccess, + args=query.get('owner_email', None)) + + total, graphs_list = graphs.search_graphs(request, + owner_email=query.get('owner_email', None), + member_email=query.get('member_email', None), + names=list(filter(None, query.getlist('names[]', []))), + is_public=query.get('is_public', None), + nodes=list(filter(None, query.getlist('nodes[]', []))), + edges=list(filter(None, query.getlist('edges[]', []))), + tags=list(filter(None, query.getlist('tags[]', []))), + limit=query.get('limit', 20), + offset=query.get('offset', 0), + order=query.get('order', 'desc'), + sort=query.get('sort', 'name')) + + return { + 'total': total, + 'graphs': [utils.serializer(graph, summary=True) for graph in graphs_list] + } def _get_graph(request, graph_id): - """ + """ Parameters ---------- @@ -400,13 +402,13 @@ def _get_graph(request, graph_id): ------ """ - authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) - return utils.serializer(graphs.get_graph_by_id(request, graph_id)) + return utils.serializer(graphs.get_graph_by_id(request, graph_id)) def _add_graph(request, graph={}): - """ + """ Graph Parameters ---------- name : string @@ -438,28 +440,28 @@ def _add_graph(request, graph={}): """ - # Validate add graph API request - user_role = authorization.user_role(request) - if user_role == authorization.UserRole.LOGGED_IN: - if get_request_user(request) != graph.get('owner_email', None): - raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateGraphForOtherUser, - args=graph.get('owner_email', None)) - elif user_role == 
authorization.UserRole.LOGGED_OFF and graph.get('owner_email', None) is not None: - raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateGraphForOtherUser, - args=graph.get('owner_email', None)) - - return utils.serializer(graphs.add_graph(request, - name=graph.get('name', None), - is_public=graph.get('is_public', None), - graph_json=graph.get('graph_json', None), - style_json=graph.get('style_json', None), - tags=graph.get('tags', None), - owner_email=graph.get('owner_email', None))) + # Validate add graph API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if get_request_user(request) != graph.get('owner_email', None): + raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateGraphForOtherUser, + args=graph.get('owner_email', None)) + elif user_role == authorization.UserRole.LOGGED_OFF and graph.get('owner_email', None) is not None: + raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateGraphForOtherUser, + args=graph.get('owner_email', None)) + + return utils.serializer(graphs.add_graph(request, + name=graph.get('name', None), + is_public=graph.get('is_public', None), + graph_json=graph.get('graph_json', None), + style_json=graph.get('style_json', None), + tags=graph.get('tags', None), + owner_email=graph.get('owner_email', None))) @is_authenticated() def _update_graph(request, graph_id, graph={}): - """ + """ Graph Parameters ---------- name : string @@ -491,26 +493,26 @@ def _update_graph(request, graph_id, graph={}): It will update the owner_email only if user has admin access otherwise user cannot update the owner email. 
""" - authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) - user_role = authorization.user_role(request) + authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) + user_role = authorization.user_role(request) - if 'update_legend_format' in graph: - return utils.serializer(graphs.update_graph_with_html_legend(request, graph_id=graph_id, param=graph)) + if 'update_legend_format' in graph: + return utils.serializer(graphs.update_graph_with_html_legend(request, graph_id=graph_id, param=graph)) - return utils.serializer(graphs.update_graph(request, - graph_id=graph_id, - name=graph.get('name', None), - is_public=graph.get('is_public', None), - graph_json=graph.get('graph_json', None), - style_json=graph.get('style_json', None), - owner_email=graph.get('owner_email', - None) if user_role == authorization.UserRole.ADMIN else None, - default_layout_id=graph.get('default_layout_id', None))) + return utils.serializer(graphs.update_graph(request, + graph_id=graph_id, + name=graph.get('name', None), + is_public=graph.get('is_public', None), + graph_json=graph.get('graph_json', None), + style_json=graph.get('style_json', None), + owner_email=graph.get('owner_email', + None) if user_role == authorization.UserRole.ADMIN else None, + default_layout_id=graph.get('default_layout_id', None))) @is_authenticated() def _delete_graph(request, graph_id): - """ + """ Parameters ---------- @@ -530,8 +532,8 @@ def _delete_graph(request, graph_id): ------ """ - authorization.validate(request, permission='GRAPH_DELETE', graph_id=graph_id) - graphs.delete_graph_by_id(request, graph_id) + authorization.validate(request, permission='GRAPH_DELETE', graph_id=graph_id) + graphs.delete_graph_by_id(request, graph_id) ''' @@ -542,7 +544,7 @@ def _delete_graph(request, graph_id): @csrf_exempt @is_authenticated() def graph_groups_rest_api(request, graph_id, group_id=None): - """ + """ Handles any request sent to following urls: /api/v1/graphs//groups 
/api/v1/graphs//groups/ @@ -556,11 +558,11 @@ def graph_groups_rest_api(request, graph_id, group_id=None): response : JSON Response """ - return _graph_groups_api(request, graph_id, group_id=group_id) + return _graph_groups_api(request, graph_id, group_id=group_id) def graph_groups_ajax_api(request, graph_id, group_id=None): - """ + """ Handles any request sent to following urls: /javascript/graphs//groups /javascript/graphs//groups/ @@ -574,12 +576,12 @@ def graph_groups_ajax_api(request, graph_id, group_id=None): response : JSON Response """ - return _graph_groups_api(request, graph_id, group_id=group_id) + return _graph_groups_api(request, graph_id, group_id=group_id) @is_authenticated() def _graph_groups_api(request, graph_id, group_id=None): - """ + """ Handles any request (GET/POST) sent to graphs//groups or graphs//groups/. Parameters @@ -598,30 +600,30 @@ def _graph_groups_api(request, graph_id, group_id=None): BadRequest: If graph_id is missing. """ - if request.META.get('HTTP_ACCEPT', None) == 'application/json': - if graph_id is None: - raise BadRequest(request, error_code=ErrorCodes.Validation.GraphIDMissing) - - if request.method == "GET" and group_id is None: - return HttpResponse(json.dumps(_get_graph_groups(request, graph_id, query=request.GET)), - content_type="application/json") - elif request.method == "POST" and group_id is None: - return HttpResponse(json.dumps(_add_graph_group(request, graph_id, group=json.loads(request.body))), - content_type="application/json", - status=201) - elif request.method == "DELETE" and group_id is not None: - _delete_graph_group(request, graph_id, group_id) - return HttpResponse(json.dumps({ - "message": "Successfully deleted graph with id=%s from group with id=%s" % (graph_id, group_id) - }), content_type="application/json", status=200) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
- else: - raise BadRequest(request) + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if graph_id is None: + raise BadRequest(request, error_code=ErrorCodes.Validation.GraphIDMissing) + + if request.method == "GET" and group_id is None: + return HttpResponse(json.dumps(_get_graph_groups(request, graph_id, query=request.GET)), + content_type="application/json") + elif request.method == "POST" and group_id is None: + return HttpResponse(json.dumps(_add_graph_group(request, graph_id, group=json.loads(request.body))), + content_type="application/json", + status=201) + elif request.method == "DELETE" and group_id is not None: + _delete_graph_group(request, graph_id, group_id) + return HttpResponse(json.dumps({ + "message": "Successfully deleted graph with id=%s from group with id=%s" % (graph_id, group_id) + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. + else: + raise BadRequest(request) def _add_graph_group(request, graph_id, group={}): - """ + """ Body Parameters ---------- group_id : string @@ -646,16 +648,16 @@ def _add_graph_group(request, graph_id, group={}): Notes ------ """ - authorization.validate(request, permission='GRAPH_SHARE', graph_id=graph_id) - authorization.validate(request, permission='GROUP_SHARE', group_id=group.get('group_id', None)) + authorization.validate(request, permission='GRAPH_SHARE', graph_id=graph_id) + authorization.validate(request, permission='GROUP_SHARE', group_id=group.get('group_id', None)) - return utils.serializer(users.add_group_graph(request, - graph_id=graph_id, - group_id=group.get('group_id', None))) + return utils.serializer(users.add_group_graph(request, + graph_id=graph_id, + group_id=group.get('group_id', None))) def _delete_graph_group(request, graph_id, group_id): - """ + """ Parameters ---------- request : object @@ -676,16 +678,16 @@ def _delete_graph_group(request, graph_id, group_id): Notes 
------ """ - authorization.validate(request, permission='GRAPH_SHARE', graph_id=graph_id) - authorization.validate(request, permission='GROUP_SHARE', group_id=group_id) + authorization.validate(request, permission='GRAPH_SHARE', graph_id=graph_id) + authorization.validate(request, permission='GROUP_SHARE', group_id=group_id) - users.delete_group_graph(request, - group_id=group_id, - graph_id=graph_id) + users.delete_group_graph(request, + group_id=group_id, + graph_id=graph_id) def _get_graph_groups(request, graph_id, query={}): - """ + """ Query Parameters ---------- @@ -729,32 +731,32 @@ def _get_graph_groups(request, graph_id, query={}): ------ """ - authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) - - # Validate search graph groups API request - user_role = authorization.user_role(request) - if user_role == authorization.UserRole.LOGGED_IN: - if query.get('is_public', None) is not True: - if get_request_user(request) != query.get('member_email', None) \ - and get_request_user(request) != query.get('owner_email', None): - raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedGroupAccess, - args=get_request_user(request)) - - total, groups = users.search_groups(request, - graph_ids=[graph_id], - owner_email=query.get('owner_email', None), - member_email=query.get('member_email', None), - name=query.get('name', None), - description=query.get('description', None), - limit=query.get('limit', 20), - offset=query.get('offset', 0), - order=query.get('order', 'desc'), - sort=query.get('sort', 'name')) - - return { - 'total': total, - 'groups': [utils.serializer(group) for group in groups] - } + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + + # Validate search graph groups API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if query.get('is_public', None) is not True: + if get_request_user(request) != query.get('member_email', None) \ + 
and get_request_user(request) != query.get('owner_email', None): + raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedGroupAccess, + args=get_request_user(request)) + + total, groups = users.search_groups(request, + graph_ids=[graph_id], + owner_email=query.get('owner_email', None), + member_email=query.get('member_email', None), + name=query.get('name', None), + description=query.get('description', None), + limit=query.get('limit', 20), + offset=query.get('offset', 0), + order=query.get('order', 'desc'), + sort=query.get('sort', 'name')) + + return { + 'total': total, + 'groups': [utils.serializer(group) for group in groups] + } ''' @@ -765,7 +767,7 @@ def _get_graph_groups(request, graph_id, query={}): @csrf_exempt @is_authenticated() def graph_layouts_rest_api(request, graph_id, layout_id=None): - """ + """ Handles any request sent to following urls: /api/v1/graphs//layouts /api/v1/graphs//layouts/ @@ -779,11 +781,11 @@ def graph_layouts_rest_api(request, graph_id, layout_id=None): response : JSON Response """ - return _graph_layouts_api(request, graph_id, layout_id=layout_id) + return _graph_layouts_api(request, graph_id, layout_id=layout_id) def graph_layouts_ajax_api(request, graph_id, layout_id=None): - """ + """ Handles any request sent to following urls: /javascript/graphs//layouts /javascript/graphs//layouts/ @@ -797,11 +799,11 @@ def graph_layouts_ajax_api(request, graph_id, layout_id=None): response : JSON Response """ - return _graph_layouts_api(request, graph_id, layout_id=layout_id) + return _graph_layouts_api(request, graph_id, layout_id=layout_id) def _graph_layouts_api(request, graph_id, layout_id=None): - """ + """ Handles any request (GET/POST) sent to /layouts or /layouts/. 
Parameters @@ -816,35 +818,35 @@ def _graph_layouts_api(request, graph_id, layout_id=None): ------- """ - if request.META.get('HTTP_ACCEPT', None) == 'application/json': - if request.method == "GET" and layout_id is None: - return HttpResponse(json.dumps(_get_layouts(request, graph_id, query=request.GET)), - content_type="application/json") - elif request.method == "GET" and layout_id is not None: - return HttpResponse(json.dumps(_get_layout(request, graph_id, layout_id)), - content_type="application/json") - elif request.method == "POST" and layout_id is None: - return HttpResponse(json.dumps(_add_layout(request, graph_id, layout=json.loads(request.body))), - content_type="application/json", - status=201) - elif request.method == "PUT" and layout_id is not None: - return HttpResponse( - json.dumps(_update_layout(request, graph_id, layout_id, layout=json.loads(request.body))), - content_type="application/json", - status=200) - elif request.method == "DELETE" and layout_id is not None: - _delete_layout(request, graph_id, layout_id) - return HttpResponse(json.dumps({ - "message": "Successfully deleted layout with id=%s" % (layout_id) - }), content_type="application/json", status=200) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
- else: - raise BadRequest(request) + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "GET" and layout_id is None: + return HttpResponse(json.dumps(_get_layouts(request, graph_id, query=request.GET)), + content_type="application/json") + elif request.method == "GET" and layout_id is not None: + return HttpResponse(json.dumps(_get_layout(request, graph_id, layout_id)), + content_type="application/json") + elif request.method == "POST" and layout_id is None: + return HttpResponse(json.dumps(_add_layout(request, graph_id, layout=json.loads(request.body))), + content_type="application/json", + status=201) + elif request.method == "PUT" and layout_id is not None: + return HttpResponse( + json.dumps(_update_layout(request, graph_id, layout_id, layout=json.loads(request.body))), + content_type="application/json", + status=200) + elif request.method == "DELETE" and layout_id is not None: + _delete_layout(request, graph_id, layout_id) + return HttpResponse(json.dumps({ + "message": "Successfully deleted layout with id=%s" % (layout_id) + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
+ else: + raise BadRequest(request) def _get_layouts(request, graph_id, query=dict()): - """ + """ Query Parameters ---------- owner_email : string @@ -880,37 +882,38 @@ def _get_layouts(request, graph_id, query=dict()): Notes ------ """ - authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) - - querydict = QueryDict('', mutable=True) - querydict.update(query) - query = querydict - - # Validate search layouts API request - user_role = authorization.user_role(request) - if user_role == authorization.UserRole.LOGGED_IN: - if get_request_user(request) != query.get('owner_email', None) \ - and (query.get('is_shared', None) is None or int(query.get('is_shared', 0)) != 1): - raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedLayoutAccess, args=get_request_user(request)) - - total, layouts = graphs.search_layouts(request, - owner_email=query.get('owner_email', None), - name=query.get('name', None), - is_shared=query.get('is_shared', None), - graph_id=graph_id, - limit=query.get('limit', 20), - offset=query.get('offset', 0), - order=query.get('order', 'desc'), - sort=query.get('sort', 'name')) - - return { - 'total': total, - 'layouts': [utils.serializer(layout) for layout in layouts] - } + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + + querydict = QueryDict('', mutable=True) + querydict.update(query) + query = querydict + + # Validate search layouts API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if get_request_user(request) != query.get('owner_email', None) \ + and (query.get('is_shared', None) is None or int(query.get('is_shared', 0)) != 1): + raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedLayoutAccess, + args=get_request_user(request)) + + total, layouts = graphs.search_layouts(request, + owner_email=query.get('owner_email', None), + name=query.get('name', None), + is_shared=query.get('is_shared', None), + 
graph_id=graph_id, + limit=query.get('limit', 20), + offset=query.get('offset', 0), + order=query.get('order', 'desc'), + sort=query.get('sort', 'name')) + + return { + 'total': total, + 'layouts': [utils.serializer(layout) for layout in layouts] + } def _get_layout(request, graph_id, layout_id): - """ + """ Parameters ---------- @@ -930,14 +933,14 @@ def _get_layout(request, graph_id, layout_id): ------ """ - authorization.validate(request, permission='LAYOUT_READ', layout_id=layout_id) + authorization.validate(request, permission='LAYOUT_READ', layout_id=layout_id) - return utils.serializer(graphs.get_layout_by_id(request, layout_id)) + return utils.serializer(graphs.get_layout_by_id(request, layout_id)) @is_authenticated() def _add_layout(request, graph_id, layout={}): - """ + """ Layout Parameters ---------- name : string @@ -967,28 +970,28 @@ def _add_layout(request, graph_id, layout={}): ------ """ - authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) - # Validate add graph API request - user_role = authorization.user_role(request) - if user_role == authorization.UserRole.LOGGED_IN: - if get_request_user(request) != layout.get('owner_email', None): - raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateLayoutForOtherUser, - args=layout.get('owner_email', None)) + # Validate add graph API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if get_request_user(request) != layout.get('owner_email', None): + raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateLayoutForOtherUser, + args=layout.get('owner_email', None)) - return utils.serializer(graphs.add_layout(request, - owner_email=layout.get('owner_email', None), - name=layout.get('name', None), - graph_id=layout.get('graph_id', None), - is_shared=layout.get('is_shared', None), - 
positions_json=layout.get('positions_json', None), - style_json=layout.get('style_json', None), - )) + return utils.serializer(graphs.add_layout(request, + owner_email=layout.get('owner_email', None), + name=layout.get('name', None), + graph_id=layout.get('graph_id', None), + is_shared=layout.get('is_shared', None), + positions_json=layout.get('positions_json', None), + style_json=layout.get('style_json', None), + )) @is_authenticated() def _update_layout(request, graph_id, layout_id, layout={}): - """ + """ Layout Parameters ---------- name : string @@ -1021,23 +1024,23 @@ def _update_layout(request, graph_id, layout_id, layout={}): It will update the owner_email only if user has admin access otherwise user cannot update the owner email. """ - authorization.validate(request, permission='LAYOUT_UPDATE', layout_id=layout_id) - user_role = authorization.user_role(request) + authorization.validate(request, permission='LAYOUT_UPDATE', layout_id=layout_id) + user_role = authorization.user_role(request) - return utils.serializer(graphs.update_layout(request, layout_id, - owner_email=layout.get('owner_email', - None) if user_role == authorization.UserRole.ADMIN else None, - name=layout.get('name', None), - graph_id=layout.get('graph_id', None), - is_shared=layout.get('is_shared', None), - positions_json=layout.get('positions_json', None), - style_json=layout.get('style_json', None), - )) + return utils.serializer(graphs.update_layout(request, layout_id, + owner_email=layout.get('owner_email', + None) if user_role == authorization.UserRole.ADMIN else None, + name=layout.get('name', None), + graph_id=layout.get('graph_id', None), + is_shared=layout.get('is_shared', None), + positions_json=layout.get('positions_json', None), + style_json=layout.get('style_json', None), + )) @is_authenticated() def _delete_layout(request, graph_id, layout_id): - """ + """ Parameters ---------- @@ -1057,15 +1060,15 @@ def _delete_layout(request, graph_id, layout_id): ------ """ - 
authorization.validate(request, permission='LAYOUT_DELETE', layout_id=layout_id) + authorization.validate(request, permission='LAYOUT_DELETE', layout_id=layout_id) - graphs.delete_layout_by_id(request, layout_id) + graphs.delete_layout_by_id(request, layout_id) @csrf_exempt @is_authenticated() def graph_nodes_rest_api(request, graph_id, node_id=None): - """ + """ Handles any request sent to following urls: /api/v1/graphs//nodes /api/v1/graphs//nodes/ @@ -1079,11 +1082,11 @@ def graph_nodes_rest_api(request, graph_id, node_id=None): response : JSON Response """ - return _graph_nodes_api(request, graph_id, node_id=node_id) + return _graph_nodes_api(request, graph_id, node_id=node_id) def graph_nodes_ajax_api(request, graph_id, node_id=None): - """ + """ Handles any request sent to following urls: /javascript/graphs//nodes /javascript/graphs//nodes/ @@ -1097,11 +1100,11 @@ def graph_nodes_ajax_api(request, graph_id, node_id=None): response : JSON Response """ - return _graph_nodes_api(request, graph_id, node_id=node_id) + return _graph_nodes_api(request, graph_id, node_id=node_id) def _graph_nodes_api(request, graph_id, node_id=None): - """ + """ Handles any request (GET/POST) sent to nodes/ or nodes/. 
Parameters @@ -1116,30 +1119,30 @@ def _graph_nodes_api(request, graph_id, node_id=None): ------- """ - if request.META.get('HTTP_ACCEPT', None) == 'application/json': - if request.method == "GET" and node_id is None: - return HttpResponse(json.dumps(_get_nodes(request, graph_id, query=request.GET)), - content_type="application/json") - elif request.method == "GET" and node_id is not None: - return HttpResponse(json.dumps(_get_node(request, graph_id, node_id)), - content_type="application/json") - elif request.method == "POST" and node_id is None: - return HttpResponse(json.dumps(_add_node(request, graph_id, node=json.loads(request.body))), - content_type="application/json", - status=201) - elif request.method == "DELETE" and node_id is not None: - _delete_node(request, graph_id, node_id) - return HttpResponse(json.dumps({ - "message": "Successfully deleted node with id=%s" % (node_id) - }), content_type="application/json", status=200) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
- else: - raise BadRequest(request) + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "GET" and node_id is None: + return HttpResponse(json.dumps(_get_nodes(request, graph_id, query=request.GET)), + content_type="application/json") + elif request.method == "GET" and node_id is not None: + return HttpResponse(json.dumps(_get_node(request, graph_id, node_id)), + content_type="application/json") + elif request.method == "POST" and node_id is None: + return HttpResponse(json.dumps(_add_node(request, graph_id, node=json.loads(request.body))), + content_type="application/json", + status=201) + elif request.method == "DELETE" and node_id is not None: + _delete_node(request, graph_id, node_id) + return HttpResponse(json.dumps({ + "message": "Successfully deleted node with id=%s" % (node_id) + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. + else: + raise BadRequest(request) def _get_nodes(request, graph_id, query={}): - """ + """ Query Parameters ---------- @@ -1179,29 +1182,29 @@ def _get_nodes(request, graph_id, query={}): """ - authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) - querydict = QueryDict('', mutable=True) - querydict.update(query) - query = querydict + querydict = QueryDict('', mutable=True) + querydict.update(query) + query = querydict - total, nodes_list = graphs.search_nodes(request, - graph_id=graph_id, - names=query.getlist('names[]', None), - labels=query.getlist('labels[]', None), - limit=query.get('limit', 20), - offset=query.get('offset', 0), - order=query.get('order', 'desc'), - sort=query.get('sort', 'name')) + total, nodes_list = graphs.search_nodes(request, + graph_id=graph_id, + names=query.getlist('names[]', None), + labels=query.getlist('labels[]', None), + limit=query.get('limit', 20), + 
offset=query.get('offset', 0), + order=query.get('order', 'desc'), + sort=query.get('sort', 'name')) - return { - 'total': total, - 'nodes': [utils.serializer(node) for node in nodes_list] - } + return { + 'total': total, + 'nodes': [utils.serializer(node) for node in nodes_list] + } def _get_node(request, graph_id, node_id): - """ + """ Parameters ---------- @@ -1221,13 +1224,13 @@ def _get_node(request, graph_id, node_id): ------ """ - authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) - return utils.serializer(graphs.get_node_by_id(request, node_id)) + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + return utils.serializer(graphs.get_node_by_id(request, node_id)) @is_authenticated() def _add_node(request, graph_id, node={}): - """ + """ Node Parameters ---------- name : string @@ -1257,17 +1260,17 @@ def _add_node(request, graph_id, node={}): ------ """ - authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) + authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) - return utils.serializer(graphs.add_node(request, - name=node.get('name', None), - label=node.get('label', None), - graph_id=graph_id)) + return utils.serializer(graphs.add_node(request, + name=node.get('name', None), + label=node.get('label', None), + graph_id=graph_id)) @is_authenticated() def _delete_node(request, graph_id, node_id): - """ + """ Parameters ---------- @@ -1287,15 +1290,15 @@ def _delete_node(request, graph_id, node_id): ------ """ - authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) + authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) - graphs.delete_node_by_id(request, node_id) + graphs.delete_node_by_id(request, node_id) @csrf_exempt @is_authenticated() def graph_edges_rest_api(request, graph_id, edge_id=None): - """ + """ Handles any request sent to following urls: /api/v1/graphs//edges /api/v1/graphs//edges/ @@ -1309,11 
+1312,11 @@ def graph_edges_rest_api(request, graph_id, edge_id=None): response : JSON Response """ - return _graph_edges_api(request, graph_id, edge_id=edge_id) + return _graph_edges_api(request, graph_id, edge_id=edge_id) def graph_edges_ajax_api(request, graph_id, edge_id=None): - """ + """ Handles any request sent to following urls: /javascript/graphs//edges /javascript/graphs//edges/ @@ -1327,11 +1330,11 @@ def graph_edges_ajax_api(request, graph_id, edge_id=None): response : JSON Response """ - return _graph_edges_api(request, graph_id, edge_id=edge_id) + return _graph_edges_api(request, graph_id, edge_id=edge_id) def _graph_edges_api(request, graph_id, edge_id=None): - """ + """ Handles any request (GET/POST) sent to edges/ or edges/. Parameters @@ -1344,30 +1347,30 @@ def _graph_edges_api(request, graph_id, edge_id=None): ------- """ - if request.META.get('HTTP_ACCEPT', None) == 'application/json': - if request.method == "GET" and edge_id is None: - return HttpResponse(json.dumps(_get_edges(request, graph_id, query=request.GET)), - content_type="application/json") - elif request.method == "GET" and edge_id is not None: - return HttpResponse(json.dumps(_get_edge(request, graph_id, edge_id)), - content_type="application/json") - elif request.method == "POST" and edge_id is None: - return HttpResponse(json.dumps(_add_edge(request, graph_id, edge=json.loads(request.body))), - content_type="application/json", - status=201) - elif request.method == "DELETE" and edge_id is not None: - _delete_edge(request, graph_id, edge_id) - return HttpResponse(json.dumps({ - "message": "Successfully deleted node with id=%s" % (edge_id) - }), content_type="application/json", status=200) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
- else: - raise BadRequest(request) + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "GET" and edge_id is None: + return HttpResponse(json.dumps(_get_edges(request, graph_id, query=request.GET)), + content_type="application/json") + elif request.method == "GET" and edge_id is not None: + return HttpResponse(json.dumps(_get_edge(request, graph_id, edge_id)), + content_type="application/json") + elif request.method == "POST" and edge_id is None: + return HttpResponse(json.dumps(_add_edge(request, graph_id, edge=json.loads(request.body))), + content_type="application/json", + status=201) + elif request.method == "DELETE" and edge_id is not None: + _delete_edge(request, graph_id, edge_id) + return HttpResponse(json.dumps({ + "message": "Successfully deleted edge with id=%s" % (edge_id) + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. + else: + raise BadRequest(request) def _get_edges(request, graph_id, query={}): - """ + """ Query Parameters ---------- @@ -1406,29 +1409,29 @@ def _get_edges(request, graph_id, query={}): ------ """ - authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) - querydict = QueryDict('', mutable=True) - querydict.update(query) - query = querydict + querydict = QueryDict('', mutable=True) + querydict.update(query) + query = querydict - total, edges_list = graphs.search_edges(request, - graph_id=graph_id, - names=query.getlist('names[]', None), - edges=query.getlist('edges[]', None), - limit=query.get('limit', 20), - offset=query.get('offset', 0), - order=query.get('order', 'desc'), - sort=query.get('sort', 'name')) + total, edges_list = graphs.search_edges(request, + graph_id=graph_id, + names=query.getlist('names[]', None), + edges=query.getlist('edges[]', None), + limit=query.get('limit', 20), + 
offset=query.get('offset', 0), + order=query.get('order', 'desc'), + sort=query.get('sort', 'name')) - return { - 'total': total, - 'edges': [utils.serializer(edge) for edge in edges_list] - } + return { + 'total': total, + 'edges': [utils.serializer(edge) for edge in edges_list] + } def _get_edge(request, graph_id, edge_id): - """ + """ Parameters ---------- @@ -1448,14 +1451,14 @@ def _get_edge(request, graph_id, edge_id): ------ """ - authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) - return utils.serializer(graphs.get_edge_by_id(request, edge_id)) + return utils.serializer(graphs.get_edge_by_id(request, edge_id)) @is_authenticated() def _add_edge(request, graph_id, edge={}): - """ + """ Edge Parameters ---------- name : string @@ -1490,19 +1493,19 @@ def _add_edge(request, graph_id, edge={}): ------ """ - authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) + authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) - return utils.serializer(graphs.add_edge(request, - name=edge.get('name', None), - head_node_id=edge.get('head_node_id', None), - tail_node_id=edge.get('tail_node_id', None), - is_directed=edge.get('is_directed', 0), - graph_id=graph_id)) + return utils.serializer(graphs.add_edge(request, + name=edge.get('name', None), + head_node_id=edge.get('head_node_id', None), + tail_node_id=edge.get('tail_node_id', None), + is_directed=edge.get('is_directed', 0), + graph_id=graph_id)) @is_authenticated() def _delete_edge(request, graph_id, edge_id): - """ + """ Parameters ---------- @@ -1522,6 +1525,482 @@ def _delete_edge(request, graph_id, edge_id): ------ """ - authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) + authorization.validate(request, permission='GRAPH_UPDATE', graph_id=graph_id) + + graphs.delete_edge_by_id(request, edge_id) + + +def view_comments(request, graph_id): + 
""" + + Parameters + ---------- + graph_id : Integer + Unique ID of the graph. Required. + request : object + HTTP GET Request. + + Returns + ------- + response : HTML Page Response + Rendered comments list page in HTML. + + Raises + ------ + MethodNotAllowed: If a user tries to send requests other than GET. + + Notes + ------ + + """ + + context = RequestContext(request, {}) + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + if 'GET' == request.method: + try: + comments_list = _get_comments_by_graph_id(request, graph_id) + context['Success'] = 'Displaying the list of comments under this graph' + comments_array = [] + for comment in comments_list['comments']: + comments_array.append({ + 'id': comment['id'], + 'owner_email': comment['owner_email'] if comment['owner_email'] != None else 'Anonymous', + 'message': comment['message'] + }) + context['comments'] = comments_array + context['graph_id'] = graph_id + except Exception as e: + context['Error'] = str(e) + return render(request, 'comments/index.html', context) + else: + raise MethodNotAllowed(request) + + +def _add_comment(request, comment={}): + """ + + Comment Parameters + ---------- + text: string + Comment text. Required + edge_names : string + List of names of edges associated with the comment. Required + node_names : string + List of names of nodes associated with the comment. Required + layout_id : Integer + Required if the comment is associated with a layout. + owner_email : string + Email of the person who made the comment on the graph. Required + graph_id : string + Unique ID of the graph for the comment. Required + parent_comment_id: Integer + Required if the comment is a reply to a parent comment. + is_closed: Integer + Indicates if the comment is closed or not. Required + + + + Parameters + ---------- + comment : dict + Dictionary containing the data of the comment being added. + request : object + HTTP POST Request. 
+ + Returns + ------- + comment : object + Newly created comment object. + + Raises + ------ + BadRequest: If the parent comment does not exist comment cannot be created. + BadRequest: If the parent comment is already resolved, reply to that comment cannot be done. + + Notes + ------ + + """ + + edge_names = comment.get('edge_names', None) + node_names = comment.get('node_names', None) + graph_id = comment.get('graph_id', None) + owner_email = comment.get('owner_email', None) + parent_comment_id = comment.get('parent_comment_id', None) + edges, nodes = [], [] + + # Validate if user has permission to create a comment on this graph. + if graph_id != None: + uid = request.session['uid'] if 'uid' in request.session else None + if uid == None: + raise BadRequest(request, error_code=ErrorCodes.Validation.UserNotAuthorizedToCreateComment) + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + + # Reply comments cannot be added to already resolved comment. + if parent_comment_id != None: + parent_comment = comments.get_comment_by_id(request, parent_comment_id) + if parent_comment == None: + raise BadRequest(request, error_code=ErrorCodes.Validation.ParentCommentDoesNotExist) + elif utils.serializer(parent_comment)['is_closed'] == 1: + raise BadRequest(request, error_code=ErrorCodes.Validation.CannotReplyToResolvedComment) + + if len(edge_names) == 0: + edge_names = None + if len(node_names) == 0: + node_names = None + + if edge_names != None and graph_id != None: + for edge_name in edge_names: + edge_id = utils.serializer(graphs.get_edge_by_name(request, graph_id, edge_name))['id'] + if edge_id != None: + edges.append(edge_id) + + if node_names != None and graph_id != None: + for node_name in node_names: + node_id = utils.serializer(graphs.get_node_by_name(request, graph_id, node_name))['id'] + if node_id != None: + nodes.append(node_id) + + if len(edges) == 0: + edges = None + + if len(nodes) == 0: + nodes = None + + return 
utils.serializer(comments.add_comment(request, + text=comment.get('text', None), + graph_id=comment.get('graph_id', None), + is_closed=comment.get('is_closed', None), + edges=edges, + nodes=nodes, + layout_id=comment.get('layout', None), + parent_comment_id=parent_comment_id, + owner_email=owner_email)) + + +def _get_comments_by_graph_id(request, graph_id): + """ + + Parameters + ---------- + graph_id : Integer + Unique ID of the graph. Required. + request : object + HTTP GET Request. + + Returns + ------- + total : integer + Number of comments associated with the graph_id. + comments : List of Comments. + List of Comment Objects associated with the graph_id. + + Raises + ------ + + Notes + ------ + + """ + # Validate if user has permission to read the comments. + authorization.validate(request, permission='GRAPH_READ', graph_id=graph_id) + + total, commentss = comments.get_comment_by_graph_id(request, graph_id=graph_id) + return { + 'total': total, + 'comments': [utils.serializer(comment) for comment in commentss] + } + + +def _edit_comment(request, comment={}): + """ + Comment Parameters + ---------- + text : string + Comment text. Required + comment_id : Integer + Unique ID of the comment for updating. Required + is_closed : Integer + Indicates if the comment is closed or not. Required + + + Parameters + ---------- + comment : dict + Dictionary containing the data of the comment being edited. + graph_id : Integer + Unique ID of the graph. + request : object + HTTP PUT Request. + + Returns + ------- + comment : object + Updated comment object. 
+ + Raises + ------ + + Notes + ------ + + """ + # Validate if user has permission to edit comment + if comment.get('text', None) != None or comment.get('is_closed', None) == 1: + authorization.validate(request, permission='COMMENT_UPDATE', comment_id=comment.get('id', None)) + elif comment.get('is_closed', None) == 0: + authorization.validate(request, permission='COMMENT_READ', comment_id=comment.get('id', None)) + + return utils.serializer(comments.edit_comment(request, + text=comment.get('text', None), + is_closed=comment.get('is_closed', None), + comment_id=comment.get('id', None))) + + +def _delete_comment(request, graph_id, comment={}): + """ + Comment Parameters + ---------- + id : Integer + Unique ID of the comment. Required + + Parameters + ---------- + comment : dict + Dictionary containing the data of the comment being deleted. + graph_id : Integer + Unique ID of the graph. + request : object + HTTP DELETE Request. + + Returns + ------- + comment : object + Deleted comment object. + + Raises + ------ + + Notes + ------ + + """ + # Validate if user has permission to delete comment + authorization.validate(request, permission='COMMENT_DELETE', comment_id=comment.get('id', None)) + return utils.serializer(comments.delete_comment(request, + id=comment.get('id', None))) + + +def _get_comment_to_graph_by_id(request, comment): + """ + + Parameters + ---------- + comment : dict + Dictionary containing the data of the comment. + request : object + HTTP GET Request. + + Returns + ------- + comment_to_graph : comment_to_graph. + comment_to_graph Object + + Raises + ------ + + Notes + ------ + + """ + # Validate if user has permission to read the comments. + + return utils.serializer(comments.get_comment_to_graph(request, comment_id=comment.get('id', None))) + + +@is_authenticated() +def _add_comment_reaction(request, reaction={}): + """ + Reaction Parameters + ---------- + id : Integer + Unique ID of the comment. + content : string + content of the reaction. 
+ owner_email : string + Email of the Owner of the reaction. Required + + + Parameters + ---------- + reaction : dict + Dictionary containing the data of the reaction being added. + request : object + HTTP POST Request. + + Returns + ------- + reaction : object + Newly created reaction object. + + Raises + ------ + + Notes + ------ + + """ + + return utils.serializer(discussions.add_comment_reaction(request, comment_id=reaction.get('comment_id', None), + content=reaction.get('content', None), + owner_email=reaction.get('owner_email', None))) + + +@is_authenticated() +def _delete_comment_reaction(request, reaction={}): + """ + Reaction Parameters + ---------- + id : Integer + Unique ID of the comment. + content : string + content of the reaction. + owner_email : string + Email of the Owner of the reaction. Required + + + Parameters + ---------- + reaction : dict + Dictionary containing the data of the reaction being added. + request : object + HTTP POST Request. + + Returns + ------- + reaction : object + Newly created reaction object. + + Raises + ------ + + Notes + ------ + + """ + + utils.serializer(discussions.delete_comment_reaction(request, comment_id=reaction.get('comment_id', None), + content=reaction.get('content', None), + owner_email=reaction.get('owner_email', None))) + + +@is_authenticated() +def _get_comment_reactions(request, reaction={}): + """ + Reaction Parameters + ---------- + id : Integer + Unique ID of the comment. + content : string + content of the reaction. + + + Parameters + ---------- + reaction : dict + Dictionary containing the data of the reaction being added. + request : object + HTTP POST Request. + + Returns + ------- + total : integer + Number of reaction associated with the comment_id. + reactions : List of Reactions. + List of Reaction Objects associated with the comment_id. 
+ + Raises + ------ + + Notes + ------ + + """ + total, reactions = discussions.get_comment_reactions(request, comment_id=reaction.get('comment_id', None), + content=reaction.get('content', None)) + + return { + 'total': total, + 'reactions': [utils.serializer(reaction) for reaction in reactions] + } + + +def graph_comments_ajax_api(request, graph_id=None): + """ + Handles any request sent to following urls: + /ajax/graphs//comments + + Parameters + ---------- + request - HTTP Request + + Returns + ------- + response : JSON Response + + """ + return _comments_api(request, graph_id=graph_id) - graphs.delete_edge_by_id(request, edge_id) + +def _comments_api(request, graph_id=None): + """ + Handles any request sent to following urls: + /graphs//comments + + Parameters + ---------- + request - HTTP Request + + Returns + ------- + response : JSON Response + + Raises + ------ + MethodNotAllowed: If a user tries to send requests other than GET, POST, PUT, DELETE, PIN, UNPIN. + BadRequest: If HTTP_ACCEPT header is not set to application/json. 
+ + """ + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "POST": + return HttpResponse(json.dumps(_add_comment(request, comment=json.loads(request.body))), + content_type="application/json", status=201) + elif request.method == "GET" and graph_id is not None: + return HttpResponse(json.dumps(_get_comments_by_graph_id(request, graph_id=graph_id)), + content_type="application/json", status=201) + elif request.method == "PUT" and graph_id is not None: + return HttpResponse(json.dumps(_edit_comment(request, comment=json.loads(request.body))), + content_type="application/json", status=201) + elif request.method == "DELETE" and graph_id is not None: + return HttpResponse( + json.dumps(_delete_comment(request, graph_id=graph_id, comment=json.loads(request.body))), + content_type="application/json", status=201) + elif request.method == "GET_COMMENT_TO_GRAPH": + return HttpResponse(json.dumps(_get_comment_to_graph_by_id(request, comment=json.loads(request.body))), + content_type="application/json", status=201) + elif request.method == "POST_REACTION": + return HttpResponse(json.dumps(_add_comment_reaction(request, reaction=json.loads(request.body))), + content_type="application/json", + status=201) + elif request.method == "GET_REACTION": + return HttpResponse(json.dumps(_get_comment_reactions(request, reaction=json.loads(request.body))), + content_type="application/json") + elif request.method == "DELETE_REACTION": + _delete_comment_reaction(request, reaction=json.loads(request.body)) + return HttpResponse(json.dumps({ + "message": "Successfully deleted discussion" + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
+ else: + raise BadRequest(request) diff --git a/applications/users/models.py b/applications/users/models.py index 053fc6f1..25ad1607 100644 --- a/applications/users/models.py +++ b/applications/users/models.py @@ -5,6 +5,9 @@ from sqlalchemy.orm import relationship, backref from applications.graphs.models import * +from applications.comments.models import * +from applications.discussions.models import * + Base = settings.BASE @@ -28,6 +31,9 @@ class User(IDMixin, TimeStampMixin, Base): owned_groups = relationship("Group", back_populates="owner", cascade="all, delete-orphan") owned_graphs = relationship("Graph", back_populates="owner", cascade="all, delete-orphan") owned_layouts = relationship("Layout", back_populates="owner", cascade="all, delete-orphan") + owned_comments = relationship("Comment", back_populates="owner", cascade="all, delete-orphan") + owned_discussions = relationship("Discussion", back_populates="owner", cascade="all, delete-orphan") + owned_reactions = relationship("Reaction", back_populates="owner", cascade="all, delete-orphan") member_groups = association_proxy('user_groups', 'group') @@ -82,6 +88,8 @@ class Group(IDMixin, TimeStampMixin, Base): invite_code = Column(String, nullable=False) owner = relationship("User", back_populates="owned_groups", uselist=False) + group_discussions = relationship("Discussion", back_populates="group", cascade="all, delete-orphan") + members = association_proxy('member_users', 'user') graphs = association_proxy('shared_graphs', 'graph') @@ -102,6 +110,7 @@ def serialize(cls, **kwargs): 'description': cls.description, 'total_graphs': len(cls.graphs), 'total_members': len(cls.members), + 'group_discussions': len(cls.group_discussions), 'created_at': cls.created_at.isoformat(), 'updated_at': cls.updated_at.isoformat() } diff --git a/applications/users/urls.py b/applications/users/urls.py index 02bc8a3a..162a6548 100644 --- a/applications/users/urls.py +++ b/applications/users/urls.py @@ -5,6 +5,7 @@ 
url(r'^groups/$', views.groups_page, name='groups'), url(r'^groups/(?P[^/]+)$', views.group_page, name='group'), + url(r'^groups/(?P[^/]+)/discussions/(?P[^/]+)$', views.discussion_page, name='discussion'), url(r'^groups/(?P[^/]+)/join/$', views.join_group_page, name='signup_by_invitation'), @@ -22,7 +23,11 @@ # Group Graphs url(r'^ajax/groups/(?P[^/]+)/graphs$', views.group_graphs_ajax_api, name='group_graphs_ajax_api'), url(r'^ajax/groups/(?P[^/]+)/graphs/(?P[^/]+)$', views.group_graphs_ajax_api, name='group_graphs_ajax_api'), - + # Group Discussions + url(r'^ajax/groups/(?P[^/]+)/discussions$', views.group_discussions_ajax_api, name='group_discussions_ajax_api'), + url(r'^ajax/groups/(?P[^/]+)/discussions/(?P[^/]+)$', views.discussion_comments_ajax_api, name='discussion_comments_ajax_api'), + # Comment Reactions + url(r'^ajax/groups/(?P[^/]+)/discussions/(?P[^/]+)/(?P[^/]+)$', views.comment_reactions_ajax_api, name='comment_reactions_ajax_api'), # REST APIs Endpoints # Groups diff --git a/applications/users/views.py b/applications/users/views.py index ccc0cd6e..de4c77ff 100644 --- a/applications/users/views.py +++ b/applications/users/views.py @@ -1,6 +1,7 @@ import json import applications.users.controllers as users +import applications.discussions.controllers as discussions from django.http import HttpResponse, QueryDict from django.shortcuts import render, redirect from django.template import RequestContext @@ -14,21 +15,43 @@ @is_authenticated(redirect_url='/') def groups_page(request): - """ + """ Wrapper view for the groups page. :param request: HTTP GET Request. """ - if 'GET' == request.method: - context = RequestContext(request, {}) - return render(request, 'groups/index.html', context) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like POST, PUT, UPDATE. 
+ if 'GET' == request.method: + context = RequestContext(request, {}) + return render(request, 'groups/index.html', context) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like POST, PUT, UPDATE. @is_authenticated(redirect_url='/') def group_page(request, group_id): + """ + Wrapper view for the group page. /groups/ + + :param request: HTTP GET Request. + + Parameters + ---------- + group_id : string + Unique ID of the group. Required """ + if 'GET' == request.method: + context = RequestContext(request, {}) + context.push({ + "group": _get_group(request, int(group_id)), + }) + return render(request, 'group/index.html', context) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like POST, PUT, UPDATE. + + +@is_authenticated(redirect_url='/') +def discussion_page(request, group_id, discussion_id): + """ Wrapper view for the group page. /groups/ :param request: HTTP GET Request. @@ -38,18 +61,18 @@ def group_page(request, group_id): group_id : string Unique ID of the group. Required """ - if 'GET' == request.method: - context = RequestContext(request, {}) - context.push({ - "group": _get_group(request, int(group_id)), - }) - return render(request, 'group/index.html', context) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like POST, PUT, UPDATE. + if 'GET' == request.method: + context = RequestContext(request, {}) + context.push({ + "discussion": _get_discussion(request, int(discussion_id), int(group_id)), + }) + return render(request, 'discussions/index.html', context) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like POST, PUT, UPDATE. def join_group_page(request, group_id): - """ + """ Wrapper view for the join_group_page by invitation. /groups//invite/ :param request: HTTP GET Request. @@ -59,49 +82,50 @@ def join_group_page(request, group_id): group_id : string Unique ID of the group. 
Required """ - context = RequestContext(request, {}) - - if 'GET' == request.method: - group = users.get_group_by_id(request, group_id) - if group is not None and group.invite_code == request.GET.get('code', None): - if request.session['uid'] is None: - context.push({ - "group": group, - "invite_code": request.GET.get('code', None) - }) - return render(request, 'join_group/index.html', context) - else: - try: - users.add_group_member(request, group_id, member_email=request.session['uid']) - finally: - return redirect('/groups/'+group_id) - else: - return redirect('/') # TODO: change it to signup page. Currently we dont have a signup link. - elif 'POST' == request.method: - - group = users.get_group_by_id(request, group_id) - if group is not None and group.invite_code == request.POST.get('code', None): - try: - if request.session['uid'] is None: - user = users.register(request, username=request.POST.get('user_id', None), password=request.POST.get('password', None)) - if user is not None: - request.session['uid'] = user.email - request.session['admin'] = user.is_admin - - users.add_group_member(request, group_id, member_id=user.id) - - return redirect('/groups/'+group_id) - except GraphSpaceError as e: - context.push({ - "error_message": e.get_message(), - "group": group, - "invite_code": request.POST.get('code', None) - }) - return render(request, 'join_group/index.html', context) - else: - return redirect('/') # TODO: change it to signup page. Currently we dont have a signup link. - else: - raise MethodNotAllowed(request) # Handle other type of request methods like POST, PUT, UPDATE. 
+ context = RequestContext(request, {}) + + if 'GET' == request.method: + group = users.get_group_by_id(request, group_id) + if group is not None and group.invite_code == request.GET.get('code', None): + if request.session['uid'] is None: + context.push({ + "group": group, + "invite_code": request.GET.get('code', None) + }) + return render(request, 'join_group/index.html', context) + else: + try: + users.add_group_member(request, group_id, member_email=request.session['uid']) + finally: + return redirect('/groups/' + group_id) + else: + return redirect('/') # TODO: change it to signup page. Currently we dont have a signup link. + elif 'POST' == request.method: + + group = users.get_group_by_id(request, group_id) + if group is not None and group.invite_code == request.POST.get('code', None): + try: + if request.session['uid'] is None: + user = users.register(request, username=request.POST.get('user_id', None), + password=request.POST.get('password', None)) + if user is not None: + request.session['uid'] = user.email + request.session['admin'] = user.is_admin + + users.add_group_member(request, group_id, member_id=user.id) + + return redirect('/groups/' + group_id) + except GraphSpaceError as e: + context.push({ + "error_message": e.get_message(), + "group": group, + "invite_code": request.POST.get('code', None) + }) + return render(request, 'join_group/index.html', context) + else: + return redirect('/') # TODO: change it to signup page. Currently we dont have a signup link. + else: + raise MethodNotAllowed(request) # Handle other type of request methods like POST, PUT, UPDATE. ''' @@ -110,7 +134,7 @@ def join_group_page(request, group_id): def users_ajax_api(request): - """ + """ Handles any request sent to following urls: /javascript/users @@ -123,11 +147,11 @@ def users_ajax_api(request): response : JSON Response """ - return _users_api(request) + return _users_api(request) def _users_api(request): - """ + """ Handles any request (GET/POST) sent to /users. 
Parameters @@ -138,17 +162,17 @@ def _users_api(request): ------- """ - if 'application/json' in request.META.get('HTTP_ACCEPT', None): - if request.method == "GET": - return HttpResponse(json.dumps(_get_users(request, query=request.GET)), content_type="application/json") - else: - raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. - else: - raise BadRequest(request) + if 'application/json' in request.META.get('HTTP_ACCEPT', None): + if request.method == "GET": + return HttpResponse(json.dumps(_get_users(request, query=request.GET)), content_type="application/json") + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. + else: + raise BadRequest(request) def _get_users(request, query={}): - """ + """ Query Parameters ---------- @@ -185,28 +209,30 @@ def _get_users(request, query={}): """ - # Validate search graph groups API request + # Validate search graph groups API request + + total, users_list = users.search_users(request, + email=query.get('email', None), + limit=query.get('limit', 20), + offset=query.get('offset', 0), + order=query.get('order', 'asc'), + sort=query.get('sort', 'email')) - total, users_list = users.search_users(request, - email=query.get('email', None), - limit=query.get('limit', 20), - offset=query.get('offset', 0), - order=query.get('order', 'asc'), - sort=query.get('sort', 'email')) + return { + 'total': total, + 'users': [utils.serializer(user) for user in users_list] + } - return { - 'total': total, - 'users': [utils.serializer(user) for user in users_list] - } ''' Groups APIs ''' + @csrf_exempt @is_authenticated() def groups_rest_api(request, group_id=None): - """ + """ Handles any request sent to following urls: /api/v1/groups /api/v1/groups/ @@ -220,11 +246,11 @@ def groups_rest_api(request, group_id=None): response : JSON Response """ - return _groups_api(request, group_id=group_id) + return _groups_api(request, group_id=group_id) def 
groups_ajax_api(request, group_id=None): - """ + """ Handles any request sent to following urls: /javascript/groups /javascript/groups/ @@ -238,11 +264,11 @@ def groups_ajax_api(request, group_id=None): response : JSON Response """ - return _groups_api(request, group_id=group_id) + return _groups_api(request, group_id=group_id) def _groups_api(request, group_id=None): - """ + """ Handles any request (GET/POST) sent to /groups or groups/ Parameters @@ -253,33 +279,33 @@ def _groups_api(request, group_id=None): ------- """ - if request.META.get('HTTP_ACCEPT', None) == 'application/json': - if request.method == "GET" and group_id is None: - return HttpResponse(json.dumps(_get_groups(request, query=request.GET)), content_type="application/json") - elif request.method == "GET" and group_id is not None: - return HttpResponse(json.dumps(_get_group(request, group_id)), content_type="application/json", - status=200) - elif request.method == "POST" and group_id is None: - return HttpResponse(json.dumps(_add_group(request, group=request.POST)), content_type="application/json", - status=201) - elif request.method == "PUT" and group_id is not None: - return HttpResponse(json.dumps(_update_group(request, group_id, group=QueryDict(request.body))), - content_type="application/json", - status=200) - elif request.method == "DELETE" and group_id is not None: - _delete_group(request, group_id) - return HttpResponse(json.dumps({ - "message": "Successfully deleted group with id=%s" % group_id - }), content_type="application/json", status=200) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
- else: - raise BadRequest(request) + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "GET" and group_id is None: + return HttpResponse(json.dumps(_get_groups(request, query=request.GET)), content_type="application/json") + elif request.method == "GET" and group_id is not None: + return HttpResponse(json.dumps(_get_group(request, group_id)), content_type="application/json", + status=200) + elif request.method == "POST" and group_id is None: + return HttpResponse(json.dumps(_add_group(request, group=request.POST)), content_type="application/json", + status=201) + elif request.method == "PUT" and group_id is not None: + return HttpResponse(json.dumps(_update_group(request, group_id, group=QueryDict(request.body))), + content_type="application/json", + status=200) + elif request.method == "DELETE" and group_id is not None: + _delete_group(request, group_id) + return HttpResponse(json.dumps({ + "message": "Successfully deleted group with id=%s" % group_id + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
+ else: + raise BadRequest(request) @is_authenticated() def _add_group(request, group={}): - """ + """ Group Parameters ---------- name : string @@ -310,21 +336,21 @@ def _add_group(request, group={}): """ - # Validate add graph API request - user_role = authorization.user_role(request) - if user_role == authorization.UserRole.LOGGED_IN: - if get_request_user(request) != group.get('owner_email', None): - raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateGroupForOtherUser, - args=group.get('owner_email', None)) + # Validate add graph API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if get_request_user(request) != group.get('owner_email', None): + raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateGroupForOtherUser, + args=group.get('owner_email', None)) - return utils.serializer(users.add_group(request, - name=request.POST.get('name', None), - description=group.get('description', None), - owner_email=group.get('owner_email', None))) + return utils.serializer(users.add_group(request, + name=request.POST.get('name', None), + description=group.get('description', None), + owner_email=group.get('owner_email', None))) def _get_groups(request, query={}): - """ + """ Query Parameters ---------- @@ -369,33 +395,60 @@ def _get_groups(request, query={}): """ - # Validate search graph groups API request - user_role = authorization.user_role(request) - if user_role == authorization.UserRole.LOGGED_IN: - if get_request_user(request) != query.get('member_email', None) \ - and get_request_user(request) != query.get('owner_email', None): - raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedGroupAccess, - args=get_request_user(request)) + # Validate search graph groups API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if get_request_user(request) != query.get('member_email', None) \ + and 
get_request_user(request) != query.get('owner_email', None): + raise BadRequest(request, error_code=ErrorCodes.Validation.NotAllowedGroupAccess, + args=get_request_user(request)) + + total, groups = users.search_groups(request, + owner_email=query.get('owner_email', None), + member_email=query.get('member_email', None), + name=query.get('name', None), + description=query.get('description', None), + limit=query.get('limit', 20), + offset=query.get('offset', 0), + order=query.get('order', 'desc'), + sort=query.get('sort', 'name')) + + return { + 'total': total, + 'groups': [utils.serializer(group) for group in groups] + } - total, groups = users.search_groups(request, - owner_email=query.get('owner_email', None), - member_email=query.get('member_email', None), - name=query.get('name', None), - description=query.get('description', None), - limit=query.get('limit', 20), - offset=query.get('offset', 0), - order=query.get('order', 'desc'), - sort=query.get('sort', 'name')) - return { - 'total': total, - 'groups': [utils.serializer(group) for group in groups] - } +def _get_group(request, group_id): + """ + Parameters + ---------- + request : object + HTTP GET Request. + group_id : string + Unique ID of the group. 
+ + Returns + ------- + group: object + + Raises + ------ + + Notes + ------ -def _get_group(request, group_id): """ + authorization.validate(request, permission='GROUP_READ', group_id=group_id) + + return utils.serializer(users.get_group_by_id(request, group_id)) + + +def _get_discussion(request, discussion_id, group_id): + """ + Parameters ---------- request : object @@ -415,14 +468,14 @@ def _get_group(request, group_id): """ - authorization.validate(request, permission='GROUP_READ', group_id=group_id) + authorization.validate(request, permission='GROUP_READ', group_id=group_id) - return utils.serializer(users.get_group_by_id(request, group_id)) + return utils.serializer(discussions.get_discussion_by_id(request, discussion_id)) @is_authenticated() def _update_group(request, group_id, group={}): - """ + """ Group Parameters ---------- name : string @@ -454,17 +507,17 @@ def _update_group(request, group_id, group={}): ------ """ - authorization.validate(request, permission='GROUP_UPDATE', group_id=group_id) + authorization.validate(request, permission='GROUP_UPDATE', group_id=group_id) - return utils.serializer(users.update_group(request, group_id=group_id, - name=group.get('name', None), - description=group.get('description', None), - owner_email=group.get('owner_email', None))) + return utils.serializer(users.update_group(request, group_id=group_id, + name=group.get('name', None), + description=group.get('description', None), + owner_email=group.get('owner_email', None))) @is_authenticated() def _delete_group(request, group_id): - """ + """ Parameters ---------- @@ -484,19 +537,20 @@ def _delete_group(request, group_id): ------ """ - authorization.validate(request, permission='GROUP_DELETE', group_id=group_id) + authorization.validate(request, permission='GROUP_DELETE', group_id=group_id) - users.delete_group_by_id(request, group_id) + users.delete_group_by_id(request, group_id) ''' Group Members APIs ''' + @csrf_exempt @is_authenticated() def 
group_members_rest_api(request, group_id, member_id=None): - """ + """ Handles any request sent to following urls: /api/v1/groups//members /api/v1/groups//members/ @@ -510,11 +564,11 @@ def group_members_rest_api(request, group_id, member_id=None): response : JSON Response """ - return _group_members_api(request, group_id, member_id=member_id) + return _group_members_api(request, group_id, member_id=member_id) def group_members_ajax_api(request, group_id, member_id=None): - """ + """ Handles any request sent to following urls: /javascript/groups//members /javascript/groups//members/ @@ -528,11 +582,11 @@ def group_members_ajax_api(request, group_id, member_id=None): response : JSON Response """ - return _group_members_api(request, group_id, member_id=member_id) + return _group_members_api(request, group_id, member_id=member_id) def _group_members_api(request, group_id, member_id=None): - """ + """ Handles any request (GET/POST) sent to groups//members or groups//members/. Parameters @@ -543,29 +597,29 @@ def _group_members_api(request, group_id, member_id=None): ------- """ - if request.META.get('HTTP_ACCEPT', None) == 'application/json': - if group_id is None: - raise Exception("Group ID is required.") - - if request.method == "GET" and member_id is None: - return HttpResponse(json.dumps(_get_group_members(request, group_id)), - content_type="application/json") - elif request.method == "POST" and member_id is None: - return HttpResponse(json.dumps(_add_group_member(request, group_id)), content_type="application/json", - status=201) - elif request.method == "DELETE" and member_id is not None: - _delete_group_member(request, group_id, member_id) - return HttpResponse(json.dumps({ - "message": "Successfully deleted member with id=%s from group with id=%s" % (member_id, group_id) - }), content_type="application/json", status=200) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
- else: - raise BadRequest(request) + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if group_id is None: + raise Exception("Group ID is required.") + + if request.method == "GET" and member_id is None: + return HttpResponse(json.dumps(_get_group_members(request, group_id)), + content_type="application/json") + elif request.method == "POST" and member_id is None: + return HttpResponse(json.dumps(_add_group_member(request, group_id)), content_type="application/json", + status=201) + elif request.method == "DELETE" and member_id is not None: + _delete_group_member(request, group_id, member_id) + return HttpResponse(json.dumps({ + "message": "Successfully deleted member with id=%s from group with id=%s" % (member_id, group_id) + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. + else: + raise BadRequest(request) def _get_group_members(request, group_id): - """ + """ Parameters ---------- @@ -585,18 +639,18 @@ def _get_group_members(request, group_id): ------ """ - authorization.validate(request, permission='GROUP_READ', group_id=group_id) + authorization.validate(request, permission='GROUP_READ', group_id=group_id) - members = users.get_group_members(request, group_id) - return { - "members": [utils.serializer(user) for user in members], - "total": len(members) - } + members = users.get_group_members(request, group_id) + return { + "members": [utils.serializer(user) for user in members], + "total": len(members) + } @is_authenticated() def _add_group_member(request, group_id): - """ + """ Body Parameters ---------- member_id : string @@ -622,17 +676,17 @@ def _add_group_member(request, group_id): Notes ------ """ - authorization.validate(request, permission='GROUP_UPDATE', group_id=group_id) + authorization.validate(request, permission='GROUP_UPDATE', group_id=group_id) - return utils.serializer(users.add_group_member(request, - group_id=group_id, - 
member_id=request.POST.get('member_id', None), - member_email=request.POST.get('member_email', None))) + return utils.serializer(users.add_group_member(request, + group_id=group_id, + member_id=request.POST.get('member_id', None), + member_email=request.POST.get('member_email', None))) @is_authenticated() def _delete_group_member(request, group_id, member_id): - """ + """ Parameters ---------- request : object @@ -653,21 +707,22 @@ def _delete_group_member(request, group_id, member_id): Notes ------ """ - authorization.validate(request, permission='GROUP_UPDATE', group_id=group_id) + authorization.validate(request, permission='GROUP_UPDATE', group_id=group_id) - users.delete_group_member(request, - group_id=group_id, - member_id=member_id) + users.delete_group_member(request, + group_id=group_id, + member_id=member_id) ''' Group Graphs APIs ''' + @csrf_exempt @is_authenticated() def group_graphs_rest_api(request, group_id, graph_id=None): - """ + """ Handles any request sent to following urls: /api/v1/groups//graphs /api/v1/groups//graphs/ @@ -681,11 +736,11 @@ def group_graphs_rest_api(request, group_id, graph_id=None): response : JSON Response """ - return _group_graphs_api(request, group_id, graph_id=graph_id) + return _group_graphs_api(request, group_id, graph_id=graph_id) def group_graphs_ajax_api(request, group_id, graph_id=None): - """ + """ Handles any request sent to following urls: /javascript/groups//graphs /javascript/groups//graphs/ @@ -699,11 +754,11 @@ def group_graphs_ajax_api(request, group_id, graph_id=None): response : JSON Response """ - return _group_graphs_api(request, group_id, graph_id=graph_id) + return _group_graphs_api(request, group_id, graph_id=graph_id) def _group_graphs_api(request, group_id, graph_id=None): - """ + """ Handles any request (GET/POST) sent to groups//graphs or groups//graphs/. 
Parameters @@ -716,26 +771,26 @@ def _group_graphs_api(request, group_id, graph_id=None): ------- """ - if request.META.get('HTTP_ACCEPT', None) == 'application/json': - if request.method == "GET" and graph_id is None: - return HttpResponse(json.dumps(_get_group_graphs(request, group_id)), - content_type="application/json") - elif request.method == "POST" and graph_id is None: - return HttpResponse(json.dumps(_add_group_graph(request, group_id)), content_type="application/json", - status=201) - elif request.method == "DELETE" and graph_id is not None: - _delete_group_graph(request, group_id, graph_id) - return HttpResponse(json.dumps({ - "message": "Successfully deleted graph with id=%s from group with id=%s" % (graph_id, group_id) - }), content_type="application/json", status=200) - else: - raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. - else: - raise BadRequest(request) + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "GET" and graph_id is None: + return HttpResponse(json.dumps(_get_group_graphs(request, group_id)), + content_type="application/json") + elif request.method == "POST" and graph_id is None: + return HttpResponse(json.dumps(_add_group_graph(request, group_id)), content_type="application/json", + status=201) + elif request.method == "DELETE" and graph_id is not None: + _delete_group_graph(request, group_id, graph_id) + return HttpResponse(json.dumps({ + "message": "Successfully deleted graph with id=%s from group with id=%s" % (graph_id, group_id) + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
+ else: + raise BadRequest(request) def _get_group_graphs(request, group_id): - """ + """ Query Parameters ---------- @@ -773,29 +828,29 @@ def _get_group_graphs(request, group_id): ------ """ - authorization.validate(request, permission='GROUP_READ', group_id=group_id) - - names = request.GET.get('names', None) - nodes = request.GET.get('nodes', None) - edges = request.GET.get('edges', None) - total, graphs = users.search_group_graphs(request, - group_id=group_id, - owner_email=request.GET.get('owner_email', None), - names=names if names is None or isinstance(names, list) else [names], - nodes=nodes if nodes is None or isinstance(nodes, list) else [nodes], - edges=edges if edges is None or isinstance(edges, list) else [edges], - limit=request.GET.get('limit', 20), - offset=request.GET.get('offset', 0)) - - return { - 'total': total, - 'graphs': [utils.serializer(graph) for graph in graphs] - } + authorization.validate(request, permission='GROUP_READ', group_id=group_id) + + names = request.GET.get('names', None) + nodes = request.GET.get('nodes', None) + edges = request.GET.get('edges', None) + total, graphs = users.search_group_graphs(request, + group_id=group_id, + owner_email=request.GET.get('owner_email', None), + names=names if names is None or isinstance(names, list) else [names], + nodes=nodes if nodes is None or isinstance(nodes, list) else [nodes], + edges=edges if edges is None or isinstance(edges, list) else [edges], + limit=request.GET.get('limit', 20), + offset=request.GET.get('offset', 0)) + + return { + 'total': total, + 'graphs': [utils.serializer(graph) for graph in graphs] + } @is_authenticated() def _add_group_graph(request, group_id): - """ + """ Body Parameters ---------- graph_id : string @@ -819,17 +874,17 @@ def _add_group_graph(request, group_id): Notes ------ """ - authorization.validate(request, permission='GRAPH_SHARE', graph_id=request.POST.get('graph_id', None)) - authorization.validate(request, permission='GROUP_SHARE', 
group_id=group_id) + authorization.validate(request, permission='GRAPH_SHARE', graph_id=request.POST.get('graph_id', None)) + authorization.validate(request, permission='GROUP_SHARE', group_id=group_id) - return utils.serializer(users.add_group_graph(request, - group_id=group_id, - graph_id=request.POST.get('graph_id', None))) + return utils.serializer(users.add_group_graph(request, + group_id=group_id, + graph_id=request.POST.get('graph_id', None))) @is_authenticated() def _delete_group_graph(request, group_id, graph_id): - """ + """ Parameters ---------- request : object @@ -850,11 +905,579 @@ def _delete_group_graph(request, group_id, graph_id): Notes ------ """ - authorization.validate(request, permission='GRAPH_SHARE', graph_id=graph_id) - authorization.validate(request, permission='GROUP_SHARE', group_id=group_id) + authorization.validate(request, permission='GRAPH_SHARE', graph_id=graph_id) + authorization.validate(request, permission='GROUP_SHARE', group_id=group_id) + + users.delete_group_graph(request, + group_id=group_id, + graph_id=graph_id) + + +def group_discussions_ajax_api(request, group_id, discussion_id=None): + """ + Handles any request sent to following urls: + /javascript/groups/#discussions + + Parameters + ---------- + request - HTTP Request + + Returns + ------- + response : JSON Response + + """ + return _group_discussions_api(request, group_id, discussion_id=discussion_id) + + +def _group_discussions_api(request, group_id, discussion_id=None): + """ + Handles any request (GET/POST) sent to /groups or groups/#discussions + + Parameters + ---------- + request - HTTP Request + + Returns + ------- + + """ + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "POST" and group_id is not None: + return HttpResponse(json.dumps(_add_discussion(request, group_id, discussion=request.POST)), + content_type="application/json", + status=201) + elif request.method == "GET" and discussion_id is None: + return 
HttpResponse(json.dumps(_get_group_discussions(request, group_id, query=request.GET)), + content_type="application/json") + elif request.method == "PUT" and discussion_id is None: + return HttpResponse(json.dumps(_update_discussion(request, discussion=QueryDict(request.body))), + content_type="application/json", + status=200) + elif request.method == "DELETE" and group_id is not None: + _delete_discussion(request, QueryDict(request.body).get('discussion_id', None)) + return HttpResponse(json.dumps({ + "message": "Successfully deleted discussion" + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. + else: + raise BadRequest(request) + + +@is_authenticated() +def _add_discussion(request, group_id, discussion={}): + """ + Group Parameters + ---------- + group_id : Integer + Unique Id of group. Required + topic : string + Topic of group. Required + description : string + Topic of group. Optional + owner_email : string + Email of the Owner of the discussion. Required + + + Parameters + ---------- + discussion : dict + Dictionary containing the data of the discussion being added. + request : object + HTTP POST Request. + + Returns + ------- + discussion : object + Newly created discussion object. 
+ + Raises + ------ + + Notes + ------ + + """ + + # Validate add graph API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if get_request_user(request) != discussion.get('owner_email', None): + raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateGroupForOtherUser, + args=discussion.get('owner_email', None)) + + return utils.serializer(discussions.add_discussion(request, group_id=group_id, + topic=discussion.get('topic', None), + description=discussion.get('description', None), + owner_email=discussion.get('owner_email', None))) + + +def _get_group_discussions(request, group_id, query={}): + """ + + Query Parameters + ---------- + keyword : string + Keyword for searching of the Discussion. + limit : integer + Number of entities to return. Default value is 20. + offset : integer + Offset the list of returned entities by this number. Default value is 0. + + Parameters + ---------- + request : object + HTTP GET Request. + group_id : string + Unique ID of the group. + + Returns + ------- + total : integer + Number of discussions matching the request. + discussions : List of Discussions. + List of Discussions Objects with given limit and offset. 
+ + Raises + ------ + + Notes + ------ + + """ + + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + authorization.validate(request, permission='GROUP_READ', group_id=group_id) + + total, discussionss = discussions.get_discussions(request, + group_id=group_id, + keyword=query.get('keyword', None), + limit=query.get('limit', 20), + offset=query.get('offset', 0), + order=query.get('order', None), + sort=query.get('sort', None)) + + return { + 'total': total, + 'discussions': [utils.serializer(discussion) for discussion in discussionss] + } + + +@is_authenticated() +def _update_discussion(request, discussion={}): + """ + Group Parameters + ---------- + is_closed : integer + status of the discussion (0,1) . Optional + + + Parameters + ---------- + discussion : dict + Dictionary containing the data of the discussion being updated. + request : object + HTTP POST Request. + discussion_id : string + Unique ID of the discussion. + + Returns + ------- + discussion : object + Newly created discussion object. + + Raises + ------ + + Notes + ------ + + """ + + return utils.serializer(discussions.update_discussion(request, discussion_id=discussion.get('discussion_id', None), + is_closed=discussion.get('is_closed', None))) + + +def _delete_discussion(request, discussion_id): + """ + + Parameters + ---------- + request : object + HTTP GET Request. + discussion_id : string + Unique ID of the discussion. 
+ + Returns + ------- + None + + Raises + ------ + + Notes + ------ + + """ + authorization.validate(request, permission='DISCUSSION_DELETE', discussion_id=discussion_id) + + discussions.delete_discussion_by_id(request, discussion_id) + + +def discussion_comments_ajax_api(request, group_id, discussion_id): + """ + Handles any request sent to following urls: + /javascript/groups/#discussions/ + + Parameters + ---------- + request - HTTP Request + + Returns + ------- + response : JSON Response + + """ + return _discussion_comments_api(request, group_id, discussion_id) + + +def _discussion_comments_api(request, group_id, discussion_id): + """ + Handles any request (GET/POST) sent to /#discussions/ + + Parameters + ---------- + request - HTTP Request + + Returns + ------- + + """ + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "POST" and discussion_id is not None: + return HttpResponse(json.dumps(_add_discussion_comment(request, discussion_id, discussion=request.POST)), + content_type="application/json", + status=201) + elif request.method == "GET" and discussion_id is not None: + return HttpResponse(json.dumps(_get_discussion_comments(request, group_id, discussion_id)), + content_type="application/json") + elif request.method == "PUT" and discussion_id is not None: + return HttpResponse(json.dumps(_update_discussion_comment(request, comment=QueryDict(request.body))), + content_type="application/json", + status=200) + elif request.method == "DELETE" and discussion_id is not None: + _delete_discussion_comment(request, QueryDict(request.body).get('comment_id', None)) + return HttpResponse(json.dumps({ + "message": "Successfully deleted discussion" + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. 
+ else: + raise BadRequest(request) + + +@is_authenticated() +def _add_discussion_comment(request, discussion_id, discussion={}): + """ + Group Parameters + ---------- + text : string + Text of the discussion. + owner_email : string + Email of the Owner of the discussion. Required + + + Parameters + ---------- + discussion : dict + Dictionary containing the data of the discussion being added. + discussion_id : string + Unique ID of the discussion. + request : object + HTTP POST Request. + + Returns + ------- + comment : object + Newly created comment object. + + Raises + ------ + + Notes + ------ + + """ + + # Validate add graph API request + user_role = authorization.user_role(request) + if user_role == authorization.UserRole.LOGGED_IN: + if get_request_user(request) != discussion.get('owner_email', None): + raise BadRequest(request, error_code=ErrorCodes.Validation.CannotCreateGroupForOtherUser, + args=discussion.get('owner_email', None)) + + return utils.serializer(discussions.add_discussion_comment(request, discussion_id=discussion_id, + text=discussion.get('text', None), + owner_email=discussion.get('owner_email', None))) + + +def _get_discussion_comments(request, group_id, discussion_id): + """ + Parameters + ---------- + request : object + HTTP GET Request. + group_id : string + Unique ID of the group. + discussion_id : string + Unique ID of the discussion. + + + Returns + ------- + total : integer + Number of comments matching the request. + comments : List of Comments. + List of comments Objects. 
+ + Raises + ------ + + Notes + ------ + + """ + authorization.validate(request, permission='GROUP_READ', group_id=group_id) + + total, comments = discussions.search_comments_by_discussion_id(request, group_id=group_id, + discussion_id=discussion_id) + + return { + 'total': total, + 'comments': [utils.serializer(comment) for comment in comments] + } + + +@is_authenticated() +def _update_discussion_comment(request, comment={}): + """ + Group Parameters + ---------- + text : string + Text of comment. Required + + + Parameters + ---------- + request : object + HTTP POST Request. + comment_id : string + Unique ID of the comment. + + Returns + ------- + comment : object + Newly created comment object. + + Raises + ------ + + Notes + ------ + + """ + + return utils.serializer(discussions.update_comment(request, comment_id=comment.get('comment_id', None), + text=comment.get('text', None))) + + +def _delete_discussion_comment(request, comment_id): + """ + + Parameters + ---------- + request : object + HTTP GET Request. + comment_id : string + Unique ID of the comment. 
+ + Returns + ------- + None + + Raises + ------ + + Notes + ------ + + """ - users.delete_group_graph(request, - group_id=group_id, - graph_id=graph_id) + discussions.delete_comment_by_id(request, comment_id) +def comment_reactions_ajax_api(request, group_id, discussion_id, comment_id): + """ + Handles any request sent to following urls: + /javascript/groups/#discussions/ + + Parameters + ---------- + request - HTTP Request + + Returns + ------- + response : JSON Response + + """ + return _comments_reactions_api(request, group_id, discussion_id, comment_id) + + +def _comments_reactions_api(request, group_id, discussion_id, comment_id): + """ + Handles any request (GET/POST) sent to /#discussions/ + + Parameters + ---------- + request - HTTP Request + + Returns + ------- + + """ + if request.META.get('HTTP_ACCEPT', None) == 'application/json': + if request.method == "POST": + return HttpResponse(json.dumps(_add_comment_reaction(request, comment_id, reaction=request.POST)), + content_type="application/json", + status=201) + elif request.method == "GET": + return HttpResponse(json.dumps(_get_comment_reactions(request, comment_id, reaction=request.GET)), + content_type="application/json") + elif request.method == "DELETE": + _delete_comment_reaction(request, comment_id, reaction=QueryDict(request.body)) + return HttpResponse(json.dumps({ + "message": "Successfully deleted discussion" + }), content_type="application/json", status=200) + else: + raise MethodNotAllowed(request) # Handle other type of request methods like OPTIONS etc. + else: + raise BadRequest(request) + + +@is_authenticated() +def _add_comment_reaction(request, comment_id, reaction={}): + """ + Reaction Parameters + ---------- + id : Integer + Unique ID of the comment. + content : string + content of the reaction. + owner_email : string + Email of the Owner of the reaction. Required + + + Parameters + ---------- + reaction : dict + Dictionary containing the data of the reaction being added. 
+ request : object + HTTP POST Request. + + Returns + ------- + reaction : object + Newly created reaction object. + + Raises + ------ + + Notes + ------ + + """ + + return utils.serializer(discussions.add_comment_reaction(request, comment_id=comment_id, + content=reaction.get('content', None), + owner_email=reaction.get('owner_email', None))) + + +@is_authenticated() +def _delete_comment_reaction(request, comment_id, reaction={}): + """ + Reaction Parameters + ---------- + id : Integer + Unique ID of the comment. + content : string + content of the reaction. + owner_email : string + Email of the Owner of the reaction. Required + + + Parameters + ---------- + reaction : dict + Dictionary containing the data of the reaction being added. + request : object + HTTP POST Request. + + Returns + ------- + reaction : object + Newly created reaction object. + + Raises + ------ + + Notes + ------ + + """ + + utils.serializer(discussions.delete_comment_reaction(request, comment_id=comment_id, + content=reaction.get('content', None), + owner_email=reaction.get('owner_email', None))) + + +@is_authenticated() +def _get_comment_reactions(request, comment_id, reaction={}): + """ + Reaction Parameters + ---------- + id : Integer + Unique ID of the comment. + content : string + content of the reaction. + + + Parameters + ---------- + reaction : dict + Dictionary containing the data of the reaction being added. + request : object + HTTP POST Request. + + Returns + ------- + total : integer + Number of reaction associated with the comment_id. + reactions : List of Reactions. + List of Reaction Objects associated with the comment_id. 
+ + Raises + ------ + + Notes + ------ + + """ + total, reactions = discussions.get_comment_reactions(request, comment_id=comment_id, + content=reaction.get('content', None)) + + return { + 'total': total, + 'reactions': [utils.serializer(reaction) for reaction in reactions] + } diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..fe63bb3d --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,13 @@ +version: '2' +services: + web: + build: . + container_name: GRAPHSPACECOMMENTSYSTEM + command: bash -c "/GraphSpace/docker_config/docker_command.sh" + restart: unless-stopped + volumes: + - /GraphSpace + expose: + - "8002" + ports: + - "8002:8002" diff --git a/docker_config/docker_command.sh b/docker_config/docker_command.sh new file mode 100644 index 00000000..bc543640 --- /dev/null +++ b/docker_config/docker_command.sh @@ -0,0 +1,63 @@ +#!/bin/bash +echo "Pull changes" +cd /GraphSpace +git stash +git pull + +echo "Copy configurations" +yes | cp -rf /GraphSpace/docker_config/elasticsearch/elasticsearch.yml /elasticsearch/config/elasticsearch.yml +yes | cp -rf /GraphSpace/docker_config/redis/redis.conf /redis/redis.conf + +echo "Starting postgres" +service postgresql start + +echo "Sleeping for 5 seconds" +sleep 5 + +echo "Starting redis" +mkdir /GraphSpace/redis +redis-server /redis/redis.conf + +# echo "Creating logs directory" +# mkdir logs +# mkdir data +# > debug.log + +echo "Starting elasticsearch" +su - elasticsearch -c '/elasticsearch/bin/elasticsearch -d' + +echo "Create virtual env" +virtualenv venv + +echo "Activate venv" +. 
venv/bin/activate + +echo "Install dependency" +pip install -r requirements.txt +bower install --allow-root + +cp graphspace/settings/docker.py graphspace/settings/production.py + +cd / +echo "Changing permissions" +chmod -R 777 /GraphSpace + +echo "Sleeping for 10 seconds" +sleep 10 + +echo "Activate virtual env" +cd /GraphSpace && source venv/bin/activate + +echo "Migrate" +python manage.py migrate --settings=graphspace.settings.production + +echo "Running Daphne" +exec daphne -b 0.0.0.0 -p 8002 graphspace.asgi:channel_layer & + +echo "Sleeping for 5 seconds" +sleep 5 + +echo "Running server" +exec python manage.py runworker + +echo "Enjoy browsing GraphSpace" diff --git a/docker_config/elasticsearch/elasticsearch.yml b/docker_config/elasticsearch/elasticsearch.yml new file mode 100644 index 00000000..c829a7f4 --- /dev/null +++ b/docker_config/elasticsearch/elasticsearch.yml @@ -0,0 +1,88 @@ +# ======================== Elasticsearch Configuration ========================= +# +# NOTE: Elasticsearch comes with reasonable defaults for most settings. +# Before you set out to tweak and tune the configuration, make sure you +# understand what are you trying to accomplish and the consequences. +# +# The primary way of configuring a node is via this file. This template lists +# the most important settings you may want to configure for a production cluster. 
+# +# Please consult the documentation for further information on configuration options: +# https://www.elastic.co/guide/en/elasticsearch/reference/index.html +# +# ---------------------------------- Cluster ----------------------------------- +# +# Use a descriptive name for your cluster: +# +cluster.name: graphspaceDockerCluster +# +# ------------------------------------ Node ------------------------------------ +# +# Use a descriptive name for the node: +# +node.name: "graphspace-docker-node" +# +# Add custom attributes to the node: +# +#node.attr.rack: r1 +# +# ----------------------------------- Paths ------------------------------------ +# +# Path to directory where to store the data (separate multiple locations by comma): +# +#path.data: /GraphSpace/data +# +# Path to log files: +# +#path.logs: /GraphSpace/logs +# +# ----------------------------------- Memory ----------------------------------- +# +# Lock the memory on startup: +# +#bootstrap.memory_lock: true +# +# Make sure that the heap size is set to about half the memory available +# on the system and that the owner of the process is allowed to use this +# limit. +# +# Elasticsearch performs poorly when the system is swapping the memory. +# +# ---------------------------------- Network ----------------------------------- +# +# Set the bind address to a specific IP (IPv4 or IPv6): +# +#network.host: 127.0.0.1 +# +# Set a custom port for HTTP: +# +#http.port: 9200 +# +# For more information, consult the network module documentation. 
+# +# --------------------------------- Discovery ---------------------------------- +# +# Pass an initial list of hosts to perform discovery when new node is started: +# The default list of hosts is ["127.0.0.1", "[::1]"] +# +#discovery.zen.ping.unicast.hosts: ["host1", "host2"] +# +# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1): +# +#discovery.zen.minimum_master_nodes: 3 +# +# For more information, consult the zen discovery module documentation. +# +# ---------------------------------- Gateway ----------------------------------- +# +# Block initial recovery after a full cluster restart until N nodes are started: +# +#gateway.recover_after_nodes: 3 +# +# For more information, consult the gateway module documentation. +# +# ---------------------------------- Various ----------------------------------- +# +# Require explicit names when deleting indices: +# +#action.destructive_requires_name: true diff --git a/docker_config/redis/redis.conf b/docker_config/redis/redis.conf new file mode 100644 index 00000000..7a496068 --- /dev/null +++ b/docker_config/redis/redis.conf @@ -0,0 +1,1052 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. 
Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all the network interfaces available on the server. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only into +# the IPv4 lookback interface address (this means Redis will be able to +# accept connections only from clients running into the same computer it +# is running). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 127.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. 
+# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode yes + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. 
+tcp-keepalive 300 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize yes + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous liveness pings back to your supervisor. +supervised systemd + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. 
Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. 
+# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir /GraphSpace/redis + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. 
You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. 
+slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. 
+# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. 
+# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. 
+# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# slaves in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover slave instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP and address normally reported by a slave is obtained +# in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the slave to connect with the master. +# +# Port: The port is communicated by the slave during the replication +# handshake, and is normally the port that the slave is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the slave may be actually reachable via different IP and port +# pairs. The following two options can be used by a slave in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# slave-announce-ip 5.5.5.5 +# slave-announce-port 1234 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). 
+# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. 
+# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. 
+# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs a bit more CPU. 3 is very fast but not very accurate. +# +# maxmemory-samples 5 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. 
+ +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. 
+# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. 
+# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. 
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. 
Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. 
+# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. 
+slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. 
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. 
+# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. 
This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. 
+activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. 
+# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes diff --git a/graphspace/asgi.py b/graphspace/asgi.py new file mode 100644 index 00000000..efc66530 --- /dev/null +++ b/graphspace/asgi.py @@ -0,0 +1,11 @@ +import os +import django +from channels.asgi import get_channel_layer + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "graphspace.settings.production") +django.setup() +channel_layer = get_channel_layer() + +from graphspace.database import * +from django.conf import settings +settings.db = Database() \ No newline at end of file diff --git a/graphspace/authorization.py b/graphspace/authorization.py index 062ec83f..5237d0ba 100644 --- a/graphspace/authorization.py +++ b/graphspace/authorization.py @@ -1,6 +1,8 @@ import applications.users as users import applications.graphs as graphs -from graphspace.exceptions import UserNotAuthorized +import applications.comments as comments +import applications.discussions as discussions +from graphspace.exceptions import UserNotAuthorized, BadRequest, ErrorCodes from graphspace.utils import get_request_user @@ -32,7 +34,7 @@ def user_role(request): return UserRole.LOGGED_IN -def 
validate(request, permission, graph_id=None, group_id=None, layout_id=None): +def validate(request, permission, graph_id=None, group_id=None, layout_id=None, comment_id=None, discussion_id=None): """ Validates if the user has the given permissions based on information like graph id, group id or layout id. @@ -74,4 +76,12 @@ def validate(request, permission, graph_id=None, group_id=None, layout_id=None): raise UserNotAuthorized(request) if permission == 'LAYOUT_DELETE' and not graphs.controllers.is_user_authorized_to_delete_layout(request, username=get_request_user(request), layout_id = layout_id): raise UserNotAuthorized(request) + if comment_id is not None: + if permission == 'COMMENT_UPDATE' and not comments.controllers.is_user_authorized_to_update_comment(request, username=get_request_user(request), comment_id = comment_id): + raise BadRequest(request, error_code=ErrorCodes.Validation.UserNotAuthorizedToUpdateComment) + if permission == 'COMMENT_DELETE' and not comments.controllers.is_user_authorized_to_delete_comment(request, username=get_request_user(request), comment_id = comment_id): + raise BadRequest(request, error_code=ErrorCodes.Validation.UserNotAuthorizedToDeleteComment) + if discussion_id is not None: + if permission == 'DISCUSSION_DELETE' and not discussions.controllers.is_user_authorized_to_delete_discussion(request, username=get_request_user(request), discussion_id = discussion_id): + raise BadRequest(request, error_code=ErrorCodes.Validation.UserNotAuthorizedToDeleteDiscussion) return \ No newline at end of file diff --git a/graphspace/consumers.py b/graphspace/consumers.py new file mode 100644 index 00000000..a96005c6 --- /dev/null +++ b/graphspace/consumers.py @@ -0,0 +1,56 @@ +from channels import Group +from channels.auth import channel_session_user_from_http, channel_session_user +from graphspace.utils import websocket_group_name +import applications.users.dal as db +from graphspace.database import * +import re + 
+@channel_session_user_from_http +def websocket_connect(message): + """ + Establish connection to a socket + + :param message: ASGI WebSocket packet-received and send-packet message + """ + if message.http_session['uid'] is not None: + group_name = websocket_group_name(message.http_session['uid']) + message.channel_session['group_name'] = group_name + message.reply_channel.send({ + 'accept': True + }) + Group(group_name).add(message.reply_channel) + else: + message.reply_channel.send({ + 'accept': True + }) + Group(websocket_group_name('anonymous@anonymous.com')).add(message.reply_channel) + +@channel_session_user +def websocket_keepalive(message): + """ + Reconnect/keep-alive user socket connection + + :param message: ASGI WebSocket packet-received and send-packet message + """ + Group(message.channel_session['group_name']).add( + message.reply_channel) + Group(websocket_group_name('anonymous@anonymous.com')).add( + message.reply_channel) + + +# Connected to websocket.disconnect +@channel_session_user +def websocket_disconnect(message): + """ + Disconnect user socket + + :param message: ASGI WebSocket packet-received and send-packet message + """ + message.reply_channel.send({ + 'close': True + }) + if message.channel_session.get('group_name', None) is not None: + Group(message.channel_session['group_name']).discard( + message.reply_channel) + Group(websocket_group_name('anonymous@anonymous.com')).discard( + message.reply_channel) diff --git a/graphspace/exceptions/error_codes.py b/graphspace/exceptions/error_codes.py index 05fc62f7..697d028a 100644 --- a/graphspace/exceptions/error_codes.py +++ b/graphspace/exceptions/error_codes.py @@ -29,10 +29,20 @@ class Validation(object): CannotCreateGroupForOtherUser = ( 1011, "Cannot create group with owner email = `{0}`.") - # Layouts API - NotAllowedLayoutAccess = ( - 1012, "User is not authorized to access layouts which are not shared. 
Set `owner_email` to {0} or `is_shared` to 1.") - CannotCreateLayoutForOtherUser = ( - 1013, "Cannot create layout with owner email = `{0}`.") - LayoutNameAlreadyExists = ( - 1014, "Layout with name `{0}` already exists.") + + # Layouts API + NotAllowedLayoutAccess = (1012, "User is not authorized to access layouts which are not shared. Set `owner_email` to {0} or `is_shared` to 1.") + CannotCreateLayoutForOtherUser = (1013, "Cannot create layout with owner email = `{0}`.") + LayoutNameAlreadyExists = (1014, "Layout with name `{0}` already exists.") + + # Comments API + UserNotAuthorizedToCreateComment = (1015, "Please create an account or login to create a comment on this graph.") + UserNotAuthorizedToUpdateComment = (1016, "You do not have permission to update this comment.") + UserNotAuthorizedToDeleteComment = (1017, "You do not have permission to delete this comment.") + ParentCommentDoesNotExist = (1018, "You cannot reply to comments which do not exist") + CannotReplyToResolvedComment = (1019, "You cannot reply to resolved comment") + UserCannotPinThisComment = (1020, "You cannot pin this comment") + UserCannotUnpinThisComment = (1021, "You cannot unpin this comment") + + # Discussions API + UserNotAuthorizedToDeleteDiscussion = (1022, "You do not have permission to delete this discussion.") \ No newline at end of file diff --git a/graphspace/middleware.py b/graphspace/middleware.py index 317a9d96..1e0845a7 100644 --- a/graphspace/middleware.py +++ b/graphspace/middleware.py @@ -4,11 +4,17 @@ import json from graphspace.exceptions import * from graphspace.exceptions import ErrorCodes, GraphSpaceError - +from graphspace.database import * +from channels.handler import AsgiRequest class SQLAlchemySessionMiddleware(object): def process_request(self, request): - request.db_session = settings.db.session() + # Re-establish DB sessions for ASGI requests which are used for websockets + if type(request) == AsgiRequest: + # Database() is a func in graphspace.database + 
request.db_session = Database().session() + else: + request.db_session = settings.db.session() def process_response(self, request, response): try: diff --git a/graphspace/mixins.py b/graphspace/mixins.py index 7fa9c12e..0723530d 100644 --- a/graphspace/mixins.py +++ b/graphspace/mixins.py @@ -17,4 +17,4 @@ class IDMixin(object): class TimeStampMixin(object): created_at = Column(TIMESTAMP, server_default=func.now(), nullable=False) - updated_at = Column(TIMESTAMP, server_default=func.now(), nullable=False) + updated_at = Column(TIMESTAMP, server_default=func.now(), onupdate=func.now(), nullable=False) diff --git a/graphspace/routing.py b/graphspace/routing.py new file mode 100644 index 00000000..c63102a4 --- /dev/null +++ b/graphspace/routing.py @@ -0,0 +1,6 @@ + +channel_routing = { + "websocket.connect": "graphspace.consumers.websocket_connect", + "websocket.keepalive": "graphspace.consumers.websocket_keepalive", + "websocket.disconnect": "graphspace.consumers.websocket_disconnect" +} \ No newline at end of file diff --git a/graphspace/settings/base.py b/graphspace/settings/base.py index a247f43b..613ec411 100644 --- a/graphspace/settings/base.py +++ b/graphspace/settings/base.py @@ -25,15 +25,18 @@ # Application definition INSTALLED_APPS = ( - 'analytical', - 'django.contrib.admin', - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.staticfiles', - 'applications.users', - 'applications.graphs' + 'analytical', + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', + 'applications.users', + 'applications.graphs', + 'applications.comments', + 'applications.discussions', + 'channels' ) MIDDLEWARE_CLASSES = ( @@ -159,4 +162,18 @@ }, } + + + +# Channel settings and routing +CHANNEL_LAYERS = { + 'default': { + 'BACKEND': 'asgi_redis.RedisChannelLayer', + 'CONFIG': { + 
'hosts': [('127.0.0.1', 6379)], + }, + 'ROUTING': 'graphspace.routing.channel_routing', + } +} MAINTENANCE = False + diff --git a/graphspace/settings/docker.py b/graphspace/settings/docker.py new file mode 100644 index 00000000..3d5fd731 --- /dev/null +++ b/graphspace/settings/docker.py @@ -0,0 +1,48 @@ +from graphspace.settings.base import * + +# variables for setting up account through which GraphSpace emails +EMAIL_HOST = 'NONE' +EMAIL_HOST_USER = 'NONE' +EMAIL_HOST_PASSWORD = 'NONE' + +# If error is thrown, display the error in the browser (ONLY FOR LOCAL MACHINES) +DEBUG = True +TEMPLATE_DEBUG = True + +# URL through which to access graphspace +URL_PATH = "http://localhost:8000/" + +# If tracking is enabled for GraphSpace in Google Analytics +GOOGLE_ANALYTICS_PROPERTY_ID = 'UA-00000000-0' + +# Keys given by creating a requestor account on Amazon Mechanical Turk (https://www.mturk.com/mturk/welcome) +AWSACCESSKEYID = 'None' +SECRETKEY = 'None' + +# Path to GraphSPace +PATH = "/home/subramanyam/nrnb/GraphSpace" + +# SHOULD NEVER CHANGE THIS VALUE +SECRET_KEY = 'this-is-a-secret-key-for-local-settings-only' + +# If needing to test on production mturk account (real money) +# AWS_URL = 'https://mechanicalturk.amazonaws.com' + +# Sandbox (development) MTURK (fake money used) +AWS_URL = 'https://mechanicalturk.sandbox.amazonaws.com' + +# To configure the application to use the Console Backend for sending e-mail. It writes e-mails to standard out instead of sending them. 
+# http://stackoverflow.com/questions/4642011/test-sending-email-without-email-server +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' + + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.postgresql_psycopg2', + 'NAME': 'test', + 'USER': 'postgres', + 'PASSWORD': '987654321', + 'HOST': 'localhost', + 'PORT': '5432' + } +} diff --git a/graphspace/signals.py b/graphspace/signals.py new file mode 100644 index 00000000..7803b946 --- /dev/null +++ b/graphspace/signals.py @@ -0,0 +1,37 @@ +from channels import Group + +from json import dumps +import re +import graphspace.utils as utils + + +def send_message(group_name, type, message, event): + group_name = utils.websocket_group_name(group_name) + Group(group_name).send({'text': dumps({"type": type, "message": message, "event": event})}) + + +def send_comment(comment, type, users=None, event=None): + comment = utils.serializer(comment) + if type == 'private': + email_list = [] + if users: + for user in users: + user = utils.serializer(user) + email_list.append(user['email']) + email_list.append(comment['owner_email']) + email_list = list(set(email_list)) + for email in email_list: + send_message(group_name=email, type="comment", message=comment, event=event) + + +def send_discussion(discussion, type, users=None, event=None): + discussion = utils.serializer(discussion) + if type == 'private': + email_list = [] + if users: + for user in users: + user = utils.serializer(user) + email_list.append(user['email']) + email_list = list(set(email_list)) + for email in email_list: + send_message(group_name=email, type="discussion", message=discussion, event=event) diff --git a/graphspace/utils.py b/graphspace/utils.py index d5efeff5..2c733cb1 100644 --- a/graphspace/utils.py +++ b/graphspace/utils.py @@ -1,7 +1,7 @@ import json import string import base64 - +import re from django.utils.crypto import random @@ -73,3 +73,9 @@ def cytoscapePresetLayout(csWebJson): return json.dumps(csJson) +def 
websocket_group_name(name): + """ + Django-channels only accepts alphanumerics, hyphen and period; + Remove all other symbols + """ + return re.sub('[^a-zA-Z0-9\n\.]', '-', name) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 1f6c5be8..3fc89df6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,7 @@ py-bcrypt==0.4 Pygments==2.1.3 pytz==2016.4 requests==2.10.0 -six==1.10.0 +six==1.11.0 snowballstemmer==1.2.1 SQLAlchemy==1.3.3 graphspace_python==0.8.2 @@ -28,3 +28,8 @@ elasticsearch-dsl>=5.0.0,<6.0.0 sphinx-rtd-theme sphinx recommonmark +channels==1.1.6 +asgiref==1.1.2 +asgi-redis==1.4.3 +redis==2.10.6 + diff --git a/static/css/graphspace.css b/static/css/graphspace.css index 1070d46f..4eb546d0 100644 --- a/static/css/graphspace.css +++ b/static/css/graphspace.css @@ -470,3 +470,198 @@ p.lead { margin-left: 0; } } + + +#ViewCommentSideBar { + height: 100%; + width: 100%; +} + +#AddCommentSideBar { + height: 100%; + width: 100%; +} + +.comment-box { + margin: auto; + width: 100%; + padding-left: 2px; + padding-top: 13px; + padding-bottom: 13px; +} +.comment-box:hover { + background-color: #fdfd96; + padding:7px; + +} + +.comment-highlight { + background-color: #F7F7F7; + padding:7px; +} + +.comment-image { + width: 100%; +} + +.comment-email { + padding: 5px; +} + +.comment-text { + width: 169px; + overflow-wrap: break-word; +} + +.passive { + display: none; +} + +.comment-date { + color: grey; + font-size: 13px; +} + +.comment-options { + background-color: #F7F7F7; + padding: 3px 10px; +} + +.comment-symbol { + font-size: 17px; + color: grey; +} + +.comment-options:hover { + border-style: solid; + border-width: 1px; + border-color: silver; +} + +.collapse-comments { + color: #4588FD; + text-decoration: underline; + padding-bottom: 7px; +} + +.res-comment-desc { + padding-top: 2px; + color: grey; + font-size: 14px; + font-style: italic; +} + +#commentsPagination li { + padding: 0px; + display: inline-block; + 
font-size: 9px; +} +.thumbnail { + padding:0px; +} +.panel { + position:relative; +} +.panel>.panel-heading:after,.panel>.panel-heading:before{ + position:absolute; + top:11px;left:-16px; + right:100%; + width:0; + height:0; + display:block; + content:" "; + border-color:transparent; + border-style:solid solid outset; + pointer-events:none; +} +.panel>.panel-heading:after{ + border-width:7px; + border-right-color:#f7f7f7; + margin-top:1px; + margin-left:2px; +} +.panel>.panel-heading:before{ + border-right-color:#ddd; + border-width:8px; +} +.comment-timeline-item { + position: relative; + display: initial; + padding: 16px 0; + margin-left: 16px; +} +.comment-timeline-item:before { + position: absolute; + top: 0; + bottom: 0; + left: 20px; + display: block; + width: 2px; + content: ""; + background-color: #e1e4e8; +} +.pb-comment-container { + font-family: Lato; + margin-top: 40px; + padding: inherit; +} + +.pb-comment-textarea { + resize: none; + padding: 20px; + height: 130px; + width: 100%; + border: 1px solid #F2F2F2; + } +.comment-text { + white-space: pre-line; + display: contents; + font-family: sans-serif; + font-size: 14px; +} +.pre { + display: contents; + overflow-wrap: break-word; + white-space: pre-wrap; +} +.sticky { + position: fixed; + top: 50px; + width: 100% +} +.discussion-header { + padding: 10px 16px; + background: #f5f5f5; + color: #333; +} +#DiscussionText { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; +} +.show-read-more .more-text{ + display: none; +} +.emoji-reacted { + padding: 5px 5px; + font-size: 18px; + background-color: #cae5ff; + border: 1px solid #e1e4e8; + +} +.emoji-reacted:hover { + cursor: pointer; +} +.emoji-unreacted { + padding: 5px 5px; + font-size: 18px; + border: 1px solid #e1e4e8; +} +.emoji-unreacted:hover { + cursor: pointer; +} +.display-emoji { + padding: 5px 4px; + font-size: 18px; +} +.display-emoji:hover { + 
cursor: pointer; +} \ No newline at end of file diff --git a/static/images/Architecture.png b/static/images/Architecture.png new file mode 100644 index 00000000..800037d0 Binary files /dev/null and b/static/images/Architecture.png differ diff --git a/static/images/add_new_member.gif b/static/images/add_new_member.gif new file mode 100644 index 00000000..65134340 Binary files /dev/null and b/static/images/add_new_member.gif differ diff --git a/static/images/create-group-owner.gif b/static/images/create-group-owner.gif new file mode 100644 index 00000000..2e2ce483 Binary files /dev/null and b/static/images/create-group-owner.gif differ diff --git a/static/images/create-layout-owner.gif b/static/images/create-layout-owner.gif new file mode 100644 index 00000000..3305eaac Binary files /dev/null and b/static/images/create-layout-owner.gif differ diff --git a/static/images/delete-layout-owner-notification.gif b/static/images/delete-layout-owner-notification.gif new file mode 100644 index 00000000..afb371aa Binary files /dev/null and b/static/images/delete-layout-owner-notification.gif differ diff --git a/static/images/group-notification-tab.gif b/static/images/group-notification-tab.gif new file mode 100644 index 00000000..12f9249c Binary files /dev/null and b/static/images/group-notification-tab.gif differ diff --git a/static/images/img_avatar.png b/static/images/img_avatar.png new file mode 100644 index 00000000..2b831855 Binary files /dev/null and b/static/images/img_avatar.png differ diff --git a/static/images/individual-notification.gif b/static/images/individual-notification.gif new file mode 100644 index 00000000..d170d17f Binary files /dev/null and b/static/images/individual-notification.gif differ diff --git a/static/images/individual-owner-notification.gif b/static/images/individual-owner-notification.gif new file mode 100644 index 00000000..8567cb82 Binary files /dev/null and b/static/images/individual-owner-notification.gif differ diff --git 
a/static/images/mark-all-as-read-notification.gif b/static/images/mark-all-as-read-notification.gif new file mode 100644 index 00000000..571f12f0 Binary files /dev/null and b/static/images/mark-all-as-read-notification.gif differ diff --git a/static/images/share-layout.gif b/static/images/share-layout.gif new file mode 100644 index 00000000..ebf88db6 Binary files /dev/null and b/static/images/share-layout.gif differ diff --git a/static/images/share-unshare-graph.gif b/static/images/share-unshare-graph.gif new file mode 100644 index 00000000..4cd337e8 Binary files /dev/null and b/static/images/share-unshare-graph.gif differ diff --git a/static/images/update-group-owner.gif b/static/images/update-group-owner.gif new file mode 100644 index 00000000..c454780e Binary files /dev/null and b/static/images/update-group-owner.gif differ diff --git a/static/images/upload-graph-owner.gif b/static/images/upload-graph-owner.gif new file mode 100644 index 00000000..b3f66c31 Binary files /dev/null and b/static/images/upload-graph-owner.gif differ diff --git a/static/js/graphs_page.js b/static/js/graphs_page.js index 65142ff7..064b2143 100644 --- a/static/js/graphs_page.js +++ b/static/js/graphs_page.js @@ -98,6 +98,33 @@ var apis = { } }, + comments: { + ENDPOINT: _.template('/ajax/graphs/<%= graph_id %>/comments/'), + add: function (graph_id, data, successCallback, errorCallback) { + apis.jsonRequest('POST', apis.comments.ENDPOINT({'graph_id': graph_id}), data, successCallback, errorCallback) + }, + get: function (graph_id, successCallback, errorCallback) { + apis.jsonRequest('GET', apis.comments.ENDPOINT({'graph_id': graph_id}), undefined, successCallback, errorCallback) + }, + update: function (graph_id, data, successCallback, errorCallback) { + apis.jsonRequest('PUT', apis.comments.ENDPOINT({'graph_id': graph_id}), data, successCallback, errorCallback) + }, + delete: function (graph_id, data, successCallback, errorCallback) { + apis.jsonRequest('DELETE', 
apis.comments.ENDPOINT({'graph_id': graph_id}), data, successCallback, errorCallback) + }, + getCommentToGraph: function (graph_id, data, successCallback, errorCallback) { + apis.jsonRequest('GET_COMMENT_TO_GRAPH', apis.comments.ENDPOINT({'graph_id': graph_id}), data, successCallback, errorCallback) + }, + addCommentReaction: function (graph_id, data, successCallback, errorCallback) { + apis.jsonRequest('POST_REACTION', apis.comments.ENDPOINT({'graph_id': graph_id}), data, successCallback, errorCallback) + }, + deleteCommentReaction: function (graph_id, data, successCallback, errorCallback) { + apis.jsonRequest('DELETE_REACTION', apis.comments.ENDPOINT({'graph_id': graph_id}), data, successCallback, errorCallback) + }, + getCommentReaction: function (graph_id, data, successCallback, errorCallback) { + apis.jsonRequest('GET_REACTION', apis.comments.ENDPOINT({'graph_id': graph_id}), data, successCallback, errorCallback) + }, + }, jsonRequest: function ( method, url, data, successCallback, errorCallback ) { $.ajax( { headers: { @@ -462,11 +489,14 @@ var uploadGraphPage = { var graphPage = { cyGraph: undefined, timeout: null, + presentComments: null, init: function () { /** * This function is called to setup the graph page. * It will initialize all the event listeners. 
*/ + reactions_code = [128077, 128078, 128516, 128543, 127881, 128640, 128147, 128064]; + graphPage.cyGraph = graphPage.contructCytoscapeGraph(); graphPage.legend.init( style_json ); @@ -502,6 +532,30 @@ var graphPage = { $( '#saveLayoutModal' ).modal( 'show' ); } ); + $('#commentAddBtn').click(function () { + $('#defaultSideBar').removeClass('active'); + $('#AddCommentSideBar').addClass('active'); + graphPage.expandTextarea($('#CommentText')); + }); + + $('#CreateCommentBtn').click(function () { + graphPage.createComment($('#CommentText').val(), null); + }); + + $('#cancelCommentBtn').click(function () { + $('#CommentText').val(""); + $('#AddCommentSideBar').removeClass('active'); + $('#defaultSideBar').addClass('active'); + }); + + $('#viewCommentsBtn').click(function () { + $('#defaultSideBar').removeClass('active'); + $('#ViewCommentSideBar').addClass('active'); + graphPage.getComments(); + $('#allComments').click(); + }); + + $( '#exitLayoutBtn' ).click( function () { graphPage.cyGraph.contextMenus( 'get' ).destroy(); // Destroys the cytocscape context menu extension instance. 
@@ -527,6 +581,9 @@ var graphPage = { if ( window.location.hash == '#graph_details_tab' ) { $( '#graphDetailsTabBtn' ).trigger( 'click' ); } + if ( window.location.hash == '#comments' ) { + $( '#viewCommentsBtn' ).trigger( 'click' ); + } if ( !_.isEmpty( utils.getURLParameter( 'auto_layout' ) ) ) { graphPage.applyAutoLayout( utils.getURLParameter( 'auto_layout' ) ); @@ -553,6 +610,494 @@ var graphPage = { graphPage.defaultLayoutWidget.init(); }, + createComment: function(text, parent_comment_id) { + console.log('creating comment'); + var nodes = graphPage.cyGraph.$(':selected').nodes(); + var edges = graphPage.cyGraph.$(':selected').edges(); + var node_names = [], edge_names = []; + nodes.each(function(idx) { + node_names.push(nodes[idx]._private.data.name); + }); + edges.each(function(idx) { + edge_names.push(edges[idx]._private.data.name); + }); + var owner_email = ($('#UserEmail').val() && $('#UserEmail').val() != "None")? $('#UserEmail').val() : null; + var graph_id = ($('#GraphID').val())? $('#GraphID').val() : null; + if(text == "") { + $.notify({ + message: 'Please enter a valid text' + }, { + type: 'danger' + }); + return; + } + apis.comments.add(graph_id, { + "owner_email": owner_email, + "graph_id": graph_id, + "node_names": node_names, + "edge_names": edge_names, + "text": text, + "parent_comment_id": parent_comment_id + }, + successCallback = function (response) { + $('#CommentText').val(""); + graphPage.cyGraph.edges().unselect(); + graphPage.cyGraph.nodes().unselect(); + + }, + errorCallback = function (response) { + $.notify({ + message: response.responseJSON.error_message + }, { + type: 'danger' + }); + }); + }, + getComments: function() { + var graph_id = ($('#GraphID').val())? 
$('#GraphID').val() : null; + apis.comments.get(graph_id, + successCallback = function (response) { + graphPage.commentsFormatter(response.total, response.comments); + $('#allComments').click(); + + }, + errorCallback = function (response) { + $.notify({ + message: response.responseJSON.error_message + }, { + type: 'danger' + }); + }); + }, + editComment: function(comment_id, text, is_closed) { + var graph_id = ($('#GraphID').val())? $('#GraphID').val() : null; + apis.comments.update(graph_id, { + 'id': comment_id, + 'text': text, + 'is_closed': is_closed + }, + successCallback = function (response) { + + }, + errorCallback = function (response) { + $.notify({ + message: response.responseJSON.error_message + }, { + type: 'danger' + }); + }); + }, + deleteComment: function(comment_id) { + var graph_id = ($('#GraphID').val())? $('#GraphID').val() : null; + apis.comments.delete(graph_id, { + 'id': comment_id, + }, + successCallback = function (response) { + + }, + errorCallback = function (response) { + $.notify({ + message: response.responseJSON.error_message + }, { + type: 'danger' + }); + }); + }, + addCommentReaction: function (comment_id, owner_email, reaction_content) { + var reaction = { + "comment_id": comment_id, + "content": reaction_content, + "owner_email": owner_email + }; + apis.comments.addCommentReaction($('#GraphID').val(), reaction, + successCallback = function (response) { + }, + errorCallback = function (xhr, status, errorThrown) { + alert(xhr.responseText); + }); + }, + deleteCommentReaction: function (comment_id, owner_email, reaction_content) { + var reaction = { + "comment_id": comment_id, + "content": reaction_content, + "owner_email": owner_email + }; + apis.comments.deleteCommentReaction($('#GraphID').val(), reaction, + successCallback = function (response) { + }, + errorCallback = function (xhr, status, errorThrown) { + // This method is called when error occurs while deleting reaction. 
+ alert(xhr.responseText); + }); + }, + getCommentReaction: function (comment_id, reaction_content) { + var reaction = { + "comment_id": comment_id, + "content": reaction_content + }; + apis.comments.getCommentReaction($('#GraphID').val(), reaction, + successCallback = function (response) { + var response_reactions = response.reactions; + var str = ""; + for(var i=0; i'; + } + $("#" + String(comment_id) + '-' + reaction_content + '-emoji').attr('data-original-title', str); + }, + errorCallback = function (xhr, status, errorThrown) { + alert(xhr.responseText); + }); + }, + commentsFormatter: function (total, comments) { + var ele = $('#CommentsList'); ele.html(""); + var comment_threads = [], comment_obj = {}; + var visited = {}, str = ""; + comments.forEach(function (comment) { + if(comment.parent_comment_id == null) { + if(comment_obj[comment.id] == null) { + comment_obj[comment.id] = []; + } + comment_obj[comment.id].push(comment); + } + else { + if(comment_obj[comment.parent_comment_id] == null) { + comment_obj[comment.parent_comment_id] = []; + } + comment_obj[comment.parent_comment_id].push(comment); + } + }); + $.each(comment_obj, function( key, value ) { + comment_threads.push(value); + }); + comment_threads.forEach(function (comment_thread) { + comment_thread.sort(function(a, b) { + return new Date(a.created_at) - new Date(b.created_at); + }); + var p_comment = comment_thread[0]; + str = ''; + ele.append(str); + + //Do setting is_closed field if the comment is resolved. 
+ if(p_comment != null) { + if(p_comment.is_closed == 1) { + $('#commentContainer' + p_comment.id).data("is_closed", 1); + $('#commentContainer' + p_comment.id).find('.reply-message').addClass('passive'); + $('#commentContainer' + p_comment.id).find('.res-comment-desc').removeClass('passive'); + } + else { + $('#commentContainer' + p_comment.id).data("is_closed", 0); + } + } + }); + comments.forEach(function (comment) { + graphPage.addCommentHandlers(comment); + }); + + $('#cyGraphContainer').click(function () { + if($("#filterComments").is(':checked')) { + $('#filterComments').click(); + } + }); + + $('#filterComments').click(function () { + graphPage.presentComments = []; + graphPage.filterCommentsBasedOnGraph(); + }); + + $('#allComments').click(function () { + graphPage.presentComments = []; + $.each($('#CommentsList').children("div"), function (key, child) { + $(child).removeClass('passive'); + graphPage.presentComments.push(child); + }); + }); + + $('#cancelViewCommentsBtn').click(function () { + $('#ViewCommentSideBar').removeClass('active'); + $('#defaultSideBar').addClass('active'); + }); + utils.readmoreFormatter(100); + }, + addCommentHandlers: function(comment) { + var comment_box = $('#commentBox' + comment.id); + if(comment.parent_comment_id === null) { + var container = $('#commentContainer' + comment.id); + graphPage.expandTextarea(container.find('.reply-message')); + container.find('.reply-message').unbind('click').click(function (e) { + e.preventDefault(); + container.find('.reply-table').removeClass('passive'); + }); + container.find('.cancel-reply-btn').unbind('click').click(function (e) { + e.preventDefault(); + container.find('.reply-table').addClass('passive'); + }); + container.find('.create-reply-btn').unbind('click').click(function (e) { + e.preventDefault(); + var comment_id = parseInt(container.attr('id').split("commentContainer")[1]); + graphPage.createComment($('#commentContainer' + comment_id).find('.reply-message').val(), 
comment_id); + $('#commentContainer' + comment_id).find('.reply-message').val(""); + }); + container.find('.collapse-comments').click(function () { + container.find(".collapse").slideToggle('slow'); + var text = $(this).text().split(' '); + if(text[0] === 'View') { + $(this).text('Hide replies'); + } + else { + $(this).text('View all hidden replies'); + } + }); + container.data("nodes", comment.nodes); + container.data("edges", comment.edges); + container.hover( + function () { + graphPage.cacheNodes = graphPage.cyGraph.collection(cytoscapeGraph.getAllSelectedNodes(graphPage.cyGraph)); + graphPage.cacheEdges = graphPage.cyGraph.collection(cytoscapeGraph.getAllSelectedEdges(graphPage.cyGraph)); + graphPage.cyGraph.nodes().unselect(); + graphPage.cyGraph.edges().unselect(); + $(this).data("nodes").forEach(function (node) { + graphPage.cyGraph.nodes("[name = '" + node + "']").select(); + }); + $(this).data("edges").forEach(function (edge) { + graphPage.cyGraph.edges("[name = '" + edge + "']").select(); + }); + }, function() { + graphPage.cyGraph.nodes().unselect(); + graphPage.cyGraph.edges().unselect(); + graphPage.cacheNodes.select(); + graphPage.cacheEdges.select(); + } + ); + }; + comment_box.find('.edit-comment').unbind('click').click(function (e) { + e.preventDefault(); + var ele = $('#commentBox' + comment.id); + var msg = ele.find('p'); msg.addClass('passive'); + var inp = ele.find('textarea'); inp.val(msg.text()); inp.removeClass('passive'); + var btn = ele.find('.edit-table'); btn.removeClass('passive'); + graphPage.expandTextarea(inp); + }); + comment_box.find('.resolve-comment').unbind('click').click(function (e) { + e.preventDefault(); + var comment_id = parseInt(comment_box.attr('id').split("commentBox")[1]); + graphPage.editComment(comment_id, undefined, 1); + }); + comment_box.find('.reopen-comment').unbind('click').click(function (e) { + e.preventDefault(); + var comment_id = parseInt(comment_box.attr('id').split("commentBox")[1]); + 
graphPage.editComment(comment_id, undefined, 0); + }); + comment_box.find('.delete-comment').unbind('click').click(function (e) { + e.preventDefault(); + var comment_id = parseInt(comment_box.attr('id').split("commentBox")[1]); + graphPage.deleteComment(comment_id); + }); + comment_box.find('.edit-comment-btn').unbind('click').click(function (e) { + e.preventDefault(); + var ele = $('#commentBox' + comment.id); + var comment_id = parseInt(comment_box.attr('id').split('commentBox')[1]); + var msg = ele.find('textarea').val(); + graphPage.editComment(comment_id, msg, undefined); + }); + comment_box.find('.cancel-edit-btn').unbind('click').click(function (e) { + e.preventDefault(); + var ele = $('#commentBox' + comment.id); + var btn = ele.find('.edit-table'); btn.addClass('passive'); + var inp = ele.find('textarea'); inp.addClass('passive'); + var msg = ele.find('.comment-text'); msg.removeClass('passive'); + }); + for(var i=0; i'; + var date = moment(comment.created_at).fromNow(); + var label = ""; + if((comment.nodes).length==0 && (comment.edges).length==0 && comment.parent_comment_id==null){ + label += "#Graph"; + } + if((comment.nodes).length>0 && comment.parent_comment_id==null){ + label += "#Nodes "; + } + if((comment.edges).length>0 && comment.parent_comment_id==null){ + label += "#Edges"; + } + + str += ''; + str += ''; + str += '
' + comment.owner_email + '
commented ' + date + '
' + graphPage.generateCommentOptions(comment) + graphPage.generateCommentReactions(comment) + '
'; + str += '' + label + ''; + str += '
' + comment.text + '
'; + str += ''; + str += '
'; + str += ''; + str += ''; + str += ''; + str += graphPage.displayCommentReactions(comment); + str += '
EditCancel

'; + return str; + }, + generateCommentOptions: function(comment) { + var str = ""; + str += ''; + return str; + }, + generateCommentReactions: function(comment) { + var str = ""; + str += ''; + str += ''; + str += ''; + str += ''; + return str; + }, + displayCommentReactions: function(comment) { + var str="
"; + var user_reacted_emoji = []; + for(var i=0; i<(comment.reaction_content).length;i++){ + user_reacted_emoji.push(comment.reaction_content[i]); + } + var comment_unique_emojis = Array.from(new Set(user_reacted_emoji)); + for(var i=0; i&#' + reactions_code[i] + ';'; + } + else{ + str += '&#' + reactions_code[i] + ';'; + } + } + str += '
'; + return str; + }, + generateReplyTemplate: function(comment) { + var str = ""; + str += ''; + str += '
'; + str += ''; + str += ''; + str += '
'; + str += ''; + str += discussionPage.displayCommentReactions(comment); + return str; + }, + generateCommentOptions: function(comment) { + var str = ""; + str += ''; + str += ''; + str += discussionPage.generateCommentReactions(comment); + str += ''; + str += ''; + str += ''; + if($('#UserEmail').val() === comment.owner_email) { + str += ''; + } + str += ''; + return str; + }, + generateCommentReactions: function(comment) { + var str = ""; + str += ''; + return str; + }, + displayCommentReactions: function(comment) { + var str = ""; + if((comment.reaction_content).length==0){ + str += ''; + ele.append(str); + } + graphPage.addCommentHandlers(comment); + }, + updatecomment: function(comment) { + ele = $('#commentBox' + comment.id); + ele.find('.comment-text').text(comment.text); + ele.find('.cancel-edit-btn').click(); + if(comment.parent_comment_id == null && comment.is_closed == 1) { + $('#commentContainer' + comment.id).data("is_closed", 1); + //styling for resolved comments + $('#commentContainer' + comment.id).find('.reply-message').addClass('passive'); + $('#commentContainer' + comment.id).find('.res-comment-desc').removeClass('passive'); + //ended styling + var box = $('#commentBox' + comment.id).find('.resolve-comment'); + box.removeClass('resolve-comment').addClass('reopen-comment'); box.html("Re-open"); + box.unbind('click').click(function (e) { + e.preventDefault(); + var ele = $('#commentBox' + comment.id); + var comment_id = parseInt(ele.attr('id').split("commentBox")[1]); + graphPage.editComment(comment_id, undefined, 0); + }); + } + if(comment.parent_comment_id == null && comment.is_closed == 0) { + $('#commentContainer' + comment.id).data("is_closed", 0); + //styling for reopened comments + $('#commentContainer' + comment.id).find('.reply-message').removeClass('passive'); + $('#commentContainer' + comment.id).find('.res-comment-desc').addClass('passive'); + //ended styling + var box = $('#commentBox' + comment.id).find('.reopen-comment'); + 
box.removeClass('reopen-comment').addClass('resolve-comment'); box.html("Resolve"); + box.unbind('click').click(function (e) { + e.preventDefault(); + var ele = $('#commentBox' + comment.id); + var comment_id = parseInt(ele.attr('id').split("commentBox")[1]); + graphPage.editComment(comment_id, undefined, 1); + }); + } + }, + deletecomment: function(comment) { + if(comment.parent_comment_id == null) { + $('#commentContainer' + comment.id).remove(); + } + if(comment.parent_comment_id != null) { + if($('#commentContainer' + comment.parent_comment_id).find('.collapse').children().length == 1) { + $('#commentContainer' + comment.parent_comment_id).find('.collapse').remove(); + $('#commentContainer' + comment.parent_comment_id).find('.collapse-comments').remove(); + } + else { + ($('#commentBox' + comment.id))? $('#commentBox' + comment.id).remove() : null; + } + } + }, + discussionComments: function(data){ + if(data.event === "insert_comment") { + userSocket.addDiscussionComment(data.message); + is_event = true; + } + else if(data.event === "update_comment") { + userSocket.editDiscussionComment(data.message); + is_event = true; + } + else if(data.event === "delete_comment") { + userSocket.deleteDiscussioncomment(data.message); + is_event = true; + } + else if(data.event === "close") { + userSocket.resolveDiscussion(data.message); + is_event = true; + } + else if(data.event === "reopen") { + userSocket.reopenDiscussion(data.message); + is_event = true; + } + else if(data.event === "delete_discussion") { + userSocket.deleteDiscussion(data.message); + is_event = true; + } + }, + addDiscussionComment: function(comment) { + if(location.pathname==('/groups/'+ comment.group_id[0] +'/discussions/' + comment.discussion_id[0])){ + var str = ""; + var ele = $('#CommentsList'); + str = '
'; + str += '
'; + str += discussionPage.generateCommentTemplate(comment); + str += '
'; + str += '
'; + ele.append(str); + discussionPage.addCommentHandlers(comment); + } + }, + + deleteDiscussioncomment: function(comment) { + $('#commentContainer' + comment.id).remove(); + }, + deleteDiscussion: function(discussion) { + $('#DiscussionRow' + discussion.id).remove(); + if(location.pathname==('/groups/'+ discussion.group_id +'/discussions/' + discussion.id)){ + var str ='/groups/'+ discussion.group_id + '#discussions'; + window.location = str; + } + + }, + editDiscussionComment: function(comment) { + $('#TextDisplay' + String(comment.id)).text(comment.text); + }, + + resolveDiscussion: function(discussion) { + if(location.pathname==('/groups/' + discussion.group_id)){ + $('.discussion-unlock' + String(discussion.id)).addClass("passive"); + $('.discussion-lock' + String(discussion.id)).removeClass("passive"); + } + if(location.pathname==('/groups/'+ discussion.group_id +'/discussions/' + discussion.id)){ + window.location=location.pathname; + } + }, + reopenDiscussion: function(discussion) { + if(location.pathname==('/groups/'+ discussion.group_id +'/discussions/' + discussion.id)){ + window.location=location.pathname; + } + if(location.pathname==('/groups/'+ discussion.group_id)){ + $('.discussion-lock' + String(discussion.id)).addClass("passive"); + $('.discussion-unlock' + String(discussion.id)).removeClass("passive"); + } + }, + } \ No newline at end of file diff --git a/static/js/utils.js b/static/js/utils.js index eac401f3..4b851ea4 100644 --- a/static/js/utils.js +++ b/static/js/utils.js @@ -251,7 +251,7 @@ var utils = { } }, dateFormatter: function (value, row, index) { - return moment.utc(value).fromNow(); + return moment(value).fromNow(); }, getURLParameter: function (name) { var results = new RegExp('[\?&]' + name + '=([^&#]*)').exec(window.location.href); @@ -262,6 +262,24 @@ var utils = { return results[1] || 0; } }, + readmoreFormatter: function(value){ + // Breaks the string into two with first substring with (value) length + var maxLength = value; + 
$(".show-read-more").each(function(){ + var myStr = $(this).text(); + if($.trim(myStr).length > maxLength){ + var newStr = myStr.substring(0, maxLength); + var removedStr = myStr.substring(maxLength, $.trim(myStr).length); + $(this).empty().html(newStr); + $(this).append(' read more...'); + $(this).append('' + removedStr + ''); + } + }); + $(".read-more").click(function(){ + $(this).siblings(".more-text").contents().unwrap(); + $(this).remove(); + }); + }, isNumeric: function (n) { return !isNaN(parseFloat(n)) && isFinite(n); }, diff --git a/templates/base.html b/templates/base.html index bdb8476a..5883c062 100644 --- a/templates/base.html +++ b/templates/base.html @@ -17,13 +17,14 @@ - + + + {##} +{% endblock %} \ No newline at end of file diff --git a/templates/discussions/index.html b/templates/discussions/index.html new file mode 100644 index 00000000..4e597866 --- /dev/null +++ b/templates/discussions/index.html @@ -0,0 +1,58 @@ +{% extends 'base.html' %} +{% block content %} +{% load staticfiles %} + + + + + + + +
+ +




+ + + + + + + +
+ +{% if discussion.is_closed == 0 %} +
+
+
+
+ +
+ +
+
+ +
+
+
+{% endif %} + + + +
+
+ #{{discussion.id}} {{discussion.topic}} {{discussion.description}} +
+ {{discussion.owner_email}} + + +
+ + + + + +{% endblock %} diff --git a/templates/graph/add_comment_sidebar.html b/templates/graph/add_comment_sidebar.html new file mode 100644 index 00000000..9ec1fe32 --- /dev/null +++ b/templates/graph/add_comment_sidebar.html @@ -0,0 +1,29 @@ + + + \ No newline at end of file diff --git a/templates/graph/default_sidebar.html b/templates/graph/default_sidebar.html index 0204f29b..5672d035 100644 --- a/templates/graph/default_sidebar.html +++ b/templates/graph/default_sidebar.html @@ -53,8 +53,12 @@ Use Layout
Editor +
  • + + Comments + +
  • {% endif %} - {% if uid and uid == graph.owner_email %}
  • diff --git a/templates/graph/index.html b/templates/graph/index.html index 8c777a6e..7a15ee0e 100644 --- a/templates/graph/index.html +++ b/templates/graph/index.html @@ -275,6 +275,8 @@

    {% include 'graph/layout_editor_sidebar.html' %} {% include 'graph/legend/legend_editor_sidebar.html' %} {% endif %} + {% include 'graph/add_comment_sidebar.html' %} + {% include 'graph/view_comments_sidebar.html' %} {% include 'graph/filter_nodes_edges_sidebar.html' %} {% include 'graph/change_layout_sidebar.html' %} {% include 'graph/node_editor.html' %} diff --git a/templates/graph/view_comments_sidebar.html b/templates/graph/view_comments_sidebar.html new file mode 100644 index 00000000..b2a16e8b --- /dev/null +++ b/templates/graph/view_comments_sidebar.html @@ -0,0 +1,30 @@ + + \ No newline at end of file diff --git a/templates/group/add_discussion_modal.html b/templates/group/add_discussion_modal.html new file mode 100644 index 00000000..0005776f --- /dev/null +++ b/templates/group/add_discussion_modal.html @@ -0,0 +1,25 @@ + \ No newline at end of file diff --git a/templates/group/delete_discussion_modal.html b/templates/group/delete_discussion_modal.html new file mode 100644 index 00000000..391eb4b8 --- /dev/null +++ b/templates/group/delete_discussion_modal.html @@ -0,0 +1,21 @@ + \ No newline at end of file diff --git a/templates/group/discussions_table.html b/templates/group/discussions_table.html new file mode 100644 index 00000000..9fade21a --- /dev/null +++ b/templates/group/discussions_table.html @@ -0,0 +1,23 @@ + + + + + + + + + +
    Topic +
    \ No newline at end of file diff --git a/templates/group/index.html b/templates/group/index.html index de4d16d6..c7899afe 100644 --- a/templates/group/index.html +++ b/templates/group/index.html @@ -14,6 +14,9 @@
  • {{ group.total_members }} Members
  • +
  • + Discussions +
  • {% if group.owner_email == uid %}
  • Update Group @@ -46,6 +49,10 @@

    +
    + {% include 'group/discussions_table.html' %} +
    +
    @@ -57,7 +64,9 @@

    {% include 'group/unshare_graph_modal.html' %} {% include 'group/remove_group_member_modal.html' %} - + {% include 'group/delete_discussion_modal.html' %} + {% include 'group/add_discussion_modal.html' %} +