Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Linking tags and log records table #84

Merged
merged 15 commits into from
Nov 4, 2023
1 change: 1 addition & 0 deletions backend/app/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ def init_app(app):
from .sign_in_logs import SignInLogs
from .log_records import LogRecords
from .tag import Tag
from .log_record_tag import LogRecordTag

app.app_context().push()
db.init_app(app)
Expand Down
32 changes: 32 additions & 0 deletions backend/app/models/log_record_tag.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
from sqlalchemy import inspect
from sqlalchemy.orm.properties import ColumnProperty

from . import db


class LogRecordTag(db.Model):
__tablename__ = "log_record_tag"

log_record_tag_id = db.Column(db.Integer, primary_key=True, nullable=False)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

for consistency, I mentioned we should either do <table_name>_id or just id. I'm leaning towards switching everything to id. What are your thoughts?

The only reason for this switch is because the resident table has id and resident_id which is a little confusing at first.

log_record_id = db.Column(db.Integer, db.ForeignKey("log_records.log_id"), nullable=False)
tag_id = db.Column(db.Integer, db.ForeignKey("tags.tag_id"), nullable=False)

def to_dict(self, include_relationships=False):
    """Serialize this junction row into a plain dict.

    Column attributes are copied verbatim. Relationship attributes are
    included only when include_relationships is True, in which case each
    related object is serialized shallowly (its own relationships are
    not expanded).
    """
    # Inspect the mapped class to enumerate its ORM-managed attributes.
    mapper = inspect(type(self))
    formatted = {}
    for prop in mapper.attrs:
        key = prop.key
        value = getattr(self, key)
        if isinstance(prop, ColumnProperty):
            # Plain column: copy the stored value as-is.
            formatted[key] = value
        elif include_relationships:
            # Relationship: serialize each related object one level deep.
            # (currently not applicable, but may be useful for entity groups)
            formatted[key] = [related.to_dict() for related in value]
    return formatted
2 changes: 1 addition & 1 deletion backend/app/models/log_records.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@ class LogRecords(db.Model):
attn_to = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=True)
# TODO: replace open String fields with VarChar(NUM_CHARS)
note = db.Column(db.String, nullable=True)
tags = db.Column(db.ARRAY(db.String), nullable=True)
building = db.Column(db.String, nullable=False)
tags = db.relationship("Tag", secondary="log_record_tag", back_populates="log_records")

def to_dict(self, include_relationships=False):
# define the entities table
Expand Down
1 change: 1 addition & 0 deletions backend/app/models/tag.py
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Realizing this now but name should probably be unique. There should be some validation (both in the database as a table constraint and the routes layer to validate that the name is unique).

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Also, maybe status isn't the best approach... it's creating unused data in the db, which isn't a big deal in the grand scheme of things, but it's not ideal...

what if we remove the status column entirely, and do a delete similar to how log_records are handled? First, delete all the occurrences of tag_id in the new table, log_record_tag, then delete the tag from its table? This would make sure we don't have unused data in the db and makes it easier on the user (we would've needed to show unused tags to the user as well)

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think the one complaint about this approach was that any log record with a specific tag would lose that tag, but we can throw a warning to the admin and indicate to them that every log with the selected tag will lose that tag

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this new approach would need to be handled on the API side so it doesn't need to be addressed here but we should have something concrete for #61

Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ class Tag(db.Model):
status = db.Column(
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

thinking this filename should be tags instead of tag (all of our models use plural instead of singular except user, which should also be updated)

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should also be applied to the log_record_tags file

db.Enum("Deleted", "Active", name="status"), nullable=False
)
log_records = db.relationship("LogRecords", secondary="log_record_tag", back_populates="tags")

def to_dict(self, include_relationships=False):
# define the entities table
Expand Down
51 changes: 35 additions & 16 deletions backend/app/services/implementations/log_records_service.py
danielk1345 marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from ..interfaces.log_records_service import ILogRecordsService
from ...models.log_records import LogRecords
from ...models.user import User
from ...models.tag import Tag
from ...models import db
from datetime import datetime
from pytz import timezone
Expand All @@ -25,13 +25,26 @@ def add_record(self, log_record):
new_log_record = log_record
new_log_record["datetime"] = datetime.now()

tag_names = new_log_record["tags"]
connor-bechthold marked this conversation as resolved.
Show resolved Hide resolved
del new_log_record["tags"]

try:
new_log_record = LogRecords(**new_log_record)
self.construct_tags(new_log_record, tag_names)

db.session.add(new_log_record)
db.session.commit()
return log_record
except Exception as postgres_error:
raise postgres_error

def construct_tags(self, log_record, tag_names):
    """Attach existing Tag rows to a log record by name.

    Looks up each name in tag_names; every match is appended to
    log_record.tags. Raises Exception if any name has no matching Tag.
    """
    for tag_name in tag_names:
        matched_tag = Tag.query.filter_by(name=tag_name).first()
        if matched_tag is None:
            # Tags must be pre-existing; we never create them implicitly here.
            raise Exception(f"Tag with name {tag_name} does not exist")
        log_record.tags.append(matched_tag)

def to_json_list(self, logs):
try:
Expand Down Expand Up @@ -86,9 +99,9 @@ def filter_by_date_range(self, date_range):
return f"\ndatetime>='{start_date}' AND datetime<='{end_date}'"

def filter_by_tags(self, tags):
sql_statement = f"\n'{tags[0]}'=ANY (tags)"
sql_statement = f"\n'{tags[0]}'=ANY (tag_names)"
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

when filtering, I'm noticing log records appear when they shouldn't. In this example, this is the query param: filters={"tags":["tagA", "TagB"]}

Here is the response:

{
    "log_records": [
        {
            "attn_to": 1,
            "attn_to_first_name": "Safwaan",
            "attn_to_last_name": "Chowdhury",
            "building": "Building D",
            "datetime": "2023-06-13 15:15:40.849777-04:00",
            "employee_first_name": "Safwaan",
            "employee_id": 1,
            "employee_last_name": "Chowdhury",
            "flagged": false,
            "log_id": 8,
            "note": "This is a note",
            "resident_first_name": "Saf",
            "resident_last_name": "Waan",
            "tags": [
                "tagA"
            ]
        },
        {
            "attn_to": 1,
            "attn_to_first_name": "Safwaan",
            "attn_to_last_name": "Chowdhury",
            "building": "Building D",
            "datetime": "2023-06-13 15:04:12.437682-04:00",
            "employee_first_name": "Safwaan",
            "employee_id": 1,
            "employee_last_name": "Chowdhury",
            "flagged": false,
            "log_id": 7,
            "note": "This is a note",
            "resident_first_name": "Saf",
            "resident_last_name": "Waan",
            "tags": [
                "tagA",
                "tagB"
            ]
        }
    ],
    "num_results": 2
}

The first log record shouldn't appear, yes it does have tag A, but it doesn't have tag B. We should only show log records that have both tag A and tag B, and other tags if any. For example, suppose another log record existed with A, B, C- that log record should appear if A and B are filtered for.

Sorry if I explained it poorly the first time 😖

for i in range (1, len(tags)):
sql_statement = sql_statement + f"\nOR '{tags[i]}'=ANY (tags)"
sql_statement = sql_statement + f"\nOR '{tags[i]}'=ANY (tag_names)"
return sql_statement

def filter_by_flagged(self, flagged):
Expand All @@ -101,7 +114,7 @@ def get_log_records(self, page_number,filters=None):
start_index = (page_number - 1) * results_per_page
end_index = start_index + results_per_page

sql = 'SELECT\n \
sql = "SELECT\n \
logs.log_id,\n \
logs.employee_id,\n \
logs.resident_first_name,\n \
Expand All @@ -110,17 +123,23 @@ def get_log_records(self, page_number,filters=None):
logs.flagged,\n \
logs.attn_to,\n \
logs.note,\n \
logs.tags,\n \
logs.building,\n \
t.tag_names, \n \
employees.first_name AS employee_first_name,\n \
employees.last_name AS employee_last_name,\n \
attn_tos.first_name AS attn_to_first_name,\n \
attn_tos.last_name AS attn_to_last_name\n \
FROM log_records logs\n \
LEFT JOIN users attn_tos ON logs.attn_to = attn_tos.id\n \
JOIN users employees ON logs.employee_id = employees.id'

if filters:
JOIN users employees ON logs.employee_id = employees.id\n \
LEFT JOIN\n \
(SELECT logs.log_id, string_to_array(string_agg(tags.name, ','), ',') AS tag_names FROM log_records logs\n \
JOIN log_record_tag lrt ON logs.log_id = lrt.log_record_id\n \
JOIN tags ON lrt.tag_id = tags.tag_id\n \
GROUP BY logs.log_id \n \
) t ON logs.log_id = t.log_id"

if filters:
is_first_filter = True

options = {
Expand All @@ -137,7 +156,6 @@ def get_log_records(self, page_number,filters=None):
is_first_filter = False
else:
sql = sql + "\nAND " + options[filter](filters.get(filter))

sql = sql + "\nORDER BY datetime DESC"
log_records = db.session.execute(text(sql))
json_list = self.to_json_list(log_records)
Expand All @@ -148,13 +166,15 @@ def get_log_records(self, page_number,filters=None):
raise postgres_error

def delete_log_record(self, log_id):
    """Delete the log record with the given id.

    Clears the record's tag associations first so the corresponding rows
    in the log_record_tag junction table are removed, then deletes the
    record itself and commits.

    :param log_id: primary key of the log record to delete
    :raises Exception: if no log record with log_id exists
    """
    log_record_to_delete = LogRecords.query.filter_by(log_id=log_id).first()
    if not log_record_to_delete:
        # f-string for consistency with the rest of this service
        # (message text unchanged).
        raise Exception(f"Log record with id {log_id} not found")
    # Detach all tags before deleting so the ORM removes the junction
    # rows rather than leaving orphaned log_record_tag entries.
    log_record_to_delete.tags = []
    db.session.delete(log_record_to_delete)
    db.session.commit()

def update_log_record(self, log_id, updated_log_record):
Expand Down Expand Up @@ -183,11 +203,10 @@ def update_log_record(self, log_id, updated_log_record):
}
)
if "tags" in updated_log_record:
LogRecords.query.filter_by(log_id=log_id).update(
{
LogRecords.tags: updated_log_record["tags"]
}
)
logRecord = LogRecords.query.filter_by(log_id=log_id).first()
if (logRecord):
logRecord.tags = []
self.construct_tags(logRecord, updated_log_record["tags"])
else:
LogRecords.query.filter_by(log_id=log_id).update(
{
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
"""create junction table between log records and tags

Revision ID: 65a56c245ad7
Revises: 82f36cdf325f
Create Date: 2023-05-20 04:29:49.322186

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = '65a56c245ad7'
down_revision = '82f36cdf325f'
branch_labels = None
depends_on = None


def upgrade():
    """Create the log_record_tag junction table and drop log_records.tags."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Junction table linking log_records and tags (many-to-many); rows are
    # keyed by a surrogate primary key rather than a composite (log_record_id,
    # tag_id) pair.
    op.create_table('log_record_tag',
        sa.Column('log_record_tag_id', sa.Integer(), nullable=False),
        sa.Column('log_record_id', sa.Integer(), nullable=False),
        sa.Column('tag_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['log_record_id'], ['log_records.log_id'], ),
        sa.ForeignKeyConstraint(['tag_id'], ['tags.tag_id'], ),
        sa.PrimaryKeyConstraint('log_record_tag_id')
    )
    # NOTE(review): dropping this column discards any existing tag data; if
    # production rows hold tags, a data migration into log_record_tag should
    # run before this drop — confirm whether that is needed.
    with op.batch_alter_table('log_records', schema=None) as batch_op:
        batch_op.drop_column('tags')

    # ### end Alembic commands ###


def downgrade():
    """Restore log_records.tags as a string array and drop log_record_tag."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Re-add the old array column; values dropped by upgrade() are not
    # recoverable, so it comes back nullable and empty.
    with op.batch_alter_table('log_records', schema=None) as batch_op:
        batch_op.add_column(sa.Column('tags', postgresql.ARRAY(sa.VARCHAR()), autoincrement=False, nullable=True))

    op.drop_table('log_record_tag')
    # ### end Alembic commands ###