From 37fc1f216effb34237d93677f75ae3979b48426e Mon Sep 17 00:00:00 2001 From: Daniel Zhao Date: Sun, 29 Oct 2023 01:43:15 -0400 Subject: [PATCH] Fixed linting --- backend/.dockerignore | 58 +- backend/Dockerfile | 40 +- backend/Dockerfile.dev | 28 +- backend/PennCourses/__init__.py | 8 +- backend/PennCourses/asgi.py | 16 +- backend/PennCourses/celery.py | 48 +- backend/PennCourses/docs_settings.py | 2732 ++++++++--------- backend/PennCourses/settings/base.py | 233 ++ backend/PennCourses/settings/ci.py | 28 +- backend/PennCourses/settings/development.py | 40 +- backend/PennCourses/settings/production.py | 76 +- backend/PennCourses/templates/redoc.html | 40 +- .../templates/topic_courses_admin.html | 36 +- backend/PennCourses/urls.py | 98 +- backend/PennCourses/wsgi.py | 34 +- backend/Pipfile | 138 +- backend/alert/admin.py | 132 +- backend/alert/alerts.py | 350 +-- backend/alert/apps.py | 10 +- .../alert/management/commands/alertstats.py | 148 +- .../commands/compute_head_registrations.py | 80 +- .../commands/export_anon_registrations.py | 284 +- .../management/commands/export_demand_data.py | 528 ++-- .../commands/loadregistrations_pca.py | 312 +- .../management/commands/recomputestats.py | 1198 ++++---- .../management/commands/webhookbackup.py | 156 +- backend/alert/migrations/0001_initial.py | 128 +- .../migrations/0002_delete_registration.py | 32 +- .../0003_courseupdate_registration.py | 218 +- .../migrations/0004_auto_20190926_0549.py | 82 +- .../migrations/0005_delete_courseupdate.py | 94 +- .../migrations/0006_auto_20191110_1357.py | 102 +- .../migrations/0007_auto_20200131_1619.py | 52 +- .../0008_registration_original_created_at.py | 36 +- .../migrations/0009_auto_20200419_2112.py | 46 +- .../migrations/0010_auto_20201002_0714.py | 388 +-- .../migrations/0011_auto_20201108_1535.py | 110 +- .../migrations/0012_auto_20210418_0343.py | 434 +-- ...nestimate_csdv_gamma_fit_log_likelihood.py | 44 +- .../migrations/0014_auto_20210418_0847.py | 52 +- .../migrations/0015_auto_20211010_1235.py | 90 +- .../migrations/0016_auto_20211113_1537.py | 178 +- ...17_alter_registration_head_registration.py | 70 +- backend/alert/models.py | 2176 ++++++------- backend/alert/serializers.py | 266 +- backend/alert/tasks.py | 360 +-- .../alert/templates/alert/email_alert.html | 64 +- .../templates/alert/email_alert_close.html | 2 +- backend/alert/templates/alert/index.html | 686 ++--- backend/alert/templates/alert/push_notif.txt | 4 +- .../templates/alert/push_notif_close.txt | 4 +- backend/alert/templates/alert/text_alert.txt | 6 +- backend/alert/urls.py | 30 +- backend/alert/util.py | 64 +- backend/alert/views.py | 1206 ++++---- backend/courses/admin.py | 448 +-- backend/courses/apps.py | 10 +- .../courses/course_similarity/heuristics.py | 174 +- backend/courses/filters.py | 1026 +++---- .../commands/deduplicate_status_updates.py | 60 +- .../commands/export_status_history.py | 200 +- .../commands/export_test_courses_data.py | 532 ++-- .../management/commands/fillprofiles.py | 44 +- .../management/commands/form_simple_topics.py | 162 +- .../commands/load_add_drop_dates.py | 326 +- .../management/commands/load_crosswalk.py | 288 +- .../commands/load_status_history.py | 202 +- .../commands/load_test_courses_data.py | 556 ++-- .../courses/management/commands/loadstatus.py | 76 +- .../management/commands/merge_topics.py | 508 +-- .../management/commands/registrarimport.py | 140 +- .../management/commands/reset_topics.py | 142 +- backend/courses/migrations/0001_initial.py | 258 +- 
.../migrations/0002_auto_20190426_2158.py | 232 +- .../migrations/0003_auto_20190428_1707.py | 56 +- .../migrations/0004_auto_20190428_1710.py | 54 +- .../migrations/0005_auto_20190428_1845.py | 48 +- .../migrations/0006_auto_20190508_0200.py | 50 +- .../migrations/0007_auto_20190508_0202.py | 36 +- .../migrations/0008_auto_20190510_0114.py | 118 +- .../courses/migrations/0009_requirement.py | 96 +- .../migrations/0010_auto_20190510_0454.py | 34 +- .../migrations/0011_auto_20190510_0504.py | 46 +- .../migrations/0012_auto_20190510_0559.py | 46 +- .../migrations/0013_auto_20190517_0313.py | 78 +- .../migrations/0013_course_full_code.py | 36 +- .../migrations/0014_auto_20190518_1641.py | 50 +- .../migrations/0015_merge_20190518_2155.py | 26 +- .../migrations/0016_auto_20190523_1554.py | 36 +- .../migrations/0017_auto_20190525_2235.py | 44 +- .../migrations/0017_auto_20190526_1655.py | 44 +- .../migrations/0018_merge_20190526_1901.py | 26 +- .../migrations/0019_apikey_apiprivilege.py | 98 +- .../migrations/0020_auto_20190928_0046.py | 40 +- .../migrations/0021_auto_20191019_2140.py | 78 +- .../migrations/0022_auto_20191029_1927.py | 178 +- .../migrations/0022_section_full_code.py | 36 +- .../migrations/0023_auto_20191101_1717.py | 36 +- .../migrations/0024_merge_20191103_1941.py | 26 +- backend/courses/migrations/0024_userdata.py | 70 +- .../migrations/0025_auto_20191117_1309.py | 48 +- .../migrations/0026_merge_20191117_1420.py | 26 +- .../migrations/0027_auto_20191227_1213.py | 112 +- .../migrations/0028_auto_20200131_1619.py | 48 +- .../migrations/0029_auto_20200512_1525.py | 72 +- .../migrations/0030_auto_20201002_0714.py | 990 +++--- .../0031_userprofile_push_notifications.py | 42 +- .../migrations/0032_auto_20210418_0343.py | 464 +-- .../0033_alter_statusupdate_section.py | 48 +- .../migrations/0034_auto_20211114_0032.py | 58 +- .../migrations/0035_topic_course_topic.py | 114 +- ..._syllabus_url_meeting_end_date_and_more.py | 84 +- ...lter_meeting_room_alter_section_credits.py | 74 +- .../migrations/0038_alter_meeting_room.py | 50 +- .../0039_alter_course_primary_listing.py | 70 +- .../0040_alter_course_primary_listing.py | 48 +- .../0041_remove_section_raw_demand.py | 34 +- .../migrations/0042_section_has_reviews.py | 58 +- .../0043_section_has_status_updates.py | 56 +- .../0044_prengssrequirement_and_more.py | 154 +- ...crn_course_non_null_crn_semester_unique.py | 64 +- ...e_non_null_crn_semester_unique_and_more.py | 64 +- .../migrations/0047_alter_room_number.py | 42 +- ...s_restrictions_ngssrestriction_and_more.py | 272 +- .../migrations/0048_alter_attribute_school.py | 72 +- .../migrations/0049_merge_20220529_2355.py | 26 +- ...lude_ngssrestriction_inclusive_and_more.py | 174 +- .../migrations/0051_alter_attribute_school.py | 72 +- .../0051_alter_ngssrestriction_inclusive.py | 40 +- .../migrations/0052_merge_20220614_0027.py | 26 +- .../0053_alter_ngssrestriction_code.py | 46 +- .../0054_userprofile_uuid_secret_and_more.py | 44 +- backend/courses/registrar.py | 218 +- backend/courses/search.py | 266 +- backend/courses/util.py | 1958 ++++++------ backend/docker-compose.yaml | 70 +- backend/manage.py | 42 +- backend/plan/admin.py | 30 +- backend/plan/apps.py | 10 +- .../management/commands/recommendcourses.py | 518 ++-- .../management/commands/redownloadmodel.py | 42 +- .../management/commands/trainrecommender.py | 1044 +++---- backend/plan/migrations/0001_initial.py | 80 +- .../migrations/0002_auto_20191027_1510.py | 38 +- .../migrations/0003_auto_20201002_0714.py | 100 +- 
.../0004_alter_schedule_semester.py | 42 +- backend/plan/serializers.py | 21 + .../templates/plan_construction/index.html | 80 +- backend/plan/urls.py | 2 +- backend/plan/views.py | 2 +- backend/pyproject.toml | 4 +- backend/review/admin.py | 62 +- backend/review/annotations.py | 594 ++-- backend/review/apps.py | 10 +- backend/review/documentation.py | 1120 +++---- .../review/management/commands/clearcache.py | 68 +- .../export_department_reviews_by_semester.py | 334 +- .../management/commands/loadcomments.py | 136 +- .../management/commands/mergeinstructors.py | 466 +-- backend/review/migrations/0001_initial.py | 92 +- .../migrations/0002_auto_20190525_2010.py | 66 +- .../migrations/0003_auto_20190525_2040.py | 104 +- .../migrations/0004_auto_20200512_1526.py | 146 +- .../review/migrations/0005_review_comments.py | 36 +- backend/review/models.py | 242 +- backend/review/serializers.py | 40 +- backend/review/urls.py | 90 +- backend/review/util.py | 1178 +++---- backend/scripts/asgi-run | 20 +- backend/setup.cfg | 62 +- backend/tests/__init__.py | 82 +- backend/tests/courses/test-opendata.json | 676 ++-- backend/tests/courses/test_opendata_import.py | 236 +- backend/tests/courses/test_recompute_stats.py | 298 +- backend/tests/courses/util.py | 112 +- .../course_data_test.csv | 724 ++--- .../course_descriptions_test.csv | 616 ++-- backend/tests/plan/test_api.py | 1804 +++++------ backend/tests/plan/test_course_recs.py | 1354 ++++---- backend/tests/plan/test_schedule.py | 1372 ++++----- backend/tests/review/test_api.py | 1916 ++++++------ backend/tests/review/test_mergeinstructors.py | 514 ++-- backend/tests/review/test_models.py | 88 +- backend/tests/review/test_stats.py | 1988 ++++++------ backend/tests/review/test_topics.py | 1406 ++++----- 185 files changed, 23631 insertions(+), 23325 deletions(-) diff --git a/backend/.dockerignore b/backend/.dockerignore index 508be4d7e..52e8757aa 100644 --- a/backend/.dockerignore +++ b/backend/.dockerignore @@ -1,29 +1,29 @@ -# Docker -Dockerfile -Dockerfile.dev -.dockerignore - -# git -.circleci -.git -.gitignore -.gitmodules -.env -**/*.md -LICENSE - -# Misc -.coverage -**/__pycache__/ -tests/ -*.sqlite3 -postgres/ - -# Dev -.vscode - -# MacOS -.DS_Store - -# PCX -pcr-backup +# Docker +Dockerfile +Dockerfile.dev +.dockerignore + +# git +.circleci +.git +.gitignore +.gitmodules +.env +**/*.md +LICENSE + +# Misc +.coverage +**/__pycache__/ +tests/ +*.sqlite3 +postgres/ + +# Dev +.vscode + +# MacOS +.DS_Store + +# PCX +pcr-backup diff --git a/backend/Dockerfile b/backend/Dockerfile index 4c5afed3e..620259d0b 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -1,20 +1,20 @@ -FROM pennlabs/django-base:9c4f31bf1af44219d0f9019271a0033a222291c2-3.10.1 - -LABEL maintainer="Penn Labs" - -# Copy project dependencies -COPY Pipfile* /app/ - -# Install project dependencies -RUN pipenv install --system --deploy - -# Copy project files -COPY . /app/ - -ENV DJANGO_SETTINGS_MODULE PennCourses.settings.production -ENV SECRET_KEY 'temporary key just to build the docker image' - -COPY ./scripts/asgi-run /usr/local/bin/ - -# Collect static files -RUN python3 /app/manage.py collectstatic --noinput +FROM pennlabs/django-base:9c4f31bf1af44219d0f9019271a0033a222291c2-3.10.1 + +LABEL maintainer="Penn Labs" + +# Copy project dependencies +COPY Pipfile* /app/ + +# Install project dependencies +RUN pipenv install --system --deploy + +# Copy project files +COPY . 
/app/ + +ENV DJANGO_SETTINGS_MODULE PennCourses.settings.production +ENV SECRET_KEY 'temporary key just to build the docker image' + +COPY ./scripts/asgi-run /usr/local/bin/ + +# Collect static files +RUN python3 /app/manage.py collectstatic --noinput diff --git a/backend/Dockerfile.dev b/backend/Dockerfile.dev index 75952d067..f2ff59557 100644 --- a/backend/Dockerfile.dev +++ b/backend/Dockerfile.dev @@ -1,14 +1,14 @@ -FROM pennlabs/django-base:f0f05216db7c23c1dbb5b95c3bc9e8a2603bf2fd - -LABEL maintainer="Penn Labs" - -WORKDIR /backend - -# Copy project dependencies -COPY Pipfile* ./ - -# Install backend dependencies -RUN pipenv install --dev - -# Alias runserver command -RUN echo 'alias runserver="python manage.py runserver 0.0.0.0:8000"' >> ~/.bashrc +FROM pennlabs/django-base:f0f05216db7c23c1dbb5b95c3bc9e8a2603bf2fd + +LABEL maintainer="Penn Labs" + +WORKDIR /backend + +# Copy project dependencies +COPY Pipfile* ./ + +# Install backend dependencies +RUN pipenv install --dev + +# Alias runserver command +RUN echo 'alias runserver="python manage.py runserver 0.0.0.0:8000"' >> ~/.bashrc diff --git a/backend/PennCourses/__init__.py b/backend/PennCourses/__init__.py index 8a0ea4594..1ee22ed46 100644 --- a/backend/PennCourses/__init__.py +++ b/backend/PennCourses/__init__.py @@ -1,4 +1,4 @@ -from .celery import app as celery_app - - -__all__ = ("celery_app",) +from .celery import app as celery_app + + +__all__ = ("celery_app",) diff --git a/backend/PennCourses/asgi.py b/backend/PennCourses/asgi.py index e99c1473b..d6ec419de 100644 --- a/backend/PennCourses/asgi.py +++ b/backend/PennCourses/asgi.py @@ -1,8 +1,8 @@ -import os - -from django.core.asgi import get_asgi_application - - -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PennCourses.settings.production") - -application = get_asgi_application() +import os + +from django.core.asgi import get_asgi_application + + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PennCourses.settings.production") + +application = get_asgi_application() diff --git a/backend/PennCourses/celery.py b/backend/PennCourses/celery.py index a5b34d33f..26bff9c23 100644 --- a/backend/PennCourses/celery.py +++ b/backend/PennCourses/celery.py @@ -1,24 +1,24 @@ -import os - -from celery import Celery -from django.conf import settings - - -# set the default Django settings module for the 'celery' program. -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PennCourses.settings.development") - -app = Celery("PennCourses", broker=settings.MESSAGE_BROKER_URL) - -# Using a string here means the worker doesn't have to serialize -# the configuration object to child processes. -# - namespace='CELERY' means all celery-related configuration keys -# should have a `CELERY_` prefix. -app.config_from_object("django.conf:settings", namespace="CELERY") - -# Load task modules from all registered Django app configs. -app.autodiscover_tasks() - - -@app.task(bind=True) -def debug_task(self): - print("Request: {0!r}".format(self.request)) +import os + +from celery import Celery +from django.conf import settings + + +# set the default Django settings module for the 'celery' program. +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PennCourses.settings.development") + +app = Celery("PennCourses", broker=settings.MESSAGE_BROKER_URL) + +# Using a string here means the worker doesn't have to serialize +# the configuration object to child processes. +# - namespace='CELERY' means all celery-related configuration keys +# should have a `CELERY_` prefix. 
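# For illustration (an assumed example, not necessarily a setting defined in this project):
# a Django setting named CELERY_TASK_SERIALIZER = "json" would be picked up by Celery as
# task_serializer, because of the CELERY_ namespace passed to config_from_object() below.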
+app.config_from_object("django.conf:settings", namespace="CELERY") + +# Load task modules from all registered Django app configs. +app.autodiscover_tasks() + + +@app.task(bind=True) +def debug_task(self): + print("Request: {0!r}".format(self.request)) diff --git a/backend/PennCourses/docs_settings.py b/backend/PennCourses/docs_settings.py index 2115813a0..dd3ba4d16 100644 --- a/backend/PennCourses/docs_settings.py +++ b/backend/PennCourses/docs_settings.py @@ -1,1366 +1,1366 @@ -import inspect -import json -import re -from copy import deepcopy -from inspect import getdoc -from textwrap import dedent - -import jsonref -from django.urls import get_resolver -from rest_framework import serializers -from rest_framework.permissions import IsAuthenticated -from rest_framework.renderers import JSONOpenAPIRenderer -from rest_framework.schemas.openapi import AutoSchema -from rest_framework.schemas.utils import is_list_view - - -""" -This file includes code and settings for our PCx autodocumentation -(based on a Django-generated OpenAPI schema and Redoc, which formats that schema into a -readable documentation web page). Some useful links: -https://github.com/Redocly/redoc -https://github.com/Redocly/redoc/blob/master/docs/redoc-vendor-extensions.md#tagGroupObject -https://www.django-rest-framework.org/api-guide/schemas -https://www.django-rest-framework.org/topics/documenting-your-api/ -A Redoc example from which many of the concepts in this file were taken: -https://redocly.github.io/redoc/ -https://github.com/Redocly/redoc/blob/master/demo/openapi.yaml - - -TERMINOLOGY: - -Each route (e.g. GET /api/plan/schedules/{id}/) has an "operation ID" that is (by default) -automatically parsed from the codebase (e.g. "Retrieve Schedule"). The base "name" underlying this -operation ID is also automatically parsed by default (e.g. "Schedule"). The operation ID shows up -as the title of the route on our Redoc documentation page. You can customize the -name and/or the operation ID of a route by modifying the custom_name and custom_operation_id -dicts in this file. Customizing the name will change the operation ID and the tag of the route -(see below). You can customize the name with the custom_name dict below, and the operation ID -with the custom_operation_id dict. - -Tags are groupings of routes by name. For instance, all the routes -GET, POST /api/plan/schedules/ and GET, PUT, DELETE /api/plan/schedules/{id} -are organized under the shared tag "[PCP] Schedule", since they all share the base name "Schedule". -You can click on a tag in the table of contents of our Redoc documentation, and the section will -expand to show all the underlying routes (each titled by its operation ID). You can change tag -names using the custom_tag_names dict below, but the default tag names are usually sensible -(derived from the base name of the underlying routes). What's more useful is to give a tag a -description, which you can do with the custom_tag_descriptions dict. - -Tag groups organize tags into groups; they show up in the left sidebar of our Redoc page and divide -the categories of routes into meta categories. We are using them to separate our tags by app. -For instance, "Penn Course Plan" is a tag group. Each tag group has an abbreviation, specified by -the tag_group_abbreviations dict. For instance, the "Penn Course Plan" tag group is -abbreviated "[PCP]". Each tag in a tag group is prefixed by the tag group abbreviation in -square brackets (for instance "[PCP] Schedule").
The subpath_abbreviations dict below takes Django -app names (e.g. "plan"), and maps them to the corresponding tag group abbreviation (this is how -tags are automatically organized into tag groups). You shouldn't need to modify this dict unless -you change an app name or add a new app. Then, the tag_group_abbreviations dict maps the -abbreviation to the full name of the tag group. - - -MAINTENANCE: - -You can update the introductory sections / readme of the auto-docs page by editing the -markdown-formatted openapi_description text below. -You should include docstrings in views (the proper format of docstrings is specified here -https://www.django-rest-framework.org/coreapi/from-documenting-your-api/#documenting-your-views) -explaining the function of the view (or the function of each method if there are multiple supported -methods). These docstrings will be parsed by the auto-docs and included in the documentation page. - -When writing any class-based views where you specify a queryset (such as ViewSets), even if you -override get_queryset(), you should also specify the queryset field with something like -queryset = ExampleModel.objects.none() (using .none() to prevent accidental data breach), or -alternatively a sensible queryset default (e.g. queryset = Course.with_reviews.all() for the -CourseDetail ViewSet). Basically, just make sure the queryset parameter is always pointing to the -relevant model (if you are using queryset or get_queryset()). This will allow the -auto-documentation to access the model underlying the queryset (it cannot call get_queryset() -since it cannot generate a request object which the get_queryset() method might try to access). - -If the meaning of a model or serializer field is not clear, you should include the string help_text -as a parameter when initializing the field, explaining what that field stores. This will show up -in the documentation such that parameter descriptions are inferred from model or serializer field -help text. For properties, the docstring will be used since there is no -way to define help_text for a property; so even if a property's use is clear based on the code, -keep in mind that describing its purpose in the docstring will be helpful to frontend engineers -who are unfamiliar with the backend code (also, don't include a :return: tag as you might normally -do for functions; a property is to be treated as a dynamic field, not a method, so just state -what the property returns as the only text in the docstring). -Including help_text/docstring when a field/property's purpose is unclear will also -make the model/serializer code more understandable for future Labs developers. -And furthermore, all help_text and descriptive docstrings show up in the backend -documentation (accessible at /admin/doc/). - -PcxAutoSchema (defined below) is a subclass of Django's AutoSchema, and it makes some improvements -on that class for use with Redoc as well as some customizations specific to our use-cases. You can -customize auto-docs for a specific view by setting -schema = PcxAutoSchema(...) -in class-based views, or using the decorator -@schema(PcxAutoSchema(...)) -in functional views, and passing kwargs (...) into the PcxAutoSchema constructor (search -PcxAutoSchema in our codebase for some examples of this, and keep reading for a comprehensive -description of how you can customize PcxAutoSchema using these kwargs).
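For illustration, attaching a customized schema to a class-based view might look roughly like the
following sketch (ExampleViewSet, ExampleModel, ExampleSerializer, and the "examples-list" route
name are hypothetical placeholders, not names from this codebase):

    from rest_framework import viewsets

    from PennCourses.docs_settings import PcxAutoSchema

    class ExampleViewSet(viewsets.ReadOnlyModelViewSet):
        schema = PcxAutoSchema(
            response_codes={
                "examples-list": {
                    "GET": {200: "[DESCRIBE_RESPONSE_SCHEMA]Examples listed successfully."}
                },
            },
        )
        # Keep queryset pointing at the relevant model so the docs can introspect it.
        queryset = ExampleModel.objects.none()
        serializer_class = ExampleSerializer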
- -There are a number of dictionaries you can use to customize these auto-docs; some are passed into -PcxAutoSchema as initialization kwargs, and some are predefined in this file (in the -"Customizable Settings" section below). Often, these dictionaries will contain layers of nested -dictionaries with a schema of path/method/... However, you will notice in example code snippets -in this README and in our codebase that these paths are not hardcoded but instead are referenced by -route name (to avoid repeating URL information that is already specified in urls.py files). -To determine the name of a certain URL, run `python manage.py show_urls`, which will print -a list of URLs and their corresponding names. -Note that the route name of the URL here is not to be confused with the base name of the route -as defined above in the TERMINOLOGY section; the name of the URL is specified in the urls.py file, -whereas the name of the route is auto-generated from the code, and may or may not be derived from -the URL name. For instance "courses-detail" is a URL name, and "Course" is the base name of the -corresponding route for documentation generation. -Sorry for the confusion / overloading of terms here. - -By default, response codes will be assumed to be 204 (for delete) or 200 (in all other cases). -To set custom response codes for a path/method (with a custom description), include a -response_codes kwarg in your PcxAutoSchema instantiation. You should input -a dict mapping paths (indicated by route name) to dicts, where each subdict maps string methods -to dicts, and each further subdict maps int response codes to string descriptions. An example: - response_codes={ - "schedules-list": { - "GET": { - 200: "[DESCRIBE_RESPONSE_SCHEMA]Schedules listed successfully.", - }, - "POST": { - 201: "Schedule successfully created.", - 200: "Schedule successfully updated (a schedule with the " - "specified id already existed).", - 400: "Bad request (see description above).", - } - }, - ... - } -Note that if you include "[DESCRIBE_RESPONSE_SCHEMA]" in your string description, that will -not show up in the description text (it will automatically be removed) but instead will indicate -that that response should have a response body schema show up in the documentation (the schema will -be automatically generated by default, but can be customized using the override_response_schema -kwarg; see below). You should generally enable a response schema for responses which will contain -useful data for the frontend beyond the response code. Note that in the example above, the only -such response is the listing of schedules (the GET 200 response). -If you include "[UNDOCUMENTED]" in your string description, that will -remove that response status code from the schema/docs. This is useful if you want to remove -a code that is included by default from the schema. - -If you want to make manual changes to a request schema, include an override_request_schema kwarg -in your PcxAutoSchema instantiation. You should input a dict mapping paths (indicated by -route name) to dicts, where each subdict maps string methods to objects specifying the -desired request schema for that path/method.
-The format of these objects is governed by the OpenAPI specification -(for more on the syntax of how to specify a schema, see this link: -http://spec.openapis.org/oas/v3.0.3.html#schema-object [section 4.7.24] -you are specifying the dicts mapped to by "schema" keys in the examples at the following link: -http://spec.openapis.org/oas/v3.0.3.html#request-body-object). An example: - override_request_schema={ - "recommend-courses": { - "POST": { - "type": "object", - "properties": { - "past_courses": { - "type": "array", - "description": ( - "An array of courses the user has previously taken." - ), - "items": { - "type": "string", - "description": "A course code of the form DEPT-XXX, e.g. CIS-120" - } - } - } - } - } - } - -If you want to make manual changes to a response schema, include an override_response_schema kwarg -in your PcxAutoSchema instantiation. You should input a dict mapping paths (indicated by -route name) to dicts, where each subdict maps string methods to dicts, and each further subdict -maps int response codes to the objects specifying the desired response schema. -The format of these objects is governed by the OpenAPI specification -(for more on the syntax of how to specify a schema, see this link: -http://spec.openapis.org/oas/v3.0.3.html#schema-object [section 4.7.24] -you are specifying the dicts mapped to by "schema" keys in the examples at the following link: -http://spec.openapis.org/oas/v3.0.3.html#response-object). You can reference existing schemas -generated by the docs using the notation {"$ref": "#/components/schemas/VeryComplexType"}. -Download the existing OpenAPI schema using the button at the top of the docs page to inspect -what existing schemas exist, and what the path to them is. - -An example: - override_response_schema={ - "recommend-courses": { - "POST": { - 200: { - "type": "array", - "description": "An array of courses that we recommend.", - "items": { - "type": "string", - "description": "The full code of the recommended course, in the form " - "DEPT-XXX, e.g. CIS-120" - } - } - } - } - } - -If you want to manually set the description of a path parameter for a certain path/method, -you can do so by including a custom_path_parameter_desc kwarg in your PcxAutoSchema instantiation, -with keys of the form path > method > variable_name pointing to a string description. Example: - custom_path_parameter_desc={ - "statusupdate": { - "GET": { - "full_code": ( - "The code of the section which this status update applies to, in the " - "form '{dept code}-{course code}-{section code}', e.g. 'CIS-120-001' for the " - "001 section of CIS-120." - ) - } - } - } - -If you want to manually specify parameters (query, path, header, or cookie) for a certain -path/method, you can do so by including a custom_parameters kwarg in your PcxAutoSchema -instantiation, passing a dict of the form path > method > [list of query param schema objects]. -This kwarg will override custom_path_parameter_desc if they conflict. -The format of these objects is described by -https://spec.openapis.org/oas/v3.0.3.html#parameter-object [section 4.7.12] -Example: - custom_parameters={ - "course-plots": { - "GET": [ - { - "name": "course_code", - "in": "path", - "description": "The dash-joined department and code of the course you want plots for, e.g. 
`CIS-120` for CIS-120.", # noqa E501 - "schema": {"type": "string"}, - "required": True, - }, - { - "name": "instructor_ids", - "in": "query", - "description": "A comma-separated list of instructor IDs with which to filter the sections underlying the returned plots.", # noqa E501 - "schema": {"type": "string"}, - "required": False, - }, - ] - }, - }, - -Finally, if you still need to further customize your API schema, you can do this in the -make_manual_schema_changes function below. This is applied to the final JSON schema after all -automatic changes / customizations are applied. For more about the format of an OpenAPI -schema (which you would need to know a bit about to make further customizations), see this -documentation: -http://spec.openapis.org/oas/v3.0.3.html -To explore our JSON schema (which can help when trying to figure out how to modify it in -make_manual_schema_changes if you need to), you can download it from the /api/openapi/ route. -""" - - -def get_url_by_name(name): - path = get_resolver().reverse_dict[name][0][0][0] - path = path.replace(r"%(pk)s", r"{id}") - return "/" + re.sub(r"%\(([^)]+)\)s", r"{\1}", path) - - -# ============================= Begin Customizable Settings ======================================== - - -# The following is the description which shows up at the top of the documentation site -openapi_description = """ -# Introduction -Penn Courses ([GitHub](https://github.com/pennlabs/penn-courses)) is the umbrella -categorization for [Penn Labs](https://pennlabs.org/) -products designed to help students navigate the course registration process. It currently -includes three products, each with their own API documented on this page: -Penn Course Alert, Penn Course Plan, and Penn Course Review. - -See `Penn Labs Notion > Penn Courses` for more details on each of our (currently) three apps. - -For instructions on how to maintain this documentation while writing code, -see the comments in `backend/PennCourses/docs_settings.py` (it is easy, and will be helpful -for maintaining Labs knowledge in spite of our high member turnover rate). - -See our [GitHub](https://github.com/pennlabs/penn-courses) repo for instructions on -installation, running in development, and loading in course data for development. Visit -the `/admin/doc/` route ([link](/admin/doc/)) for the backend documentation generated by Django -(admin account required, which can be made by running -`python manage.py createsuperuser` in terminal/CLI). - -# Unified Penn Courses -By virtue of the fact that all Penn Courses products deal with, well, courses, -it would make sense for all three products to share the same backend. - -We realized the necessity of a unified backend when attempting to design a new Django backend -for Penn Course Plan. We like to live by the philosophy of keeping it -[DRY](https://en.wikipedia.org/wiki/Don't_repeat_yourself), and -PCA and PCP's data models both need to reference course and -section information. We could have simply copied over code (a bad idea) -or created a shared reusable Django app (a better idea) for course data, -but each app would still need to download copies of the same data. -Additionally, this will help us build integrations between our Courses products. - -# Authentication -PCx user authentication is handled by platform's Penn Labs Accounts Engine. 
-See [Penn Labs Notion > Platform > The Accounts Engine](https://www.notion.so/pennlabs/The-Accounts-Engine-726ccf8875e244f4b8dbf8a8f2c97a87?pvs=4) -for extensive documentation and links to repositories for this system. When tags or routes -are described as requiring user authentication, they are referring to this system. - -I highly recommend the [official video course on OAuth2](https://oauth.net/2/) (by Aaron Parecki), -then the Platform Notion docs on the "Accounts Engine" for anyone who wants to understand -Labs authentication better. Platform is our OAuth2 "Authorization Server", -and Django Labs Accounts is an OAuth2 client run by our Django backends (Clubs, Penn Courses, etc), -exposing client-facing authentication routes like `penncourseplan.com/accounts/login`. -There's also this Wikipedia page explaining [Shibboleth](https://en.wikipedia.org/wiki/Shibboleth_(software)) -(which is used by Penn for authentication, and by the Platform authorization server). - -See the Django docs for more on Django's features for -[User Authentication](https://docs.djangoproject.com/en/3.0/topics/auth/), -which are used by PCX apps, as part of Platform's accounts system. -""" # noqa E501 - - -# This dictionary takes app names (the string just after /api/ in the path or just after / -# if /api/ does not come at the beginning of the path) -# as keys and abbreviated versions of those names as values. It is used to -# add an abbreviated app prefix designating app membership to each route's tag name. -# For instance, the Registration tag is prepended with [PCA] to get "[PCA] Registration" since -# its routes start with /api/alert/, and "alert": "PCA" is a key/value pair in the following dict. -subpath_abbreviations = { - "plan": "PCP", - "alert": "PCA", - "review": "PCR", - "base": "PCx", - "accounts": "Accounts", -} -assert all( - [isinstance(key, str) and isinstance(val, str) for key, val in subpath_abbreviations.items()] -) - - -# This dictionary should map abbreviated app names (values from the dict above) to -# longer form names which will show up as the tag group name in the documentation. -tag_group_abbreviations = { - "PCP": "Penn Course Plan", - "PCA": "Penn Course Alert", - "PCR": "Penn Course Review", - "PCx": "Penn Courses (Base)", - "Accounts": "Penn Labs Accounts", - "": "Other" # Catches all other tags (this should normally be an empty tag group and if so - # it will not show up in the documentation, but is left as a debugging safeguard). - # If routes are showing up in a "Misc" tag in this group, make sure you set the schema for - # those views to be PcxAutoSchema, as is instructed in the meta docs above. -} -assert all( - [isinstance(key, str) and isinstance(val, str) for key, val in tag_group_abbreviations.items()] -) - - -# "operation ids" are the unique titles of routes within a tag (if you click on a tag you see -# a list of operation ids, each corresponding to a certain route). - -# name here refers to the name underlying the operation id of the view -# this is NOT the full name that you see on the API, it is the base name underlying it, -# and is used in construction of that name -# For instance, for POST /api/plan/schedules/, the name is "Schedule" and the operation_id is -# "Create Schedule" (see below get_name and _get_operation_id methods in PcxAutoSchema for -# a more in-depth explanation of the difference). -# IMPORTANT: The name also defines what the automatically-set tag name will be.
-# That's why this custom_name is provided separately from custom_operation_id below; -# you can use it if you want to change the operation_id AND the tag name at once. -custom_name = { # keys are (path, method) tuples, values are custom names - # method is one of ("GET", "POST", "PUT", "PATCH", "DELETE") - ("registrationhistory-list", "GET"): "Registration History", - ("registrationhistory-detail", "GET"): "Registration History", - ("statusupdate", "GET"): "Status Update", - ("recommend-courses", "POST"): "Course Recommendations", - ("course-reviews", "GET"): "Course Reviews", - ("course-plots", "GET"): "Plots", - ("review-autocomplete", "GET"): "Autocomplete Dump", - ("instructor-reviews", "GET"): "Instructor Reviews", - ("department-reviews", "GET"): "Department Reviews", - ("course-history", "GET"): "Section-Specific Reviews", - ("requirements-list", "GET"): "Pre-NGSS Requirement", - ("restrictions-list", "GET"): "NGSS Restriction", -} -assert all( - [isinstance(k, tuple) and len(k) == 2 and isinstance(k[1], str) for k in custom_name.keys()] -) - - -custom_operation_id = { # keys are (path, method) tuples, values are custom names - # method is one of ("GET", "POST", "PUT", "PATCH", "DELETE") - ("registrationhistory-list", "GET"): "List Registration History", - ("registrationhistory-detail", "GET"): "Retrieve Historic Registration", - ("statusupdate", "GET"): "List Status Updates", - ("courses-search", "GET"): "Course Search", - ("section-search", "GET"): "Section Search", - ("review-autocomplete", "GET"): "Retrieve Autocomplete Dump", -} -assert all( - [ - isinstance(k, tuple) and len(k) == 2 and isinstance(k[1], str) - for k in custom_operation_id.keys() - ] -) - - -# Use this dictionary to rename tags, if you wish to do so -# keys are old tag names (seen on docs), values are new tag names -custom_tag_names = {} -assert all([isinstance(key, str) and isinstance(val, str) for key, val in custom_tag_names.items()]) - - -# Note that you can customize the tag for all routes from a certain view by passing in a -# list containing only that tag into the tags kwarg of PcxAutoSchema instantiation -# (inherited behavior from Django AutoSchema: -# https://www.django-rest-framework.org/api-guide/schemas/#autoschema) - -# tag descriptions show up in the documentation body below the tag name -custom_tag_descriptions = { - # keys are tag names (after any name changes from above dicts), vals are descriptions - "[PCP] Schedule": dedent( - """ - These routes allow interfacing with the user's PCP Schedules for the current semester, - stored on the backend. Ever since we integrated Penn Labs Accounts into PCP so that users - can store their schedules across devices and browsers, we have stored users' schedules on - our backend (rather than local storage). - """ - ), - "[PCP] Pre-NGSS Requirements": dedent( - """ - These routes expose the pre-NGSS (deprecated since 2022C) academic requirements for the - current semester which are stored on our backend (hopefully comprehensive). - """ - ), - "[PCP] Course": dedent( - """ - These routes expose course information for PCP for the current semester. - """ - ), - "[PCA] Registration History": dedent( - """ - These routes expose a user's registration history (including - inactive and obsolete registrations) for the current semester. Inactive registrations are - registrations which would not trigger a notification to be sent if their section opened, - and obsolete registrations are registrations which are not at the head of their resubscribe - chain. 
- """ - ), - "[PCA] Registration": dedent( - """ - As the main API endpoints for PCA, these routes allow interaction with the user's - PCA registrations. An important concept which is referenced throughout the documentation - for these routes is that of the "resubscribe chain". A resubscribe chain is a chain - of PCA registrations where the tail of the chain was an original registration created - through a POST request to `/api/alert/registrations/` specifying a new section (one that - the user wasn't already registered to receive alerts for). Each next element in the chain - is a registration created by resubscribing to the previous registration (once that - registration had triggered an alert to be sent), either manually by the user or - automatically if auto_resubscribe was set to true. Then, it follows that the head of the - resubscribe chain is the most relevant Registration for that user/section combo; if any - of the registrations in the chain are active, it would be the head. And if the head - is active, none of the other registrations in the chain are active. - - Note that a registration will send an alert when the section it is watching opens, if and - only if it hasn't sent one before, it isn't cancelled, and it isn't deleted. If a - registration would send an alert when the section it is watching opens, we call it - "active". See the Create Registration docs for an explanation of how to create a new - registration, and the Update Registration docs for an explanation of how you can modify - a registration after it is created. - - In addition to sending alerts for when a class opens up, we have also implemented - an optionally user-enabled feature called "close notifications". - If a registration has close_notification enabled, it will act normally when the watched - section opens up for the first time (triggering an alert to be sent). However, once the - watched section closes, it will send another alert (the email alert will be in the same - chain as the original alert) to let the user know that the section has closed. Thus, - if a user sees a PCA notification on their phone during a class for instance, they won't - need to frantically open up their laptop and check PennInTouch to see if the class is still - open just to find that it is already closed. To avoid spam and wasted money, we DO NOT - send any close notifications over text. So the user must have an email saved or use - push notifications in order to be able to enable close notifications on a registration. - Note that the close_notification setting carries over across resubscriptions, but can be - disabled at any time using Update Registration. - - After the PCA backend refactor in 2019C/2020A, all PCA Registrations have a `user` field - pointing to the user's Penn Labs Accounts User object. In other words, we implemented a - user/accounts system for PCA which required that - people log in to use the website. Thus, the contact information used in PCA alerts - is taken from the user's User Profile. You can edit this contact information using - Update User or Partial Update User. If push_notifications is set to True, then - a push notification will be sent when the user is alerted, but no text notifications will - be sent (as that would be a redundant alert to the user's phone). Otherwise, an email - or a text alert is sent if and only if contact information for that medium exists in - the user's profile. 
- """ - ), - "[PCA] User": dedent( - """ - These routes expose a user's saved settings (from their Penn Labs Accounts user object). - For PCA, the profile object is of particular importance; it stores the email and - phone of the user (with a null value for either indicating the user doesn't want to be - notified using that medium). - """ - ), - "[PCA] Sections": dedent( - """ - This route is used by PCA to get data about sections. - """ - ), - "[Accounts] User": dedent( - """ - These routes allow interaction with the User object of a Penn Labs Accounts user. - We do not document `/accounts/...` authentication routes here, as they are described - by the [Authentication](#section/Authentication) section, and the - [Penn Labs Account Engine](https://www.notion.so/pennlabs/The-Accounts-Engine-726ccf8875e244f4b8dbf8a8f2c97a87?pvs=4) - Notion page. - """ # noqa E501 - ), - "Miscs": dedent( - """ - WARNING: This tag should not be used, and its existence - indicates you may have forgotten to set a view's schema to PcxAutoSchema for the views - under this tag. See the meta documentation in backend/PennCourses/docs_settings.py of our - codebase for instructions on how to properly set a view's schema to PcxAutoSchema. - """ - ), -} -assert all( - [isinstance(key, str) and isinstance(val, str) for key, val in custom_tag_descriptions.items()] -) - - -labs_logo_url = "https://i.imgur.com/tVsRNxJ.png" - - -def make_manual_schema_changes(data): - """ - Use this space to make manual modifications to the schema before it is - presented to the user. Only make manual changes as a last resort, and try - to use built-in functionality whenever possible. - These modifications were written by referencing the existing schema at /api/openapi - and also an example schema (written in YAML instead of JSON, but still - easily interpretable as JSON) from a Redoc example: - https://github.com/Redocly/redoc/blob/master/demo/openapi.yaml - """ - - data["info"]["x-logo"] = {"url": labs_logo_url, "altText": "Labs Logo"} - data["info"]["contact"] = {"email": "contact@pennlabs.org"} - - # Remove ID from the documented PUT request body for /api/plan/schedules/ - # (the id field in the request body is ignored in favor of the id path parameter) - schedules_detail_url = get_url_by_name("schedules-detail") - data["paths"][schedules_detail_url]["put"] = deepcopy( - data["paths"][schedules_detail_url]["put"] - ) - for content_ob in data["paths"][schedules_detail_url]["put"]["requestBody"]["content"].values(): - content_ob["schema"]["properties"].pop("id", None) - - # Make the name and sections fields of the PCP schedule request body required, - # make the id field optionally show up. Also, make the id and semester fields - # show up under the sections field, and make id required. 
- for path, path_ob in data["paths"].items(): - if get_url_by_name("schedules-list") not in path: - continue - for method_ob in path_ob.values(): - if "requestBody" not in method_ob.keys(): - continue - for content_ob in method_ob["requestBody"]["content"].values(): - properties_ob = content_ob["schema"]["properties"] - if "sections" in properties_ob.keys(): - section_ob = properties_ob["sections"] - if "required" not in section_ob["items"].keys(): - section_ob["items"]["required"] = [] - required = section_ob["items"]["required"] - section_ob["items"]["required"] = list(set(required + ["id", "semester"])) - for field, field_ob in section_ob["items"]["properties"].items(): - if field == "id" or field == "semester": - field_ob["readOnly"] = False - if "semester" in properties_ob.keys(): - properties_ob["semester"]["description"] = dedent( - """ - The semester of the course (of the form YYYYx where x is A [for spring], - B [summer], or C [fall]), e.g. `2019C` for fall 2019. You can omit this - field and the semester of the first section in the sections list will be - used instead (or if the sections list is empty, the current semester will - be used). If this field differs from any of the semesters of the sections - in the sections list, a 400 will be returned. - """ - ) - if "id" in properties_ob.keys(): - properties_ob["id"]["description"] = ( - "The id of the schedule, if you want to explicitly set this (on create) " - "or update an existing schedule by id (optional)." - ) - - # Make application/json the only content type - def delete_other_content_types_dfs(dictionary): - if not isinstance(dictionary, dict): - return None - dictionary.pop("application/x-www-form-urlencoded", None) - dictionary.pop("multipart/form-data", None) - for value in dictionary.values(): - delete_other_content_types_dfs(value) - - delete_other_content_types_dfs(data) - - -# ============================== End Customizable Settings ========================================= - - -def split_camel(w): - return re.sub("([a-z0-9])([A-Z])", lambda x: x.groups()[0] + " " + x.groups()[1], w) - - -def pluralize_word(s): - return s + "s" # naive solution because this is how it is done in DRF - - -# Customization dicts populated by PcxAutoSchema __init__ method calls - -# A cumulative version of the response_codes parameter to PcxAutoSchema: -cumulative_response_codes = dict() -# A cumulative version of the override_request_schema parameter to PcxAutoSchema: -cumulative_override_request_schema = dict() -# A cumulative version of the override_response_schema parameter to PcxAutoSchema: -cumulative_override_response_schema = dict() -# A cumulative version of the custom_path_parameter_desc parameter to PcxAutoSchema: -cumulative_cppd = dict() -# A cumulative version of the custom_parameters parameter to PcxAutoSchema: -cumulative_cp = dict() - - -class JSONOpenAPICustomTagGroupsRenderer(JSONOpenAPIRenderer): - def render(self, data_raw, media_type=None, renderer_context=None): - """ - This overridden method modifies the JSON OpenAPI schema generated by Django - to add tag groups, and most of the other customization specified above. 
- It was written by referencing the existing schema at /api/openapi - and also an example schema (written in YAML instead of JSON, but still - easily interpretable as JSON) from a Redoc example: - https://github.com/Redocly/redoc/blob/master/demo/openapi.yaml - """ - - # The following resolves JSON refs which are not handled automatically in Python dicts - # https://swagger.io/docs/specification/using-ref/ - data = jsonref.loads(json.dumps(data_raw)) - - # Determine existing tags and create a map from tag to a list of the corresponding dicts - # of nested schema objects at paths/{path}/{method} in the OpenAPI schema (for all - # the paths/methods which have that tag). - # If any routes do not have tags, add the 'Misc' tag to them, which will be put in - # the 'Other' tag group automatically, below. - tags = set() - tag_to_dicts = dict() - for x in data["paths"].values(): - for v in x.values(): - if "tags" in v.keys(): - tags.update(v["tags"]) - for t in v["tags"]: - if t not in tag_to_dicts.keys(): - tag_to_dicts[t] = [] - tag_to_dicts[t].append(v) - else: - v["tags"] = ["Misc"] - tags.add("Misc") - if "Misc" not in tag_to_dicts.keys(): - tag_to_dicts["Misc"] = [] - tag_to_dicts["Misc"].append(v) - - # A function to change tag names (adds requested changes to a dict which will be - # cleared after the for tag in tags loop below finishes; it is done this way since - # the tags set cannot be modified while it is being iterated over). - changes = dict() - - def update_tag(old_tag, new_tag): - for val in tag_to_dicts[old_tag]: - val["tags"] = [(t if t != old_tag else new_tag) for t in val["tags"]] - lst = tag_to_dicts.pop(old_tag) - tag_to_dicts[new_tag] = lst - changes[old_tag] = new_tag # since tags cannot be updated while iterating through tags - return new_tag - - # Pluralize tag name if all views in tag are lists, and apply custom tag names from - # custom_tag_names dict defined above. - for tag in tags: - tag = update_tag(tag, split_camel(tag)) - all_list = all([("list" in v["operationId"].lower()) for v in tag_to_dicts[tag]]) - if all_list: # if all views in tag are lists, pluralize tag name - tag = update_tag( - tag, " ".join(tag.split(" ")[:-1] + [pluralize_word(tag.split(" ")[-1])]) - ) - if tag in custom_tag_names.keys(): # rename custom tags - tag = update_tag(tag, custom_tag_names[tag]) - - # Remove 'required' flags from responses (it doesn't make sense for a response - # item to be 'required'). - def delete_required_dfs(dictionary): - if not isinstance(dictionary, dict): - return None - dictionary.pop("required", None) - for value in dictionary.values(): - delete_required_dfs(value) - - for path_name, val in data["paths"].items(): - for method_name, v in val.items(): - v["responses"] = deepcopy(v["responses"]) - delete_required_dfs(v["responses"]) - - # Since tags could not be updated while we were iterating through tags above, - # we update them now. - for k, v in changes.items(): - tags.remove(k) - tags.add(v) - - # Add custom tag descriptions from the custom_tag_descriptions dict defined above - data["tags"] = [ - {"name": tag, "description": custom_tag_descriptions.get(tag, "")} for tag in tags - ] - - # Add tags to tag groups based on the tag group abbreviation in the name of the tag - # (these abbreviations are added as prefixes of the tag names automatically in the - # get_tags method of PcxAutoSchema). 
- tags_to_tag_groups = dict() - for t in tags: - for k in tag_group_abbreviations.keys(): - # Assigning the tag groups like this prevents tag abbreviations being substrings - # of each other from being problematic; the longest matching abbreviation is - # used (so even if another tag group abbreviation is a substring, it won't be - # mistakenly used for the tag group). - if k in t and ( - t not in tags_to_tag_groups.keys() or len(k) > len(tags_to_tag_groups[t]) - ): - tags_to_tag_groups[t] = k - data["x-tagGroups"] = [ - {"name": v, "tags": [t for t in tags if tags_to_tag_groups[t] == k]} - for k, v in tag_group_abbreviations.items() - ] - # Remove empty tag groups - data["x-tagGroups"] = [g for g in data["x-tagGroups"] if len(g["tags"]) != 0] - - # This code ensures that no path/methods in optional dictionary kwargs passed to - # PcxAutoSchema __init__ methods are invalid (indicating user error) - for original_kwarg, parameter_name, parameter_dict in [ - ("response_codes", "cumulative_response_codes", cumulative_response_codes), - ( - "override_request_schema", - "cumulative_override_request_schema", - cumulative_override_request_schema, - ), - ( - "override_response_schema", - "cumulative_override_response_schema", - cumulative_override_response_schema, - ), - ("custom_path_parameter_desc", "cumulative_cppd", cumulative_cppd), - ("custom_parameters", "cumulative_cp", cumulative_cp), - ]: - for route_name in parameter_dict: - traceback = parameter_dict[route_name]["traceback"] - path = get_url_by_name(route_name) - if path not in data["paths"].keys(): - raise ValueError( - f"Check the {original_kwarg} input to PcxAutoSchema instantiation at " - f"{traceback}; invalid path found: '{path}'." - + ( - "If 'id' is in your args list, check if you set primary_key=True for " - "some field in the relevant model, and if so change 'id' " - "in your args list to the name of that field." - if "id" in path - else "" - ) - ) - for method in parameter_dict[route_name]: - if method == "traceback": - continue - if method.lower() not in data["paths"][path].keys(): - raise ValueError( - f"Check the {original_kwarg} input to PcxAutoSchema instantiation at " - f"{traceback}; invalid method '{method}' for path '{path}'" - ) - - new_cumulative_cp = { - get_url_by_name(route_name): value for route_name, value in cumulative_cp.items() - } - - # Update query parameter documentation - for path_name, val in data["paths"].items(): - if path_name not in new_cumulative_cp: - continue - for method_name, v in val.items(): - method_name = method_name.upper() - if method_name.upper() not in new_cumulative_cp[path_name]: - continue - custom_query_params = new_cumulative_cp[path_name][method_name] - custom_query_params_names = {param_ob["name"] for param_ob in custom_query_params} - v["parameters"] = [ - param_ob - for param_ob in v["parameters"] - if param_ob["name"] not in custom_query_params_names - ] + custom_query_params - - # Make any additional manual changes to the schema programmed by the user - make_manual_schema_changes(data) - - return jsonref.dumps(data, indent=2).encode("utf-8") - - -class PcxAutoSchema(AutoSchema): - """ - This custom subclass serves to improve AutoSchema in terms of customizability, and - quality of inference in some non-customized cases. 
- - https://www.django-rest-framework.org/api-guide/schemas/#autoschema - """ - - def __new__( - cls, - *args, - response_codes=None, - override_request_schema=None, - override_response_schema=None, - custom_path_parameter_desc=None, - custom_parameters=None, - **kwargs, - ): - """ - An overridden __new__ method which adds a created_at property to each PcxAutoSchema - instance indicating the file/line from which it was instantiated (useful for debugging). - """ - new_instance = super(PcxAutoSchema, cls).__new__(cls, *args, **kwargs) - stack_trace = inspect.stack() - created_at = "%s:%d" % (stack_trace[1][1], stack_trace[1][2]) - new_instance.created_at = created_at - return new_instance - - # Overrides, uses overridden method - # https://www.django-rest-framework.org/api-guide/schemas/#autoschema__init__-kwargs - def __init__( - self, - *args, - response_codes=None, - override_request_schema=None, - override_response_schema=None, - custom_path_parameter_desc=None, - custom_parameters=None, - **kwargs, - ): - """ - This custom __init__ method deals with optional passed-in kwargs such as - response_codes, override_response_schema, and custom_path_parameter_desc. - """ - - def fail(param, hint): - """ - A function to generate an error message if validation of one of the passed-in - kwargs fails. - """ - raise ValueError( - f"Invalid {param} kwarg passed into PcxAutoSchema at {self.created_at}; please " - f"check the meta docs in PennCourses/docs_settings.py for an explanation of " - f"the proper format of this kwarg. Hint:\n{hint}" - ) - - # Validate that each of the passed-in kwargs are nested dictionaries of the correct depth - for param_name, param_dict in [ - ("response_codes", response_codes), - ("override_request_schema", override_request_schema), - ("override_response_schema", override_response_schema), - ("custom_path_parameter_desc", custom_path_parameter_desc), - ("custom_parameters", custom_parameters), - ]: - if param_dict is not None: - if not isinstance(param_dict, dict): - fail(param_name, f"The {param_name} kwarg must be a dict.") - for dictionary in param_dict.values(): - if not isinstance(dictionary, dict): - fail(param_name, f"All values of the {param_name} dict must be dicts.") - for nested_dictionary in dictionary.values(): - if param_name == "custom_parameters": - if not isinstance(nested_dictionary, list): - fail( - param_name, - f"All values of the dict values of {param_name} must be lists.", - ) - continue - if not isinstance(nested_dictionary, dict): - fail( - param_name, - f"All values of the dict values of {param_name} must be dicts.", - ) - if param_name in [ - "override_request_schema", - "override_response_schema", - ]: - continue - for value in nested_dictionary.values(): - if isinstance(value, dict): - fail( - param_name, - f"Too deep nested dictionaries found in {param_name}.", - ) - - # Handle passed-in custom response codes - global cumulative_response_codes - if response_codes is None: - self.response_codes = dict() - else: - response_codes = deepcopy(response_codes) - for key, d in response_codes.items(): - response_codes[key] = {k.upper(): v for k, v in d.items()} - self.response_codes = response_codes - for_cumulative_response_codes = deepcopy(response_codes) - for dictionary in for_cumulative_response_codes.values(): - dictionary["traceback"] = self.created_at - cumulative_response_codes = { - **cumulative_response_codes, - **for_cumulative_response_codes, - } - - # Handle passed-in customized request schemas - global cumulative_override_request_schema 
- if override_request_schema is None: - self.override_request_schema = dict() - else: - override_request_schema = deepcopy(override_request_schema) - for key, d in override_request_schema.items(): - override_request_schema[key] = {k.upper(): v for k, v in d.items()} - self.override_request_schema = override_request_schema - for_cumulative_override_request_schema = deepcopy(override_request_schema) - for dictionary in for_cumulative_override_request_schema.values(): - dictionary["traceback"] = self.created_at - cumulative_override_request_schema = { - **cumulative_override_request_schema, - **for_cumulative_override_request_schema, - } - - # Handle passed-in customized response schemas - global cumulative_override_response_schema - if override_response_schema is None: - self.override_response_schema = dict() - else: - override_response_schema = deepcopy(override_response_schema) - for key, d in override_response_schema.items(): - override_response_schema[key] = {k.upper(): v for k, v in d.items()} - self.override_response_schema = override_response_schema - for_cumulative_override_response_schema = deepcopy(override_response_schema) - for dictionary in for_cumulative_override_response_schema.values(): - dictionary["traceback"] = self.created_at - cumulative_override_response_schema = { - **cumulative_override_response_schema, - **for_cumulative_override_response_schema, - } - - # Handle passed-in custom path parameter descriptions - global cumulative_cppd - if custom_path_parameter_desc is None: - self.custom_path_parameter_desc = dict() - else: - custom_path_parameter_desc = deepcopy(custom_path_parameter_desc) - for key, d in custom_path_parameter_desc.items(): - custom_path_parameter_desc[key] = {k.upper(): v for k, v in d.items()} - self.custom_path_parameter_desc = custom_path_parameter_desc - for_cumulative_cppd = deepcopy(custom_path_parameter_desc) - for dictionary in for_cumulative_cppd.values(): - dictionary["traceback"] = self.created_at - cumulative_cppd = {**cumulative_cppd, **for_cumulative_cppd} - - # Handle passed-in custom query parameter descriptions - global cumulative_cp - if custom_parameters is not None: - custom_parameters = deepcopy(custom_parameters) - for key, d in custom_parameters.items(): - custom_parameters[key] = {k.upper(): v for k, v in d.items()} - for dictionary in custom_parameters.values(): - dictionary["traceback"] = self.created_at - cumulative_cp = {**cumulative_cp, **custom_parameters} - - super().__init__(*args, **kwargs) - - # Overrides, uses overridden method - def get_description(self, path, method): - """ - This overridden method adds the method and path to the top of each route description - and a note if authentication is required (in addition to calling/using the - super method). Docstring of overridden method: - - Determine a path description. - - This will be based on the method docstring if one exists, - or else the class docstring. - """ - - # Add the method and path to the description so it is more readable. - desc = f"({method.upper()} `{path}`)\n\n" - # Add the description from docstrings (default functionality). - desc += super().get_description(path, method) - view = self.view - # Add a note if the path/method requires user authentication. - if IsAuthenticated in view.permission_classes: - desc += '\n\nUser authentication required.' 
- return desc - - # Overrides, uses overridden method - # (https://www.django-rest-framework.org/api-guide/schemas/#map_serializer) - def map_serializer(self, serializer): - """ - This method adds property docstrings as field descriptions when appropriate - (to request/response schemas in the API docs), in addition - to calling the overridden map_serializer function. - For instance, in the response schema of - [PCA] Registration, List Registration (GET /api/alert/registrations/) - the description of the is_active property is inferred from the property docstring - by this method (before it was blank). - """ - - result = super().map_serializer(serializer) - properties = result["properties"] - model = None - if hasattr(serializer, "Meta") and hasattr(serializer.Meta, "model"): - model = serializer.Meta.model - - for field in serializer.fields.values(): - if isinstance(field, serializers.HiddenField): - continue - schema = properties[field.field_name] - if ( - "description" not in schema - and model is not None - and hasattr(model, field.field_name) - and isinstance(getattr(model, field.field_name), property) - and getattr(model, field.field_name).__doc__ - ): - schema["description"] = dedent(getattr(model, field.field_name).__doc__) - - return result - - # Helper method - def get_action(self, path, method): - """ - This method gets the action of the specified path/method (a more expressive name - for the method like "retrieve" or "list" for a GET method or "create" for a POST method). - The code is taken from the get_operation_id_base method in AutoSchema, - but is slightly modified to not use camelCase. - """ - method_name = getattr(self.view, "action", method.lower()) - if is_list_view(path, method, self.view): - action = "list" - elif method_name not in self.method_mapping: - action = method_name.lower() - else: - action = self.method_mapping[method.lower()] - return action - - # Helper method - def get_name(self, path, method, action=None): - """ - This method returns the name of the path/method. If the - user has specified a custom name using the custom_name parameter to __init__, that custom - name is used. - The code here is backported/modified from AutoSchema's get_operation_id_base method - due to how we generate tags (when "s" is added to the end of names for list actions in - get_operation_id_base, this makes it impossible to tag those list action routes together - with their non-list counterparts using their shared names as we like to do). - Besides not appending "s", this backported code is also modified to remove - the "viewset" suffix from the name if it exists. - All modified code is marked by a comment starting with "MODIFIED" - I am probably going to submit a PR to DRF to try to get them to improve their - default tag generation in this way (eventually). - If that ever gets merged and cut into a stable release, we will be able to - remove this method from our code. - Otherwise, keep an eye on DRF changes to see if the overridden get_operation_id_base - method is improved (and incorperate those changes here if possible). 
- """ - - # Return the custom name if specified by the user - # First convert the functions in the tuple keys of custom_name to strings - custom_name_converted_keys = { - (get_url_by_name(route_name), m): v for (route_name, m), v in custom_name.items() - } - # Check if user has specified custom name - if (path, method) in custom_name_converted_keys.keys(): - return custom_name_converted_keys[(path, method)] - - # Get action if it is not passed in as a parameter - if action is None: - action = self.get_action(path, method) - - # NOTE: All below code is taken/modified from AutoSchema's get_operation_id_base method - - model = getattr(getattr(self.view, "queryset", None), "model", None) - - if self.operation_id_base is not None: - name = self.operation_id_base - - # Try to deduce the ID from the view's model - elif model is not None: - name = model.__name__ - - # Try with the serializer class name - elif self.get_serializer(path, method) is not None: - name = self.get_serializer(path, method).__class__.__name__ - if name.endswith("Serializer"): - name = name[:-10] - - # Fallback to the view name - else: - name = self.view.__class__.__name__ - if name.endswith("APIView"): - name = name[:-7] - elif name.endswith("View"): - name = name[:-4] - elif name.lower().endswith("viewset"): - # MODIFIED from AutoSchema's get_operation_id_base: remove viewset suffix - name = name[:-7] - - # Due to camel-casing of classes and `action` being lowercase, apply title in order to - # find if action truly comes at the end of the name - if name.endswith(action.title()): # ListView, UpdateAPIView, ThingDelete ... - name = name[: -len(action)] - - # MODIFIED from AutoSchema's get_operation_id_base: "s" is not appended - # even if action is list - - return name - - # Overrides, DOES NOT call overridden method - # https://www.django-rest-framework.org/api-guide/schemas/#get_operation_id_base - def get_operation_id_base(self, path, method, action): - """ - This method returns the base operation id (i.e. the name) of the path/method. It - uses get_name as a helper but makes the last character "s" if the action is "list". - See the docstring for the get_name method of this class for an explanation as - to why we do this. Docstring of overridden method: - - Compute the base part for operation ID from the model, serializer or view name. - """ - - name = self.get_name(path, method, action) - - if action == "list" and not name.endswith("s"): # listThings instead of listThing - name = pluralize_word(name) - - return name - - # Overrides, uses overridden method - # https://www.django-rest-framework.org/api-guide/schemas/#get_operation_id - def get_operation_id(self, path, method): - """ - This method gets the operation id for the given path/method. It first checks if - the user has specified a custom operation id for this path/method using the - custom_operation_id dict at the top of docs_settings.py, and if not it returns the result - of the overridden method (which is modified from default by the overriden - get_operation_id_base method above). Docstring of overridden method: - - Compute an operation ID from the view type and get_operation_id_base method. 
- """ - - # Return the custom operation id if specified by the user - # First convert the functions in the tuple keys of custom_operation_id to strings - custom_operation_id_converted_keys = { - (get_url_by_name(route_name), m): v - for (route_name, m), v in custom_operation_id.items() - } - # Check if user has specified custom operation id - if (path, method) in custom_operation_id_converted_keys.keys(): - return custom_operation_id_converted_keys[(path, method)] - - return split_camel(super().get_operation_id(path, method)).title() - - # Overrides, DOES NOT call overridden method - # Keep an eye on DRF changes to see if the overridden get_tags method is improved - # https://www.django-rest-framework.org/api-guide/schemas/#get_tags - def get_tags(self, path, method): - """ - This method returns custom tags passed into the __init__ method, or otherwise - (if the tags argument was not included) adds a tag - of the form '[APP] route_name' as the default behavior. - Note that the abbreviation of the app in these [APP] brackets is set in the - subpath_abbreviations dict above. - """ - - # If user has specified tags, use them. - if self._tags: - return self._tags - - # Create the tag from the first part of the path (other than "api") and the name - name = self.get_name(path, method) - path_components = (path[1:] if path.startswith("/") else path).split("/") - subpath = path_components[1] if path_components[0] == "api" else path_components[0] - if subpath not in subpath_abbreviations.keys(): - raise ValueError( - f"You must add the the '{subpath}' subpath to the " - "subpath_abbreviations dict in backend/PennCourses/docs_settings.py. " - f"This subpath was inferred from the path '{path}'." - ) - return [f"[{subpath_abbreviations[subpath]}] {name}"] - - # Overrides, uses overridden method - def get_path_parameters(self, path, method): - """ - This method returns a list of parameters from templated path variables. It improves - the inference of path parameter description from the overridden method by utilizing - property docstrings. If a custom path parameter description is specified using the - custom_path_parameter_desc kwarg in __init__, that is used for the description. - Docstring of overridden method: - - Return a list of parameters from templated path variables. 
- """ - - parameters = super().get_path_parameters(path, method) - - model = getattr(getattr(self.view, "queryset", None), "model", None) - for parameter in parameters: - variable = parameter["name"] - description = parameter["description"] - - # Use property docstrings when possible - if model is not None: - try: - model_field = model._meta.get_field(variable) - except Exception: - model_field = None - if ( - model_field is None - and parameter["name"] in model.__dict__.keys() - and isinstance(model.__dict__[variable], property) - ): - doc = getdoc(model.__dict__[variable]) - description = "" if doc is None else doc - - custom_path_parameter_desc = { - get_url_by_name(route_name): value - for route_name, value in self.custom_path_parameter_desc.items() - } - - # Add custom path parameter description if relevant - if ( - custom_path_parameter_desc - and path in custom_path_parameter_desc.keys() - and method.upper() in custom_path_parameter_desc[path].keys() - and variable in custom_path_parameter_desc[path][method].keys() - and custom_path_parameter_desc[path][method][variable] - ): - description = custom_path_parameter_desc[path][method][variable] - - parameter["description"] = description - - return parameters - - # Overrides, uses overridden method - def get_request_body(self, path, method): - """ - This method overrides the get_request_body method from AutoSchema, setting - a custom request schema if specified via the override_request_schema init kwarg. - """ - request_body = super().get_request_body(path, method) - - override_request_schema = { - get_url_by_name(route_name): value - for route_name, value in self.override_request_schema.items() - } - - if path in override_request_schema and method in override_request_schema[path]: - for ct in request_body["content"]: - request_body["content"][ct]["schema"] = override_request_schema[path][method] - - return request_body - - # Overrides, uses overridden method - def get_responses(self, path, method): - """ - This method describes the responses for this path/method. It makes certain - improvements over the overridden method in terms of adding useful information - (like 403 responses). It also enforces the user's choice as to whether to include - a response schema or alternatively just display the response (for path/method/status_code). - Custom response descriptions specified by the user in the response_codes - kwarg to the __init__ method are also added. - Finally, custom schemas specified by the user in the override_response_schema kwarg to the - __init__ method are added. - """ - - responses = super().get_responses(path, method) - - # Automatically add 403 response if authentication is required - if IsAuthenticated in self.view.permission_classes and 403 not in responses: - responses = { - **responses, - 403: {"description": "Access denied (missing or improper authentication)."}, - } - - # Get "default" schema content from response - # This code is from an older version of the overridden method which - # did not use JSON refs (JSON refs are not appropriate for our use-case since - # we change certain response schemas in ways that we don't want to affect - # request schemas, etc). 
- serializer = self.get_response_serializer(path, method) - if not isinstance(serializer, serializers.Serializer): - item_schema = {} - else: - item_schema = self.get_reference(serializer) - if is_list_view(path, method, self.view): - response_schema = { - "type": "array", - "items": item_schema, - } - paginator = self.get_paginator() - if paginator: - response_schema = paginator.get_paginated_response_schema(response_schema) - else: - response_schema = item_schema - default_schema_content = { - content_type: {"schema": deepcopy(response_schema)} - for content_type in self.response_media_types - } - - response_codes = { - get_url_by_name(route_name): value for route_name, value in self.response_codes.items() - } - - # Change all status codes to integers - responses = {int(key): value for key, value in responses.items()} - # Add status codes and custom descriptions from custom response_codes dict - if path in response_codes and method in response_codes[path]: - for status_code in response_codes[path][method]: - status_code = int(status_code) - custom_description = response_codes[path][method][status_code] - include_content = "[DESCRIBE_RESPONSE_SCHEMA]" in custom_description - custom_description = custom_description.replace("[DESCRIBE_RESPONSE_SCHEMA]", "") - if status_code in responses.keys(): - if "[UNDOCUMENTED]" in custom_description: - del responses[status_code] - else: - responses[status_code]["description"] = custom_description - if not include_content and "content" in responses[status_code]: - del responses[status_code]["content"] - elif "[UNDOCUMENTED]" not in custom_description: - responses[status_code] = {"description": custom_description} - if include_content: - responses[status_code]["content"] = deepcopy(default_schema_content) - - override_response_schema = { - get_url_by_name(route_name): value - for route_name, value in self.override_response_schema.items() - } - - if path in override_response_schema and method in override_response_schema[path]: - for status_code in override_response_schema[path][method]: - if status_code not in responses.keys(): - responses[status_code] = { - "description": "", - "content": deepcopy(default_schema_content), - } - for status_code in responses.keys(): - if status_code in override_response_schema[path][method]: - custom_schema = override_response_schema[path][method][status_code] - if "content" not in responses[status_code]: - responses[status_code]["content"] = dict() - for ct in self.request_media_types: - responses[status_code]["content"][ct] = custom_schema - else: - for response_schema in responses[status_code]["content"].values(): - response_schema["schema"] = custom_schema - return responses +import inspect +import json +import re +from copy import deepcopy +from inspect import getdoc +from textwrap import dedent + +import jsonref +from django.urls import get_resolver +from rest_framework import serializers +from rest_framework.permissions import IsAuthenticated +from rest_framework.renderers import JSONOpenAPIRenderer +from rest_framework.schemas.openapi import AutoSchema +from rest_framework.schemas.utils import is_list_view + + +""" +This file includes code and settings for our PCx autodocumentation +(based on a Django-generated OpenAPI schema and Redoc, which formats that schema into a +readable documentation web page). 
Some useful links:
+https://github.com/Redocly/redoc
+https://github.com/Redocly/redoc/blob/master/docs/redoc-vendor-extensions.md#tagGroupObject
+https://www.django-rest-framework.org/api-guide/schemas
+https://www.django-rest-framework.org/topics/documenting-your-api/
+A Redoc example from which many of the concepts in this file were taken:
+https://redocly.github.io/redoc/
+https://github.com/Redocly/redoc/blob/master/demo/openapi.yaml
+
+
+TERMINOLOGY:
+
+Each route (e.g. GET /api/plan/schedules/{id}/) has an "operation ID" that is (by default)
+automatically parsed from the codebase (e.g. "Retrieve Schedule"). The base "name" underlying this
+operation ID is also automatically parsed by default (e.g. "Schedule"). The operation ID shows up
+as the title of the route on our Redoc documentation page. You can customize the
+name and/or the operation ID of a route by modifying the custom_name and custom_operation_id
+dicts in this file. Customizing the name (via custom_name) will change both the operation ID and
+the tag of the route (see below), while custom_operation_id changes only the operation ID.
+
+Tags are groupings of routes by name. For instance, all the routes
+GET, POST /api/plan/schedules/ and GET, PUT, DELETE /api/plan/schedules/{id}
+are organized under the shared tag "[PCP] Schedule", since they all share the base name "Schedule".
+You can click on a tag in the table of contents of our Redoc documentation, and the section will
+expand to show all the underlying routes (each titled by its operation ID). You can change tag
+names using the custom_tag_names dict below, but the default tag names are usually sensible
+(derived from the base name of the underlying routes). What's more useful is to give a tag a
+description, which you can do with the custom_tag_descriptions dict.
+
+Tag groups organize tags into groups; they show up in the left sidebar of our Redoc page and divide
+the categories of routes into meta categories. We are using them to separate our tags by app.
+For instance, "Penn Course Plan" is a tag group. Each tag group has an abbreviation, specified by
+the tag_group_abbreviations dict. For instance, the "Penn Course Plan" tag group is
+abbreviated "[PCP]". Each tag in a tag group is prefixed by the tag group abbreviation in
+square brackets (for instance "[PCP] Schedule"). The subpath_abbreviations dict below takes Django
+app names (e.g. "plan"), and maps them to the corresponding tag group abbreviation (this is how
+tags are automatically organized into tag groups). You shouldn't need to modify this dict unless
+you change an app name or add a new app. Then, the tag_group_abbreviations dict maps the
+abbreviation to the full name of the tag group.
+
+
+MAINTENANCE:
+
+You can update the introductory sections / readme of the auto-docs page by editing the
+markdown-formatted openapi_description text below.
+You should include docstrings in views (the proper format of docstrings is specified here
+https://www.django-rest-framework.org/coreapi/from-documenting-your-api/#documenting-your-views)
+explaining the function of the view (or the function of each method if there are multiple supported
+methods). These docstrings will be parsed by the auto-docs and included in the documentation page.
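+For illustration, a minimal documented view might look something like the following sketch
+(hypothetical names: ExampleList, Example, and ExampleSerializer are placeholders, and imports
+are omitted):
+    class ExampleList(generics.ListAPIView):
+        '''
+        List all examples for the current semester, in alphabetical order.
+        '''
+
+        schema = PcxAutoSchema()
+        serializer_class = ExampleSerializer
+        # Point the queryset at the relevant model (see the note on querysets below).
+        queryset = Example.objects.none()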
+
+When writing any class-based views where you specify a queryset (such as ViewSets), even if you
+override get_queryset(), you should also specify the queryset field with something like
+queryset = ExampleModel.objects.none() (using .none() to prevent accidental data breach), or
+alternatively a sensible queryset default (e.g. queryset = Course.with_reviews.all() for the
+CourseDetail ViewSet). Basically, just make sure the queryset parameter is always pointing to the
+relevant model (if you are using queryset or get_queryset()). This will allow the
+auto-documentation to access the model underlying the queryset (it cannot call get_queryset()
+since it cannot generate a request object which the get_queryset() method might try to access).
+
+If the meaning of a model or serializer field is not clear, you should include the string help_text
+as a parameter when initializing the field, explaining what that field stores. This help_text will
+show up in the documentation as the description of the corresponding parameter or field.
+For properties, the docstring will be used since there is no
+way to define help_text for a property; so even if a property's use is clear based on the code,
+keep in mind that describing its purpose in the docstring will be helpful to frontend engineers
+who are unfamiliar with the backend code (also, don't include a :return: tag as you might normally
+do for functions; a property is to be treated as a dynamic field, not a method, so just state
+what the property returns as the only text in the docstring).
+Including help_text/docstring when a field/property's purpose is unclear will also
+make the model/serializer code more understandable for future Labs developers.
+And furthermore, all help_text and descriptive docstrings show up in the backend
+documentation (accessible at /admin/doc/).
+
+PcxAutoSchema (defined below) is a subclass of DRF's AutoSchema, and it makes some improvements
+on that class for use with Redoc as well as some customizations specific to our use-cases. You can
+customize auto-docs for a specific view by setting
+schema = PcxAutoSchema(...)
+in class-based views, or using the decorator
+@schema(PcxAutoSchema(...))
+in functional views, and passing kwargs (...) into the PcxAutoSchema constructor (search
+PcxAutoSchema in our codebase for some examples of this, and keep reading for a comprehensive
+description of how you can customize PcxAutoSchema using these kwargs).
+
+There are a number of dictionaries you can use to customize these auto-docs; some are passed into
+PcxAutoSchema as initialization kwargs, and some are predefined in this file (in the
+"Customizable Settings" section below). Often, these dictionaries will contain layers of nested
+dictionaries with a schema of path/method/... However, you will notice in example code snippets
+in this README and in our codebase, these paths are not hardcoded but instead are referenced by
+route name (to avoid repeating URL information that is already specified in urls.py files).
+To determine the name of a certain URL, run `python manage.py show_urls`, which will print
+a list of URLs and their corresponding names.
+Note that the route name of the URL here is not to be confused with the base name of the route
+as defined above in the TERMINOLOGY section; the name of the URL is specified in the urls.py file,
+whereas the name of the route is auto-generated from the code, and may or may not be derived from
+the URL name.
+For instance "courses-detail" is a URL name, and "Course" is the base name of the
+corresponding route for documentation generation.
+Sorry for the confusion / overloading of terms here.
+
+By default, response codes will be assumed to be 204 (for delete) or 200 (in all other cases).
+To set custom response codes for a path/method (with a custom description), include a
+response_codes kwarg in your PcxAutoSchema instantiation. You should input
+a dict mapping paths (indicated by route name) to dicts, where each subdict maps string methods
+to dicts, and each further subdict maps int response codes to string descriptions. An example:
+    response_codes={
+        "schedules-list": {
+            "GET": {
+                200: "[DESCRIBE_RESPONSE_SCHEMA]Schedules listed successfully.",
+            },
+            "POST": {
+                201: "Schedule successfully created.",
+                200: "Schedule successfully updated (a schedule with the "
+                "specified id already existed).",
+                400: "Bad request (see description above).",
+            }
+        },
+        ...
+    }
+Note that if you include "[DESCRIBE_RESPONSE_SCHEMA]" in your string description, that will
+not show up in the description text (it will automatically be removed) but instead will indicate
+that that response should have a response body schema show up in the documentation (the schema will
+be automatically generated by default, but can be customized using the override_response_schema
+kwarg; see below). You should generally enable a response schema for responses which will contain
+useful data for the frontend beyond the response code. Note that in the example above, the only
+such response is the listing of schedules (the GET 200 response).
+If you include "[UNDOCUMENTED]" in your string description, that will
+remove that response status code from the schema/docs. This is useful if you want to remove
+a code that is included by default from the schema.
+
+If you want to make manual changes to a request schema, include an override_request_schema kwarg
+in your PcxAutoSchema instantiation. You should input a dict mapping paths (indicated by
+route name) to dicts, where each subdict maps string methods to objects specifying the
+desired request schema for that path/method.
+The format of these objects is governed by the OpenAPI specification
+(for more on the syntax of how to specify a schema, see this link:
+http://spec.openapis.org/oas/v3.0.3.html#schema-object [section 4.7.24]
+you are specifying the dicts mapped to by "schema" keys in the examples at the following link:
+http://spec.openapis.org/oas/v3.0.3.html#request-body-object). An example:
+    override_request_schema={
+        "recommend-courses": {
+            "POST": {
+                "type": "object",
+                "properties": {
+                    "past_courses": {
+                        "type": "array",
+                        "description": (
+                            "An array of courses the user has previously taken."
+                        ),
+                        "items": {
+                            "type": "string",
+                            "description": "A course code of the form DEPT-XXX, e.g. CIS-120"
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+If you want to make manual changes to a response schema, include an override_response_schema kwarg
+in your PcxAutoSchema instantiation. You should input a dict mapping paths (indicated by
+route name) to dicts, where each subdict maps string methods to dicts, and each further subdict
+maps int response codes to the objects specifying the desired response schema.
+The format of these objects is governed by the OpenAPI specification +(for more on the syntax of how to specify a schema, see this link: +http://spec.openapis.org/oas/v3.0.3.html#schema-object [section 4.7.24] +you are specifying the dicts mapped to by "schema" keys in the examples at the following link: +http://spec.openapis.org/oas/v3.0.3.html#response-object). You can reference existing schemas +generated by the docs using the notation {"$ref": "#/components/schemas/VeryComplexType"}. +Download the existing OpenAPI schema using the button at the top of the docs page to inspect +what existing schemas exist, and what the path to them is. + +An example: + override_response_schema={ + "recommend-courses": { + "POST": { + 200: { + "type": "array", + "description": "An array of courses that we recommend.", + "items": { + "type": "string", + "description": "The full code of the recommended course, in the form " + "DEPT-XXX, e.g. CIS-120" + } + } + } + } + } + +If you want to manually set the description of a path parameter for a certain path/method, +you can do so by including a custom_path_parameter_desc kwarg in your PcxAutoSchema instantiation, +with keys of the form path > method > variable_name pointing to a string description. Example: + custom_path_parameter_desc={ + "statusupdate": { + "GET": { + "full_code": ( + "The code of the section which this status update applies to, in the " + "form '{dept code}-{course code}-{section code}', e.g. 'CIS-120-001' for the " + "001 section of CIS-120." + ) + } + } + } + +If you want to manually specify parameters (query, path, header, or cookie) for a certain +path/method, you can do so by including a custom_parameters kwarg in your PcxAutoSchema +instantiation, passing a dict of the form path > method > [list of query param schema objects]. +This kwarg will override custom_path_parameter_desc if they conflict. +The format of these objects is described by +https://spec.openapis.org/oas/v3.0.3.html#parameter-object [section 4.7.12] +Example: + custom_parameters={ + "course-plots": { + "GET": [ + { + "name": "course_code", + "in": "path", + "description": "The dash-joined department and code of the course you want plots for, e.g. `CIS-120` for CIS-120.", # noqa E501 + "schema": {"type": "string"}, + "required": True, + }, + { + "name": "instructor_ids", + "in": "query", + "description": "A comma-separated list of instructor IDs with which to filter the sections underlying the returned plots.", # noqa E501 + "schema": {"type": "string"}, + "required": False, + }, + ] + }, + }, + +Finally, if you still need to further customize your API schema, you can do this in the +make_manual_schema_changes function below. This is applied to the final JSON schema after all +automatic changes / customizations are applied. For more about the format of an OpenAPI +schema (which you would need to know a bit about to make further customizations), see this +documentation: +http://spec.openapis.org/oas/v3.0.3.html +To explore our JSON schema (which can help when trying to figure out how to modify it in +make_manual_schema_changes if you need to), you can download it from the /api/openapi/ route. 
+""" + + +def get_url_by_name(name): + path = get_resolver().reverse_dict[name][0][0][0] + path = path.replace(r"%(pk)s", r"{id}") + return "/" + re.sub(r"%\(([^)]+)\)s", r"{\1}", path) + + +# ============================= Begin Customizable Settings ======================================== + + +# The following is the description which shows up at the top of the documentation site +openapi_description = """ +# Introduction +Penn Courses ([GitHub](https://github.com/pennlabs/penn-courses)) is the umbrella +categorization for [Penn Labs](https://pennlabs.org/) +products designed to help students navigate the course registration process. It currently +includes three products, each with their own API documented on this page: +Penn Course Alert, Penn Course Plan, and Penn Course Review. + +See `Penn Labs Notion > Penn Courses` for more details on each of our (currently) three apps. + +For instructions on how to maintain this documentation while writing code, +see the comments in `backend/PennCourses/docs_settings.py` (it is easy, and will be helpful +for maintaining Labs knowledge in spite of our high member turnover rate). + +See our [GitHub](https://github.com/pennlabs/penn-courses) repo for instructions on +installation, running in development, and loading in course data for development. Visit +the `/admin/doc/` route ([link](/admin/doc/)) for the backend documentation generated by Django +(admin account required, which can be made by running +`python manage.py createsuperuser` in terminal/CLI). + +# Unified Penn Courses +By virtue of the fact that all Penn Courses products deal with, well, courses, +it would make sense for all three products to share the same backend. + +We realized the necessity of a unified backend when attempting to design a new Django backend +for Penn Course Plan. We like to live by the philosophy of keeping it +[DRY](https://en.wikipedia.org/wiki/Don't_repeat_yourself), and +PCA and PCP's data models both need to reference course and +section information. We could have simply copied over code (a bad idea) +or created a shared reusable Django app (a better idea) for course data, +but each app would still need to download copies of the same data. +Additionally, this will help us build integrations between our Courses products. + +# Authentication +PCx user authentication is handled by platform's Penn Labs Accounts Engine. +See [Penn Labs Notion > Platform > The Accounts Engine](https://www.notion.so/pennlabs/The-Accounts-Engine-726ccf8875e244f4b8dbf8a8f2c97a87?pvs=4) +for extensive documentation and links to repositories for this system. When tags or routes +are described as requiring user authentication, they are referring to this system. + +I highly recommend the [official video course on OAuth2](https://oauth.net/2/) (by Aaron Parecki), +then the Platform Notion docs on the "Accounts Engine" for anyone who wants to understand +Labs authentication better. Platform is our OAuth2 "Authorization Server", +and Django Labs Accounts is an OAuth2 client run by our Django backends (Clubs, Penn Courses, etc), +exposing client-facing authentication routes like `penncourseplan.com/accounts/login`. +There's also this Wikipedia page explaining [Shibboleth](https://en.wikipedia.org/wiki/Shibboleth_(software)) +(which is used by Penn for authentication, and by the Platform authorization server). 
+
+See the Django docs for more on Django's features for
+[User Authentication](https://docs.djangoproject.com/en/3.0/topics/auth/),
+which are used by PCx apps, as part of Platform's accounts system.
+"""  # noqa E501
+
+
+# This dictionary takes app names (the string just after /api/ in the path or just after /
+# if /api/ does not come at the beginning of the path)
+# as keys and abbreviated versions of those names as values. It is used to
+# add an abbreviated app prefix designating app membership to each route's tag name.
+# For instance the Registration tag is prepended with [PCA] to get "[PCA] Registration" since
+# its routes start with /api/alert/, and "alert": "PCA" is a key/value pair in the following dict.
+subpath_abbreviations = {
+    "plan": "PCP",
+    "alert": "PCA",
+    "review": "PCR",
+    "base": "PCx",
+    "accounts": "Accounts",
+}
+assert all(
+    [isinstance(key, str) and isinstance(val, str) for key, val in subpath_abbreviations.items()]
+)
+
+
+# This dictionary should map abbreviated app names (values from the dict above) to
+# longer form names which will show up as the tag group name in the documentation.
+tag_group_abbreviations = {
+    "PCP": "Penn Course Plan",
+    "PCA": "Penn Course Alert",
+    "PCR": "Penn Course Review",
+    "PCx": "Penn Courses (Base)",
+    "Accounts": "Penn Labs Accounts",
+    "": "Other"  # Catches all other tags (this should normally be an empty tag group and if so
+    # it will not show up in the documentation, but is left as a debugging safeguard).
+    # If routes are showing up in a "Misc" tag in this group, make sure you set the schema for
+    # those views to be PcxAutoSchema, as is instructed in the meta docs above.
+}
+assert all(
+    [isinstance(key, str) and isinstance(val, str) for key, val in tag_group_abbreviations.items()]
+)
+
+
+# "operation ids" are the unique titles of routes within a tag (if you click on a tag you see
+# a list of operation ids, each corresponding to a certain route).
+
+# name here refers to the name underlying the operation id of the view;
+# this is NOT the full name that you see on the API, it is the base name underlying it,
+# and is used in construction of that name.
+# For instance, for POST /api/plan/schedules/, the name is "Schedule" and the operation_id is
+# "Create Schedule" (see the get_name and get_operation_id methods below in PcxAutoSchema for
+# a more in-depth explanation of the difference).
+# IMPORTANT: The name also defines what the automatically-set tag name will be.
+# That's why this custom_name is provided separately from custom_operation_id below;
+# you can use it if you want to change the operation_id AND the tag name at once.
+custom_name = { # keys are (path, method) tuples, values are custom names + # method is one of ("GET", "POST", "PUT", "PATCH", "DELETE") + ("registrationhistory-list", "GET"): "Registration History", + ("registrationhistory-detail", "GET"): "Registration History", + ("statusupdate", "GET"): "Status Update", + ("recommend-courses", "POST"): "Course Recommendations", + ("course-reviews", "GET"): "Course Reviews", + ("course-plots", "GET"): "Plots", + ("review-autocomplete", "GET"): "Autocomplete Dump", + ("instructor-reviews", "GET"): "Instructor Reviews", + ("department-reviews", "GET"): "Department Reviews", + ("course-history", "GET"): "Section-Specific Reviews", + ("requirements-list", "GET"): "Pre-NGSS Requirement", + ("restrictions-list", "GET"): "NGSS Restriction", +} +assert all( + [isinstance(k, tuple) and len(k) == 2 and isinstance(k[1], str) for k in custom_name.keys()] +) + + +custom_operation_id = { # keys are (path, method) tuples, values are custom names + # method is one of ("GET", "POST", "PUT", "PATCH", "DELETE") + ("registrationhistory-list", "GET"): "List Registration History", + ("registrationhistory-detail", "GET"): "Retrieve Historic Registration", + ("statusupdate", "GET"): "List Status Updates", + ("courses-search", "GET"): "Course Search", + ("section-search", "GET"): "Section Search", + ("review-autocomplete", "GET"): "Retrieve Autocomplete Dump", +} +assert all( + [ + isinstance(k, tuple) and len(k) == 2 and isinstance(k[1], str) + for k in custom_operation_id.keys() + ] +) + + +# Use this dictionary to rename tags, if you wish to do so +# keys are old tag names (seen on docs), values are new tag names +custom_tag_names = {} +assert all([isinstance(key, str) and isinstance(val, str) for key, val in custom_tag_names.items()]) + + +# Note that you can customize the tag for all routes from a certain view by passing in a +# list containing only that tag into the tags kwarg of PcxAutoSchema instantiation +# (inherited behavior from Django AutoSchema: +# https://www.django-rest-framework.org/api-guide/schemas/#autoschema) + +# tag descriptions show up in the documentation body below the tag name +custom_tag_descriptions = { + # keys are tag names (after any name changes from above dicts), vals are descriptions + "[PCP] Schedule": dedent( + """ + These routes allow interfacing with the user's PCP Schedules for the current semester, + stored on the backend. Ever since we integrated Penn Labs Accounts into PCP so that users + can store their schedules across devices and browsers, we have stored users' schedules on + our backend (rather than local storage). + """ + ), + "[PCP] Pre-NGSS Requirements": dedent( + """ + These routes expose the pre-NGSS (deprecated since 2022C) academic requirements for the + current semester which are stored on our backend (hopefully comprehensive). + """ + ), + "[PCP] Course": dedent( + """ + These routes expose course information for PCP for the current semester. + """ + ), + "[PCA] Registration History": dedent( + """ + These routes expose a user's registration history (including + inactive and obsolete registrations) for the current semester. Inactive registrations are + registrations which would not trigger a notification to be sent if their section opened, + and obsolete registrations are registrations which are not at the head of their resubscribe + chain. + """ + ), + "[PCA] Registration": dedent( + """ + As the main API endpoints for PCA, these routes allow interaction with the user's + PCA registrations. 
An important concept which is referenced throughout the documentation + for these routes is that of the "resubscribe chain". A resubscribe chain is a chain + of PCA registrations where the tail of the chain was an original registration created + through a POST request to `/api/alert/registrations/` specifying a new section (one that + the user wasn't already registered to receive alerts for). Each next element in the chain + is a registration created by resubscribing to the previous registration (once that + registration had triggered an alert to be sent), either manually by the user or + automatically if auto_resubscribe was set to true. Then, it follows that the head of the + resubscribe chain is the most relevant Registration for that user/section combo; if any + of the registrations in the chain are active, it would be the head. And if the head + is active, none of the other registrations in the chain are active. + + Note that a registration will send an alert when the section it is watching opens, if and + only if it hasn't sent one before, it isn't cancelled, and it isn't deleted. If a + registration would send an alert when the section it is watching opens, we call it + "active". See the Create Registration docs for an explanation of how to create a new + registration, and the Update Registration docs for an explanation of how you can modify + a registration after it is created. + + In addition to sending alerts for when a class opens up, we have also implemented + an optionally user-enabled feature called "close notifications". + If a registration has close_notification enabled, it will act normally when the watched + section opens up for the first time (triggering an alert to be sent). However, once the + watched section closes, it will send another alert (the email alert will be in the same + chain as the original alert) to let the user know that the section has closed. Thus, + if a user sees a PCA notification on their phone during a class for instance, they won't + need to frantically open up their laptop and check PennInTouch to see if the class is still + open just to find that it is already closed. To avoid spam and wasted money, we DO NOT + send any close notifications over text. So the user must have an email saved or use + push notifications in order to be able to enable close notifications on a registration. + Note that the close_notification setting carries over across resubscriptions, but can be + disabled at any time using Update Registration. + + After the PCA backend refactor in 2019C/2020A, all PCA Registrations have a `user` field + pointing to the user's Penn Labs Accounts User object. In other words, we implemented a + user/accounts system for PCA which required that + people log in to use the website. Thus, the contact information used in PCA alerts + is taken from the user's User Profile. You can edit this contact information using + Update User or Partial Update User. If push_notifications is set to True, then + a push notification will be sent when the user is alerted, but no text notifications will + be sent (as that would be a redundant alert to the user's phone). Otherwise, an email + or a text alert is sent if and only if contact information for that medium exists in + the user's profile. + """ + ), + "[PCA] User": dedent( + """ + These routes expose a user's saved settings (from their Penn Labs Accounts user object). 
+ For PCA, the profile object is of particular importance; it stores the email and + phone of the user (with a null value for either indicating the user doesn't want to be + notified using that medium). + """ + ), + "[PCA] Sections": dedent( + """ + This route is used by PCA to get data about sections. + """ + ), + "[Accounts] User": dedent( + """ + These routes allow interaction with the User object of a Penn Labs Accounts user. + We do not document `/accounts/...` authentication routes here, as they are described + by the [Authentication](#section/Authentication) section, and the + [Penn Labs Account Engine](https://www.notion.so/pennlabs/The-Accounts-Engine-726ccf8875e244f4b8dbf8a8f2c97a87?pvs=4) + Notion page. + """ # noqa E501 + ), + "Miscs": dedent( + """ + WARNING: This tag should not be used, and its existence + indicates you may have forgotten to set a view's schema to PcxAutoSchema for the views + under this tag. See the meta documentation in backend/PennCourses/docs_settings.py of our + codebase for instructions on how to properly set a view's schema to PcxAutoSchema. + """ + ), +} +assert all( + [isinstance(key, str) and isinstance(val, str) for key, val in custom_tag_descriptions.items()] +) + + +labs_logo_url = "https://i.imgur.com/tVsRNxJ.png" + + +def make_manual_schema_changes(data): + """ + Use this space to make manual modifications to the schema before it is + presented to the user. Only make manual changes as a last resort, and try + to use built-in functionality whenever possible. + These modifications were written by referencing the existing schema at /api/openapi + and also an example schema (written in YAML instead of JSON, but still + easily interpretable as JSON) from a Redoc example: + https://github.com/Redocly/redoc/blob/master/demo/openapi.yaml + """ + + data["info"]["x-logo"] = {"url": labs_logo_url, "altText": "Labs Logo"} + data["info"]["contact"] = {"email": "contact@pennlabs.org"} + + # Remove ID from the documented PUT request body for /api/plan/schedules/ + # (the id field in the request body is ignored in favor of the id path parameter) + schedules_detail_url = get_url_by_name("schedules-detail") + data["paths"][schedules_detail_url]["put"] = deepcopy( + data["paths"][schedules_detail_url]["put"] + ) + for content_ob in data["paths"][schedules_detail_url]["put"]["requestBody"]["content"].values(): + content_ob["schema"]["properties"].pop("id", None) + + # Make the name and sections fields of the PCP schedule request body required, + # make the id field optionally show up. Also, make the id and semester fields + # show up under the sections field, and make id required. 
+ for path, path_ob in data["paths"].items(): + if get_url_by_name("schedules-list") not in path: + continue + for method_ob in path_ob.values(): + if "requestBody" not in method_ob.keys(): + continue + for content_ob in method_ob["requestBody"]["content"].values(): + properties_ob = content_ob["schema"]["properties"] + if "sections" in properties_ob.keys(): + section_ob = properties_ob["sections"] + if "required" not in section_ob["items"].keys(): + section_ob["items"]["required"] = [] + required = section_ob["items"]["required"] + section_ob["items"]["required"] = list(set(required + ["id", "semester"])) + for field, field_ob in section_ob["items"]["properties"].items(): + if field == "id" or field == "semester": + field_ob["readOnly"] = False + if "semester" in properties_ob.keys(): + properties_ob["semester"]["description"] = dedent( + """ + The semester of the course (of the form YYYYx where x is A [for spring], + B [summer], or C [fall]), e.g. `2019C` for fall 2019. You can omit this + field and the semester of the first section in the sections list will be + used instead (or if the sections list is empty, the current semester will + be used). If this field differs from any of the semesters of the sections + in the sections list, a 400 will be returned. + """ + ) + if "id" in properties_ob.keys(): + properties_ob["id"]["description"] = ( + "The id of the schedule, if you want to explicitly set this (on create) " + "or update an existing schedule by id (optional)." + ) + + # Make application/json the only content type + def delete_other_content_types_dfs(dictionary): + if not isinstance(dictionary, dict): + return None + dictionary.pop("application/x-www-form-urlencoded", None) + dictionary.pop("multipart/form-data", None) + for value in dictionary.values(): + delete_other_content_types_dfs(value) + + delete_other_content_types_dfs(data) + + +# ============================== End Customizable Settings ========================================= + + +def split_camel(w): + return re.sub("([a-z0-9])([A-Z])", lambda x: x.groups()[0] + " " + x.groups()[1], w) + + +def pluralize_word(s): + return s + "s" # naive solution because this is how it is done in DRF + + +# Customization dicts populated by PcxAutoSchema __init__ method calls + +# A cumulative version of the response_codes parameter to PcxAutoSchema: +cumulative_response_codes = dict() +# A cumulative version of the override_request_schema parameter to PcxAutoSchema: +cumulative_override_request_schema = dict() +# A cumulative version of the override_response_schema parameter to PcxAutoSchema: +cumulative_override_response_schema = dict() +# A cumulative version of the custom_path_parameter_desc parameter to PcxAutoSchema: +cumulative_cppd = dict() +# A cumulative version of the custom_parameters parameter to PcxAutoSchema: +cumulative_cp = dict() + + +class JSONOpenAPICustomTagGroupsRenderer(JSONOpenAPIRenderer): + def render(self, data_raw, media_type=None, renderer_context=None): + """ + This overridden method modifies the JSON OpenAPI schema generated by Django + to add tag groups, and most of the other customization specified above. 
+ It was written by referencing the existing schema at /api/openapi + and also an example schema (written in YAML instead of JSON, but still + easily interpretable as JSON) from a Redoc example: + https://github.com/Redocly/redoc/blob/master/demo/openapi.yaml + """ + + # The following resolves JSON refs which are not handled automatically in Python dicts + # https://swagger.io/docs/specification/using-ref/ + data = jsonref.loads(json.dumps(data_raw)) + + # Determine existing tags and create a map from tag to a list of the corresponding dicts + # of nested schema objects at paths/{path}/{method} in the OpenAPI schema (for all + # the paths/methods which have that tag). + # If any routes do not have tags, add the 'Misc' tag to them, which will be put in + # the 'Other' tag group automatically, below. + tags = set() + tag_to_dicts = dict() + for x in data["paths"].values(): + for v in x.values(): + if "tags" in v.keys(): + tags.update(v["tags"]) + for t in v["tags"]: + if t not in tag_to_dicts.keys(): + tag_to_dicts[t] = [] + tag_to_dicts[t].append(v) + else: + v["tags"] = ["Misc"] + tags.add("Misc") + if "Misc" not in tag_to_dicts.keys(): + tag_to_dicts["Misc"] = [] + tag_to_dicts["Misc"].append(v) + + # A function to change tag names (adds requested changes to a dict which will be + # cleared after the for tag in tags loop below finishes; it is done this way since + # the tags set cannot be modified while it is being iterated over). + changes = dict() + + def update_tag(old_tag, new_tag): + for val in tag_to_dicts[old_tag]: + val["tags"] = [(t if t != old_tag else new_tag) for t in val["tags"]] + lst = tag_to_dicts.pop(old_tag) + tag_to_dicts[new_tag] = lst + changes[old_tag] = new_tag # since tags cannot be updated while iterating through tags + return new_tag + + # Pluralize tag name if all views in tag are lists, and apply custom tag names from + # custom_tag_names dict defined above. + for tag in tags: + tag = update_tag(tag, split_camel(tag)) + all_list = all([("list" in v["operationId"].lower()) for v in tag_to_dicts[tag]]) + if all_list: # if all views in tag are lists, pluralize tag name + tag = update_tag( + tag, " ".join(tag.split(" ")[:-1] + [pluralize_word(tag.split(" ")[-1])]) + ) + if tag in custom_tag_names.keys(): # rename custom tags + tag = update_tag(tag, custom_tag_names[tag]) + + # Remove 'required' flags from responses (it doesn't make sense for a response + # item to be 'required'). + def delete_required_dfs(dictionary): + if not isinstance(dictionary, dict): + return None + dictionary.pop("required", None) + for value in dictionary.values(): + delete_required_dfs(value) + + for path_name, val in data["paths"].items(): + for method_name, v in val.items(): + v["responses"] = deepcopy(v["responses"]) + delete_required_dfs(v["responses"]) + + # Since tags could not be updated while we were iterating through tags above, + # we update them now. + for k, v in changes.items(): + tags.remove(k) + tags.add(v) + + # Add custom tag descriptions from the custom_tag_descriptions dict defined above + data["tags"] = [ + {"name": tag, "description": custom_tag_descriptions.get(tag, "")} for tag in tags + ] + + # Add tags to tag groups based on the tag group abbreviation in the name of the tag + # (these abbreviations are added as prefixes of the tag names automatically in the + # get_tags method of PcxAutoSchema). 
+ tags_to_tag_groups = dict() + for t in tags: + for k in tag_group_abbreviations.keys(): + # Assigning the tag groups like this prevents tag abbreviations being substrings + # of each other from being problematic; the longest matching abbreviation is + # used (so even if another tag group abbreviation is a substring, it won't be + # mistakenly used for the tag group). + if k in t and ( + t not in tags_to_tag_groups.keys() or len(k) > len(tags_to_tag_groups[t]) + ): + tags_to_tag_groups[t] = k + data["x-tagGroups"] = [ + {"name": v, "tags": [t for t in tags if tags_to_tag_groups[t] == k]} + for k, v in tag_group_abbreviations.items() + ] + # Remove empty tag groups + data["x-tagGroups"] = [g for g in data["x-tagGroups"] if len(g["tags"]) != 0] + + # This code ensures that no path/methods in optional dictionary kwargs passed to + # PcxAutoSchema __init__ methods are invalid (indicating user error) + for original_kwarg, parameter_name, parameter_dict in [ + ("response_codes", "cumulative_response_codes", cumulative_response_codes), + ( + "override_request_schema", + "cumulative_override_request_schema", + cumulative_override_request_schema, + ), + ( + "override_response_schema", + "cumulative_override_response_schema", + cumulative_override_response_schema, + ), + ("custom_path_parameter_desc", "cumulative_cppd", cumulative_cppd), + ("custom_parameters", "cumulative_cp", cumulative_cp), + ]: + for route_name in parameter_dict: + traceback = parameter_dict[route_name]["traceback"] + path = get_url_by_name(route_name) + if path not in data["paths"].keys(): + raise ValueError( + f"Check the {original_kwarg} input to PcxAutoSchema instantiation at " + f"{traceback}; invalid path found: '{path}'." + + ( + "If 'id' is in your args list, check if you set primary_key=True for " + "some field in the relevant model, and if so change 'id' " + "in your args list to the name of that field." + if "id" in path + else "" + ) + ) + for method in parameter_dict[route_name]: + if method == "traceback": + continue + if method.lower() not in data["paths"][path].keys(): + raise ValueError( + f"Check the {original_kwarg} input to PcxAutoSchema instantiation at " + f"{traceback}; invalid method '{method}' for path '{path}'" + ) + + new_cumulative_cp = { + get_url_by_name(route_name): value for route_name, value in cumulative_cp.items() + } + + # Update query parameter documentation + for path_name, val in data["paths"].items(): + if path_name not in new_cumulative_cp: + continue + for method_name, v in val.items(): + method_name = method_name.upper() + if method_name.upper() not in new_cumulative_cp[path_name]: + continue + custom_query_params = new_cumulative_cp[path_name][method_name] + custom_query_params_names = {param_ob["name"] for param_ob in custom_query_params} + v["parameters"] = [ + param_ob + for param_ob in v["parameters"] + if param_ob["name"] not in custom_query_params_names + ] + custom_query_params + + # Make any additional manual changes to the schema programmed by the user + make_manual_schema_changes(data) + + return jsonref.dumps(data, indent=2).encode("utf-8") + + +class PcxAutoSchema(AutoSchema): + """ + This custom subclass serves to improve AutoSchema in terms of customizability, and + quality of inference in some non-customized cases. 
+ + https://www.django-rest-framework.org/api-guide/schemas/#autoschema + """ + + def __new__( + cls, + *args, + response_codes=None, + override_request_schema=None, + override_response_schema=None, + custom_path_parameter_desc=None, + custom_parameters=None, + **kwargs, + ): + """ + An overridden __new__ method which adds a created_at property to each PcxAutoSchema + instance indicating the file/line from which it was instantiated (useful for debugging). + """ + new_instance = super(PcxAutoSchema, cls).__new__(cls, *args, **kwargs) + stack_trace = inspect.stack() + created_at = "%s:%d" % (stack_trace[1][1], stack_trace[1][2]) + new_instance.created_at = created_at + return new_instance + + # Overrides, uses overridden method + # https://www.django-rest-framework.org/api-guide/schemas/#autoschema__init__-kwargs + def __init__( + self, + *args, + response_codes=None, + override_request_schema=None, + override_response_schema=None, + custom_path_parameter_desc=None, + custom_parameters=None, + **kwargs, + ): + """ + This custom __init__ method deals with optional passed-in kwargs such as + response_codes, override_response_schema, and custom_path_parameter_desc. + """ + + def fail(param, hint): + """ + A function to generate an error message if validation of one of the passed-in + kwargs fails. + """ + raise ValueError( + f"Invalid {param} kwarg passed into PcxAutoSchema at {self.created_at}; please " + f"check the meta docs in PennCourses/docs_settings.py for an explanation of " + f"the proper format of this kwarg. Hint:\n{hint}" + ) + + # Validate that each of the passed-in kwargs are nested dictionaries of the correct depth + for param_name, param_dict in [ + ("response_codes", response_codes), + ("override_request_schema", override_request_schema), + ("override_response_schema", override_response_schema), + ("custom_path_parameter_desc", custom_path_parameter_desc), + ("custom_parameters", custom_parameters), + ]: + if param_dict is not None: + if not isinstance(param_dict, dict): + fail(param_name, f"The {param_name} kwarg must be a dict.") + for dictionary in param_dict.values(): + if not isinstance(dictionary, dict): + fail(param_name, f"All values of the {param_name} dict must be dicts.") + for nested_dictionary in dictionary.values(): + if param_name == "custom_parameters": + if not isinstance(nested_dictionary, list): + fail( + param_name, + f"All values of the dict values of {param_name} must be lists.", + ) + continue + if not isinstance(nested_dictionary, dict): + fail( + param_name, + f"All values of the dict values of {param_name} must be dicts.", + ) + if param_name in [ + "override_request_schema", + "override_response_schema", + ]: + continue + for value in nested_dictionary.values(): + if isinstance(value, dict): + fail( + param_name, + f"Too deep nested dictionaries found in {param_name}.", + ) + + # Handle passed-in custom response codes + global cumulative_response_codes + if response_codes is None: + self.response_codes = dict() + else: + response_codes = deepcopy(response_codes) + for key, d in response_codes.items(): + response_codes[key] = {k.upper(): v for k, v in d.items()} + self.response_codes = response_codes + for_cumulative_response_codes = deepcopy(response_codes) + for dictionary in for_cumulative_response_codes.values(): + dictionary["traceback"] = self.created_at + cumulative_response_codes = { + **cumulative_response_codes, + **for_cumulative_response_codes, + } + + # Handle passed-in customized request schemas + global cumulative_override_request_schema 
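        # [Editor's illustrative note, not part of this patch] The same pattern repeats
        # below for the request/response schema overrides and the parameter descriptions:
        # each kwarg is deep-copied, its HTTP method keys are upper-cased, a "traceback"
        # recording the instantiation site is added per route, and the result is merged
        # into a module-level dict consumed later by the renderer. For example (the route
        # name and file path here are hypothetical):
        #     cumulative_response_codes == {
        #         "registrations-list": {
        #             "GET": {200: "Registrations listed successfully."},
        #             "traceback": "backend/alert/views.py:123",
        #         },
        #     }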
+ if override_request_schema is None: + self.override_request_schema = dict() + else: + override_request_schema = deepcopy(override_request_schema) + for key, d in override_request_schema.items(): + override_request_schema[key] = {k.upper(): v for k, v in d.items()} + self.override_request_schema = override_request_schema + for_cumulative_override_request_schema = deepcopy(override_request_schema) + for dictionary in for_cumulative_override_request_schema.values(): + dictionary["traceback"] = self.created_at + cumulative_override_request_schema = { + **cumulative_override_request_schema, + **for_cumulative_override_request_schema, + } + + # Handle passed-in customized response schemas + global cumulative_override_response_schema + if override_response_schema is None: + self.override_response_schema = dict() + else: + override_response_schema = deepcopy(override_response_schema) + for key, d in override_response_schema.items(): + override_response_schema[key] = {k.upper(): v for k, v in d.items()} + self.override_response_schema = override_response_schema + for_cumulative_override_response_schema = deepcopy(override_response_schema) + for dictionary in for_cumulative_override_response_schema.values(): + dictionary["traceback"] = self.created_at + cumulative_override_response_schema = { + **cumulative_override_response_schema, + **for_cumulative_override_response_schema, + } + + # Handle passed-in custom path parameter descriptions + global cumulative_cppd + if custom_path_parameter_desc is None: + self.custom_path_parameter_desc = dict() + else: + custom_path_parameter_desc = deepcopy(custom_path_parameter_desc) + for key, d in custom_path_parameter_desc.items(): + custom_path_parameter_desc[key] = {k.upper(): v for k, v in d.items()} + self.custom_path_parameter_desc = custom_path_parameter_desc + for_cumulative_cppd = deepcopy(custom_path_parameter_desc) + for dictionary in for_cumulative_cppd.values(): + dictionary["traceback"] = self.created_at + cumulative_cppd = {**cumulative_cppd, **for_cumulative_cppd} + + # Handle passed-in custom query parameter descriptions + global cumulative_cp + if custom_parameters is not None: + custom_parameters = deepcopy(custom_parameters) + for key, d in custom_parameters.items(): + custom_parameters[key] = {k.upper(): v for k, v in d.items()} + for dictionary in custom_parameters.values(): + dictionary["traceback"] = self.created_at + cumulative_cp = {**cumulative_cp, **custom_parameters} + + super().__init__(*args, **kwargs) + + # Overrides, uses overridden method + def get_description(self, path, method): + """ + This overridden method adds the method and path to the top of each route description + and a note if authentication is required (in addition to calling/using the + super method). Docstring of overridden method: + + Determine a path description. + + This will be based on the method docstring if one exists, + or else the class docstring. + """ + + # Add the method and path to the description so it is more readable. + desc = f"({method.upper()} `{path}`)\n\n" + # Add the description from docstrings (default functionality). + desc += super().get_description(path, method) + view = self.view + # Add a note if the path/method requires user authentication. + if IsAuthenticated in view.permission_classes: + desc += '\n\nUser authentication required.' 
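        # [Editor's illustrative note, not part of this patch] For an authenticated route,
        # the resulting description reads roughly:
        #     "(GET `/api/alert/registrations/`)\n\n<view or method docstring>\n\n
        #      User authentication required."
        # (the example path is the registrations route referenced elsewhere in this file;
        # the docstring portion depends on the view).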
+ return desc + + # Overrides, uses overridden method + # (https://www.django-rest-framework.org/api-guide/schemas/#map_serializer) + def map_serializer(self, serializer): + """ + This method adds property docstrings as field descriptions when appropriate + (to request/response schemas in the API docs), in addition + to calling the overridden map_serializer function. + For instance, in the response schema of + [PCA] Registration, List Registration (GET /api/alert/registrations/) + the description of the is_active property is inferred from the property docstring + by this method (before it was blank). + """ + + result = super().map_serializer(serializer) + properties = result["properties"] + model = None + if hasattr(serializer, "Meta") and hasattr(serializer.Meta, "model"): + model = serializer.Meta.model + + for field in serializer.fields.values(): + if isinstance(field, serializers.HiddenField): + continue + schema = properties[field.field_name] + if ( + "description" not in schema + and model is not None + and hasattr(model, field.field_name) + and isinstance(getattr(model, field.field_name), property) + and getattr(model, field.field_name).__doc__ + ): + schema["description"] = dedent(getattr(model, field.field_name).__doc__) + + return result + + # Helper method + def get_action(self, path, method): + """ + This method gets the action of the specified path/method (a more expressive name + for the method like "retrieve" or "list" for a GET method or "create" for a POST method). + The code is taken from the get_operation_id_base method in AutoSchema, + but is slightly modified to not use camelCase. + """ + method_name = getattr(self.view, "action", method.lower()) + if is_list_view(path, method, self.view): + action = "list" + elif method_name not in self.method_mapping: + action = method_name.lower() + else: + action = self.method_mapping[method.lower()] + return action + + # Helper method + def get_name(self, path, method, action=None): + """ + This method returns the name of the path/method. If the + user has specified a custom name using the custom_name parameter to __init__, that custom + name is used. + The code here is backported/modified from AutoSchema's get_operation_id_base method + due to how we generate tags (when "s" is added to the end of names for list actions in + get_operation_id_base, this makes it impossible to tag those list action routes together + with their non-list counterparts using their shared names as we like to do). + Besides not appending "s", this backported code is also modified to remove + the "viewset" suffix from the name if it exists. + All modified code is marked by a comment starting with "MODIFIED" + I am probably going to submit a PR to DRF to try to get them to improve their + default tag generation in this way (eventually). + If that ever gets merged and cut into a stable release, we will be able to + remove this method from our code. + Otherwise, keep an eye on DRF changes to see if the overridden get_operation_id_base + method is improved (and incorperate those changes here if possible). 
+ """ + + # Return the custom name if specified by the user + # First convert the functions in the tuple keys of custom_name to strings + custom_name_converted_keys = { + (get_url_by_name(route_name), m): v for (route_name, m), v in custom_name.items() + } + # Check if user has specified custom name + if (path, method) in custom_name_converted_keys.keys(): + return custom_name_converted_keys[(path, method)] + + # Get action if it is not passed in as a parameter + if action is None: + action = self.get_action(path, method) + + # NOTE: All below code is taken/modified from AutoSchema's get_operation_id_base method + + model = getattr(getattr(self.view, "queryset", None), "model", None) + + if self.operation_id_base is not None: + name = self.operation_id_base + + # Try to deduce the ID from the view's model + elif model is not None: + name = model.__name__ + + # Try with the serializer class name + elif self.get_serializer(path, method) is not None: + name = self.get_serializer(path, method).__class__.__name__ + if name.endswith("Serializer"): + name = name[:-10] + + # Fallback to the view name + else: + name = self.view.__class__.__name__ + if name.endswith("APIView"): + name = name[:-7] + elif name.endswith("View"): + name = name[:-4] + elif name.lower().endswith("viewset"): + # MODIFIED from AutoSchema's get_operation_id_base: remove viewset suffix + name = name[:-7] + + # Due to camel-casing of classes and `action` being lowercase, apply title in order to + # find if action truly comes at the end of the name + if name.endswith(action.title()): # ListView, UpdateAPIView, ThingDelete ... + name = name[: -len(action)] + + # MODIFIED from AutoSchema's get_operation_id_base: "s" is not appended + # even if action is list + + return name + + # Overrides, DOES NOT call overridden method + # https://www.django-rest-framework.org/api-guide/schemas/#get_operation_id_base + def get_operation_id_base(self, path, method, action): + """ + This method returns the base operation id (i.e. the name) of the path/method. It + uses get_name as a helper but makes the last character "s" if the action is "list". + See the docstring for the get_name method of this class for an explanation as + to why we do this. Docstring of overridden method: + + Compute the base part for operation ID from the model, serializer or view name. + """ + + name = self.get_name(path, method, action) + + if action == "list" and not name.endswith("s"): # listThings instead of listThing + name = pluralize_word(name) + + return name + + # Overrides, uses overridden method + # https://www.django-rest-framework.org/api-guide/schemas/#get_operation_id + def get_operation_id(self, path, method): + """ + This method gets the operation id for the given path/method. It first checks if + the user has specified a custom operation id for this path/method using the + custom_operation_id dict at the top of docs_settings.py, and if not it returns the result + of the overridden method (which is modified from default by the overriden + get_operation_id_base method above). Docstring of overridden method: + + Compute an operation ID from the view type and get_operation_id_base method. 
+ """ + + # Return the custom operation id if specified by the user + # First convert the functions in the tuple keys of custom_operation_id to strings + custom_operation_id_converted_keys = { + (get_url_by_name(route_name), m): v + for (route_name, m), v in custom_operation_id.items() + } + # Check if user has specified custom operation id + if (path, method) in custom_operation_id_converted_keys.keys(): + return custom_operation_id_converted_keys[(path, method)] + + return split_camel(super().get_operation_id(path, method)).title() + + # Overrides, DOES NOT call overridden method + # Keep an eye on DRF changes to see if the overridden get_tags method is improved + # https://www.django-rest-framework.org/api-guide/schemas/#get_tags + def get_tags(self, path, method): + """ + This method returns custom tags passed into the __init__ method, or otherwise + (if the tags argument was not included) adds a tag + of the form '[APP] route_name' as the default behavior. + Note that the abbreviation of the app in these [APP] brackets is set in the + subpath_abbreviations dict above. + """ + + # If user has specified tags, use them. + if self._tags: + return self._tags + + # Create the tag from the first part of the path (other than "api") and the name + name = self.get_name(path, method) + path_components = (path[1:] if path.startswith("/") else path).split("/") + subpath = path_components[1] if path_components[0] == "api" else path_components[0] + if subpath not in subpath_abbreviations.keys(): + raise ValueError( + f"You must add the the '{subpath}' subpath to the " + "subpath_abbreviations dict in backend/PennCourses/docs_settings.py. " + f"This subpath was inferred from the path '{path}'." + ) + return [f"[{subpath_abbreviations[subpath]}] {name}"] + + # Overrides, uses overridden method + def get_path_parameters(self, path, method): + """ + This method returns a list of parameters from templated path variables. It improves + the inference of path parameter description from the overridden method by utilizing + property docstrings. If a custom path parameter description is specified using the + custom_path_parameter_desc kwarg in __init__, that is used for the description. + Docstring of overridden method: + + Return a list of parameters from templated path variables. 
+ """ + + parameters = super().get_path_parameters(path, method) + + model = getattr(getattr(self.view, "queryset", None), "model", None) + for parameter in parameters: + variable = parameter["name"] + description = parameter["description"] + + # Use property docstrings when possible + if model is not None: + try: + model_field = model._meta.get_field(variable) + except Exception: + model_field = None + if ( + model_field is None + and parameter["name"] in model.__dict__.keys() + and isinstance(model.__dict__[variable], property) + ): + doc = getdoc(model.__dict__[variable]) + description = "" if doc is None else doc + + custom_path_parameter_desc = { + get_url_by_name(route_name): value + for route_name, value in self.custom_path_parameter_desc.items() + } + + # Add custom path parameter description if relevant + if ( + custom_path_parameter_desc + and path in custom_path_parameter_desc.keys() + and method.upper() in custom_path_parameter_desc[path].keys() + and variable in custom_path_parameter_desc[path][method].keys() + and custom_path_parameter_desc[path][method][variable] + ): + description = custom_path_parameter_desc[path][method][variable] + + parameter["description"] = description + + return parameters + + # Overrides, uses overridden method + def get_request_body(self, path, method): + """ + This method overrides the get_request_body method from AutoSchema, setting + a custom request schema if specified via the override_request_schema init kwarg. + """ + request_body = super().get_request_body(path, method) + + override_request_schema = { + get_url_by_name(route_name): value + for route_name, value in self.override_request_schema.items() + } + + if path in override_request_schema and method in override_request_schema[path]: + for ct in request_body["content"]: + request_body["content"][ct]["schema"] = override_request_schema[path][method] + + return request_body + + # Overrides, uses overridden method + def get_responses(self, path, method): + """ + This method describes the responses for this path/method. It makes certain + improvements over the overridden method in terms of adding useful information + (like 403 responses). It also enforces the user's choice as to whether to include + a response schema or alternatively just display the response (for path/method/status_code). + Custom response descriptions specified by the user in the response_codes + kwarg to the __init__ method are also added. + Finally, custom schemas specified by the user in the override_response_schema kwarg to the + __init__ method are added. + """ + + responses = super().get_responses(path, method) + + # Automatically add 403 response if authentication is required + if IsAuthenticated in self.view.permission_classes and 403 not in responses: + responses = { + **responses, + 403: {"description": "Access denied (missing or improper authentication)."}, + } + + # Get "default" schema content from response + # This code is from an older version of the overridden method which + # did not use JSON refs (JSON refs are not appropriate for our use-case since + # we change certain response schemas in ways that we don't want to affect + # request schemas, etc). 
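        # [Editor's illustrative note, not part of this patch] Assuming the default
        # "application/json" media type, the block below typically produces:
        #     default_schema_content == {
        #         "application/json": {"schema": <item schema, or a paginated
        #                                          array schema for list views>}
        #     }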
+ serializer = self.get_response_serializer(path, method) + if not isinstance(serializer, serializers.Serializer): + item_schema = {} + else: + item_schema = self.get_reference(serializer) + if is_list_view(path, method, self.view): + response_schema = { + "type": "array", + "items": item_schema, + } + paginator = self.get_paginator() + if paginator: + response_schema = paginator.get_paginated_response_schema(response_schema) + else: + response_schema = item_schema + default_schema_content = { + content_type: {"schema": deepcopy(response_schema)} + for content_type in self.response_media_types + } + + response_codes = { + get_url_by_name(route_name): value for route_name, value in self.response_codes.items() + } + + # Change all status codes to integers + responses = {int(key): value for key, value in responses.items()} + # Add status codes and custom descriptions from custom response_codes dict + if path in response_codes and method in response_codes[path]: + for status_code in response_codes[path][method]: + status_code = int(status_code) + custom_description = response_codes[path][method][status_code] + include_content = "[DESCRIBE_RESPONSE_SCHEMA]" in custom_description + custom_description = custom_description.replace("[DESCRIBE_RESPONSE_SCHEMA]", "") + if status_code in responses.keys(): + if "[UNDOCUMENTED]" in custom_description: + del responses[status_code] + else: + responses[status_code]["description"] = custom_description + if not include_content and "content" in responses[status_code]: + del responses[status_code]["content"] + elif "[UNDOCUMENTED]" not in custom_description: + responses[status_code] = {"description": custom_description} + if include_content: + responses[status_code]["content"] = deepcopy(default_schema_content) + + override_response_schema = { + get_url_by_name(route_name): value + for route_name, value in self.override_response_schema.items() + } + + if path in override_response_schema and method in override_response_schema[path]: + for status_code in override_response_schema[path][method]: + if status_code not in responses.keys(): + responses[status_code] = { + "description": "", + "content": deepcopy(default_schema_content), + } + for status_code in responses.keys(): + if status_code in override_response_schema[path][method]: + custom_schema = override_response_schema[path][method][status_code] + if "content" not in responses[status_code]: + responses[status_code]["content"] = dict() + for ct in self.request_media_types: + responses[status_code]["content"][ct] = custom_schema + else: + for response_schema in responses[status_code]["content"].values(): + response_schema["schema"] = custom_schema + return responses diff --git a/backend/PennCourses/settings/base.py b/backend/PennCourses/settings/base.py index 06163cf13..8a9e643cb 100644 --- a/backend/PennCourses/settings/base.py +++ b/backend/PennCourses/settings/base.py @@ -1,3 +1,4 @@ +<<<<<<< HEAD """ Django settings for PennCourses project. @@ -228,3 +229,235 @@ ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES = ( 200 # Aim for at least 200 demand distribution estimates over the course of a semester ) +======= +""" +Django settings for PennCourses project. + +Generated by 'django-admin startproject' using Django 2.2. 
+ +For more information on this file, see +https://docs.djangoproject.com/en/2.2/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/2.2/ref/settings/ +""" + +import os + +import boto3 +import dj_database_url + + +DOMAINS = os.environ.get("DOMAINS", "example.com").split(",") + +# Build paths inside the project like this: os.path.join(BASE_DIR, ...) +BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! +SECRET_KEY = os.environ.get("SECRET_KEY", "&3!f%)t!o$+dwu3(jao7ipi2f4(k-2ua7@28+^yge-cn7c!_14") + +# SECURITY WARNING: don't run with debug turned on in production! +DEBUG = True + +ALLOWED_HOSTS = ["*"] + + +# Application definition + +INSTALLED_APPS = [ + "django.contrib.admin", + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.sessions", + "django.contrib.messages", + "django.contrib.staticfiles", + "rest_framework", + "corsheaders", + "shortener.apps.ShortenerConfig", + "accounts.apps.AccountsConfig", + "options.apps.OptionsConfig", + "django.contrib.admindocs", + "django_extensions", + "alert", + "courses", + "plan", + "review", +] + +MIDDLEWARE = [ + "corsheaders.middleware.CorsMiddleware", + "django.middleware.security.SecurityMiddleware", + "django.contrib.sessions.middleware.SessionMiddleware", + "django.middleware.common.CommonMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", +] + +ROOT_URLCONF = os.environ.get("ROOT_URLCONF", "PennCourses.urls") + +TEMPLATES = [ + { + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": ["PennCourses/templates"], + "APP_DIRS": True, + "OPTIONS": { + "context_processors": [ + "django.template.context_processors.debug", + "django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.contrib.messages.context_processors.messages", + ], + }, + }, +] + +WSGI_APPLICATION = "PennCourses.wsgi.application" + + +# Database +# https://docs.djangoproject.com/en/2.2/ref/settings/#databases + +DATABASES = { + "default": dj_database_url.config( + # this is overriden by the DATABASE_URL env var + default="postgres://penn-courses:postgres@localhost:5432/postgres" + ) +} + +DEFAULT_AUTO_FIELD = "django.db.models.AutoField" +# Explicitly setting DEFAULT_AUTO_FIELD is necessary to silence warnings after Django 3.2 +# We don't need the range of BigAutoField for auto fields so we can stick with the old behavior + + +# Password validation +# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", + }, + { + "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", + }, + { + "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", + }, + { + "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", + }, +] + + +# Authentication Backends + +AUTHENTICATION_BACKENDS = [ + "accounts.backends.LabsUserBackend", + "django.contrib.auth.backends.ModelBackend", +] + + +# Internationalization +# 
https://docs.djangoproject.com/en/2.2/topics/i18n/ + +LANGUAGE_CODE = "en-us" + +TIME_ZONE = "America/New_York" + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + + +# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/2.2/howto/static-files/ + +STATIC_URL = "/assets/" +STATIC_ROOT = os.path.join(BASE_DIR, "static") + + +# DLA Settings + +PLATFORM_ACCOUNTS = { + "REDIRECT_URI": os.environ.get("LABS_REDIRECT_URI", "http://localhost:8000/accounts/callback/"), + "CLIENT_ID": "clientid", + "CLIENT_SECRET": "supersecretclientsecret", + "PLATFORM_URL": "https://platform-dev.pennlabs.org", + "CUSTOM_ADMIN": False, +} + + +# Penn OpenData API +OPEN_DATA_CLIENT_ID = os.environ.get("OPEN_DATA_CLIENT_ID", "") +OPEN_DATA_OIDC_SECRET = os.environ.get("OPEN_DATA_OIDC_SECRET", "") +OPEN_DATA_TOKEN_URL = ( + "https://sso.apps.k8s.upenn.edu/auth/realms/master/protocol/openid-connect/token" +) +OPEN_DATA_API_BASE = "https://3scale-public-prod-open-data.apps.k8s.upenn.edu/api" + +# Penn OpenData Course Status Webhook Auth +WEBHOOK_USERNAME = os.environ.get("WEBHOOK_USERNAME", "webhook") +WEBHOOK_PASSWORD = os.environ.get("WEBHOOK_PASSWORD", "password") + +# Email Configuration +SMTP_HOST = os.environ.get("SMTP_HOST", "") +SMTP_PORT = os.environ.get("SMTP_PORT", 587) +SMTP_USERNAME = os.environ.get("SMTP_USERNAME", "") +SMTP_PASSWORD = os.environ.get("SMTP_PASSWORD", "") + +# Twilio Credentials +TWILIO_SID = os.environ.get("TWILIO_SID", "") +TWILIO_AUTH_TOKEN = os.environ.get("TWILIO_TOKEN", "") +TWILIO_NUMBER = os.environ.get("TWILIO_NUMBER", "+12153984277") + +# Redis +REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/1") + +# Celery +MESSAGE_BROKER_URL = REDIS_URL + +# Django REST Framework +REST_FRAMEWORK = { + "COERCE_DECIMAL_TO_STRING": False, + "DEFAULT_SCHEMA_CLASS": "PennCourses.docs_settings.PcxAutoSchema", + "DEFAULT_AUTHENTICATION_CLASSES": [ + "rest_framework.authentication.SessionAuthentication", + "rest_framework.authentication.BasicAuthentication", + "accounts.authentication.PlatformAuthentication", + ], +} + +STATS_WEBHOOK = os.environ.get("STATS_WEBHOOK", None) + +S3_client = boto3.client("s3") +S3_resource = boto3.resource("s3") + +# NGSS course code crosswalk stored in S3 +XWALK_S3_BUCKET = "penn.courses" +XWALK_SRC = "xwalk_csre_number.txt" + +# Registration Metrics Settings + +STATUS_UPDATES_RECORDED_SINCE = "2019C" # How far back does our valid Status Update data span? +PCA_REGISTRATIONS_RECORDED_SINCE = "2020A" # How far back does our valid Registration data span? +WAITLIST_DEPARTMENT_CODES = [] # Which departments (referenced by code) have a waitlist system +# or require permits for registration during the add/drop period? +PRE_NGSS_PERMIT_REQ_RESTRICTION_CODES = [ # TODO: add post-NGSS list + "PCG", + "PAD", + "PCW", + "PCD", + "PLC", + "PIN", + "PDP", +] # Which pre-NGSS restriction codes indicate registration was handled by permit issuance? 
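# [Editor's illustrative sketch, not part of this patch] Because REST_FRAMEWORK above sets
# DEFAULT_SCHEMA_CLASS to PennCourses.docs_settings.PcxAutoSchema, DRF views pick up the
# custom schema automatically, and a view can instantiate PcxAutoSchema directly to
# customize its docs. The view class, route name, and descriptions below are hypothetical,
# shown only to illustrate the expected kwarg shapes (queryset/serializer omitted).
from rest_framework import generics

from PennCourses.docs_settings import PcxAutoSchema


class ExampleListView(generics.ListAPIView):
    """
    List example objects (this class docstring becomes the route description).
    """

    schema = PcxAutoSchema(
        response_codes={
            "example-list": {  # keyed by the route's name= from urls.py (hypothetical here)
                "GET": {
                    200: "[DESCRIBE_RESPONSE_SCHEMA]Examples listed successfully.",
                    403: "Authentication credentials were not provided.",
                }
            }
        },
    )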
+ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES = ( + 200 # Aim for at least 200 demand distribution estimates over the course of a semester +) +>>>>>>> 32c7da33 (Fixed linting) diff --git a/backend/PennCourses/settings/ci.py b/backend/PennCourses/settings/ci.py index 204cd063b..cc6a7eb50 100644 --- a/backend/PennCourses/settings/ci.py +++ b/backend/PennCourses/settings/ci.py @@ -1,14 +1,14 @@ -from PennCourses.settings.base import * # noqa: F401, F403 - - -PCA_URL = "http://localhost:8000" - -TEST_RUNNER = "xmlrunner.extra.djangotestrunner.XMLTestRunner" -TEST_OUTPUT_VERBOSE = 2 -TEST_OUTPUT_DIR = "test-results" - -CACHES = { - "default": { - "BACKEND": "django.core.cache.backends.dummy.DummyCache", - } -} +from PennCourses.settings.base import * # noqa: F401, F403 + + +PCA_URL = "http://localhost:8000" + +TEST_RUNNER = "xmlrunner.extra.djangotestrunner.XMLTestRunner" +TEST_OUTPUT_VERBOSE = 2 +TEST_OUTPUT_DIR = "test-results" + +CACHES = { + "default": { + "BACKEND": "django.core.cache.backends.dummy.DummyCache", + } +} diff --git a/backend/PennCourses/settings/development.py b/backend/PennCourses/settings/development.py index 82cd0747f..20cec98b3 100644 --- a/backend/PennCourses/settings/development.py +++ b/backend/PennCourses/settings/development.py @@ -1,20 +1,20 @@ -import os - -from PennCourses.settings.base import * # noqa: F401, F403 - - -PCA_URL = "http://localhost:8000" - -os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1" - -INSTALLED_APPS += ["debug_toolbar"] -MIDDLEWARE = ["debug_toolbar.middleware.DebugToolbarMiddleware"] + MIDDLEWARE -INTERNAL_IPS = ["127.0.0.1"] - -CSRF_TRUSTED_ORIGINS = ["http://localhost:3000"] - -CACHES = { - "default": { - "BACKEND": "django.core.cache.backends.dummy.DummyCache", - } -} +import os + +from PennCourses.settings.base import * # noqa: F401, F403 + + +PCA_URL = "http://localhost:8000" + +os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1" + +INSTALLED_APPS += ["debug_toolbar"] +MIDDLEWARE = ["debug_toolbar.middleware.DebugToolbarMiddleware"] + MIDDLEWARE +INTERNAL_IPS = ["127.0.0.1"] + +CSRF_TRUSTED_ORIGINS = ["http://localhost:3000"] + +CACHES = { + "default": { + "BACKEND": "django.core.cache.backends.dummy.DummyCache", + } +} diff --git a/backend/PennCourses/settings/production.py b/backend/PennCourses/settings/production.py index 922bade5c..11f87ce7a 100644 --- a/backend/PennCourses/settings/production.py +++ b/backend/PennCourses/settings/production.py @@ -1,38 +1,38 @@ -import os - -import sentry_sdk -from sentry_sdk.integrations.celery import CeleryIntegration -from sentry_sdk.integrations.django import DjangoIntegration - -from PennCourses.settings.base import * # noqa: F401, F403 - - -DEBUG = False - -# Honor the 'X-Forwarded-Proto' header for request.is_secure() -SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") - -# Allow production host headers -ALLOWED_HOSTS = DOMAINS - -# Sentry settings -SENTRY_URL = os.environ.get("SENTRY_URL", "") -sentry_sdk.init(dsn=SENTRY_URL, integrations=[CeleryIntegration(), DjangoIntegration()]) - -# DLA settings -PLATFORM_ACCOUNTS = {"ADMIN_PERMISSION": "penn_courses_admin"} - -# TODO: This is a BAD HACK. 
We shouldn't hardcode the base URL into the shortener -PCA_URL = "https://penncoursealert.com" - -CACHES = { - "default": { - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": REDIS_URL, - "OPTIONS": { - "CLIENT_CLASS": "django_redis.client.DefaultClient", - }, - } -} - -MOBILE_NOTIFICATION_SECRET = os.environ.get("MOBILE_NOTIFICATION_SECRET", "") +import os + +import sentry_sdk +from sentry_sdk.integrations.celery import CeleryIntegration +from sentry_sdk.integrations.django import DjangoIntegration + +from PennCourses.settings.base import * # noqa: F401, F403 + + +DEBUG = False + +# Honor the 'X-Forwarded-Proto' header for request.is_secure() +SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") + +# Allow production host headers +ALLOWED_HOSTS = DOMAINS + +# Sentry settings +SENTRY_URL = os.environ.get("SENTRY_URL", "") +sentry_sdk.init(dsn=SENTRY_URL, integrations=[CeleryIntegration(), DjangoIntegration()]) + +# DLA settings +PLATFORM_ACCOUNTS = {"ADMIN_PERMISSION": "penn_courses_admin"} + +# TODO: This is a BAD HACK. We shouldn't hardcode the base URL into the shortener +PCA_URL = "https://penncoursealert.com" + +CACHES = { + "default": { + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": REDIS_URL, + "OPTIONS": { + "CLIENT_CLASS": "django_redis.client.DefaultClient", + }, + } +} + +MOBILE_NOTIFICATION_SECRET = os.environ.get("MOBILE_NOTIFICATION_SECRET", "") diff --git a/backend/PennCourses/templates/redoc.html b/backend/PennCourses/templates/redoc.html index cbd389b22..728c72bb5 100644 --- a/backend/PennCourses/templates/redoc.html +++ b/backend/PennCourses/templates/redoc.html @@ -1,21 +1,21 @@ - - - - Penn Courses API Documentation - - - - - - - - - - - + + + + Penn Courses API Documentation + + + + + + + + + + + \ No newline at end of file diff --git a/backend/PennCourses/templates/topic_courses_admin.html b/backend/PennCourses/templates/topic_courses_admin.html index ad304630c..91e6d88a3 100644 --- a/backend/PennCourses/templates/topic_courses_admin.html +++ b/backend/PennCourses/templates/topic_courses_admin.html @@ -1,18 +1,18 @@ - - - - - - - - - {% for c in courses %} - - - - - - - - {% endfor %} -
 Full Code  Semester  Title  Description  Prerequisites
{{ c.full_code }} {{ c.semester }} {{ c.title }} {{ c.description }} {{ c.prerequisites }}
+ + + + + + + + + {% for c in courses %} + + + + + + + + {% endfor %} +
 Full Code  Semester  Title  Description  Prerequisites
{{ c.full_code }} {{ c.semester }} {{ c.title }} {{ c.description }} {{ c.prerequisites }}
diff --git a/backend/PennCourses/urls.py b/backend/PennCourses/urls.py index dc15a5e25..580681f63 100644 --- a/backend/PennCourses/urls.py +++ b/backend/PennCourses/urls.py @@ -1,49 +1,49 @@ -from django.conf import settings -from django.contrib import admin -from django.urls import include, path -from django.views.generic import TemplateView -from rest_framework.schemas import get_schema_view - -from alert.views import accept_webhook -from courses.views import UserView -from PennCourses.docs_settings import JSONOpenAPICustomTagGroupsRenderer, openapi_description - - -api_urlpatterns = [ - path("review/", include("review.urls")), - path("plan/", include("plan.urls")), - path("alert/", include("alert.urls")), - path("base/", include("courses.urls")), - path("options/", include("options.urls", namespace="options")), - path( - "openapi/", - get_schema_view( - title="Penn Courses API Documentation", - public=True, - description=openapi_description, - renderer_classes=[JSONOpenAPICustomTagGroupsRenderer], - ), - name="openapi-schema", - ), - path( - "documentation/", - TemplateView.as_view( - template_name="redoc.html", extra_context={"schema_url": "openapi-schema"} - ), - name="documentation", - ), -] - -urlpatterns = [ - path("admin/doc/", include("django.contrib.admindocs.urls")), - path("admin/", admin.site.urls), - path("accounts/me/", UserView.as_view(), name="user-view"), - path("accounts/", include("accounts.urls", namespace="accounts")), - path("api/", include(api_urlpatterns)), - path("webhook", accept_webhook, name="webhook"), -] - -if settings.DEBUG: - import debug_toolbar - - urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns +from django.conf import settings +from django.contrib import admin +from django.urls import include, path +from django.views.generic import TemplateView +from rest_framework.schemas import get_schema_view + +from alert.views import accept_webhook +from courses.views import UserView +from PennCourses.docs_settings import JSONOpenAPICustomTagGroupsRenderer, openapi_description + + +api_urlpatterns = [ + path("review/", include("review.urls")), + path("plan/", include("plan.urls")), + path("alert/", include("alert.urls")), + path("base/", include("courses.urls")), + path("options/", include("options.urls", namespace="options")), + path( + "openapi/", + get_schema_view( + title="Penn Courses API Documentation", + public=True, + description=openapi_description, + renderer_classes=[JSONOpenAPICustomTagGroupsRenderer], + ), + name="openapi-schema", + ), + path( + "documentation/", + TemplateView.as_view( + template_name="redoc.html", extra_context={"schema_url": "openapi-schema"} + ), + name="documentation", + ), +] + +urlpatterns = [ + path("admin/doc/", include("django.contrib.admindocs.urls")), + path("admin/", admin.site.urls), + path("accounts/me/", UserView.as_view(), name="user-view"), + path("accounts/", include("accounts.urls", namespace="accounts")), + path("api/", include(api_urlpatterns)), + path("webhook", accept_webhook, name="webhook"), +] + +if settings.DEBUG: + import debug_toolbar + + urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns diff --git a/backend/PennCourses/wsgi.py b/backend/PennCourses/wsgi.py index d3036ef58..90248a778 100644 --- a/backend/PennCourses/wsgi.py +++ b/backend/PennCourses/wsgi.py @@ -1,17 +1,17 @@ -""" -WSGI config for PennCourses project. - -It exposes the WSGI callable as a module-level variable named ``application``. 
- -For more information on this file, see -https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ -""" - -import os - -from django.core.wsgi import get_wsgi_application - - -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PennCourses.settings.production") - -application = get_wsgi_application() +""" +WSGI config for PennCourses project. + +It exposes the WSGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ +""" + +import os + +from django.core.wsgi import get_wsgi_application + + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PennCourses.settings.production") + +application = get_wsgi_application() diff --git a/backend/Pipfile b/backend/Pipfile index b1e5ca360..a0b7e8c43 100644 --- a/backend/Pipfile +++ b/backend/Pipfile @@ -1,69 +1,69 @@ -[[source]] -url = "https://pypi.org/simple" -verify_ssl = true -name = "pypi" - -[dev-packages] -coverage = "*" -unittest-xml-reporting = "*" -flake8 = "*" -flake8-isort = "*" -isort = "*" -flake8-quotes = "*" -pytest = "*" -pytest-django = "*" -django-debug-toolbar = "*" -black = "==22.3.0" - -[packages] -click = "==8.0.4" -django = ">=3.2" -psycopg2 = "*" -requests = "*" -twilio = "*" -channels = "<3" -channels-redis = "*" -phonenumbers = "*" -celery = "<5" -redis = ">=3.4.1" -sentry-sdk = "*" -dj-database-url = "*" -djangorestframework = "*" -unidecode = "*" -coreapi = "*" -django-filter = "*" -shortener = "*" -django-labs-accounts = "*" -"beautifulsoup4" = "*" -ipython = "*" -django-auto-prefetching = "*" -django-cors-headers = "*" -django-runtime-options = "*" -pyyaml = "*" -uritemplate = "*" -uwsgi = {version = "*", sys_platform = "== 'linux'"} -uvloop = {version = "*", sys_platform = "== 'linux'"} -gunicorn = "*" -uvicorn = {extras = ["standard"], version = "*"} -httptools = "*" -ddt = "*" -lark-parser = "*" -tqdm = "*" -"boto3" = "*" -nose = "*" -importlib-metadata = "*" -django-redis = "*" -jsonref = "*" -tblib = "*" -django-extensions = "*" -numpy = "*" -scikit-learn = "*" -pandas = "*" -python-dateutil = "*" -docutils = "*" -ics = "*" -psycopg2-binary = "*" -drf-nested-routers = "*" - -[requires] -python_full_version = "3.10" +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[dev-packages] +coverage = "*" +unittest-xml-reporting = "*" +flake8 = "*" +flake8-isort = "*" +isort = "*" +flake8-quotes = "*" +pytest = "*" +pytest-django = "*" +django-debug-toolbar = "*" +black = "==22.3.0" + +[packages] +click = "==8.0.4" +django = ">=3.2" +psycopg2 = "*" +requests = "*" +twilio = "*" +channels = "<3" +channels-redis = "*" +phonenumbers = "*" +celery = "<5" +redis = ">=3.4.1" +sentry-sdk = "*" +dj-database-url = "*" +djangorestframework = "*" +unidecode = "*" +coreapi = "*" +django-filter = "*" +shortener = "*" +django-labs-accounts = "*" +"beautifulsoup4" = "*" +ipython = "*" +django-auto-prefetching = "*" +django-cors-headers = "*" +django-runtime-options = "*" +pyyaml = "*" +uritemplate = "*" +uwsgi = {version = "*", sys_platform = "== 'linux'"} +uvloop = {version = "*", sys_platform = "== 'linux'"} +gunicorn = "*" +uvicorn = {extras = ["standard"], version = "*"} +httptools = "*" +ddt = "*" +lark-parser = "*" +tqdm = "*" +"boto3" = "*" +nose = "*" +importlib-metadata = "*" +django-redis = "*" +jsonref = "*" +tblib = "*" +django-extensions = "*" +numpy = "*" +scikit-learn = "*" +pandas = "*" +python-dateutil = "*" +docutils = "*" +ics = "*" +psycopg2-binary = "*" +drf-nested-routers = "*" + 
+[requires] +python_full_version = "3.10" diff --git a/backend/alert/admin.py b/backend/alert/admin.py index 68016e732..d99cec049 100644 --- a/backend/alert/admin.py +++ b/backend/alert/admin.py @@ -1,66 +1,66 @@ -from django.contrib import admin -from django.urls import reverse -from django.utils.html import format_html - -from alert.models import AddDropPeriod, PcaDemandDistributionEstimate, Registration - - -class RegistrationAdmin(admin.ModelAdmin): - readonly_fields = ("head_registration_id", "section_link", "resubscribed_from", "created_at") - search_fields = ( - "email", - "phone", - "section__full_code", - "section__course__department__code", - ) - autocomplete_fields = ("section",) - - exclude = ["head_registration"] - - ordering = ("-created_at",) - - list_filter = [ - "notification_sent", - "section__course__semester", - ("resubscribed_to", admin.EmptyFieldListFilter), - ] - - list_select_related = ( - "section", - "section__course", - "section__course__department", - ) - - def section_link(self, instance): - link = reverse("admin:courses_section_change", args=[instance.section.id]) - return format_html('{}', link, instance.section.__str__()) - - def head_registration_id(self, instance): - link = reverse("admin:alert_registration_change", args=[instance.head_registration_id]) - return format_html('{}', link, str(instance.head_registration_id)) - - -class PcaDemandDistributionEstimateAdmin(admin.ModelAdmin): - search_fields = ("created_at",) - - autocomplete_fields = ("highest_demand_section", "lowest_demand_section") - - list_filter = ["semester", "created_at"] - - def has_change_permission(self, request, obj=None): - """ - Don't allow PcaDemandDistributionEstimate objects to be changed in the Admin console - (although they can be deleted). 
- """ - return False - - -class AddDropPeriodAdmin(admin.ModelAdmin): - search_fields = ("semester",) - - list_filter = ["semester"] - - -admin.site.register(Registration, RegistrationAdmin) -admin.site.register(PcaDemandDistributionEstimate, PcaDemandDistributionEstimateAdmin) -admin.site.register(AddDropPeriod, AddDropPeriodAdmin) +from django.contrib import admin +from django.urls import reverse +from django.utils.html import format_html + +from alert.models import AddDropPeriod, PcaDemandDistributionEstimate, Registration + + +class RegistrationAdmin(admin.ModelAdmin): + readonly_fields = ("head_registration_id", "section_link", "resubscribed_from", "created_at") + search_fields = ( + "email", + "phone", + "section__full_code", + "section__course__department__code", + ) + autocomplete_fields = ("section",) + + exclude = ["head_registration"] + + ordering = ("-created_at",) + + list_filter = [ + "notification_sent", + "section__course__semester", + ("resubscribed_to", admin.EmptyFieldListFilter), + ] + + list_select_related = ( + "section", + "section__course", + "section__course__department", + ) + + def section_link(self, instance): + link = reverse("admin:courses_section_change", args=[instance.section.id]) + return format_html('{}', link, instance.section.__str__()) + + def head_registration_id(self, instance): + link = reverse("admin:alert_registration_change", args=[instance.head_registration_id]) + return format_html('{}', link, str(instance.head_registration_id)) + + +class PcaDemandDistributionEstimateAdmin(admin.ModelAdmin): + search_fields = ("created_at",) + + autocomplete_fields = ("highest_demand_section", "lowest_demand_section") + + list_filter = ["semester", "created_at"] + + def has_change_permission(self, request, obj=None): + """ + Don't allow PcaDemandDistributionEstimate objects to be changed in the Admin console + (although they can be deleted). 
+ """ + return False + + +class AddDropPeriodAdmin(admin.ModelAdmin): + search_fields = ("semester",) + + list_filter = ["semester"] + + +admin.site.register(Registration, RegistrationAdmin) +admin.site.register(PcaDemandDistributionEstimate, PcaDemandDistributionEstimateAdmin) +admin.site.register(AddDropPeriod, AddDropPeriodAdmin) diff --git a/backend/alert/alerts.py b/backend/alert/alerts.py index 8ec2d27dd..d98f58af6 100644 --- a/backend/alert/alerts.py +++ b/backend/alert/alerts.py @@ -1,175 +1,175 @@ -import logging -from abc import ABC, abstractmethod -from email.mime.text import MIMEText -from smtplib import SMTP, SMTPRecipientsRefused - -import requests -from django.conf import settings -from django.template import loader -from twilio.base.exceptions import TwilioRestException -from twilio.rest import Client - -from PennCourses.settings.production import MOBILE_NOTIFICATION_SECRET - - -logger = logging.getLogger(__name__) - - -def send_email(from_, to, subject, html): - msg = MIMEText(html, "html") - msg["Subject"] = subject - msg["From"] = from_ - msg["To"] = to - - with SMTP(settings.SMTP_HOST, settings.SMTP_PORT) as server: - server.ehlo() - server.starttls() - server.ehlo() - server.login(settings.SMTP_USERNAME, settings.SMTP_PASSWORD) - server.send_message(msg) - return True - - -def send_text(to, text): - try: - client = Client(settings.TWILIO_SID, settings.TWILIO_AUTH_TOKEN) - msg = client.messages.create(to=to, from_=settings.TWILIO_NUMBER, body=text) - if msg.sid is not None: - return True - except TwilioRestException: - logger.exception("Text Error") - return None - - -class Alert(ABC): - def __init__(self, template, reg, close_template=None): - t = loader.get_template(template) - self.text = t.render( - { - "course": reg.section.full_code, - "brand": "Penn Course Alert", - "auto_resubscribe": reg.auto_resubscribe, - } - ) - self.close_text = None - if close_template: - t = loader.get_template(close_template) - self.close_text = t.render( - { - "course": reg.section.full_code, - "brand": "Penn Course Alert", - "auto_resubscribe": reg.auto_resubscribe, - } - ) - self.registration = reg - - @abstractmethod - def send_alert(self, close_notification=False): - pass - - -class Email(Alert): - def __init__(self, reg): - super().__init__("alert/email_alert.html", reg, "alert/email_alert_close.html") - - def send_alert(self, close_notification=False): - """ - Returns False if notification was not sent intentionally, - and None if notification was attempted to be sent but an error occurred. - """ - if self.registration.user is not None and self.registration.user.profile.email is not None: - email = self.registration.user.profile.email - elif self.registration.email is not None: - email = self.registration.email - else: - return False - - try: - if close_notification: - if not self.close_text: - # This should be unreachable - return None - alert_subject = f"{self.registration.section.full_code} has closed." - alert_text = self.close_text - else: - alert_subject = f"{self.registration.section.full_code} is now open!" 
- alert_text = self.text - return send_email( - from_="Penn Course Alert ", - to=email, - subject=alert_subject, - html=alert_text, - ) - except SMTPRecipientsRefused: - logger.exception("Email Error") - return None - - -class Text(Alert): - def __init__(self, reg): - super().__init__("alert/text_alert.txt", reg) - - def send_alert(self, close_notification=False): - """ - Returns False if notification was not sent intentionally, - and None if notification was attempted to be sent but an error occurred. - """ - if close_notification: - # Do not send close notifications by text - return False - if self.registration.user is not None and self.registration.user.profile.push_notifications: - # Do not send text if push_notifications is enabled - return False - if self.registration.user is not None and self.registration.user.profile.phone is not None: - phone_number = self.registration.user.profile.phone - elif self.registration.phone is not None: - phone_number = self.registration.phone - else: - return False - - alert_text = self.text - return send_text(phone_number, alert_text) - - -class PushNotification(Alert): - def __init__(self, reg): - super().__init__("alert/push_notif.txt", reg, close_template="alert/push_notif_close.txt") - - def send_alert(self, close_notification=False): - """ - Returns False if notification was not sent intentionally, - and None if notification was attempted to be sent but an error occurred. - """ - if self.registration.user is not None and self.registration.user.profile.push_notifications: - # Only send push notification if push_notifications is enabled - pennkey = self.registration.user.username - bearer_token = MOBILE_NOTIFICATION_SECRET - if close_notification: - if not self.close_text: - # This should be unreachable - return None - alert_title = f"{self.registration.section.full_code} just closed." - alert_body = self.close_text - else: - alert_title = f"{self.registration.section.full_code} is now open!" 
- alert_body = self.text - try: - response = requests.post( - "https:/api.pennlabs.org/notifications/send/internal", - data={ - "title": alert_title, - "body": alert_body, - "pennkey": pennkey, - }, - headers={"Authorization": f"Bearer {bearer_token}"}, - ) - if response.status_code != 200: - logger.exception( - f"Push Notification {response.status_code} Response: {response.content}" - ) - return None - except requests.exceptions.RequestException as e: - logger.exception(f"Push Notification Request Error: {e}") - return None - return True - return False +import logging +from abc import ABC, abstractmethod +from email.mime.text import MIMEText +from smtplib import SMTP, SMTPRecipientsRefused + +import requests +from django.conf import settings +from django.template import loader +from twilio.base.exceptions import TwilioRestException +from twilio.rest import Client + +from PennCourses.settings.production import MOBILE_NOTIFICATION_SECRET + + +logger = logging.getLogger(__name__) + + +def send_email(from_, to, subject, html): + msg = MIMEText(html, "html") + msg["Subject"] = subject + msg["From"] = from_ + msg["To"] = to + + with SMTP(settings.SMTP_HOST, settings.SMTP_PORT) as server: + server.ehlo() + server.starttls() + server.ehlo() + server.login(settings.SMTP_USERNAME, settings.SMTP_PASSWORD) + server.send_message(msg) + return True + + +def send_text(to, text): + try: + client = Client(settings.TWILIO_SID, settings.TWILIO_AUTH_TOKEN) + msg = client.messages.create(to=to, from_=settings.TWILIO_NUMBER, body=text) + if msg.sid is not None: + return True + except TwilioRestException: + logger.exception("Text Error") + return None + + +class Alert(ABC): + def __init__(self, template, reg, close_template=None): + t = loader.get_template(template) + self.text = t.render( + { + "course": reg.section.full_code, + "brand": "Penn Course Alert", + "auto_resubscribe": reg.auto_resubscribe, + } + ) + self.close_text = None + if close_template: + t = loader.get_template(close_template) + self.close_text = t.render( + { + "course": reg.section.full_code, + "brand": "Penn Course Alert", + "auto_resubscribe": reg.auto_resubscribe, + } + ) + self.registration = reg + + @abstractmethod + def send_alert(self, close_notification=False): + pass + + +class Email(Alert): + def __init__(self, reg): + super().__init__("alert/email_alert.html", reg, "alert/email_alert_close.html") + + def send_alert(self, close_notification=False): + """ + Returns False if notification was not sent intentionally, + and None if notification was attempted to be sent but an error occurred. + """ + if self.registration.user is not None and self.registration.user.profile.email is not None: + email = self.registration.user.profile.email + elif self.registration.email is not None: + email = self.registration.email + else: + return False + + try: + if close_notification: + if not self.close_text: + # This should be unreachable + return None + alert_subject = f"{self.registration.section.full_code} has closed." + alert_text = self.close_text + else: + alert_subject = f"{self.registration.section.full_code} is now open!" 
+ alert_text = self.text + return send_email( + from_="Penn Course Alert ", + to=email, + subject=alert_subject, + html=alert_text, + ) + except SMTPRecipientsRefused: + logger.exception("Email Error") + return None + + +class Text(Alert): + def __init__(self, reg): + super().__init__("alert/text_alert.txt", reg) + + def send_alert(self, close_notification=False): + """ + Returns False if notification was not sent intentionally, + and None if notification was attempted to be sent but an error occurred. + """ + if close_notification: + # Do not send close notifications by text + return False + if self.registration.user is not None and self.registration.user.profile.push_notifications: + # Do not send text if push_notifications is enabled + return False + if self.registration.user is not None and self.registration.user.profile.phone is not None: + phone_number = self.registration.user.profile.phone + elif self.registration.phone is not None: + phone_number = self.registration.phone + else: + return False + + alert_text = self.text + return send_text(phone_number, alert_text) + + +class PushNotification(Alert): + def __init__(self, reg): + super().__init__("alert/push_notif.txt", reg, close_template="alert/push_notif_close.txt") + + def send_alert(self, close_notification=False): + """ + Returns False if notification was not sent intentionally, + and None if notification was attempted to be sent but an error occurred. + """ + if self.registration.user is not None and self.registration.user.profile.push_notifications: + # Only send push notification if push_notifications is enabled + pennkey = self.registration.user.username + bearer_token = MOBILE_NOTIFICATION_SECRET + if close_notification: + if not self.close_text: + # This should be unreachable + return None + alert_title = f"{self.registration.section.full_code} just closed." + alert_body = self.close_text + else: + alert_title = f"{self.registration.section.full_code} is now open!" 
+ alert_body = self.text + try: + response = requests.post( + "https:/api.pennlabs.org/notifications/send/internal", + data={ + "title": alert_title, + "body": alert_body, + "pennkey": pennkey, + }, + headers={"Authorization": f"Bearer {bearer_token}"}, + ) + if response.status_code != 200: + logger.exception( + f"Push Notification {response.status_code} Response: {response.content}" + ) + return None + except requests.exceptions.RequestException as e: + logger.exception(f"Push Notification Request Error: {e}") + return None + return True + return False diff --git a/backend/alert/apps.py b/backend/alert/apps.py index 2049acf95..23fcc43c3 100644 --- a/backend/alert/apps.py +++ b/backend/alert/apps.py @@ -1,5 +1,5 @@ -from django.apps import AppConfig - - -class AlertConfig(AppConfig): - name = "alert" +from django.apps import AppConfig + + +class AlertConfig(AppConfig): + name = "alert" diff --git a/backend/alert/management/commands/alertstats.py b/backend/alert/management/commands/alertstats.py index 095cd5eee..f394e75df 100644 --- a/backend/alert/management/commands/alertstats.py +++ b/backend/alert/management/commands/alertstats.py @@ -1,74 +1,74 @@ -import json -from textwrap import dedent - -import requests -from django.conf import settings -from django.core.management import BaseCommand -from django.db.models import Q -from django.utils import timezone - -from alert.models import Registration -from courses.models import StatusUpdate -from courses.util import get_current_semester - - -class Command(BaseCommand): - help = ( - "Get statistics on PCA, and optionally send to Slack (for analytics use only; do " - "not confuse this script with the recomputestats command, which actually updates " - "cached statistics)." - ) - - def add_arguments(self, parser): - parser.add_argument("days", help="number of days to aggregate.", default=1, type=int) - parser.add_argument("--slack", action="store_true") - - def handle(self, *args, **options): - days = options["days"] - send_to_slack = options["slack"] - - start = timezone.now() - timezone.timedelta(days=days) - - qs = Registration.objects.filter(section__course__semester=get_current_semester()) - - num_registrations = qs.filter(created_at__gte=start, resubscribed_from__isnull=True).count() - num_alerts_sent = qs.filter(notification_sent=True, notification_sent_at__gte=start).count() - num_resubscribe = qs.filter( - resubscribed_from__isnull=False, created_at__gte=start, auto_resubscribe=False - ).count() - num_status_updates = StatusUpdate.objects.filter(created_at__gte=start).count() - num_active_perpetual = qs.filter( - resubscribed_to__isnull=True, - auto_resubscribe=True, - deleted=False, - cancelled=False, - notification_sent=False, - ).count() - num_cancelled_perpetual = ( - qs.filter( - resubscribed_to__isnull=True, - auto_resubscribe=True, - ) - .filter(Q(deleted=True) | Q(cancelled=True)) - .count() - ) - - message = dedent( - f""" - {f'Penn Course Alert stats in the past {days} day(s)' - f' since {start.strftime("%H:%M on %d %B, %Y")}'}: - New registrations: {num_registrations} - Alerts sent: {num_alerts_sent} - Manual resubscribes: {num_resubscribe} - Active auto-resubscribe requests: {num_active_perpetual} - Cancelled auto-resubscribe requests: {num_cancelled_perpetual} - Status Updates from Penn InTouch: {num_status_updates} - """ - ) - - if send_to_slack: - url = settings.STATS_WEBHOOK - print("sending to Slack...") - requests.post(url, data=json.dumps({"text": message})) - else: - print(message) +import json +from textwrap import 
dedent + +import requests +from django.conf import settings +from django.core.management import BaseCommand +from django.db.models import Q +from django.utils import timezone + +from alert.models import Registration +from courses.models import StatusUpdate +from courses.util import get_current_semester + + +class Command(BaseCommand): + help = ( + "Get statistics on PCA, and optionally send to Slack (for analytics use only; do " + "not confuse this script with the recomputestats command, which actually updates " + "cached statistics)." + ) + + def add_arguments(self, parser): + parser.add_argument("days", help="number of days to aggregate.", default=1, type=int) + parser.add_argument("--slack", action="store_true") + + def handle(self, *args, **options): + days = options["days"] + send_to_slack = options["slack"] + + start = timezone.now() - timezone.timedelta(days=days) + + qs = Registration.objects.filter(section__course__semester=get_current_semester()) + + num_registrations = qs.filter(created_at__gte=start, resubscribed_from__isnull=True).count() + num_alerts_sent = qs.filter(notification_sent=True, notification_sent_at__gte=start).count() + num_resubscribe = qs.filter( + resubscribed_from__isnull=False, created_at__gte=start, auto_resubscribe=False + ).count() + num_status_updates = StatusUpdate.objects.filter(created_at__gte=start).count() + num_active_perpetual = qs.filter( + resubscribed_to__isnull=True, + auto_resubscribe=True, + deleted=False, + cancelled=False, + notification_sent=False, + ).count() + num_cancelled_perpetual = ( + qs.filter( + resubscribed_to__isnull=True, + auto_resubscribe=True, + ) + .filter(Q(deleted=True) | Q(cancelled=True)) + .count() + ) + + message = dedent( + f""" + {f'Penn Course Alert stats in the past {days} day(s)' + f' since {start.strftime("%H:%M on %d %B, %Y")}'}: + New registrations: {num_registrations} + Alerts sent: {num_alerts_sent} + Manual resubscribes: {num_resubscribe} + Active auto-resubscribe requests: {num_active_perpetual} + Cancelled auto-resubscribe requests: {num_cancelled_perpetual} + Status Updates from Penn InTouch: {num_status_updates} + """ + ) + + if send_to_slack: + url = settings.STATS_WEBHOOK + print("sending to Slack...") + requests.post(url, data=json.dumps({"text": message})) + else: + print(message) diff --git a/backend/alert/management/commands/compute_head_registrations.py b/backend/alert/management/commands/compute_head_registrations.py index 8e8d2a8d9..4897e96c6 100644 --- a/backend/alert/management/commands/compute_head_registrations.py +++ b/backend/alert/management/commands/compute_head_registrations.py @@ -1,40 +1,40 @@ -import gc -from textwrap import dedent - -from django.core.management.base import BaseCommand -from tqdm import tqdm - -from alert.models import Registration - - -class Command(BaseCommand): - help = dedent( - """ - For all PCA Registrations in the database, compute head_registration relationships - based on resubscribed_from relationships. This script only needs to be run once after the - head_registration migration is applied, because head_registration relationships - are automatically maintained in the Registration resubscribe method and in the - loadregistrations_pca script. 
- """ - ) - - def handle(self, *args, **kwargs): - print("Recomputing head registrations...") - queryset = Registration.objects.order_by("pk") - pk = -1 - last_pk = queryset.order_by("-pk")[0].pk - pbar = tqdm(total=last_pk) - while pk < last_pk: - to_save = [] - for registration in queryset.filter(pk__gt=pk)[:1000]: - if pk >= 0: - pbar.update(registration.pk - pk) - pk = registration.pk - head_registration = registration.get_most_current_iter() - if registration.head_registration != head_registration: - registration.head_registration = head_registration - to_save.append(registration) - Registration.objects.bulk_update(to_save, ["head_registration"]) - gc.collect() - pbar.close() - print("Done.") +import gc +from textwrap import dedent + +from django.core.management.base import BaseCommand +from tqdm import tqdm + +from alert.models import Registration + + +class Command(BaseCommand): + help = dedent( + """ + For all PCA Registrations in the database, compute head_registration relationships + based on resubscribed_from relationships. This script only needs to be run once after the + head_registration migration is applied, because head_registration relationships + are automatically maintained in the Registration resubscribe method and in the + loadregistrations_pca script. + """ + ) + + def handle(self, *args, **kwargs): + print("Recomputing head registrations...") + queryset = Registration.objects.order_by("pk") + pk = -1 + last_pk = queryset.order_by("-pk")[0].pk + pbar = tqdm(total=last_pk) + while pk < last_pk: + to_save = [] + for registration in queryset.filter(pk__gt=pk)[:1000]: + if pk >= 0: + pbar.update(registration.pk - pk) + pk = registration.pk + head_registration = registration.get_most_current_iter() + if registration.head_registration != head_registration: + registration.head_registration = head_registration + to_save.append(registration) + Registration.objects.bulk_update(to_save, ["head_registration"]) + gc.collect() + pbar.close() + print("Done.") diff --git a/backend/alert/management/commands/export_anon_registrations.py b/backend/alert/management/commands/export_anon_registrations.py index be5408a5b..52a6884f9 100644 --- a/backend/alert/management/commands/export_anon_registrations.py +++ b/backend/alert/management/commands/export_anon_registrations.py @@ -1,142 +1,142 @@ -import csv -import os -from textwrap import dedent - -from django.core.management.base import BaseCommand -from django.db.models import F -from tqdm import tqdm - -from alert.models import Registration -from courses.util import get_semesters -from PennCourses.settings.base import S3_resource - - -class Command(BaseCommand): - help = ( - "Export anonymized PCA Registrations by semester with the 12 columns:\n" - "registration.section.full_code, registration.section.semester, " - "registration.created_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.original_created_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.id, resubscribed_from_id, " - "registration.notification_sent, notification_sent_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.cancelled, registration.cancelled_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.deleted, registration.deleted_at (%Y-%m-%d %H:%M:%S.%f %Z)" - ) - - def add_arguments(self, parser): - parser.add_argument( - "--path", - type=str, - help="The path (local or in S3) you want to export to (must be a .csv file).", - ) - parser.add_argument( - "--upload_to_s3", - default=False, - action="store_true", - help=( - "Enable this argument to upload the output of this script to the 
penn.courses " - "S3 bucket, at the path specified by the path argument. " - ), - ) - parser.add_argument( - "--courses_query", - default="", - type=str, - help=( - "A prefix of the course full_code (e.g. CIS-120) to filter exported registrations " - "by. Omit this argument to export all registrations from the given semesters." - ), - ) - parser.add_argument( - "--semesters", - type=str, - help=dedent( - """ - The semesters argument should be a comma-separated list of semesters - corresponding to the semesters from which you want to export PCA registrations, - i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. - If you pass "all" to this argument, this script will export all PCA registrations. - """ - ), - default="all", - ) - - def handle(self, *args, **kwargs): - path = kwargs["path"] - upload_to_s3 = kwargs["upload_to_s3"] - semesters = get_semesters(kwargs["semesters"], verbose=True) - if len(semesters) == 0: - raise ValueError("No semesters provided for registration export.") - assert path.endswith(".csv") or path == os.devnull - script_print_path = ("s3://penn.courses/" if upload_to_s3 else "") + path - print( - f"Generating {script_print_path} with registration data from " - f"semesters {semesters}..." - ) - rows = 0 - output_file_path = "/tmp/export_anon_registrations.csv" if upload_to_s3 else path - with open(output_file_path, "w") as output_file: - csv_writer = csv.writer( - output_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL - ) - for registration in tqdm( - Registration.objects.filter( - section__course__semester__in=semesters, - section__course__full_code__startswith=kwargs["courses_query"], - ).annotate( - efficient_semester=F("section__course__semester"), - section_full_code=F("section__full_code"), - ) - ): - resubscribed_from_id = ( - str(registration.resubscribed_from_id) - if registration.resubscribed_from is not None - else "" - ) - original_created_at = ( - registration.original_created_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z") - if registration.original_created_at is not None - else "" - ) - notification_sent_at = ( - registration.notification_sent_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z") - if registration.notification_sent_at is not None - else "" - ) - cancelled_at = ( - registration.cancelled_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z") - if registration.cancelled_at is not None - else "" - ) - deleted_at = ( - registration.deleted_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z") - if registration.deleted_at is not None - else "" - ) - rows += 1 - csv_writer.writerow( - [ - str(field) - for field in [ - registration.section_full_code, - registration.efficient_semester, - registration.created_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z"), - original_created_at, - registration.id, - resubscribed_from_id, - registration.notification_sent, - notification_sent_at, - registration.cancelled, - cancelled_at, - registration.deleted, - deleted_at, - ] - ] - ) - if rows % 5000 == 0: - output_file.flush() - - if upload_to_s3: - S3_resource.meta.client.upload_file(output_file_path, "penn.courses", path) - os.remove(output_file_path) - print(f"Generated {script_print_path} with {rows} rows.") +import csv +import os +from textwrap import dedent + +from django.core.management.base import BaseCommand +from django.db.models import F +from tqdm import tqdm + +from alert.models import Registration +from courses.util import get_semesters +from PennCourses.settings.base import S3_resource + + +class Command(BaseCommand): + help = ( + "Export anonymized PCA Registrations by 
semester with the 12 columns:\n" + "registration.section.full_code, registration.section.semester, " + "registration.created_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.original_created_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.id, resubscribed_from_id, " + "registration.notification_sent, notification_sent_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.cancelled, registration.cancelled_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.deleted, registration.deleted_at (%Y-%m-%d %H:%M:%S.%f %Z)" + ) + + def add_arguments(self, parser): + parser.add_argument( + "--path", + type=str, + help="The path (local or in S3) you want to export to (must be a .csv file).", + ) + parser.add_argument( + "--upload_to_s3", + default=False, + action="store_true", + help=( + "Enable this argument to upload the output of this script to the penn.courses " + "S3 bucket, at the path specified by the path argument. " + ), + ) + parser.add_argument( + "--courses_query", + default="", + type=str, + help=( + "A prefix of the course full_code (e.g. CIS-120) to filter exported registrations " + "by. Omit this argument to export all registrations from the given semesters." + ), + ) + parser.add_argument( + "--semesters", + type=str, + help=dedent( + """ + The semesters argument should be a comma-separated list of semesters + corresponding to the semesters from which you want to export PCA registrations, + i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. + If you pass "all" to this argument, this script will export all PCA registrations. + """ + ), + default="all", + ) + + def handle(self, *args, **kwargs): + path = kwargs["path"] + upload_to_s3 = kwargs["upload_to_s3"] + semesters = get_semesters(kwargs["semesters"], verbose=True) + if len(semesters) == 0: + raise ValueError("No semesters provided for registration export.") + assert path.endswith(".csv") or path == os.devnull + script_print_path = ("s3://penn.courses/" if upload_to_s3 else "") + path + print( + f"Generating {script_print_path} with registration data from " + f"semesters {semesters}..." 
+ ) + rows = 0 + output_file_path = "/tmp/export_anon_registrations.csv" if upload_to_s3 else path + with open(output_file_path, "w") as output_file: + csv_writer = csv.writer( + output_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + for registration in tqdm( + Registration.objects.filter( + section__course__semester__in=semesters, + section__course__full_code__startswith=kwargs["courses_query"], + ).annotate( + efficient_semester=F("section__course__semester"), + section_full_code=F("section__full_code"), + ) + ): + resubscribed_from_id = ( + str(registration.resubscribed_from_id) + if registration.resubscribed_from is not None + else "" + ) + original_created_at = ( + registration.original_created_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z") + if registration.original_created_at is not None + else "" + ) + notification_sent_at = ( + registration.notification_sent_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z") + if registration.notification_sent_at is not None + else "" + ) + cancelled_at = ( + registration.cancelled_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z") + if registration.cancelled_at is not None + else "" + ) + deleted_at = ( + registration.deleted_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z") + if registration.deleted_at is not None + else "" + ) + rows += 1 + csv_writer.writerow( + [ + str(field) + for field in [ + registration.section_full_code, + registration.efficient_semester, + registration.created_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z"), + original_created_at, + registration.id, + resubscribed_from_id, + registration.notification_sent, + notification_sent_at, + registration.cancelled, + cancelled_at, + registration.deleted, + deleted_at, + ] + ] + ) + if rows % 5000 == 0: + output_file.flush() + + if upload_to_s3: + S3_resource.meta.client.upload_file(output_file_path, "penn.courses", path) + os.remove(output_file_path) + print(f"Generated {script_print_path} with {rows} rows.") diff --git a/backend/alert/management/commands/export_demand_data.py b/backend/alert/management/commands/export_demand_data.py index dc6cb952a..23c98b53b 100644 --- a/backend/alert/management/commands/export_demand_data.py +++ b/backend/alert/management/commands/export_demand_data.py @@ -1,264 +1,264 @@ -import json -import os -from textwrap import dedent - -from django.core.exceptions import ValidationError -from django.core.management.base import BaseCommand -from django.db.models import F -from django.utils import timezone -from tqdm import tqdm - -from alert.management.commands.recomputestats import recompute_precomputed_fields -from alert.models import Registration, Section, validate_add_drop_semester -from courses.models import StatusUpdate -from courses.util import get_current_semester, get_or_create_add_drop_period, get_semesters -from PennCourses.settings.base import ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES, S3_resource -from review.views import extra_metrics_section_filters - - -def get_demand_data(semesters, section_query="", verbose=False): - current_semester = get_current_semester() - output_dict = dict() - - recompute_precomputed_fields(verbose=True) - - if verbose: - print(f"Computing demand data for semesters {str(semesters)}...") - for semester_num, semester in enumerate(semesters): - try: - validate_add_drop_semester(semester) - except ValidationError: - if verbose: - print(f"Skipping semester {semester} (unsupported kind for stats).") - continue - add_drop_period = get_or_create_add_drop_period(semester) - - if verbose: - print(f"Processing semester {semester}, " 
f"{(semester_num+1)}/{len(semesters)}.\n") - - output_dict[semester] = [] # list of demand data dicts - section_id_to_object = dict() # maps section id to section object (for this semester) - volume_changes_map = dict() # maps section id to list of volume changes - status_updates_map = dict() # maps section id to list of status updates - - iterator_wrapper = tqdm if verbose else (lambda x: x) - if verbose: - print("Indexing relevant sections...") - for section in iterator_wrapper( - Section.objects.filter( - extra_metrics_section_filters, - full_code__startswith=section_query, - course__semester=semester, - ) - .annotate( - efficient_semester=F("course__semester"), - ) - .distinct() - ): - section_id_to_object[section.id] = section - volume_changes_map[section.id] = [] - status_updates_map[section.id] = [] - - if verbose: - print("Computing registration volume changes over time for each section...") - for registration in iterator_wrapper( - Registration.objects.filter(section_id__in=section_id_to_object.keys()).annotate( - section_capacity=F("section__capacity") - ) - ): - section_id = registration.section_id - volume_changes_map[section_id].append( - {"date": registration.created_at, "volume_change": 1} - ) - deactivated_at = registration.deactivated_at - if deactivated_at is not None: - volume_changes_map[section_id].append({"date": deactivated_at, "volume_change": -1}) - - if verbose: - print("Collecting status updates over time for each section...") - for status_update in iterator_wrapper( - StatusUpdate.objects.filter( - section_id__in=section_id_to_object.keys(), in_add_drop_period=True - ) - ): - section_id = status_update.section_id - status_updates_map[section_id].append( - { - "date": status_update.created_at, - "old_status": status_update.old_status, - "new_status": status_update.new_status, - } - ) - - if verbose: - print("Joining updates for each section and sorting...") - all_changes = sorted( - [ - {"type": "status_update", "section_id": section_id, **update} - for section_id, status_updates_list in status_updates_map.items() - for update in status_updates_list - ] - + [ - {"type": "volume_change", "section_id": section_id, **change} - for section_id, changes_list in volume_changes_map.items() - for change in changes_list - ], - key=lambda x: (x["date"], int(x["type"] != "status_update")), - # put status updates first on matching dates - ) - - # Initialize variables to be maintained in our main all_changes loop - latest_popularity_dist_estimate = None - registration_volumes = {section_id: 0 for section_id in section_id_to_object.keys()} - demands = {section_id: 0 for section_id in section_id_to_object.keys()} - - # Initialize section statuses - section_status = {section_id: None for section_id in section_id_to_object.keys()} - for change in all_changes: - section_id = change["section_id"] - if change["type"] == "status_update": - if section_status[section_id] is None: - section_status[section_id] = change["old_status"] - - percent_through = ( - add_drop_period.get_percent_through_add_drop(timezone.now()) - if semester == current_semester - else 1 - ) - if percent_through == 0: - if verbose: - print( - f"Skipping semester {semester} because the add/drop period " - f"hasn't started yet." 
- ) - continue - distribution_estimate_threshold = sum( - len(changes_list) for changes_list in volume_changes_map.values() - ) // (ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES * percent_through) - num_changes_without_estimate = 0 - - if verbose: - print(f"Compiling demand data for semester {semester}...") - for change in iterator_wrapper(all_changes): - section_id = change["section_id"] - - if section_status[section_id] is None: - section_status[section_id] = ( - "O" if section_id_to_object[section_id].percent_open > 0.5 else "C" - ) - if change["type"] == "status_update": - section_status[section_id] = change["new_status"] - continue - - date = change["date"] - volume_change = change["volume_change"] - registration_volumes[section_id] += volume_change - demands[section_id] = ( - registration_volumes[section_id] / section_id_to_object[section_id].capacity - ) - max_id = max(demands.keys(), key=lambda x: demands[x]) - min_id = min(demands.keys(), key=lambda x: demands[x]) - if ( - latest_popularity_dist_estimate is None - or section_id == latest_popularity_dist_estimate["highest_demand_section"].id - or section_id == latest_popularity_dist_estimate["lowest_demand_section"].id - or latest_popularity_dist_estimate["highest_demand_section"].id != max_id - or latest_popularity_dist_estimate["lowest_demand_section"].id != min_id - or num_changes_without_estimate >= distribution_estimate_threshold - ): - num_changes_without_estimate = 0 - output_dict[semester].append( - { - "percent_through": percent_through, - "demands": [ - val for sec_id, val in demands.items() if section_status[sec_id] == "C" - ], - } - ) - - latest_popularity_dist_estimate = { - "created_at": date, - "semester": semester, - "highest_demand_section": section_id_to_object[max_id], - "highest_demand_section_volume": registration_volumes[max_id], - "lowest_demand_section": section_id_to_object[min_id], - "lowest_demand_section_volume": registration_volumes[min_id], - } - else: - num_changes_without_estimate += 1 - - return output_dict - - -class Command(BaseCommand): - help = dedent( - """ - Export historical PCA demand data to a JSON file with the following schema: - { - semester: [{percent_through: int, demands: array of ints}, ...], - ... - } - """ - ) - - def add_arguments(self, parser): - parser.add_argument( - "--path", - type=str, - help="The path (local or in S3) you want to export to (must be a .json file).", - ) - parser.add_argument( - "--upload_to_s3", - default=False, - action="store_true", - help=( - "Enable this argument to upload the output of this script to the penn.courses " - "S3 bucket, at the path specified by the path argument. " - ), - ) - parser.add_argument( - "--section_query", - default="", - type=str, - help=( - "A prefix of the section full_code (e.g. CIS-120-001) to filter exported " - "demand data by. Omit this argument to export demand data from all sections " - "from the given semesters." - ), - ) - parser.add_argument( - "--semesters", - type=str, - help=dedent( - """ - The semesters argument should be a comma-separated list of semesters - corresponding to the semesters from which you want to export demand data, - i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. - If you pass "all" to this argument, this script will export all demand data. 
- """ - ), - default="all", - ) - - def handle(self, *args, **kwargs): - path = kwargs["path"] - upload_to_s3 = kwargs["upload_to_s3"] - semesters = get_semesters(kwargs["semesters"], verbose=True) - if len(semesters) == 0: - raise ValueError("No semesters provided for demand data export.") - assert path.endswith(".json") or path == os.devnull - script_print_path = ("s3://penn.courses/" if upload_to_s3 else "") + path - print( - f"Generating {script_print_path} with demand data data from " - f"semesters {semesters}..." - ) - output_file_path = "/tmp/export_demand_data.json" if upload_to_s3 else path - with open(output_file_path, "w") as output_file: - output_data = get_demand_data( - semesters, section_query=kwargs["section_query"], verbose=True - ) - json.dump(output_data, output_file) - if upload_to_s3: - S3_resource.meta.client.upload_file(output_file_path, "penn.courses", path) - os.remove(output_file_path) - print(f"Generated {script_print_path} with demand data from {len(semesters)} semesters.") +import json +import os +from textwrap import dedent + +from django.core.exceptions import ValidationError +from django.core.management.base import BaseCommand +from django.db.models import F +from django.utils import timezone +from tqdm import tqdm + +from alert.management.commands.recomputestats import recompute_precomputed_fields +from alert.models import Registration, Section, validate_add_drop_semester +from courses.models import StatusUpdate +from courses.util import get_current_semester, get_or_create_add_drop_period, get_semesters +from PennCourses.settings.base import ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES, S3_resource +from review.views import extra_metrics_section_filters + + +def get_demand_data(semesters, section_query="", verbose=False): + current_semester = get_current_semester() + output_dict = dict() + + recompute_precomputed_fields(verbose=True) + + if verbose: + print(f"Computing demand data for semesters {str(semesters)}...") + for semester_num, semester in enumerate(semesters): + try: + validate_add_drop_semester(semester) + except ValidationError: + if verbose: + print(f"Skipping semester {semester} (unsupported kind for stats).") + continue + add_drop_period = get_or_create_add_drop_period(semester) + + if verbose: + print(f"Processing semester {semester}, " f"{(semester_num+1)}/{len(semesters)}.\n") + + output_dict[semester] = [] # list of demand data dicts + section_id_to_object = dict() # maps section id to section object (for this semester) + volume_changes_map = dict() # maps section id to list of volume changes + status_updates_map = dict() # maps section id to list of status updates + + iterator_wrapper = tqdm if verbose else (lambda x: x) + if verbose: + print("Indexing relevant sections...") + for section in iterator_wrapper( + Section.objects.filter( + extra_metrics_section_filters, + full_code__startswith=section_query, + course__semester=semester, + ) + .annotate( + efficient_semester=F("course__semester"), + ) + .distinct() + ): + section_id_to_object[section.id] = section + volume_changes_map[section.id] = [] + status_updates_map[section.id] = [] + + if verbose: + print("Computing registration volume changes over time for each section...") + for registration in iterator_wrapper( + Registration.objects.filter(section_id__in=section_id_to_object.keys()).annotate( + section_capacity=F("section__capacity") + ) + ): + section_id = registration.section_id + volume_changes_map[section_id].append( + {"date": registration.created_at, "volume_change": 1} + ) + 
deactivated_at = registration.deactivated_at + if deactivated_at is not None: + volume_changes_map[section_id].append({"date": deactivated_at, "volume_change": -1}) + + if verbose: + print("Collecting status updates over time for each section...") + for status_update in iterator_wrapper( + StatusUpdate.objects.filter( + section_id__in=section_id_to_object.keys(), in_add_drop_period=True + ) + ): + section_id = status_update.section_id + status_updates_map[section_id].append( + { + "date": status_update.created_at, + "old_status": status_update.old_status, + "new_status": status_update.new_status, + } + ) + + if verbose: + print("Joining updates for each section and sorting...") + all_changes = sorted( + [ + {"type": "status_update", "section_id": section_id, **update} + for section_id, status_updates_list in status_updates_map.items() + for update in status_updates_list + ] + + [ + {"type": "volume_change", "section_id": section_id, **change} + for section_id, changes_list in volume_changes_map.items() + for change in changes_list + ], + key=lambda x: (x["date"], int(x["type"] != "status_update")), + # put status updates first on matching dates + ) + + # Initialize variables to be maintained in our main all_changes loop + latest_popularity_dist_estimate = None + registration_volumes = {section_id: 0 for section_id in section_id_to_object.keys()} + demands = {section_id: 0 for section_id in section_id_to_object.keys()} + + # Initialize section statuses + section_status = {section_id: None for section_id in section_id_to_object.keys()} + for change in all_changes: + section_id = change["section_id"] + if change["type"] == "status_update": + if section_status[section_id] is None: + section_status[section_id] = change["old_status"] + + percent_through = ( + add_drop_period.get_percent_through_add_drop(timezone.now()) + if semester == current_semester + else 1 + ) + if percent_through == 0: + if verbose: + print( + f"Skipping semester {semester} because the add/drop period " + f"hasn't started yet." 
+ ) + continue + distribution_estimate_threshold = sum( + len(changes_list) for changes_list in volume_changes_map.values() + ) // (ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES * percent_through) + num_changes_without_estimate = 0 + + if verbose: + print(f"Compiling demand data for semester {semester}...") + for change in iterator_wrapper(all_changes): + section_id = change["section_id"] + + if section_status[section_id] is None: + section_status[section_id] = ( + "O" if section_id_to_object[section_id].percent_open > 0.5 else "C" + ) + if change["type"] == "status_update": + section_status[section_id] = change["new_status"] + continue + + date = change["date"] + volume_change = change["volume_change"] + registration_volumes[section_id] += volume_change + demands[section_id] = ( + registration_volumes[section_id] / section_id_to_object[section_id].capacity + ) + max_id = max(demands.keys(), key=lambda x: demands[x]) + min_id = min(demands.keys(), key=lambda x: demands[x]) + if ( + latest_popularity_dist_estimate is None + or section_id == latest_popularity_dist_estimate["highest_demand_section"].id + or section_id == latest_popularity_dist_estimate["lowest_demand_section"].id + or latest_popularity_dist_estimate["highest_demand_section"].id != max_id + or latest_popularity_dist_estimate["lowest_demand_section"].id != min_id + or num_changes_without_estimate >= distribution_estimate_threshold + ): + num_changes_without_estimate = 0 + output_dict[semester].append( + { + "percent_through": percent_through, + "demands": [ + val for sec_id, val in demands.items() if section_status[sec_id] == "C" + ], + } + ) + + latest_popularity_dist_estimate = { + "created_at": date, + "semester": semester, + "highest_demand_section": section_id_to_object[max_id], + "highest_demand_section_volume": registration_volumes[max_id], + "lowest_demand_section": section_id_to_object[min_id], + "lowest_demand_section_volume": registration_volumes[min_id], + } + else: + num_changes_without_estimate += 1 + + return output_dict + + +class Command(BaseCommand): + help = dedent( + """ + Export historical PCA demand data to a JSON file with the following schema: + { + semester: [{percent_through: int, demands: array of ints}, ...], + ... + } + """ + ) + + def add_arguments(self, parser): + parser.add_argument( + "--path", + type=str, + help="The path (local or in S3) you want to export to (must be a .json file).", + ) + parser.add_argument( + "--upload_to_s3", + default=False, + action="store_true", + help=( + "Enable this argument to upload the output of this script to the penn.courses " + "S3 bucket, at the path specified by the path argument. " + ), + ) + parser.add_argument( + "--section_query", + default="", + type=str, + help=( + "A prefix of the section full_code (e.g. CIS-120-001) to filter exported " + "demand data by. Omit this argument to export demand data from all sections " + "from the given semesters." + ), + ) + parser.add_argument( + "--semesters", + type=str, + help=dedent( + """ + The semesters argument should be a comma-separated list of semesters + corresponding to the semesters from which you want to export demand data, + i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. + If you pass "all" to this argument, this script will export all demand data. 
+ """ + ), + default="all", + ) + + def handle(self, *args, **kwargs): + path = kwargs["path"] + upload_to_s3 = kwargs["upload_to_s3"] + semesters = get_semesters(kwargs["semesters"], verbose=True) + if len(semesters) == 0: + raise ValueError("No semesters provided for demand data export.") + assert path.endswith(".json") or path == os.devnull + script_print_path = ("s3://penn.courses/" if upload_to_s3 else "") + path + print( + f"Generating {script_print_path} with demand data data from " + f"semesters {semesters}..." + ) + output_file_path = "/tmp/export_demand_data.json" if upload_to_s3 else path + with open(output_file_path, "w") as output_file: + output_data = get_demand_data( + semesters, section_query=kwargs["section_query"], verbose=True + ) + json.dump(output_data, output_file) + if upload_to_s3: + S3_resource.meta.client.upload_file(output_file_path, "penn.courses", path) + os.remove(output_file_path) + print(f"Generated {script_print_path} with demand data from {len(semesters)} semesters.") diff --git a/backend/alert/management/commands/loadregistrations_pca.py b/backend/alert/management/commands/loadregistrations_pca.py index 9027c3dd3..432424315 100644 --- a/backend/alert/management/commands/loadregistrations_pca.py +++ b/backend/alert/management/commands/loadregistrations_pca.py @@ -1,156 +1,156 @@ -import csv -import logging -import os -from datetime import datetime - -from dateutil.tz import gettz -from django.core.management.base import BaseCommand -from django.db import transaction -from django.db.models import F -from django.utils.timezone import make_aware -from tqdm import tqdm - -from alert.management.commands.recomputestats import recompute_demand_distribution_estimates -from alert.models import Registration, Section -from PennCourses.settings.base import TIME_ZONE - - -class Command(BaseCommand): - help = ( - "Load in PCA registrations from a csv file. The csv file must have the following columns:\n" - "registration.section.full_code, registration.section.semester, " - "registration.created_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.original_created_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.id, resubscribed_from_id, " - "registration.notification_sent, notification_sent_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.cancelled, registration.cancelled_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.deleted, registration.deleted_at (%Y-%m-%d %H:%M:%S.%f %Z)" - ) - - def add_arguments(self, parser): - parser.add_argument( - "--src", - type=str, - default="", - help="The file path of the .csv file containing the registrations " - "you want to import", - ) - - def handle(self, *args, **kwargs): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - src = os.path.abspath(kwargs["src"]) - _, file_extension = os.path.splitext(kwargs["src"]) - if not os.path.exists(src): - return "File does not exist." - if file_extension != ".csv": - return "File is not a csv." - print(f"Loading PCA registrations from path {src}") - print( - "This script is an atomic transaction, so the database will not be modified " - "unless the entire script succeeds." 
- ) - - sections_map = dict() # maps (full_code, semester) to section id - row_count = 0 - with open(src) as data_file: - data_reader = csv.reader(data_file, delimiter=",", quotechar='"') - sections_to_fetch = set() - for row in data_reader: - sections_to_fetch.add((row[0], row[1])) - row_count += 1 - full_codes = [sec[0] for sec in sections_to_fetch] - semesters = [sec[1] for sec in sections_to_fetch] - section_obs = Section.objects.filter( - full_code__in=full_codes, course__semester__in=semesters - ).annotate(efficient_semester=F("course__semester")) - for section_ob in section_obs: - sections_map[section_ob.full_code, section_ob.efficient_semester] = section_ob.id - - id_corrections = dict() - semesters = set() - registrations = [] - with transaction.atomic(): - with open(src) as data_file: - i = 0 - data_reader = csv.reader(data_file, delimiter=",", quotechar='"') - - for row in tqdm(data_reader, total=row_count): - i += 1 - if len(row) != 12: - print(f"\nRow found with {len(row)} (!=12) columns.") - print( - f"For the above reason, row {i} (1-indexed) of {src} is invalid:\n" - f"{row}\n(Not necessarily helpful reminder: Columns must be:\n" - "registration.section.full_code, registration.section.semester, " - "registration.created_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.original_created_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.id, resubscribed_from_id, " - "registration.notification_sent, " - "notification_sent_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.cancelled, " - "registration.cancelled_at (%Y-%m-%d %H:%M:%S.%f %Z), " - "registration.deleted, " - "registration.deleted_at (%Y-%m-%d %H:%M:%S.%f %Z)" - ")\n\nInvalid input; no registrations were added to the database.\n" - ) - return False - - full_code = row[0] - semester = row[1] - if (full_code, semester) not in sections_map: - raise ValueError(f"Section {full_code} {semester} not found in database.") - semesters.add(semester) - - original_id = row[4] - resubscribed_from_id = row[5] - if resubscribed_from_id == "" or resubscribed_from_id == "None": - resubscribed_from_id = None - - def extract_date(dt_string): - if dt_string is None or dt_string == "" or dt_string == "None": - return None - dt = datetime.strptime(dt_string, "%Y-%m-%d %H:%M:%S.%f %Z") - return make_aware(dt, timezone=gettz(TIME_ZONE), is_dst=None) - - registration_dict = dict() # fields to unpack into Registration initialization - registration_dict["section_id"] = sections_map[full_code, semester] - registration_dict["source"] = "SCRIPT_PCA" - registration_dict["created_at"] = extract_date(row[2]) - registration_dict["original_created_at"] = extract_date(row[3]) - registration_dict["notification_sent"] = bool(row[6]) - registration_dict["notification_sent_at"] = extract_date(row[7]) - registration_dict["cancelled"] = bool(row[8]) - registration_dict["cancelled_at"] = extract_date(row[9]) - registration_dict["deleted"] = bool(row[10]) - registration_dict["deleted_at"] = extract_date(row[11]) - - registration = Registration(**registration_dict) - registration.save(load_script=True) - registration.created_at = registration_dict["created_at"] - registration.save(load_script=True) - id_corrections[original_id] = registration.id - registrations.append((registration, original_id, resubscribed_from_id)) - - print("Connecting resubscribe chains...") - - to_save = [] - for registration, original_id, resubscribed_from_id in registrations: - if resubscribed_from_id is not None: - registration.resubscribed_from_id = id_corrections[resubscribed_from_id] - 
to_save.append(registration) - Registration.objects.bulk_update(to_save, ["resubscribed_from_id"]) - to_save = [] - for registration, _, _ in tqdm(registrations): - head_registration = registration.get_most_current_iter() - if registration.head_registration != head_registration: - registration.head_registration = head_registration - to_save.append(registration) - Registration.objects.bulk_update(to_save, ["head_registration"]) - - print(f"Done! {len(registrations)} registrations added to database.") - - print( - f"Recomputing PCA Demand Distribution Estimates for {len(semesters)} semesters..." - ) - recompute_demand_distribution_estimates(semesters=",".join(semesters), verbose=True) +import csv +import logging +import os +from datetime import datetime + +from dateutil.tz import gettz +from django.core.management.base import BaseCommand +from django.db import transaction +from django.db.models import F +from django.utils.timezone import make_aware +from tqdm import tqdm + +from alert.management.commands.recomputestats import recompute_demand_distribution_estimates +from alert.models import Registration, Section +from PennCourses.settings.base import TIME_ZONE + + +class Command(BaseCommand): + help = ( + "Load in PCA registrations from a csv file. The csv file must have the following columns:\n" + "registration.section.full_code, registration.section.semester, " + "registration.created_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.original_created_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.id, resubscribed_from_id, " + "registration.notification_sent, notification_sent_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.cancelled, registration.cancelled_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.deleted, registration.deleted_at (%Y-%m-%d %H:%M:%S.%f %Z)" + ) + + def add_arguments(self, parser): + parser.add_argument( + "--src", + type=str, + default="", + help="The file path of the .csv file containing the registrations " + "you want to import", + ) + + def handle(self, *args, **kwargs): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + src = os.path.abspath(kwargs["src"]) + _, file_extension = os.path.splitext(kwargs["src"]) + if not os.path.exists(src): + return "File does not exist." + if file_extension != ".csv": + return "File is not a csv." + print(f"Loading PCA registrations from path {src}") + print( + "This script is an atomic transaction, so the database will not be modified " + "unless the entire script succeeds." 
+ ) + + sections_map = dict() # maps (full_code, semester) to section id + row_count = 0 + with open(src) as data_file: + data_reader = csv.reader(data_file, delimiter=",", quotechar='"') + sections_to_fetch = set() + for row in data_reader: + sections_to_fetch.add((row[0], row[1])) + row_count += 1 + full_codes = [sec[0] for sec in sections_to_fetch] + semesters = [sec[1] for sec in sections_to_fetch] + section_obs = Section.objects.filter( + full_code__in=full_codes, course__semester__in=semesters + ).annotate(efficient_semester=F("course__semester")) + for section_ob in section_obs: + sections_map[section_ob.full_code, section_ob.efficient_semester] = section_ob.id + + id_corrections = dict() + semesters = set() + registrations = [] + with transaction.atomic(): + with open(src) as data_file: + i = 0 + data_reader = csv.reader(data_file, delimiter=",", quotechar='"') + + for row in tqdm(data_reader, total=row_count): + i += 1 + if len(row) != 12: + print(f"\nRow found with {len(row)} (!=12) columns.") + print( + f"For the above reason, row {i} (1-indexed) of {src} is invalid:\n" + f"{row}\n(Not necessarily helpful reminder: Columns must be:\n" + "registration.section.full_code, registration.section.semester, " + "registration.created_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.original_created_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.id, resubscribed_from_id, " + "registration.notification_sent, " + "notification_sent_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.cancelled, " + "registration.cancelled_at (%Y-%m-%d %H:%M:%S.%f %Z), " + "registration.deleted, " + "registration.deleted_at (%Y-%m-%d %H:%M:%S.%f %Z)" + ")\n\nInvalid input; no registrations were added to the database.\n" + ) + return False + + full_code = row[0] + semester = row[1] + if (full_code, semester) not in sections_map: + raise ValueError(f"Section {full_code} {semester} not found in database.") + semesters.add(semester) + + original_id = row[4] + resubscribed_from_id = row[5] + if resubscribed_from_id == "" or resubscribed_from_id == "None": + resubscribed_from_id = None + + def extract_date(dt_string): + if dt_string is None or dt_string == "" or dt_string == "None": + return None + dt = datetime.strptime(dt_string, "%Y-%m-%d %H:%M:%S.%f %Z") + return make_aware(dt, timezone=gettz(TIME_ZONE), is_dst=None) + + registration_dict = dict() # fields to unpack into Registration initialization + registration_dict["section_id"] = sections_map[full_code, semester] + registration_dict["source"] = "SCRIPT_PCA" + registration_dict["created_at"] = extract_date(row[2]) + registration_dict["original_created_at"] = extract_date(row[3]) + registration_dict["notification_sent"] = bool(row[6]) + registration_dict["notification_sent_at"] = extract_date(row[7]) + registration_dict["cancelled"] = bool(row[8]) + registration_dict["cancelled_at"] = extract_date(row[9]) + registration_dict["deleted"] = bool(row[10]) + registration_dict["deleted_at"] = extract_date(row[11]) + + registration = Registration(**registration_dict) + registration.save(load_script=True) + registration.created_at = registration_dict["created_at"] + registration.save(load_script=True) + id_corrections[original_id] = registration.id + registrations.append((registration, original_id, resubscribed_from_id)) + + print("Connecting resubscribe chains...") + + to_save = [] + for registration, original_id, resubscribed_from_id in registrations: + if resubscribed_from_id is not None: + registration.resubscribed_from_id = id_corrections[resubscribed_from_id] + 
to_save.append(registration) + Registration.objects.bulk_update(to_save, ["resubscribed_from_id"]) + to_save = [] + for registration, _, _ in tqdm(registrations): + head_registration = registration.get_most_current_iter() + if registration.head_registration != head_registration: + registration.head_registration = head_registration + to_save.append(registration) + Registration.objects.bulk_update(to_save, ["head_registration"]) + + print(f"Done! {len(registrations)} registrations added to database.") + + print( + f"Recomputing PCA Demand Distribution Estimates for {len(semesters)} semesters..." + ) + recompute_demand_distribution_estimates(semesters=",".join(semesters), verbose=True) diff --git a/backend/alert/management/commands/recomputestats.py b/backend/alert/management/commands/recomputestats.py index 15498bbe4..bdd7420ac 100644 --- a/backend/alert/management/commands/recomputestats.py +++ b/backend/alert/management/commands/recomputestats.py @@ -1,599 +1,599 @@ -import logging -from textwrap import dedent - -import numpy as np -import scipy.stats as stats -from django.core.cache import cache -from django.core.exceptions import ValidationError -from django.core.management.base import BaseCommand -from django.db import connection, transaction -from django.db.models import Count, F, OuterRef, Q, Subquery, Value -from django.db.models.functions import Coalesce -from django.utils import timezone -from tqdm import tqdm - -from alert.models import ( - PcaDemandDistributionEstimate, - Registration, - Section, - validate_add_drop_semester, -) -from courses.management.commands.load_add_drop_dates import ( - fill_in_add_drop_periods, - load_add_drop_dates, -) -from courses.models import Course, Meeting, StatusUpdate, Topic -from courses.util import ( - get_current_semester, - get_or_create_add_drop_period, - get_semesters, - subquery_count_distinct, -) -from PennCourses.settings.base import ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES -from review.views import extra_metrics_section_filters - - -def recompute_num_activities(): - Course.objects.all().annotate( - activity_count=subquery_count_distinct( - Section.objects.filter(course_id=OuterRef("id")), column="activity" - ) - ).update(num_activities=F("activity_count")) - - -def recompute_meeting_count(): - Section.objects.all().annotate( - meeting_count=subquery_count_distinct( - Meeting.objects.filter(section_id=OuterRef("id")), column="id" - ) - ).update(num_meetings=F("meeting_count")) - - -def recompute_has_reviews(): - with connection.cursor() as cursor: - cursor.execute( - """ - UPDATE "courses_section" AS U0 - SET "has_reviews" = CASE WHEN - EXISTS (SELECT id FROM "review_review" AS U1 - WHERE U0."id" = U1."section_id") - THEN true ELSE false - END - """ - ) - - -def recompute_has_status_updates(): - with connection.cursor() as cursor: - cursor.execute( - """ - UPDATE "courses_section" AS U0 - SET "has_status_updates" = CASE WHEN - EXISTS (SELECT id FROM "courses_statusupdate" AS U1 - WHERE U0."id" = U1."section_id") - THEN true ELSE false - END - """ - ) - - -def recompute_precomputed_fields(verbose=False): - """ - Recomputes the following precomputed fields: - - Course.num_activities - - Section.num_meetings - - Section.has_reviews - - Section.has_status_updates - - :param verbose: Set to True if you want this script to print its status as it goes, - or keep as False (default) if you want the script to work silently. 
- """ - if verbose: - print("Recomputing precomputed fields...") - - if verbose: - print("\tRecomputing Course.num_activities") - recompute_num_activities() - if verbose: - print("\tRecomputing Section.num_meetings") - recompute_meeting_count() - if verbose: - print("\tRecomputing Section.has_reviews") - recompute_has_reviews() - if verbose: - print("\tRecomputing Section.has_status_updates") - recompute_has_status_updates() - - if verbose: - print("Done recomputing precomputed fields.") - - -def deduplicate_status_updates(semesters=None, verbose=False, semesters_precomputed=False): - """ - Removes duplicate/redundant status updates from the specified semesters. - - :param semesters: The semesters argument should be a comma-separated list of string semesters - corresponding to the semesters for which you want to remove duplicate/redundant - status updates, i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. - It defaults to None, in which case only the current semester is used. If you supply - the string "all", it will deduplicate for all semesters found in Courses in the db. - If semesters_precomputed is set to True (non-default), then this argument should - instead be a list of single string semesters. - :param semesters_precomputed: If False (default), the semesters argument will expect a raw - comma-separated string input. If True, the semesters argument will expect a list of - individual string semesters. - :param verbose: Set to True if you want this script to print its status as it goes, - or keep as False (default) if you want the script to work silently. - """ - - semesters = ( - semesters if semesters_precomputed else get_semesters(semesters=semesters, verbose=verbose) - ) - - if verbose: - print(f"Deduplicating status updates for semesters {str(semesters)}...") - - for semester_num, semester in enumerate(semesters): - with transaction.atomic(): - # We make this command an atomic transaction, so that the database will not - # be modified unless the entire update for a semester succeeds. - - if verbose: - print(f"\nProcessing semester {semester}, " f"{(semester_num+1)}/{len(semesters)}.") - - num_removed = 0 - for section_id in tqdm( - Section.objects.filter(course__semester=semester).values_list("id", flat=True), - disable=(not verbose), - ): - last_update = None - ids_to_remove = [] # IDs of redundant status updates to remove - - for update in StatusUpdate.objects.filter(section_id=section_id).order_by( - "created_at" - ): - if ( - last_update - and last_update.old_status == update.old_status - and last_update.new_status == update.new_status - ): - ids_to_remove.append(update.id) - continue - last_update = update - - num_removed += len(ids_to_remove) - StatusUpdate.objects.filter(id__in=ids_to_remove).delete() - print( - f"Removed {num_removed} duplicate status update objects from semester {semester}." - ) - - if verbose: - print(f"Finished deduplicating status updates for semesters {str(semesters)}.") - - -def recompute_percent_open(semesters=None, verbose=False, semesters_precomputed=False): - """ - Recomputes the percent_open field for each section in the given semester(s). - - :param semesters: The semesters argument should be a comma-separated list of string semesters - corresponding to the semesters for which you want to recompute percent_open fields, - i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. It defaults to None, - in which case only the current semester is used. 
If you supply the string "all", - it will recompute for all semesters found in Courses in the db. - If semesters_precomputed is set to True (non-default), then this argument should - instead be a list of single string semesters. - :param semesters_precomputed: If False (default), the semesters argument will expect a raw - comma-separated string input. If True, the semesters argument will expect a list of - individual string semesters. - :param verbose: Set to True if you want this script to print its status as it goes, - or keep as False (default) if you want the script to work silently. - """ - - current_semester = get_current_semester() - semesters = ( - semesters if semesters_precomputed else get_semesters(semesters=semesters, verbose=verbose) - ) - - if verbose: - print(f"Recomputing open percentages for semesters {str(semesters)}...") - - for semester_num, semester in enumerate(semesters): - with transaction.atomic(): - # We make this command an atomic transaction, so that the database will not - # be modified unless the entire update for a semester succeeds. - - if verbose: - print(f"\nProcessing semester {semester}, " f"{(semester_num+1)}/{len(semesters)}.") - - add_drop = get_or_create_add_drop_period(semester) - add_drop_start = add_drop.estimated_start - add_drop_end = add_drop.estimated_end - - StatusUpdate.objects.filter(section__course__semester=semester).select_for_update() - - sections = Section.objects.filter(course__semester=semester) - num_erroneous_updates = 0 - num_total_updates = 0 - for section in sections: - status_updates = StatusUpdate.objects.filter( - section=section, created_at__gt=add_drop_start, created_at__lt=add_drop_end - ).order_by("created_at") - num_total_updates += len(status_updates) - total_open_seconds = 0 - if not status_updates.exists(): - try: - guess_status = ( - StatusUpdate.objects.filter( - section=section, created_at__lte=add_drop_start - ) - .latest("created_at") - .new_status - ) - except StatusUpdate.DoesNotExist: - guess_status = section.status - section.percent_open = float(guess_status == "O") - else: - last_dt = add_drop_start - last_status = status_updates.first().old_status - for update in status_updates: - if last_status != update.old_status: - num_erroneous_updates += 1 - if last_status == "O" and update.new_status != "O": - total_open_seconds += (update.created_at - last_dt).total_seconds() - last_dt = update.created_at - last_status = update.new_status - section.percent_open = float(total_open_seconds) / float( - (status_updates.last().created_at - add_drop_start).total_seconds() - ) - if section.semester != current_semester: - section.percent_open = float( - total_open_seconds - + int(last_status == "O") * (add_drop_end - last_dt).total_seconds() - ) / float((add_drop_end - add_drop_start).total_seconds()) - section.save() - if verbose: - print( - f"Finished calculating percent_open for {len(sections)} sections from " - f"semester {semester}, encountered {num_erroneous_updates} erroneous " - f"Status Updates (out of {num_total_updates} total Status Updates)" - ) - if verbose: - print(f"Finished recomputing open percentages for semesters {str(semesters)}.") - - -def recompute_registration_volumes(semesters=None, semesters_precomputed=False, verbose=False): - """ - Recomputes the registration_volume fields for all sections in the given semester(s). 
- - :param semesters: The semesters argument should be a comma-separated list of string semesters - corresponding to the semesters for which you want to recompute demand distribution - estimate, i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. It - defaults to None, in which case only the current semester is used. If you supply the - string "all", it will recompute for all semesters found in Courses in the db. - If semesters_precomputed is set to True (non-default), then this argument should - instead be a list of single string semesters. - :param semesters_precomputed: If False (default), the semesters argument will expect a raw - comma-separated string input. If True, the semesters argument will expect a list of - individual string semesters. - :param verbose: Set to True if you want this script to print its status as it goes, - or keep as False (default) if you want the script to work silently. - """ - - semesters = ( - semesters if semesters_precomputed else get_semesters(semesters=semesters, verbose=verbose) - ) - - if verbose: - print(f"Computing most recent registration volumes for semesters {semesters} ...") - with transaction.atomic(): - Section.objects.filter(course__semester__in=semesters).select_for_update().update( - registration_volume=Coalesce( - Subquery( - Registration.objects.filter( - section__id=OuterRef("id"), **Registration.is_active_filter() - ) - .annotate(common=Value(1)) - .values("common") - .annotate(count=Count("*")) - .values("count")[:1], - ), - Value(0), - ) - ) - - -def recompute_demand_distribution_estimates( - semesters=None, semesters_precomputed=False, verbose=False -): - """ - This script recomputes all PcaDemandDistributionEstimate objects for the given semester(s) - based on saved Registration objects. In doing so, it also recomputes the registration_volume - and percent_open fields for all sections in the given semester(s) - (by calling recompute_registration_volumes and recompute_percent_open). - - :param semesters: The semesters argument should be a comma-separated list of string semesters - corresponding to the semesters for which you want to recompute demand distribution - estimate, i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. It - defaults to None, in which case only the current semester is used. If you supply the - string "all", it will recompute for all semesters found in Courses in the db. - If semesters_precomputed is set to True (non-default), then this argument should - instead be a list of single string semesters. - :param semesters_precomputed: If False (default), the semesters argument will expect a raw - comma-separated string input. If True, the semesters argument will expect a list of - individual string semesters. - :param verbose: Set to True if you want this script to print its status as it goes, - or keep as False (default) if you want the script to work silently. 
- """ - - current_semester = get_current_semester() - semesters = ( - semesters if semesters_precomputed else get_semesters(semesters=semesters, verbose=verbose) - ) - - recompute_precomputed_fields(verbose=verbose) - recompute_registration_volumes(semesters=semesters, semesters_precomputed=True, verbose=verbose) - recompute_percent_open(semesters=semesters, semesters_precomputed=True, verbose=verbose) - - if verbose: - print(f"Recomputing demand distribution estimates for semesters {str(semesters)}...") - for semester_num, semester in enumerate(semesters): - try: - validate_add_drop_semester(semester) - except ValidationError: - if verbose: - print(f"Skipping semester {semester} (unsupported kind for stats).") - continue - add_drop_period = get_or_create_add_drop_period(semester) - set_cache = semester == current_semester - - with transaction.atomic(): - # We make this command an atomic transaction, so that the database will not - # be modified unless the entire update for a semester succeeds. - # If set_cache is True, we will set the current_demand_distribution_estimate variable - # in cache - - if verbose: - print(f"Processing semester {semester}, " f"{(semester_num+1)}/{len(semesters)}.\n") - print( - "Deleting existing PcaDemandDistributionEstimate objects for semester " - f"{semester} (so that we can recompute these objects)..." - ) - PcaDemandDistributionEstimate.objects.filter( - semester=semester - ).select_for_update().delete() - - section_id_to_object = dict() # maps section id to section object (for this semester) - volume_changes_map = dict() # maps section id to list of volume changes - status_updates_map = dict() # maps section id to list of status updates - - iterator_wrapper = tqdm if verbose else (lambda x: x) - if verbose: - print("Indexing relevant sections...") - for section in iterator_wrapper( - Section.objects.filter(extra_metrics_section_filters, course__semester=semester) - .annotate( - efficient_semester=F("course__semester"), - ) - .distinct() - ): - section_id_to_object[section.id] = section - volume_changes_map[section.id] = [] - status_updates_map[section.id] = [] - - if verbose: - print("Computing registration volume changes over time for each section...") - for registration in iterator_wrapper( - Registration.objects.filter(section_id__in=section_id_to_object.keys()) - .annotate(section_capacity=F("section__capacity")) - .select_for_update() - ): - section_id = registration.section_id - volume_changes_map[section_id].append( - {"date": registration.created_at, "volume_change": 1} - ) - deactivated_at = registration.deactivated_at - if deactivated_at is not None: - volume_changes_map[section_id].append( - {"date": deactivated_at, "volume_change": -1} - ) - - if verbose: - print("Collecting status updates over time for each section...") - for status_update in iterator_wrapper( - StatusUpdate.objects.filter( - section_id__in=section_id_to_object.keys(), in_add_drop_period=True - ).select_for_update() - ): - section_id = status_update.section_id - status_updates_map[section_id].append( - { - "date": status_update.created_at, - "old_status": status_update.old_status, - "new_status": status_update.new_status, - } - ) - - if verbose: - print("Joining updates for each section and sorting...") - all_changes = sorted( - [ - {"type": "status_update", "section_id": section_id, **update} - for section_id, status_updates_list in status_updates_map.items() - for update in status_updates_list - ] - + [ - {"type": "volume_change", "section_id": section_id, **change} - for 
section_id, changes_list in volume_changes_map.items() - for change in changes_list - ], - key=lambda x: (x["date"], int(x["type"] != "status_update")), - # put status updates first on matching dates - ) - - # Initialize variables to be maintained in our main all_changes loop - latest_popularity_dist_estimate = None - registration_volumes = {section_id: 0 for section_id in section_id_to_object.keys()} - demands = {section_id: 0 for section_id in section_id_to_object.keys()} - - # Initialize section statuses - section_status = {section_id: None for section_id in section_id_to_object.keys()} - for change in all_changes: - section_id = change["section_id"] - if change["type"] == "status_update": - if section_status[section_id] is None: - section_status[section_id] = change["old_status"] - - percent_through = ( - add_drop_period.get_percent_through_add_drop(timezone.now()) - if semester == current_semester - else 1 - ) - if percent_through == 0: - if verbose: - print( - f"Skipping semester {semester} because the add/drop period " - f"hasn't started yet." - ) - continue - distribution_estimate_threshold = sum( - len(changes_list) for changes_list in volume_changes_map.values() - ) // (ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES * percent_through) - num_changes_without_estimate = 0 - - if verbose: - print(f"Creating PcaDemandDistributionEstimate objects for semester {semester}...") - for change in iterator_wrapper(all_changes): - section_id = change["section_id"] - - if section_status[section_id] is None: - section_status[section_id] = ( - "O" if section_id_to_object[section_id].percent_open > 0.5 else "C" - ) - if change["type"] == "status_update": - section_status[section_id] = change["new_status"] - continue - - date = change["date"] - volume_change = change["volume_change"] - registration_volumes[section_id] += volume_change - demands[section_id] = ( - registration_volumes[section_id] / section_id_to_object[section_id].capacity - ) - - max_id = max(demands.keys(), key=lambda x: demands[x]) - min_id = min(demands.keys(), key=lambda x: demands[x]) - if ( - latest_popularity_dist_estimate is None - or section_id == latest_popularity_dist_estimate.highest_demand_section_id - or section_id == latest_popularity_dist_estimate.lowest_demand_section_id - or latest_popularity_dist_estimate.highest_demand_section_id != max_id - or latest_popularity_dist_estimate.lowest_demand_section_id != min_id - or num_changes_without_estimate >= distribution_estimate_threshold - ): - num_changes_without_estimate = 0 - closed_sections_demand_values = np.asarray( - [val for sec_id, val in demands.items() if section_status[sec_id] == "C"] - ) - csrdv_frac_zero, fit_shape, fit_loc, fit_scale = (None, None, None, None) - if len(closed_sections_demand_values) > 0: - closed_sections_positive_demand_values = closed_sections_demand_values[ - np.where(closed_sections_demand_values > 0) - ] - csrdv_frac_zero = 1 - len(closed_sections_positive_demand_values) / len( - closed_sections_demand_values - ) - if len(closed_sections_positive_demand_values) > 0: - fit_shape, fit_loc, fit_scale = stats.lognorm.fit( - closed_sections_positive_demand_values - ) - - latest_popularity_dist_estimate = PcaDemandDistributionEstimate( - created_at=date, - semester=semester, - highest_demand_section=section_id_to_object[max_id], - highest_demand_section_volume=registration_volumes[max_id], - lowest_demand_section=section_id_to_object[min_id], - lowest_demand_section_volume=registration_volumes[min_id], - csrdv_frac_zero=csrdv_frac_zero, - 
csprdv_lognorm_param_shape=fit_shape, - csprdv_lognorm_param_loc=fit_loc, - csprdv_lognorm_param_scale=fit_scale, - ) - latest_popularity_dist_estimate.save(add_drop_period=add_drop_period) - latest_popularity_dist_estimate.created_at = date - latest_popularity_dist_estimate.save(add_drop_period=add_drop_period) - else: - num_changes_without_estimate += 1 - - if set_cache: - if latest_popularity_dist_estimate is not None: - cache.set( - "current_demand_distribution_estimate", - latest_popularity_dist_estimate, - timeout=None, - ) - else: - cache.set("current_demand_distribution_estimate", None, timeout=None) - - if verbose: - print( - "Finished recomputing demand distribution estimate and section registration_volume " - f"fields for semesters {str(semesters)}." - ) - - -def garbage_collect_topics(): - """ - Deletes topics with no courses. - """ - Topic.objects.filter( - ~Q(id__in=Subquery(Topic.objects.filter(courses__isnull=False).values("id"))), - ).delete() - - -def recompute_stats(semesters=None, semesters_precomputed=False, verbose=False): - """ - Recomputes PCA demand distribution estimates, as well as the registration_volume - and percent_open fields for all sections in the given semester(s). Deduplicates - status updates saved to the database. - """ - if not semesters_precomputed: - semesters = get_semesters(semesters=semesters, verbose=verbose) - semesters = fill_in_add_drop_periods(verbose=verbose).intersection(semesters) - garbage_collect_topics() - load_add_drop_dates(verbose=verbose) - deduplicate_status_updates(semesters=semesters, semesters_precomputed=True, verbose=verbose) - recompute_demand_distribution_estimates( - semesters=semesters, semesters_precomputed=True, verbose=verbose - ) - - -class Command(BaseCommand): - help = ( - "Recomputes PCA demand distribution estimates, as well as the registration_volume " - "and percent_open fields for all sections in the given semester(s)." - ) - - def add_arguments(self, parser): - parser.add_argument( - "--semesters", - type=str, - help=dedent( - """ - The semesters argument should be a comma-separated list of semesters - corresponding to the semesters for which you want to recompute stats, - i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. If this argument - is omitted, stats are only recomputed for the current semester. - If you pass "all" to this argument, this script will recompute stats for - all semesters found in Courses in the db. 
- """ - ), - nargs="?", - default=None, - ) - - def handle(self, *args, **kwargs): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - - recompute_stats(semesters=kwargs["semesters"], verbose=True) +import logging +from textwrap import dedent + +import numpy as np +import scipy.stats as stats +from django.core.cache import cache +from django.core.exceptions import ValidationError +from django.core.management.base import BaseCommand +from django.db import connection, transaction +from django.db.models import Count, F, OuterRef, Q, Subquery, Value +from django.db.models.functions import Coalesce +from django.utils import timezone +from tqdm import tqdm + +from alert.models import ( + PcaDemandDistributionEstimate, + Registration, + Section, + validate_add_drop_semester, +) +from courses.management.commands.load_add_drop_dates import ( + fill_in_add_drop_periods, + load_add_drop_dates, +) +from courses.models import Course, Meeting, StatusUpdate, Topic +from courses.util import ( + get_current_semester, + get_or_create_add_drop_period, + get_semesters, + subquery_count_distinct, +) +from PennCourses.settings.base import ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES +from review.views import extra_metrics_section_filters + + +def recompute_num_activities(): + Course.objects.all().annotate( + activity_count=subquery_count_distinct( + Section.objects.filter(course_id=OuterRef("id")), column="activity" + ) + ).update(num_activities=F("activity_count")) + + +def recompute_meeting_count(): + Section.objects.all().annotate( + meeting_count=subquery_count_distinct( + Meeting.objects.filter(section_id=OuterRef("id")), column="id" + ) + ).update(num_meetings=F("meeting_count")) + + +def recompute_has_reviews(): + with connection.cursor() as cursor: + cursor.execute( + """ + UPDATE "courses_section" AS U0 + SET "has_reviews" = CASE WHEN + EXISTS (SELECT id FROM "review_review" AS U1 + WHERE U0."id" = U1."section_id") + THEN true ELSE false + END + """ + ) + + +def recompute_has_status_updates(): + with connection.cursor() as cursor: + cursor.execute( + """ + UPDATE "courses_section" AS U0 + SET "has_status_updates" = CASE WHEN + EXISTS (SELECT id FROM "courses_statusupdate" AS U1 + WHERE U0."id" = U1."section_id") + THEN true ELSE false + END + """ + ) + + +def recompute_precomputed_fields(verbose=False): + """ + Recomputes the following precomputed fields: + - Course.num_activities + - Section.num_meetings + - Section.has_reviews + - Section.has_status_updates + + :param verbose: Set to True if you want this script to print its status as it goes, + or keep as False (default) if you want the script to work silently. + """ + if verbose: + print("Recomputing precomputed fields...") + + if verbose: + print("\tRecomputing Course.num_activities") + recompute_num_activities() + if verbose: + print("\tRecomputing Section.num_meetings") + recompute_meeting_count() + if verbose: + print("\tRecomputing Section.has_reviews") + recompute_has_reviews() + if verbose: + print("\tRecomputing Section.has_status_updates") + recompute_has_status_updates() + + if verbose: + print("Done recomputing precomputed fields.") + + +def deduplicate_status_updates(semesters=None, verbose=False, semesters_precomputed=False): + """ + Removes duplicate/redundant status updates from the specified semesters. + + :param semesters: The semesters argument should be a comma-separated list of string semesters + corresponding to the semesters for which you want to remove duplicate/redundant + status updates, i.e. 
"2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. + It defaults to None, in which case only the current semester is used. If you supply + the string "all", it will deduplicate for all semesters found in Courses in the db. + If semesters_precomputed is set to True (non-default), then this argument should + instead be a list of single string semesters. + :param semesters_precomputed: If False (default), the semesters argument will expect a raw + comma-separated string input. If True, the semesters argument will expect a list of + individual string semesters. + :param verbose: Set to True if you want this script to print its status as it goes, + or keep as False (default) if you want the script to work silently. + """ + + semesters = ( + semesters if semesters_precomputed else get_semesters(semesters=semesters, verbose=verbose) + ) + + if verbose: + print(f"Deduplicating status updates for semesters {str(semesters)}...") + + for semester_num, semester in enumerate(semesters): + with transaction.atomic(): + # We make this command an atomic transaction, so that the database will not + # be modified unless the entire update for a semester succeeds. + + if verbose: + print(f"\nProcessing semester {semester}, " f"{(semester_num+1)}/{len(semesters)}.") + + num_removed = 0 + for section_id in tqdm( + Section.objects.filter(course__semester=semester).values_list("id", flat=True), + disable=(not verbose), + ): + last_update = None + ids_to_remove = [] # IDs of redundant status updates to remove + + for update in StatusUpdate.objects.filter(section_id=section_id).order_by( + "created_at" + ): + if ( + last_update + and last_update.old_status == update.old_status + and last_update.new_status == update.new_status + ): + ids_to_remove.append(update.id) + continue + last_update = update + + num_removed += len(ids_to_remove) + StatusUpdate.objects.filter(id__in=ids_to_remove).delete() + print( + f"Removed {num_removed} duplicate status update objects from semester {semester}." + ) + + if verbose: + print(f"Finished deduplicating status updates for semesters {str(semesters)}.") + + +def recompute_percent_open(semesters=None, verbose=False, semesters_precomputed=False): + """ + Recomputes the percent_open field for each section in the given semester(s). + + :param semesters: The semesters argument should be a comma-separated list of string semesters + corresponding to the semesters for which you want to recompute percent_open fields, + i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. It defaults to None, + in which case only the current semester is used. If you supply the string "all", + it will recompute for all semesters found in Courses in the db. + If semesters_precomputed is set to True (non-default), then this argument should + instead be a list of single string semesters. + :param semesters_precomputed: If False (default), the semesters argument will expect a raw + comma-separated string input. If True, the semesters argument will expect a list of + individual string semesters. + :param verbose: Set to True if you want this script to print its status as it goes, + or keep as False (default) if you want the script to work silently. 
+ """ + + current_semester = get_current_semester() + semesters = ( + semesters if semesters_precomputed else get_semesters(semesters=semesters, verbose=verbose) + ) + + if verbose: + print(f"Recomputing open percentages for semesters {str(semesters)}...") + + for semester_num, semester in enumerate(semesters): + with transaction.atomic(): + # We make this command an atomic transaction, so that the database will not + # be modified unless the entire update for a semester succeeds. + + if verbose: + print(f"\nProcessing semester {semester}, " f"{(semester_num+1)}/{len(semesters)}.") + + add_drop = get_or_create_add_drop_period(semester) + add_drop_start = add_drop.estimated_start + add_drop_end = add_drop.estimated_end + + StatusUpdate.objects.filter(section__course__semester=semester).select_for_update() + + sections = Section.objects.filter(course__semester=semester) + num_erroneous_updates = 0 + num_total_updates = 0 + for section in sections: + status_updates = StatusUpdate.objects.filter( + section=section, created_at__gt=add_drop_start, created_at__lt=add_drop_end + ).order_by("created_at") + num_total_updates += len(status_updates) + total_open_seconds = 0 + if not status_updates.exists(): + try: + guess_status = ( + StatusUpdate.objects.filter( + section=section, created_at__lte=add_drop_start + ) + .latest("created_at") + .new_status + ) + except StatusUpdate.DoesNotExist: + guess_status = section.status + section.percent_open = float(guess_status == "O") + else: + last_dt = add_drop_start + last_status = status_updates.first().old_status + for update in status_updates: + if last_status != update.old_status: + num_erroneous_updates += 1 + if last_status == "O" and update.new_status != "O": + total_open_seconds += (update.created_at - last_dt).total_seconds() + last_dt = update.created_at + last_status = update.new_status + section.percent_open = float(total_open_seconds) / float( + (status_updates.last().created_at - add_drop_start).total_seconds() + ) + if section.semester != current_semester: + section.percent_open = float( + total_open_seconds + + int(last_status == "O") * (add_drop_end - last_dt).total_seconds() + ) / float((add_drop_end - add_drop_start).total_seconds()) + section.save() + if verbose: + print( + f"Finished calculating percent_open for {len(sections)} sections from " + f"semester {semester}, encountered {num_erroneous_updates} erroneous " + f"Status Updates (out of {num_total_updates} total Status Updates)" + ) + if verbose: + print(f"Finished recomputing open percentages for semesters {str(semesters)}.") + + +def recompute_registration_volumes(semesters=None, semesters_precomputed=False, verbose=False): + """ + Recomputes the registration_volume fields for all sections in the given semester(s). + + :param semesters: The semesters argument should be a comma-separated list of string semesters + corresponding to the semesters for which you want to recompute demand distribution + estimate, i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. It + defaults to None, in which case only the current semester is used. If you supply the + string "all", it will recompute for all semesters found in Courses in the db. + If semesters_precomputed is set to True (non-default), then this argument should + instead be a list of single string semesters. + :param semesters_precomputed: If False (default), the semesters argument will expect a raw + comma-separated string input. If True, the semesters argument will expect a list of + individual string semesters. 
+ :param verbose: Set to True if you want this script to print its status as it goes, + or keep as False (default) if you want the script to work silently. + """ + + semesters = ( + semesters if semesters_precomputed else get_semesters(semesters=semesters, verbose=verbose) + ) + + if verbose: + print(f"Computing most recent registration volumes for semesters {semesters} ...") + with transaction.atomic(): + Section.objects.filter(course__semester__in=semesters).select_for_update().update( + registration_volume=Coalesce( + Subquery( + Registration.objects.filter( + section__id=OuterRef("id"), **Registration.is_active_filter() + ) + .annotate(common=Value(1)) + .values("common") + .annotate(count=Count("*")) + .values("count")[:1], + ), + Value(0), + ) + ) + + +def recompute_demand_distribution_estimates( + semesters=None, semesters_precomputed=False, verbose=False +): + """ + This script recomputes all PcaDemandDistributionEstimate objects for the given semester(s) + based on saved Registration objects. In doing so, it also recomputes the registration_volume + and percent_open fields for all sections in the given semester(s) + (by calling recompute_registration_volumes and recompute_percent_open). + + :param semesters: The semesters argument should be a comma-separated list of string semesters + corresponding to the semesters for which you want to recompute demand distribution + estimate, i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. It + defaults to None, in which case only the current semester is used. If you supply the + string "all", it will recompute for all semesters found in Courses in the db. + If semesters_precomputed is set to True (non-default), then this argument should + instead be a list of single string semesters. + :param semesters_precomputed: If False (default), the semesters argument will expect a raw + comma-separated string input. If True, the semesters argument will expect a list of + individual string semesters. + :param verbose: Set to True if you want this script to print its status as it goes, + or keep as False (default) if you want the script to work silently. + """ + + current_semester = get_current_semester() + semesters = ( + semesters if semesters_precomputed else get_semesters(semesters=semesters, verbose=verbose) + ) + + recompute_precomputed_fields(verbose=verbose) + recompute_registration_volumes(semesters=semesters, semesters_precomputed=True, verbose=verbose) + recompute_percent_open(semesters=semesters, semesters_precomputed=True, verbose=verbose) + + if verbose: + print(f"Recomputing demand distribution estimates for semesters {str(semesters)}...") + for semester_num, semester in enumerate(semesters): + try: + validate_add_drop_semester(semester) + except ValidationError: + if verbose: + print(f"Skipping semester {semester} (unsupported kind for stats).") + continue + add_drop_period = get_or_create_add_drop_period(semester) + set_cache = semester == current_semester + + with transaction.atomic(): + # We make this command an atomic transaction, so that the database will not + # be modified unless the entire update for a semester succeeds. + # If set_cache is True, we will set the current_demand_distribution_estimate variable + # in cache + + if verbose: + print(f"Processing semester {semester}, " f"{(semester_num+1)}/{len(semesters)}.\n") + print( + "Deleting existing PcaDemandDistributionEstimate objects for semester " + f"{semester} (so that we can recompute these objects)..." 
+ ) + PcaDemandDistributionEstimate.objects.filter( + semester=semester + ).select_for_update().delete() + + section_id_to_object = dict() # maps section id to section object (for this semester) + volume_changes_map = dict() # maps section id to list of volume changes + status_updates_map = dict() # maps section id to list of status updates + + iterator_wrapper = tqdm if verbose else (lambda x: x) + if verbose: + print("Indexing relevant sections...") + for section in iterator_wrapper( + Section.objects.filter(extra_metrics_section_filters, course__semester=semester) + .annotate( + efficient_semester=F("course__semester"), + ) + .distinct() + ): + section_id_to_object[section.id] = section + volume_changes_map[section.id] = [] + status_updates_map[section.id] = [] + + if verbose: + print("Computing registration volume changes over time for each section...") + for registration in iterator_wrapper( + Registration.objects.filter(section_id__in=section_id_to_object.keys()) + .annotate(section_capacity=F("section__capacity")) + .select_for_update() + ): + section_id = registration.section_id + volume_changes_map[section_id].append( + {"date": registration.created_at, "volume_change": 1} + ) + deactivated_at = registration.deactivated_at + if deactivated_at is not None: + volume_changes_map[section_id].append( + {"date": deactivated_at, "volume_change": -1} + ) + + if verbose: + print("Collecting status updates over time for each section...") + for status_update in iterator_wrapper( + StatusUpdate.objects.filter( + section_id__in=section_id_to_object.keys(), in_add_drop_period=True + ).select_for_update() + ): + section_id = status_update.section_id + status_updates_map[section_id].append( + { + "date": status_update.created_at, + "old_status": status_update.old_status, + "new_status": status_update.new_status, + } + ) + + if verbose: + print("Joining updates for each section and sorting...") + all_changes = sorted( + [ + {"type": "status_update", "section_id": section_id, **update} + for section_id, status_updates_list in status_updates_map.items() + for update in status_updates_list + ] + + [ + {"type": "volume_change", "section_id": section_id, **change} + for section_id, changes_list in volume_changes_map.items() + for change in changes_list + ], + key=lambda x: (x["date"], int(x["type"] != "status_update")), + # put status updates first on matching dates + ) + + # Initialize variables to be maintained in our main all_changes loop + latest_popularity_dist_estimate = None + registration_volumes = {section_id: 0 for section_id in section_id_to_object.keys()} + demands = {section_id: 0 for section_id in section_id_to_object.keys()} + + # Initialize section statuses + section_status = {section_id: None for section_id in section_id_to_object.keys()} + for change in all_changes: + section_id = change["section_id"] + if change["type"] == "status_update": + if section_status[section_id] is None: + section_status[section_id] = change["old_status"] + + percent_through = ( + add_drop_period.get_percent_through_add_drop(timezone.now()) + if semester == current_semester + else 1 + ) + if percent_through == 0: + if verbose: + print( + f"Skipping semester {semester} because the add/drop period " + f"hasn't started yet." 
+ ) + continue + distribution_estimate_threshold = sum( + len(changes_list) for changes_list in volume_changes_map.values() + ) // (ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES * percent_through) + num_changes_without_estimate = 0 + + if verbose: + print(f"Creating PcaDemandDistributionEstimate objects for semester {semester}...") + for change in iterator_wrapper(all_changes): + section_id = change["section_id"] + + if section_status[section_id] is None: + section_status[section_id] = ( + "O" if section_id_to_object[section_id].percent_open > 0.5 else "C" + ) + if change["type"] == "status_update": + section_status[section_id] = change["new_status"] + continue + + date = change["date"] + volume_change = change["volume_change"] + registration_volumes[section_id] += volume_change + demands[section_id] = ( + registration_volumes[section_id] / section_id_to_object[section_id].capacity + ) + + max_id = max(demands.keys(), key=lambda x: demands[x]) + min_id = min(demands.keys(), key=lambda x: demands[x]) + if ( + latest_popularity_dist_estimate is None + or section_id == latest_popularity_dist_estimate.highest_demand_section_id + or section_id == latest_popularity_dist_estimate.lowest_demand_section_id + or latest_popularity_dist_estimate.highest_demand_section_id != max_id + or latest_popularity_dist_estimate.lowest_demand_section_id != min_id + or num_changes_without_estimate >= distribution_estimate_threshold + ): + num_changes_without_estimate = 0 + closed_sections_demand_values = np.asarray( + [val for sec_id, val in demands.items() if section_status[sec_id] == "C"] + ) + csrdv_frac_zero, fit_shape, fit_loc, fit_scale = (None, None, None, None) + if len(closed_sections_demand_values) > 0: + closed_sections_positive_demand_values = closed_sections_demand_values[ + np.where(closed_sections_demand_values > 0) + ] + csrdv_frac_zero = 1 - len(closed_sections_positive_demand_values) / len( + closed_sections_demand_values + ) + if len(closed_sections_positive_demand_values) > 0: + fit_shape, fit_loc, fit_scale = stats.lognorm.fit( + closed_sections_positive_demand_values + ) + + latest_popularity_dist_estimate = PcaDemandDistributionEstimate( + created_at=date, + semester=semester, + highest_demand_section=section_id_to_object[max_id], + highest_demand_section_volume=registration_volumes[max_id], + lowest_demand_section=section_id_to_object[min_id], + lowest_demand_section_volume=registration_volumes[min_id], + csrdv_frac_zero=csrdv_frac_zero, + csprdv_lognorm_param_shape=fit_shape, + csprdv_lognorm_param_loc=fit_loc, + csprdv_lognorm_param_scale=fit_scale, + ) + latest_popularity_dist_estimate.save(add_drop_period=add_drop_period) + latest_popularity_dist_estimate.created_at = date + latest_popularity_dist_estimate.save(add_drop_period=add_drop_period) + else: + num_changes_without_estimate += 1 + + if set_cache: + if latest_popularity_dist_estimate is not None: + cache.set( + "current_demand_distribution_estimate", + latest_popularity_dist_estimate, + timeout=None, + ) + else: + cache.set("current_demand_distribution_estimate", None, timeout=None) + + if verbose: + print( + "Finished recomputing demand distribution estimate and section registration_volume " + f"fields for semesters {str(semesters)}." + ) + + +def garbage_collect_topics(): + """ + Deletes topics with no courses. 
+ """ + Topic.objects.filter( + ~Q(id__in=Subquery(Topic.objects.filter(courses__isnull=False).values("id"))), + ).delete() + + +def recompute_stats(semesters=None, semesters_precomputed=False, verbose=False): + """ + Recomputes PCA demand distribution estimates, as well as the registration_volume + and percent_open fields for all sections in the given semester(s). Deduplicates + status updates saved to the database. + """ + if not semesters_precomputed: + semesters = get_semesters(semesters=semesters, verbose=verbose) + semesters = fill_in_add_drop_periods(verbose=verbose).intersection(semesters) + garbage_collect_topics() + load_add_drop_dates(verbose=verbose) + deduplicate_status_updates(semesters=semesters, semesters_precomputed=True, verbose=verbose) + recompute_demand_distribution_estimates( + semesters=semesters, semesters_precomputed=True, verbose=verbose + ) + + +class Command(BaseCommand): + help = ( + "Recomputes PCA demand distribution estimates, as well as the registration_volume " + "and percent_open fields for all sections in the given semester(s)." + ) + + def add_arguments(self, parser): + parser.add_argument( + "--semesters", + type=str, + help=dedent( + """ + The semesters argument should be a comma-separated list of semesters + corresponding to the semesters for which you want to recompute stats, + i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. If this argument + is omitted, stats are only recomputed for the current semester. + If you pass "all" to this argument, this script will recompute stats for + all semesters found in Courses in the db. + """ + ), + nargs="?", + default=None, + ) + + def handle(self, *args, **kwargs): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + + recompute_stats(semesters=kwargs["semesters"], verbose=True) diff --git a/backend/alert/management/commands/webhookbackup.py b/backend/alert/management/commands/webhookbackup.py index 2e9c473d8..b8d5fc15c 100644 --- a/backend/alert/management/commands/webhookbackup.py +++ b/backend/alert/management/commands/webhookbackup.py @@ -1,78 +1,78 @@ -import logging - -from django.core.management.base import BaseCommand -from tqdm import tqdm - -from alert.models import Course, Section -from alert.util import should_send_pca_alert -from alert.views import alert_for_course -from courses import registrar -from courses.util import get_course_and_section, get_current_semester - - -class Command(BaseCommand): - help = "Load course status for courses in the DB" - - def add_arguments(self, parser): - parser.add_argument("--semester", nargs="?", type=str) - - def handle(self, *args, **kwargs): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - - semester = get_current_semester() - statuses = registrar.get_all_course_status(semester) - stats = { - "missing_data": 0, - "section_not_found": 0, - "duplicate_updates": 0, - "sent": 0, - "parse_error": 0, - "error": 0, - "skipped": 0, - } - for status in tqdm(statuses): - data = status - section_code = data.get("section_id_normalized") - if section_code is None: - stats["missing_data"] += 1 - continue - - course_status = data.get("status") - if course_status is None: - stats["missing_data"] += 1 - continue - - course_term = data.get("term") - if course_term is None: - stats["missing_data"] += 1 - continue - - # Ignore sections not in db - try: - _, section = get_course_and_section(section_code, semester) - except (Section.DoesNotExist, Course.DoesNotExist): - stats["section_not_found"] += 1 - 
continue - - # Ignore duplicate updates - last_status_update = section.last_status_update - if last_status_update and last_status_update.new_status == course_status: - stats["duplicate_updates"] += 1 - continue - - if should_send_pca_alert(course_term, course_status): - try: - alert_for_course( - section_code, - semester=course_term, - sent_by="WEB", - course_status=course_status, - ) - stats["sent"] += 1 - except ValueError: - stats["parse_error"] += 1 - else: - stats["skipped"] += 1 - - print(stats) +import logging + +from django.core.management.base import BaseCommand +from tqdm import tqdm + +from alert.models import Course, Section +from alert.util import should_send_pca_alert +from alert.views import alert_for_course +from courses import registrar +from courses.util import get_course_and_section, get_current_semester + + +class Command(BaseCommand): + help = "Load course status for courses in the DB" + + def add_arguments(self, parser): + parser.add_argument("--semester", nargs="?", type=str) + + def handle(self, *args, **kwargs): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + + semester = get_current_semester() + statuses = registrar.get_all_course_status(semester) + stats = { + "missing_data": 0, + "section_not_found": 0, + "duplicate_updates": 0, + "sent": 0, + "parse_error": 0, + "error": 0, + "skipped": 0, + } + for status in tqdm(statuses): + data = status + section_code = data.get("section_id_normalized") + if section_code is None: + stats["missing_data"] += 1 + continue + + course_status = data.get("status") + if course_status is None: + stats["missing_data"] += 1 + continue + + course_term = data.get("term") + if course_term is None: + stats["missing_data"] += 1 + continue + + # Ignore sections not in db + try: + _, section = get_course_and_section(section_code, semester) + except (Section.DoesNotExist, Course.DoesNotExist): + stats["section_not_found"] += 1 + continue + + # Ignore duplicate updates + last_status_update = section.last_status_update + if last_status_update and last_status_update.new_status == course_status: + stats["duplicate_updates"] += 1 + continue + + if should_send_pca_alert(course_term, course_status): + try: + alert_for_course( + section_code, + semester=course_term, + sent_by="WEB", + course_status=course_status, + ) + stats["sent"] += 1 + except ValueError: + stats["parse_error"] += 1 + else: + stats["skipped"] += 1 + + print(stats) diff --git a/backend/alert/migrations/0001_initial.py b/backend/alert/migrations/0001_initial.py index cc9c21f65..541e8bf9e 100644 --- a/backend/alert/migrations/0001_initial.py +++ b/backend/alert/migrations/0001_initial.py @@ -1,64 +1,64 @@ -# Generated by Django 2.2 on 2019-04-14 18:01 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - initial = True - - dependencies = [ - ("courses", "0001_initial"), - ] - - operations = [ - migrations.CreateModel( - name="Registration", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("created_at", models.DateTimeField(auto_now_add=True)), - ("updated_at", models.DateTimeField(auto_now=True)), - ("email", models.EmailField(blank=True, max_length=254, null=True)), - ("phone", models.CharField(blank=True, max_length=100, null=True)), - ("notification_sent", models.BooleanField(default=False)), - ("notification_sent_at", models.DateTimeField(blank=True, null=True)), - ( - "notification_sent_by", - 
models.CharField( - blank=True, - choices=[ - ("", "Unsent"), - ("LEG", "[Legacy] Sequence of course API requests"), - ("WEB", "Webhook"), - ("SERV", "Course Status Service"), - ("ADM", "Admin Interface"), - ], - default="", - max_length=16, - ), - ), - ( - "resubscribed_from", - models.OneToOneField( - blank=True, - null=True, - on_delete=django.db.models.deletion.SET_NULL, - related_name="resubscribed_to", - to="alert.Registration", - ), - ), - ( - "section", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="courses.Section" - ), - ), - ], - ), - ] +# Generated by Django 2.2 on 2019-04-14 18:01 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ("courses", "0001_initial"), + ] + + operations = [ + migrations.CreateModel( + name="Registration", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("email", models.EmailField(blank=True, max_length=254, null=True)), + ("phone", models.CharField(blank=True, max_length=100, null=True)), + ("notification_sent", models.BooleanField(default=False)), + ("notification_sent_at", models.DateTimeField(blank=True, null=True)), + ( + "notification_sent_by", + models.CharField( + blank=True, + choices=[ + ("", "Unsent"), + ("LEG", "[Legacy] Sequence of course API requests"), + ("WEB", "Webhook"), + ("SERV", "Course Status Service"), + ("ADM", "Admin Interface"), + ], + default="", + max_length=16, + ), + ), + ( + "resubscribed_from", + models.OneToOneField( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="resubscribed_to", + to="alert.Registration", + ), + ), + ( + "section", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="courses.Section" + ), + ), + ], + ), + ] diff --git a/backend/alert/migrations/0002_delete_registration.py b/backend/alert/migrations/0002_delete_registration.py index add525226..5ce819dd6 100644 --- a/backend/alert/migrations/0002_delete_registration.py +++ b/backend/alert/migrations/0002_delete_registration.py @@ -1,16 +1,16 @@ -# Generated by Django 2.2 on 2019-05-08 12:43 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0001_initial"), - ] - - operations = [ - migrations.DeleteModel( - name="Registration", - ), - ] +# Generated by Django 2.2 on 2019-05-08 12:43 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0001_initial"), + ] + + operations = [ + migrations.DeleteModel( + name="Registration", + ), + ] diff --git a/backend/alert/migrations/0003_courseupdate_registration.py b/backend/alert/migrations/0003_courseupdate_registration.py index cc8823af7..8d60b0008 100644 --- a/backend/alert/migrations/0003_courseupdate_registration.py +++ b/backend/alert/migrations/0003_courseupdate_registration.py @@ -1,109 +1,109 @@ -# Generated by Django 2.2.1 on 2019-09-20 16:25 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - initial = True - - dependencies = [ - ("courses", "0018_merge_20190526_1901"), - ("alert", "0002_delete_registration"), - ] - - operations = [ - migrations.CreateModel( - name="Registration", - fields=[ - ( - "id", - models.AutoField( - 
auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("created_at", models.DateTimeField(auto_now_add=True)), - ("updated_at", models.DateTimeField(auto_now=True)), - ("email", models.EmailField(blank=True, max_length=254, null=True)), - ("phone", models.CharField(blank=True, max_length=100, null=True)), - ("notification_sent", models.BooleanField(default=False)), - ("notification_sent_at", models.DateTimeField(blank=True, null=True)), - ( - "notification_sent_by", - models.CharField( - blank=True, - choices=[ - ("", "Unsent"), - ("LEG", "[Legacy] Sequence of course API requests"), - ("WEB", "Webhook"), - ("SERV", "Course Status Service"), - ("ADM", "Admin Interface"), - ], - default="", - max_length=16, - ), - ), - ( - "resubscribed_from", - models.OneToOneField( - blank=True, - null=True, - on_delete=django.db.models.deletion.SET_NULL, - related_name="resubscribed_to", - to="alert.Registration", - ), - ), - ( - "section", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="courses.Section" - ), - ), - ], - ), - migrations.CreateModel( - name="CourseUpdate", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ( - "old_status", - models.CharField( - choices=[ - ("O", "Open"), - ("C", "Closed"), - ("X", "Cancelled"), - ("", "Unlisted"), - ], - max_length=16, - ), - ), - ( - "new_status", - models.CharField( - choices=[ - ("O", "Open"), - ("C", "Closed"), - ("X", "Cancelled"), - ("", "Unlisted"), - ], - max_length=16, - ), - ), - ("created_at", models.DateTimeField(auto_now_add=True)), - ("alert_sent", models.BooleanField()), - ("request_body", models.TextField()), - ( - "section", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="courses.Section" - ), - ), - ], - ), - ] +# Generated by Django 2.2.1 on 2019-09-20 16:25 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ("courses", "0018_merge_20190526_1901"), + ("alert", "0002_delete_registration"), + ] + + operations = [ + migrations.CreateModel( + name="Registration", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("email", models.EmailField(blank=True, max_length=254, null=True)), + ("phone", models.CharField(blank=True, max_length=100, null=True)), + ("notification_sent", models.BooleanField(default=False)), + ("notification_sent_at", models.DateTimeField(blank=True, null=True)), + ( + "notification_sent_by", + models.CharField( + blank=True, + choices=[ + ("", "Unsent"), + ("LEG", "[Legacy] Sequence of course API requests"), + ("WEB", "Webhook"), + ("SERV", "Course Status Service"), + ("ADM", "Admin Interface"), + ], + default="", + max_length=16, + ), + ), + ( + "resubscribed_from", + models.OneToOneField( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="resubscribed_to", + to="alert.Registration", + ), + ), + ( + "section", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="courses.Section" + ), + ), + ], + ), + migrations.CreateModel( + name="CourseUpdate", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ( + "old_status", + models.CharField( 
+ choices=[ + ("O", "Open"), + ("C", "Closed"), + ("X", "Cancelled"), + ("", "Unlisted"), + ], + max_length=16, + ), + ), + ( + "new_status", + models.CharField( + choices=[ + ("O", "Open"), + ("C", "Closed"), + ("X", "Cancelled"), + ("", "Unlisted"), + ], + max_length=16, + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("alert_sent", models.BooleanField()), + ("request_body", models.TextField()), + ( + "section", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="courses.Section" + ), + ), + ], + ), + ] diff --git a/backend/alert/migrations/0004_auto_20190926_0549.py b/backend/alert/migrations/0004_auto_20190926_0549.py index d3ad82d5a..b87e8eaf4 100644 --- a/backend/alert/migrations/0004_auto_20190926_0549.py +++ b/backend/alert/migrations/0004_auto_20190926_0549.py @@ -1,41 +1,41 @@ -# Generated by Django 2.2.5 on 2019-09-26 05:49 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0019_apikey_apiprivilege"), - ("alert", "0003_courseupdate_registration"), - ] - - operations = [ - migrations.AddField( - model_name="registration", - name="api_key", - field=models.ForeignKey( - blank=True, - null=True, - on_delete=django.db.models.deletion.CASCADE, - to="courses.APIKey", - ), - ), - migrations.AddField( - model_name="registration", - name="source", - field=models.CharField( - choices=[ - ("PCA", "Penn Course Alert"), - ("API", "3rd Party Integration"), - ("PCP", "Penn Course Plan"), - ("PCR", "Penn Course Review"), - ("PM", "Penn Mobile"), - ], - default="PCA", - max_length=16, - ), - preserve_default=False, - ), - ] +# Generated by Django 2.2.5 on 2019-09-26 05:49 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0019_apikey_apiprivilege"), + ("alert", "0003_courseupdate_registration"), + ] + + operations = [ + migrations.AddField( + model_name="registration", + name="api_key", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="courses.APIKey", + ), + ), + migrations.AddField( + model_name="registration", + name="source", + field=models.CharField( + choices=[ + ("PCA", "Penn Course Alert"), + ("API", "3rd Party Integration"), + ("PCP", "Penn Course Plan"), + ("PCR", "Penn Course Review"), + ("PM", "Penn Mobile"), + ], + default="PCA", + max_length=16, + ), + preserve_default=False, + ), + ] diff --git a/backend/alert/migrations/0005_delete_courseupdate.py b/backend/alert/migrations/0005_delete_courseupdate.py index b7f5a2cd4..65441b4cb 100644 --- a/backend/alert/migrations/0005_delete_courseupdate.py +++ b/backend/alert/migrations/0005_delete_courseupdate.py @@ -1,47 +1,47 @@ -# Generated by Django 2.2.5 on 2019-09-28 20:36 - -from django.db import migrations - - -def forwards(apps, schema_editor): - CourseUpdate = apps.get_model("alert", "CourseUpdate") - StatusUpdate = apps.get_model("courses", "StatusUpdate") - for old_up in CourseUpdate.objects.all(): - new_up = StatusUpdate.objects.create( - section=old_up.section, - old_status=old_up.old_status, - new_status=old_up.new_status, - created_at=old_up.created_at, - alert_sent=old_up.alert_sent, - request_body=old_up.request_body, - ) - new_up.save() - - -def backwards(apps, schema_editor): - CourseUpdate = apps.get_model("alert", "CourseUpdate") - StatusUpdate = apps.get_model("courses", "StatusUpdate") - for old_up in 
StatusUpdate.objects.all(): - new_up = CourseUpdate.objects.create( - section=old_up.section, - old_status=old_up.old_status, - new_status=old_up.old_status, - created_at=old_up.created_at, - alert_sent=old_up.alert_sent, - request_body=old_up.request_body, - ) - new_up.save() - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0004_auto_20190926_0549"), - ] - - operations = [ - migrations.RunPython(forwards, backwards), - migrations.DeleteModel( - name="CourseUpdate", - ), - ] +# Generated by Django 2.2.5 on 2019-09-28 20:36 + +from django.db import migrations + + +def forwards(apps, schema_editor): + CourseUpdate = apps.get_model("alert", "CourseUpdate") + StatusUpdate = apps.get_model("courses", "StatusUpdate") + for old_up in CourseUpdate.objects.all(): + new_up = StatusUpdate.objects.create( + section=old_up.section, + old_status=old_up.old_status, + new_status=old_up.new_status, + created_at=old_up.created_at, + alert_sent=old_up.alert_sent, + request_body=old_up.request_body, + ) + new_up.save() + + +def backwards(apps, schema_editor): + CourseUpdate = apps.get_model("alert", "CourseUpdate") + StatusUpdate = apps.get_model("courses", "StatusUpdate") + for old_up in StatusUpdate.objects.all(): + new_up = CourseUpdate.objects.create( + section=old_up.section, + old_status=old_up.old_status, + new_status=old_up.old_status, + created_at=old_up.created_at, + alert_sent=old_up.alert_sent, + request_body=old_up.request_body, + ) + new_up.save() + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0004_auto_20190926_0549"), + ] + + operations = [ + migrations.RunPython(forwards, backwards), + migrations.DeleteModel( + name="CourseUpdate", + ), + ] diff --git a/backend/alert/migrations/0006_auto_20191110_1357.py b/backend/alert/migrations/0006_auto_20191110_1357.py index 6c5705968..db7fe3b59 100644 --- a/backend/alert/migrations/0006_auto_20191110_1357.py +++ b/backend/alert/migrations/0006_auto_20191110_1357.py @@ -1,51 +1,51 @@ -# Generated by Django 2.2.5 on 2019-11-10 18:57 - -import django.db.models.deletion -from django.conf import settings -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ("alert", "0005_delete_courseupdate"), - ] - - operations = [ - migrations.AddField( - model_name="registration", - name="auto_mute", - field=models.BooleanField(default=True), - ), - migrations.AddField( - model_name="registration", - name="deleted", - field=models.BooleanField(default=False), - ), - migrations.AddField( - model_name="registration", - name="deleted_at", - field=models.DateTimeField(blank=True, null=True), - ), - migrations.AddField( - model_name="registration", - name="muted", - field=models.BooleanField(default=False), - ), - migrations.AddField( - model_name="registration", - name="muted_at", - field=models.DateTimeField(blank=True, null=True), - ), - migrations.AddField( - model_name="registration", - name="user", - field=models.ForeignKey( - blank=True, - null=True, - on_delete=django.db.models.deletion.CASCADE, - to=settings.AUTH_USER_MODEL, - ), - ), - ] +# Generated by Django 2.2.5 on 2019-11-10 18:57 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("alert", "0005_delete_courseupdate"), + ] + + operations = [ + 
migrations.AddField( + model_name="registration", + name="auto_mute", + field=models.BooleanField(default=True), + ), + migrations.AddField( + model_name="registration", + name="deleted", + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name="registration", + name="deleted_at", + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AddField( + model_name="registration", + name="muted", + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name="registration", + name="muted_at", + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AddField( + model_name="registration", + name="user", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), + ] diff --git a/backend/alert/migrations/0007_auto_20200131_1619.py b/backend/alert/migrations/0007_auto_20200131_1619.py index f2507c1a6..63f77c1b5 100644 --- a/backend/alert/migrations/0007_auto_20200131_1619.py +++ b/backend/alert/migrations/0007_auto_20200131_1619.py @@ -1,26 +1,26 @@ -# Generated by Django 2.2.9 on 2020-01-31 21:19 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0006_auto_20191110_1357"), - ] - - operations = [ - migrations.RenameField( - model_name="registration", - old_name="muted", - new_name="auto_resubscribe", - ), - migrations.RemoveField( - model_name="registration", - name="auto_mute", - ), - migrations.RemoveField( - model_name="registration", - name="muted_at", - ), - ] +# Generated by Django 2.2.9 on 2020-01-31 21:19 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0006_auto_20191110_1357"), + ] + + operations = [ + migrations.RenameField( + model_name="registration", + old_name="muted", + new_name="auto_resubscribe", + ), + migrations.RemoveField( + model_name="registration", + name="auto_mute", + ), + migrations.RemoveField( + model_name="registration", + name="muted_at", + ), + ] diff --git a/backend/alert/migrations/0008_registration_original_created_at.py b/backend/alert/migrations/0008_registration_original_created_at.py index a8e89f084..05fdc775f 100644 --- a/backend/alert/migrations/0008_registration_original_created_at.py +++ b/backend/alert/migrations/0008_registration_original_created_at.py @@ -1,18 +1,18 @@ -# Generated by Django 2.2.9 on 2020-02-16 07:40 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0007_auto_20200131_1619"), - ] - - operations = [ - migrations.AddField( - model_name="registration", - name="original_created_at", - field=models.DateTimeField(null=True), - ), - ] +# Generated by Django 2.2.9 on 2020-02-16 07:40 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0007_auto_20200131_1619"), + ] + + operations = [ + migrations.AddField( + model_name="registration", + name="original_created_at", + field=models.DateTimeField(null=True), + ), + ] diff --git a/backend/alert/migrations/0009_auto_20200419_2112.py b/backend/alert/migrations/0009_auto_20200419_2112.py index 3ec70750b..b73ec1290 100644 --- a/backend/alert/migrations/0009_auto_20200419_2112.py +++ b/backend/alert/migrations/0009_auto_20200419_2112.py @@ -1,23 +1,23 @@ -# Generated by Django 2.2.11 on 2020-04-20 01:12 - -from django.db import migrations, models - - -class 
Migration(migrations.Migration): - - dependencies = [ - ("alert", "0008_registration_original_created_at"), - ] - - operations = [ - migrations.AddField( - model_name="registration", - name="cancelled", - field=models.BooleanField(default=False), - ), - migrations.AddField( - model_name="registration", - name="cancelled_at", - field=models.DateTimeField(blank=True, null=True), - ), - ] +# Generated by Django 2.2.11 on 2020-04-20 01:12 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0008_registration_original_created_at"), + ] + + operations = [ + migrations.AddField( + model_name="registration", + name="cancelled", + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name="registration", + name="cancelled_at", + field=models.DateTimeField(blank=True, null=True), + ), + ] diff --git a/backend/alert/migrations/0010_auto_20201002_0714.py b/backend/alert/migrations/0010_auto_20201002_0714.py index a9f1a5b86..4294a1efe 100644 --- a/backend/alert/migrations/0010_auto_20201002_0714.py +++ b/backend/alert/migrations/0010_auto_20201002_0714.py @@ -1,194 +1,194 @@ -# Generated by Django 3.1.1 on 2020-10-02 11:14 - -import django.db.models.deletion -from django.conf import settings -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0030_auto_20201002_0714"), - migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ("alert", "0009_auto_20200419_2112"), - ] - - operations = [ - migrations.AlterField( - model_name="registration", - name="api_key", - field=models.ForeignKey( - blank=True, - help_text="\nAn API key for 3rd party alternatives to PCA. This is currently unused now that\nPenn Course Notify has fallen, but may be used in the future.\n", - null=True, - on_delete=django.db.models.deletion.CASCADE, - to="courses.apikey", - ), - ), - migrations.AlterField( - model_name="registration", - name="auto_resubscribe", - field=models.BooleanField( - default=False, - help_text="\nDefaults to False, in which case a registration will not be automatically resubscribed\nafter it triggers an alert to be sent (but the user can still resubscribe to a sent alert,\nas long as it is not deleted). If set to True, the registration will be automatically\nresubscribed to once it triggers an alert to be sent (this is useful in the case of\nvolatile sections which are opening and closing frequently, often before the user has\ntime to register).\n", - ), - ), - migrations.AlterField( - model_name="registration", - name="cancelled", - field=models.BooleanField( - default=False, - help_text="\nDefaults to False, changed to True if the registration has been cancelled. A cancelled\nregistration will not trigger any alerts to be sent even if the relevant section opens.\nA cancelled section can be resubscribed to (unlike deleted alerts), and will show up\non the manage alerts page on the frontend (also unlike deleted alerts). Note that once\na registration is cancelled, it cannot be uncancelled (resubscribing creates a new\nregistration which is accessible via the resubscribed_to field, related name of\nresubscribed_from).\n", - ), - ), - migrations.AlterField( - model_name="registration", - name="cancelled_at", - field=models.DateTimeField( - blank=True, - help_text="When was the registration cancelled? 
Null if it hasn't been cancelled.", - null=True, - ), - ), - migrations.AlterField( - model_name="registration", - name="created_at", - field=models.DateTimeField( - auto_now_add=True, help_text="The datetime at which this registration was created." - ), - ), - migrations.AlterField( - model_name="registration", - name="deleted", - field=models.BooleanField( - default=False, - help_text="\nDefaults to False, changed to True if the registration has been deleted. A deleted\nregistration will not trigger any alerts to be sent even if the relevant section opens.\nA deleted section cannot be resubscribed to or undeleted, and will not show up on the\nmanage alerts page on the frontend. It is kept in the database for analytics purposes,\neven though it serves no immediate functional purpose for its original user.\n", - ), - ), - migrations.AlterField( - model_name="registration", - name="deleted_at", - field=models.DateTimeField( - blank=True, - help_text="When was the registration deleted? Null if it hasn't been deleted.", - null=True, - ), - ), - migrations.AlterField( - model_name="registration", - name="email", - field=models.EmailField( - blank=True, - help_text="\nA legacy field that stored the user's email before the Spring 2020 PCA refresh. Currently,\nfor all new registrations the email and phone fields will be None and contact information\ncan be found in the User's UserProfile object (related_name is profile, so you can\naccess the profile from the User object with `.user.profile`).\n", - max_length=254, - null=True, - ), - ), - migrations.AlterField( - model_name="registration", - name="notification_sent", - field=models.BooleanField( - default=False, - help_text="True if an alert has been sent to the user, false otherwise.", - ), - ), - migrations.AlterField( - model_name="registration", - name="notification_sent_at", - field=models.DateTimeField( - blank=True, - help_text="\nWhen was an alert sent to the user as a result of this registration?\nNull if an alert was not sent.\n", - null=True, - ), - ), - migrations.AlterField( - model_name="registration", - name="notification_sent_by", - field=models.CharField( - blank=True, - choices=[ - ("", "Unsent"), - ("LEG", "[Legacy] Sequence of course API requests"), - ("WEB", "Webhook"), - ("SERV", "Course Status Service"), - ("ADM", "Admin Interface"), - ], - default="", - help_text='What triggered the alert to be sent? Options and meanings:
"""Unsent"
"LEG""[Legacy] Sequence of course API requests"
"WEB""Webhook"
"SERV""Course Status Service"
"ADM""Admin Interface"
', - max_length=16, - ), - ), - migrations.AlterField( - model_name="registration", - name="original_created_at", - field=models.DateTimeField( - help_text="\nThe datetime at which the tail of the resubscribe chain to which this registration belongs\nwas created. In other words, the datetime at which the user created the original\nregistration for this section, before resubscribing some number of times\n(0 or more) to reach this registration.\n", - null=True, - ), - ), - migrations.AlterField( - model_name="registration", - name="phone", - field=models.CharField( - blank=True, - help_text="\nA legacy field that stored the user's phone before the Spring 2020 PCA refresh. Currently,\nfor all new registrations the email and phone fields will be None and contact information\ncan be found in the User's UserProfile object (related_name is profile, so you can\naccess the profile from the User object with `.user.profile`).\n", - max_length=100, - null=True, - ), - ), - migrations.AlterField( - model_name="registration", - name="resubscribed_from", - field=models.OneToOneField( - blank=True, - help_text="\nThe registration which was resubscribed to, triggering the creation of this registration.\nIf this registration is the original registration in its resubscribe chain (the tail),\nthis field is null. The related field, 'resubscribed_to' only exists as an attribute of\na Registration object if the registration has been resubscribed. In that case,\nthe field resubscribed_to will point to the next element in the resubscribe chain.\nIf the field does not exist, this registration is the head of its resubscribe chain\n(note that an element can be both the head and the tail of its resubscribe chain,\nin which case it is the only element in its resubscribe chain).\n", - null=True, - on_delete=django.db.models.deletion.SET_NULL, - related_name="resubscribed_to", - to="alert.registration", - ), - ), - migrations.AlterField( - model_name="registration", - name="section", - field=models.ForeignKey( - help_text="The section that the user registered to be notified about.", - on_delete=django.db.models.deletion.CASCADE, - to="courses.section", - ), - ), - migrations.AlterField( - model_name="registration", - name="source", - field=models.CharField( - choices=[ - ("PCA", "Penn Course Alert"), - ("API", "3rd Party Integration"), - ("PCP", "Penn Course Plan"), - ("PCR", "Penn Course Review"), - ("PM", "Penn Mobile"), - ], - help_text='Where did the registration come from? Options and meanings:
"PCA""Penn Course Alert"
"API""3rd Party Integration"
"PCP""Penn Course Plan"
"PCR""Penn Course Review"
"PM""Penn Mobile"
', - max_length=16, - ), - ), - migrations.AlterField( - model_name="registration", - name="updated_at", - field=models.DateTimeField( - auto_now=True, - help_text="The datetime at which this registration was last modified.", - ), - ), - migrations.AlterField( - model_name="registration", - name="user", - field=models.ForeignKey( - blank=True, - help_text="\nThe User that registered for this alert. This object will be none if registration occurred\nbefore the PCA refresh of Spring 2020 (before the refresh user's were only identified by\ntheir email and phone numbers, which are legacy fields in this model now). This object\nmight also be none if registration occurred through a 3rd part API such as Penn Course\nNotify (now that Notify has fallen this is an unlikely event).\n", - null=True, - on_delete=django.db.models.deletion.CASCADE, - to=settings.AUTH_USER_MODEL, - ), - ), - ] +# Generated by Django 3.1.1 on 2020-10-02 11:14 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0030_auto_20201002_0714"), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("alert", "0009_auto_20200419_2112"), + ] + + operations = [ + migrations.AlterField( + model_name="registration", + name="api_key", + field=models.ForeignKey( + blank=True, + help_text="\nAn API key for 3rd party alternatives to PCA. This is currently unused now that\nPenn Course Notify has fallen, but may be used in the future.\n", + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="courses.apikey", + ), + ), + migrations.AlterField( + model_name="registration", + name="auto_resubscribe", + field=models.BooleanField( + default=False, + help_text="\nDefaults to False, in which case a registration will not be automatically resubscribed\nafter it triggers an alert to be sent (but the user can still resubscribe to a sent alert,\nas long as it is not deleted). If set to True, the registration will be automatically\nresubscribed to once it triggers an alert to be sent (this is useful in the case of\nvolatile sections which are opening and closing frequently, often before the user has\ntime to register).\n", + ), + ), + migrations.AlterField( + model_name="registration", + name="cancelled", + field=models.BooleanField( + default=False, + help_text="\nDefaults to False, changed to True if the registration has been cancelled. A cancelled\nregistration will not trigger any alerts to be sent even if the relevant section opens.\nA cancelled section can be resubscribed to (unlike deleted alerts), and will show up\non the manage alerts page on the frontend (also unlike deleted alerts). Note that once\na registration is cancelled, it cannot be uncancelled (resubscribing creates a new\nregistration which is accessible via the resubscribed_to field, related name of\nresubscribed_from).\n", + ), + ), + migrations.AlterField( + model_name="registration", + name="cancelled_at", + field=models.DateTimeField( + blank=True, + help_text="When was the registration cancelled? Null if it hasn't been cancelled.", + null=True, + ), + ), + migrations.AlterField( + model_name="registration", + name="created_at", + field=models.DateTimeField( + auto_now_add=True, help_text="The datetime at which this registration was created." 
+ ), + ), + migrations.AlterField( + model_name="registration", + name="deleted", + field=models.BooleanField( + default=False, + help_text="\nDefaults to False, changed to True if the registration has been deleted. A deleted\nregistration will not trigger any alerts to be sent even if the relevant section opens.\nA deleted section cannot be resubscribed to or undeleted, and will not show up on the\nmanage alerts page on the frontend. It is kept in the database for analytics purposes,\neven though it serves no immediate functional purpose for its original user.\n", + ), + ), + migrations.AlterField( + model_name="registration", + name="deleted_at", + field=models.DateTimeField( + blank=True, + help_text="When was the registration deleted? Null if it hasn't been deleted.", + null=True, + ), + ), + migrations.AlterField( + model_name="registration", + name="email", + field=models.EmailField( + blank=True, + help_text="\nA legacy field that stored the user's email before the Spring 2020 PCA refresh. Currently,\nfor all new registrations the email and phone fields will be None and contact information\ncan be found in the User's UserProfile object (related_name is profile, so you can\naccess the profile from the User object with `.user.profile`).\n", + max_length=254, + null=True, + ), + ), + migrations.AlterField( + model_name="registration", + name="notification_sent", + field=models.BooleanField( + default=False, + help_text="True if an alert has been sent to the user, false otherwise.", + ), + ), + migrations.AlterField( + model_name="registration", + name="notification_sent_at", + field=models.DateTimeField( + blank=True, + help_text="\nWhen was an alert sent to the user as a result of this registration?\nNull if an alert was not sent.\n", + null=True, + ), + ), + migrations.AlterField( + model_name="registration", + name="notification_sent_by", + field=models.CharField( + blank=True, + choices=[ + ("", "Unsent"), + ("LEG", "[Legacy] Sequence of course API requests"), + ("WEB", "Webhook"), + ("SERV", "Course Status Service"), + ("ADM", "Admin Interface"), + ], + default="", + help_text='What triggered the alert to be sent? Options and meanings:
"""Unsent"
"LEG""[Legacy] Sequence of course API requests"
"WEB""Webhook"
"SERV""Course Status Service"
"ADM""Admin Interface"
', + max_length=16, + ), + ), + migrations.AlterField( + model_name="registration", + name="original_created_at", + field=models.DateTimeField( + help_text="\nThe datetime at which the tail of the resubscribe chain to which this registration belongs\nwas created. In other words, the datetime at which the user created the original\nregistration for this section, before resubscribing some number of times\n(0 or more) to reach this registration.\n", + null=True, + ), + ), + migrations.AlterField( + model_name="registration", + name="phone", + field=models.CharField( + blank=True, + help_text="\nA legacy field that stored the user's phone before the Spring 2020 PCA refresh. Currently,\nfor all new registrations the email and phone fields will be None and contact information\ncan be found in the User's UserProfile object (related_name is profile, so you can\naccess the profile from the User object with `.user.profile`).\n", + max_length=100, + null=True, + ), + ), + migrations.AlterField( + model_name="registration", + name="resubscribed_from", + field=models.OneToOneField( + blank=True, + help_text="\nThe registration which was resubscribed to, triggering the creation of this registration.\nIf this registration is the original registration in its resubscribe chain (the tail),\nthis field is null. The related field, 'resubscribed_to' only exists as an attribute of\na Registration object if the registration has been resubscribed. In that case,\nthe field resubscribed_to will point to the next element in the resubscribe chain.\nIf the field does not exist, this registration is the head of its resubscribe chain\n(note that an element can be both the head and the tail of its resubscribe chain,\nin which case it is the only element in its resubscribe chain).\n", + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="resubscribed_to", + to="alert.registration", + ), + ), + migrations.AlterField( + model_name="registration", + name="section", + field=models.ForeignKey( + help_text="The section that the user registered to be notified about.", + on_delete=django.db.models.deletion.CASCADE, + to="courses.section", + ), + ), + migrations.AlterField( + model_name="registration", + name="source", + field=models.CharField( + choices=[ + ("PCA", "Penn Course Alert"), + ("API", "3rd Party Integration"), + ("PCP", "Penn Course Plan"), + ("PCR", "Penn Course Review"), + ("PM", "Penn Mobile"), + ], + help_text='Where did the registration come from? Options and meanings:
"PCA""Penn Course Alert"
"API""3rd Party Integration"
"PCP""Penn Course Plan"
"PCR""Penn Course Review"
"PM""Penn Mobile"
', + max_length=16, + ), + ), + migrations.AlterField( + model_name="registration", + name="updated_at", + field=models.DateTimeField( + auto_now=True, + help_text="The datetime at which this registration was last modified.", + ), + ), + migrations.AlterField( + model_name="registration", + name="user", + field=models.ForeignKey( + blank=True, + help_text="\nThe User that registered for this alert. This object will be none if registration occurred\nbefore the PCA refresh of Spring 2020 (before the refresh user's were only identified by\ntheir email and phone numbers, which are legacy fields in this model now). This object\nmight also be none if registration occurred through a 3rd part API such as Penn Course\nNotify (now that Notify has fallen this is an unlikely event).\n", + null=True, + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), + ] diff --git a/backend/alert/migrations/0011_auto_20201108_1535.py b/backend/alert/migrations/0011_auto_20201108_1535.py index 551c7eed4..78c6b762b 100644 --- a/backend/alert/migrations/0011_auto_20201108_1535.py +++ b/backend/alert/migrations/0011_auto_20201108_1535.py @@ -1,55 +1,55 @@ -# Generated by Django 3.1.1 on 2020-11-08 20:35 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0010_auto_20201002_0714"), - ] - - operations = [ - migrations.AddField( - model_name="registration", - name="close_notification", - field=models.BooleanField( - default=False, - help_text="Defaults to False. Changes to True if the user opts-in to receive\n a close notification (an alert when the section closes after an\n alert was sent for it opening).\n", - ), - ), - migrations.AddField( - model_name="registration", - name="close_notification_sent", - field=models.BooleanField( - default=False, - help_text="True if a close notification has been sent to the user, false otherwise.", - ), - ), - migrations.AddField( - model_name="registration", - name="close_notification_sent_at", - field=models.DateTimeField( - blank=True, - help_text="\nWhen was a close notification sent to the user as a result of this registration?\nNull if a close notification was not sent.\n", - null=True, - ), - ), - migrations.AddField( - model_name="registration", - name="close_notification_sent_by", - field=models.CharField( - blank=True, - choices=[ - ("", "Unsent"), - ("LEG", "[Legacy] Sequence of course API requests"), - ("WEB", "Webhook"), - ("SERV", "Course Status Service"), - ("ADM", "Admin Interface"), - ], - default="", - help_text='What triggered the close notification to be sent? Options and meanings:
"""Unsent"
"LEG""[Legacy] Sequence of course API requests"
"WEB""Webhook"
"SERV""Course Status Service"
"ADM""Admin Interface"
', - max_length=16, - ), - ), - ] +# Generated by Django 3.1.1 on 2020-11-08 20:35 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0010_auto_20201002_0714"), + ] + + operations = [ + migrations.AddField( + model_name="registration", + name="close_notification", + field=models.BooleanField( + default=False, + help_text="Defaults to False. Changes to True if the user opts-in to receive\n a close notification (an alert when the section closes after an\n alert was sent for it opening).\n", + ), + ), + migrations.AddField( + model_name="registration", + name="close_notification_sent", + field=models.BooleanField( + default=False, + help_text="True if a close notification has been sent to the user, false otherwise.", + ), + ), + migrations.AddField( + model_name="registration", + name="close_notification_sent_at", + field=models.DateTimeField( + blank=True, + help_text="\nWhen was a close notification sent to the user as a result of this registration?\nNull if a close notification was not sent.\n", + null=True, + ), + ), + migrations.AddField( + model_name="registration", + name="close_notification_sent_by", + field=models.CharField( + blank=True, + choices=[ + ("", "Unsent"), + ("LEG", "[Legacy] Sequence of course API requests"), + ("WEB", "Webhook"), + ("SERV", "Course Status Service"), + ("ADM", "Admin Interface"), + ], + default="", + help_text='What triggered the close notification to be sent? Options and meanings:
"""Unsent"
"LEG""[Legacy] Sequence of course API requests"
"WEB""Webhook"
"SERV""Course Status Service"
"ADM""Admin Interface"
', + max_length=16, + ), + ), + ] diff --git a/backend/alert/migrations/0012_auto_20210418_0343.py b/backend/alert/migrations/0012_auto_20210418_0343.py index 1cff4b39d..ae02c6e63 100644 --- a/backend/alert/migrations/0012_auto_20210418_0343.py +++ b/backend/alert/migrations/0012_auto_20210418_0343.py @@ -1,217 +1,217 @@ -# Generated by Django 3.2 on 2021-04-18 07:43 - -import django.db.models.deletion -import django.utils.timezone -from django.db import migrations, models - -import alert.models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0032_auto_20210418_0343"), - ("alert", "0011_auto_20201108_1535"), - ] - - operations = [ - migrations.CreateModel( - name="AddDropPeriod", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ( - "semester", - models.CharField( - db_index=True, - help_text="\nThe semester of this add drop period (of the form YYYYx where x is\nA [for spring], or C [fall]), e.g. `2019C` for fall 2019.\n", - max_length=5, - unique=True, - validators=[alert.models.validate_add_drop_semester], - ), - ), - ( - "start", - models.DateTimeField( - blank=True, - help_text="The datetime at which the add drop period started.", - null=True, - ), - ), - ( - "end", - models.DateTimeField( - blank=True, - help_text="The datetime at which the add drop period ended.", - null=True, - ), - ), - ( - "estimated_start", - models.DateTimeField( - blank=True, - help_text="\nThis field estimates the start of the add/drop period based on the semester\nand historical data, even if the start field hasn't been filled in yet.\nIt equals the start of the add/drop period for this semester if it is explicitly set,\notherwise the most recent non-null start to an add/drop period, otherwise\n(if none exist), estimate as April 5 @ 7:00am ET of the same year (for a fall semester),\nor November 16 @ 7:00am ET of the previous year (for a spring semester).\n", - null=True, - ), - ), - ( - "estimated_end", - models.DateTimeField( - blank=True, - help_text="\n This field estimates the end of the add/drop period based on the semester\n and historical data, even if the end field hasn't been filled in yet.\n The end of the add/drop period for this semester, if it is explicitly set, otherwise\nthe most recent non-null end to an add/drop period, otherwise (if none exist),\nestimate as October 12 @ 11:59pm ET (for a fall semester),\nor February 22 @ 11:59pm ET (for a spring semester),\nof the same year.\n", - null=True, - ), - ), - ], - ), - migrations.AlterField( - model_name="registration", - name="section", - field=models.ForeignKey( - help_text="The section that the user registered to be notified about.", - on_delete=django.db.models.deletion.CASCADE, - related_name="registrations", - to="courses.section", - ), - ), - migrations.AlterField( - model_name="registration", - name="source", - field=models.CharField( - choices=[ - ("PCA", "Penn Course Alert"), - ("API", "3rd Party Integration"), - ("PCP", "Penn Course Plan"), - ("PCR", "Penn Course Review"), - ("PM", "Penn Mobile"), - ("SCRIPT_PCN", "The loadregistrations_pcn shell command"), - ("SCRIPT_PCA", "The loadregistrations_pca shell command"), - ], - help_text='Where did the registration come from? Options and meanings:
"PCA""Penn Course Alert"
"API""3rd Party Integration"
"PCP""Penn Course Plan"
"PCR""Penn Course Review"
"PM""Penn Mobile"
"SCRIPT_PCN""The loadregistrations_pcn shell command"
"SCRIPT_PCA""The loadregistrations_pca shell command"
', - max_length=16, - ), - ), - migrations.CreateModel( - name="PcaDemandDistributionEstimate", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ( - "semester", - models.CharField( - db_index=True, - help_text="\nThe semester of this demand distribution estimate (of the form YYYYx where x is\nA [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.\n", - max_length=5, - ), - ), - ( - "created_at", - models.DateTimeField( - db_index=True, - default=django.utils.timezone.now, - help_text="The datetime at which the distribution estimates were updated.", - ), - ), - ( - "percent_through_add_drop_period", - models.FloatField( - default=0, - help_text="The percentage through the add/drop period at which this demand distribution estimate change occurred. This percentage is constrained within the range [0,1].", - ), - ), - ( - "in_add_drop_period", - models.BooleanField( - default=False, - help_text="Was this demand distribution estimate created during the add/drop period?", - ), - ), - ( - "highest_demand_section_volume", - models.IntegerField( - help_text="The registration volume of the highest_demand_section at this time." - ), - ), - ( - "lowest_demand_section_volume", - models.IntegerField( - help_text="The registration volume of the lowest_demand_section at this time." - ), - ), - ( - "csdv_gamma_param_alpha", - models.FloatField( - blank=True, - help_text="The fitted gamma distribution alpha parameter of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", - null=True, - ), - ), - ( - "csdv_gamma_param_loc", - models.FloatField( - blank=True, - help_text="The fitted gamma distribution loc parameter of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", - null=True, - ), - ), - ( - "csdv_gamma_param_scale", - models.FloatField( - blank=True, - help_text="The fitted gamma distribution beta parameter of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", - null=True, - ), - ), - ( - "csdv_mean", - models.FloatField( - blank=True, - help_text="The mean of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", - null=True, - ), - ), - ( - "csdv_median", - models.FloatField( - blank=True, - help_text="The median of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", - null=True, - ), - ), - ( - "csdv_75th_percentile", - models.FloatField( - blank=True, - help_text="The 75th percentile of all closed sections' raw demand values at this time. 
The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", - null=True, - ), - ), - ( - "highest_demand_section", - models.ForeignKey( - help_text="A section with the highest raw demand value at this time.", - on_delete=django.db.models.deletion.CASCADE, - related_name="highest_demand_distribution_estimates", - to="courses.section", - ), - ), - ( - "lowest_demand_section", - models.ForeignKey( - help_text="A section with the lowest raw demand value at this time.", - on_delete=django.db.models.deletion.CASCADE, - related_name="lowest_demand_distribution_estimates", - to="courses.section", - ), - ), - ], - ), - ] +# Generated by Django 3.2 on 2021-04-18 07:43 + +import django.db.models.deletion +import django.utils.timezone +from django.db import migrations, models + +import alert.models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0032_auto_20210418_0343"), + ("alert", "0011_auto_20201108_1535"), + ] + + operations = [ + migrations.CreateModel( + name="AddDropPeriod", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ( + "semester", + models.CharField( + db_index=True, + help_text="\nThe semester of this add drop period (of the form YYYYx where x is\nA [for spring], or C [fall]), e.g. `2019C` for fall 2019.\n", + max_length=5, + unique=True, + validators=[alert.models.validate_add_drop_semester], + ), + ), + ( + "start", + models.DateTimeField( + blank=True, + help_text="The datetime at which the add drop period started.", + null=True, + ), + ), + ( + "end", + models.DateTimeField( + blank=True, + help_text="The datetime at which the add drop period ended.", + null=True, + ), + ), + ( + "estimated_start", + models.DateTimeField( + blank=True, + help_text="\nThis field estimates the start of the add/drop period based on the semester\nand historical data, even if the start field hasn't been filled in yet.\nIt equals the start of the add/drop period for this semester if it is explicitly set,\notherwise the most recent non-null start to an add/drop period, otherwise\n(if none exist), estimate as April 5 @ 7:00am ET of the same year (for a fall semester),\nor November 16 @ 7:00am ET of the previous year (for a spring semester).\n", + null=True, + ), + ), + ( + "estimated_end", + models.DateTimeField( + blank=True, + help_text="\n This field estimates the end of the add/drop period based on the semester\n and historical data, even if the end field hasn't been filled in yet.\n The end of the add/drop period for this semester, if it is explicitly set, otherwise\nthe most recent non-null end to an add/drop period, otherwise (if none exist),\nestimate as October 12 @ 11:59pm ET (for a fall semester),\nor February 22 @ 11:59pm ET (for a spring semester),\nof the same year.\n", + null=True, + ), + ), + ], + ), + migrations.AlterField( + model_name="registration", + name="section", + field=models.ForeignKey( + help_text="The section that the user registered to be notified about.", + on_delete=django.db.models.deletion.CASCADE, + related_name="registrations", + to="courses.section", + ), + ), + migrations.AlterField( + model_name="registration", + name="source", + field=models.CharField( + choices=[ + ("PCA", "Penn Course Alert"), + ("API", "3rd Party Integration"), + ("PCP", "Penn Course Plan"), + ("PCR", "Penn Course Review"), + ("PM", "Penn Mobile"), + ("SCRIPT_PCN", "The loadregistrations_pcn 
shell command"), + ("SCRIPT_PCA", "The loadregistrations_pca shell command"), + ], + help_text='Where did the registration come from? Options and meanings:
"PCA""Penn Course Alert"
"API""3rd Party Integration"
"PCP""Penn Course Plan"
"PCR""Penn Course Review"
"PM""Penn Mobile"
"SCRIPT_PCN""The loadregistrations_pcn shell command"
"SCRIPT_PCA""The loadregistrations_pca shell command"
', + max_length=16, + ), + ), + migrations.CreateModel( + name="PcaDemandDistributionEstimate", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ( + "semester", + models.CharField( + db_index=True, + help_text="\nThe semester of this demand distribution estimate (of the form YYYYx where x is\nA [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.\n", + max_length=5, + ), + ), + ( + "created_at", + models.DateTimeField( + db_index=True, + default=django.utils.timezone.now, + help_text="The datetime at which the distribution estimates were updated.", + ), + ), + ( + "percent_through_add_drop_period", + models.FloatField( + default=0, + help_text="The percentage through the add/drop period at which this demand distribution estimate change occurred. This percentage is constrained within the range [0,1].", + ), + ), + ( + "in_add_drop_period", + models.BooleanField( + default=False, + help_text="Was this demand distribution estimate created during the add/drop period?", + ), + ), + ( + "highest_demand_section_volume", + models.IntegerField( + help_text="The registration volume of the highest_demand_section at this time." + ), + ), + ( + "lowest_demand_section_volume", + models.IntegerField( + help_text="The registration volume of the lowest_demand_section at this time." + ), + ), + ( + "csdv_gamma_param_alpha", + models.FloatField( + blank=True, + help_text="The fitted gamma distribution alpha parameter of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", + null=True, + ), + ), + ( + "csdv_gamma_param_loc", + models.FloatField( + blank=True, + help_text="The fitted gamma distribution loc parameter of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", + null=True, + ), + ), + ( + "csdv_gamma_param_scale", + models.FloatField( + blank=True, + help_text="The fitted gamma distribution beta parameter of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", + null=True, + ), + ), + ( + "csdv_mean", + models.FloatField( + blank=True, + help_text="The mean of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", + null=True, + ), + ), + ( + "csdv_median", + models.FloatField( + blank=True, + help_text="The median of all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", + null=True, + ), + ), + ( + "csdv_75th_percentile", + models.FloatField( + blank=True, + help_text="The 75th percentile of all closed sections' raw demand values at this time. 
The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", + null=True, + ), + ), + ( + "highest_demand_section", + models.ForeignKey( + help_text="A section with the highest raw demand value at this time.", + on_delete=django.db.models.deletion.CASCADE, + related_name="highest_demand_distribution_estimates", + to="courses.section", + ), + ), + ( + "lowest_demand_section", + models.ForeignKey( + help_text="A section with the lowest raw demand value at this time.", + on_delete=django.db.models.deletion.CASCADE, + related_name="lowest_demand_distribution_estimates", + to="courses.section", + ), + ), + ], + ), + ] diff --git a/backend/alert/migrations/0013_pcademanddistributionestimate_csdv_gamma_fit_log_likelihood.py b/backend/alert/migrations/0013_pcademanddistributionestimate_csdv_gamma_fit_log_likelihood.py index 58a4f5e8e..faa9e3235 100644 --- a/backend/alert/migrations/0013_pcademanddistributionestimate_csdv_gamma_fit_log_likelihood.py +++ b/backend/alert/migrations/0013_pcademanddistributionestimate_csdv_gamma_fit_log_likelihood.py @@ -1,22 +1,22 @@ -# Generated by Django 3.2 on 2021-04-18 11:53 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0012_auto_20210418_0343"), - ] - - operations = [ - migrations.AddField( - model_name="pcademanddistributionestimate", - name="csdv_gamma_fit_log_likelihood", - field=models.FloatField( - blank=True, - help_text="The log likelihood of the fitted gamma distribution over all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", - null=True, - ), - ), - ] +# Generated by Django 3.2 on 2021-04-18 11:53 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0012_auto_20210418_0343"), + ] + + operations = [ + migrations.AddField( + model_name="pcademanddistributionestimate", + name="csdv_gamma_fit_log_likelihood", + field=models.FloatField( + blank=True, + help_text="The log likelihood of the fitted gamma distribution over all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", + null=True, + ), + ), + ] diff --git a/backend/alert/migrations/0014_auto_20210418_0847.py b/backend/alert/migrations/0014_auto_20210418_0847.py index 2448d4668..e799b6a4a 100644 --- a/backend/alert/migrations/0014_auto_20210418_0847.py +++ b/backend/alert/migrations/0014_auto_20210418_0847.py @@ -1,26 +1,26 @@ -# Generated by Django 3.2 on 2021-04-18 12:47 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0013_pcademanddistributionestimate_csdv_gamma_fit_log_likelihood"), - ] - - operations = [ - migrations.RemoveField( - model_name="pcademanddistributionestimate", - name="csdv_gamma_fit_log_likelihood", - ), - migrations.AddField( - model_name="pcademanddistributionestimate", - name="csdv_gamma_fit_mean_log_likelihood", - field=models.FloatField( - blank=True, - help_text="The mean log likelihood of the fitted gamma distribution over all closed sections' raw demand values at this time. 
The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", - null=True, - ), - ), - ] +# Generated by Django 3.2 on 2021-04-18 12:47 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0013_pcademanddistributionestimate_csdv_gamma_fit_log_likelihood"), + ] + + operations = [ + migrations.RemoveField( + model_name="pcademanddistributionestimate", + name="csdv_gamma_fit_log_likelihood", + ), + migrations.AddField( + model_name="pcademanddistributionestimate", + name="csdv_gamma_fit_mean_log_likelihood", + field=models.FloatField( + blank=True, + help_text="The mean log likelihood of the fitted gamma distribution over all closed sections' raw demand values at this time. The abbreviation 'csdv' stands for 'closed section demand values'; this is a collection of the raw demand values of each closed section at this time.", + null=True, + ), + ), + ] diff --git a/backend/alert/migrations/0015_auto_20211010_1235.py b/backend/alert/migrations/0015_auto_20211010_1235.py index 8e2b8d98c..29fb2a4ff 100644 --- a/backend/alert/migrations/0015_auto_20211010_1235.py +++ b/backend/alert/migrations/0015_auto_20211010_1235.py @@ -1,45 +1,45 @@ -# Generated by Django 3.2 on 2021-10-10 16:35 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0014_auto_20210418_0847"), - ] - - operations = [ - migrations.AddField( - model_name="registration", - name="head_registration", - field=models.ForeignKey( - blank=True, - help_text="\nThe head of this registration's resubscribe chain (pointing to \nitself if this registration is the head of its chain).\n", - null=True, - on_delete=django.db.models.deletion.CASCADE, - to="alert.registration", - ), - ), - migrations.AlterField( - model_name="registration", - name="close_notification", - field=models.BooleanField( - default=False, - help_text="Defaults to false. If set to true, the user will receive\n a close notification (an alert when the section closes after an\n alert was sent for it opening).\n", - ), - ), - migrations.AlterField( - model_name="registration", - name="resubscribed_from", - field=models.OneToOneField( - blank=True, - help_text="\nThe registration which was resubscribed to, triggering the creation of this registration.\nIf this registration is the original registration in its resubscribe chain (the tail),\nthis field is null. The related field, 'resubscribed_to' only exists as an attribute of\na Registration object if the registration has been resubscribed. 
In that case,\nthe field resubscribed_to will point to the next element in the resubscribe chain.\nIf the field does not exist, this registration is the head of its resubscribe chain\n(note that an element can be both the head and the tail of its resubscribe chain,\nin which case it is the only element in its resubscribe chain).\n", - null=True, - on_delete=django.db.models.deletion.CASCADE, - related_name="resubscribed_to", - to="alert.registration", - ), - ), - ] +# Generated by Django 3.2 on 2021-10-10 16:35 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0014_auto_20210418_0847"), + ] + + operations = [ + migrations.AddField( + model_name="registration", + name="head_registration", + field=models.ForeignKey( + blank=True, + help_text="\nThe head of this registration's resubscribe chain (pointing to \nitself if this registration is the head of its chain).\n", + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="alert.registration", + ), + ), + migrations.AlterField( + model_name="registration", + name="close_notification", + field=models.BooleanField( + default=False, + help_text="Defaults to false. If set to true, the user will receive\n a close notification (an alert when the section closes after an\n alert was sent for it opening).\n", + ), + ), + migrations.AlterField( + model_name="registration", + name="resubscribed_from", + field=models.OneToOneField( + blank=True, + help_text="\nThe registration which was resubscribed to, triggering the creation of this registration.\nIf this registration is the original registration in its resubscribe chain (the tail),\nthis field is null. The related field, 'resubscribed_to' only exists as an attribute of\na Registration object if the registration has been resubscribed. 
In that case,\nthe field resubscribed_to will point to the next element in the resubscribe chain.\nIf the field does not exist, this registration is the head of its resubscribe chain\n(note that an element can be both the head and the tail of its resubscribe chain,\nin which case it is the only element in its resubscribe chain).\n", + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="resubscribed_to", + to="alert.registration", + ), + ), + ] diff --git a/backend/alert/migrations/0016_auto_20211113_1537.py b/backend/alert/migrations/0016_auto_20211113_1537.py index 840633bc2..527bd413d 100644 --- a/backend/alert/migrations/0016_auto_20211113_1537.py +++ b/backend/alert/migrations/0016_auto_20211113_1537.py @@ -1,89 +1,89 @@ -# Generated by Django 3.2.9 on 2021-11-13 20:37 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0015_auto_20211010_1235"), - ] - - operations = [ - migrations.RemoveField( - model_name="pcademanddistributionestimate", - name="csdv_75th_percentile", - ), - migrations.RemoveField( - model_name="pcademanddistributionestimate", - name="csdv_gamma_fit_mean_log_likelihood", - ), - migrations.RemoveField( - model_name="pcademanddistributionestimate", - name="csdv_gamma_param_alpha", - ), - migrations.RemoveField( - model_name="pcademanddistributionestimate", - name="csdv_gamma_param_loc", - ), - migrations.RemoveField( - model_name="pcademanddistributionestimate", - name="csdv_gamma_param_scale", - ), - migrations.RemoveField( - model_name="pcademanddistributionestimate", - name="csdv_mean", - ), - migrations.RemoveField( - model_name="pcademanddistributionestimate", - name="csdv_median", - ), - migrations.AddField( - model_name="pcademanddistributionestimate", - name="csprdv_lognorm_param_loc", - field=models.FloatField( - blank=True, - help_text="The loc parameter of the fitted log-normal distribution on positive raw demand values from closed sections. Null if there are no closed sections that have positive raw demand values. The abbreviation 'csprdv' stands for 'closed section positive raw demand values'.", - null=True, - ), - ), - migrations.AddField( - model_name="pcademanddistributionestimate", - name="csprdv_lognorm_param_scale", - field=models.FloatField( - blank=True, - help_text="The scale parameter of the fitted log-normal distribution on positive raw demand values from closed sections. Null if there are no closed sections that have positive raw demand values. The abbreviation 'csprdv' stands for 'closed section positive raw demand values'.", - null=True, - ), - ), - migrations.AddField( - model_name="pcademanddistributionestimate", - name="csprdv_lognorm_param_shape", - field=models.FloatField( - blank=True, - help_text="The shape parameter of the fitted log-normal distribution on positive raw demand values from closed sections. Null if there are no closed sections that have positive raw demand values. The abbreviation 'csprdv' stands for 'closed section positive raw demand values'.", - null=True, - ), - ), - migrations.AddField( - model_name="pcademanddistributionestimate", - name="csrdv_frac_zero", - field=models.FloatField( - blank=True, - help_text="The fraction of closed sections' raw demand values that are 0 (non-positive), expressed as a float in the range [0,1]. Null if there are no closed sections. 
The abbreviation 'csrdv' stands for 'closed section raw demand values', not to be confused with 'csprdv', which stands for 'closed section positive raw demand values'.", - null=True, - ), - ), - migrations.AlterField( - model_name="registration", - name="head_registration", - field=models.ForeignKey( - blank=True, - help_text="\nThe head of this registration's resubscribe chain (pointing to\nitself if this registration is the head of its chain).\n", - null=True, - on_delete=django.db.models.deletion.CASCADE, - to="alert.registration", - ), - ), - ] +# Generated by Django 3.2.9 on 2021-11-13 20:37 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0015_auto_20211010_1235"), + ] + + operations = [ + migrations.RemoveField( + model_name="pcademanddistributionestimate", + name="csdv_75th_percentile", + ), + migrations.RemoveField( + model_name="pcademanddistributionestimate", + name="csdv_gamma_fit_mean_log_likelihood", + ), + migrations.RemoveField( + model_name="pcademanddistributionestimate", + name="csdv_gamma_param_alpha", + ), + migrations.RemoveField( + model_name="pcademanddistributionestimate", + name="csdv_gamma_param_loc", + ), + migrations.RemoveField( + model_name="pcademanddistributionestimate", + name="csdv_gamma_param_scale", + ), + migrations.RemoveField( + model_name="pcademanddistributionestimate", + name="csdv_mean", + ), + migrations.RemoveField( + model_name="pcademanddistributionestimate", + name="csdv_median", + ), + migrations.AddField( + model_name="pcademanddistributionestimate", + name="csprdv_lognorm_param_loc", + field=models.FloatField( + blank=True, + help_text="The loc parameter of the fitted log-normal distribution on positive raw demand values from closed sections. Null if there are no closed sections that have positive raw demand values. The abbreviation 'csprdv' stands for 'closed section positive raw demand values'.", + null=True, + ), + ), + migrations.AddField( + model_name="pcademanddistributionestimate", + name="csprdv_lognorm_param_scale", + field=models.FloatField( + blank=True, + help_text="The scale parameter of the fitted log-normal distribution on positive raw demand values from closed sections. Null if there are no closed sections that have positive raw demand values. The abbreviation 'csprdv' stands for 'closed section positive raw demand values'.", + null=True, + ), + ), + migrations.AddField( + model_name="pcademanddistributionestimate", + name="csprdv_lognorm_param_shape", + field=models.FloatField( + blank=True, + help_text="The shape parameter of the fitted log-normal distribution on positive raw demand values from closed sections. Null if there are no closed sections that have positive raw demand values. The abbreviation 'csprdv' stands for 'closed section positive raw demand values'.", + null=True, + ), + ), + migrations.AddField( + model_name="pcademanddistributionestimate", + name="csrdv_frac_zero", + field=models.FloatField( + blank=True, + help_text="The fraction of closed sections' raw demand values that are 0 (non-positive), expressed as a float in the range [0,1]. Null if there are no closed sections. 
The abbreviation 'csrdv' stands for 'closed section raw demand values', not to be confused with 'csprdv', which stands for 'closed section positive raw demand values'.", + null=True, + ), + ), + migrations.AlterField( + model_name="registration", + name="head_registration", + field=models.ForeignKey( + blank=True, + help_text="\nThe head of this registration's resubscribe chain (pointing to\nitself if this registration is the head of its chain).\n", + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="alert.registration", + ), + ), + ] diff --git a/backend/alert/migrations/0017_alter_registration_head_registration.py b/backend/alert/migrations/0017_alter_registration_head_registration.py index a7771886d..3544e5c29 100644 --- a/backend/alert/migrations/0017_alter_registration_head_registration.py +++ b/backend/alert/migrations/0017_alter_registration_head_registration.py @@ -1,35 +1,35 @@ -# Generated by Django 4.0.3 on 2022-04-07 02:45 - -import django.db.models.deletion -from django.db import migrations, models - - -def forwards_func(apps, schema_editor): - Registration = apps.get_model("alert", "Registration") - Registration.objects.filter(head_registration__isnull=True).update( - head_registration_id=models.F("id") - ) - - -def reverse_func(apps, schema_editor): - pass - - -class Migration(migrations.Migration): - - dependencies = [ - ("alert", "0016_auto_20211113_1537"), - ] - - operations = [ - migrations.RunPython(forwards_func, reverse_func), - migrations.AlterField( - model_name="registration", - name="head_registration", - field=models.ForeignKey( - help_text="\nThe head of this registration's resubscribe chain (pointing to\nitself if this registration is the head of its chain). If you call `.save()`\non a registration without setting its `head_registration` field, the overridden\n`Registration.save()` method will automatically set its `head_registration`\nto a self-reference.\n", - on_delete=django.db.models.deletion.CASCADE, - to="alert.registration", - ), - ), - ] +# Generated by Django 4.0.3 on 2022-04-07 02:45 + +import django.db.models.deletion +from django.db import migrations, models + + +def forwards_func(apps, schema_editor): + Registration = apps.get_model("alert", "Registration") + Registration.objects.filter(head_registration__isnull=True).update( + head_registration_id=models.F("id") + ) + + +def reverse_func(apps, schema_editor): + pass + + +class Migration(migrations.Migration): + + dependencies = [ + ("alert", "0016_auto_20211113_1537"), + ] + + operations = [ + migrations.RunPython(forwards_func, reverse_func), + migrations.AlterField( + model_name="registration", + name="head_registration", + field=models.ForeignKey( + help_text="\nThe head of this registration's resubscribe chain (pointing to\nitself if this registration is the head of its chain). If you call `.save()`\non a registration without setting its `head_registration` field, the overridden\n`Registration.save()` method will automatically set its `head_registration`\nto a self-reference.\n", + on_delete=django.db.models.deletion.CASCADE, + to="alert.registration", + ), + ), + ] diff --git a/backend/alert/models.py b/backend/alert/models.py index cb1625814..837d72b71 100644 --- a/backend/alert/models.py +++ b/backend/alert/models.py @@ -1,1088 +1,1088 @@ -import logging -from datetime import datetime -from enum import Enum, auto -from textwrap import dedent - -import phonenumbers # library for parsing and formatting phone numbers. 
-from dateutil.tz import gettz -from django.contrib.auth import get_user_model -from django.core.cache import cache -from django.core.exceptions import ValidationError -from django.db import models, transaction -from django.db.models import Case, F, Max, Q, Value, When -from django.db.models.functions import Extract -from django.utils import timezone -from django.utils.timezone import make_aware - -from alert.alerts import Email, PushNotification, Text -from courses.models import Course, Section, StatusUpdate, UserProfile, string_dict_to_html -from courses.util import ( - does_object_pass_filter, - get_course_and_section, - get_current_semester, - get_or_create_add_drop_period, -) -from PennCourses.settings.base import TIME_ZONE - - -class RegStatus(Enum): - SUCCESS = auto() - OPEN_REG_EXISTS = auto() - COURSE_OPEN = auto() - COURSE_NOT_FOUND = auto() - NO_CONTACT_INFO = auto() - TEXT_CLOSE_NOTIFICATION = auto() - - -SOURCE_PCA = "PCA" -SOURCE_API = "API" - - -class Registration(models.Model): - """ - A registration for sending an alert to the user upon the opening of a course - during open registration. - - In addition to sending alerts for when a class opens up, we have also implemented - an optionally user-enabled feature called "close notifications". - If a registration has close_notification enabled, it will act normally when the watched - section opens up for the first time (triggering an alert to be sent). However, once the - watched section closes, it will send another alert (the email alert will be in the same - chain as the original alert) to let the user know that the section has closed. Thus, - if a user sees a PCA notification on their phone during a class for instance, they won't - need to frantically open up their laptop and check PennInTouch to see if the class is still - open just to find that it is already closed. To avoid spam and wasted money, we DO NOT - send any close notifications over text. So the user must have an email saved or use - push notifications in order to be able to enable close notifications on a registration. - Note that the close_notification setting carries over across resubscriptions, but can be - disabled at any time using a PUT request to /api/alert/registrations/{id}/. - - An important concept for this Model is that of the "resubscribe chain". A resubscribe chain - is a chain of Registration objects where the tail of the chain was the original Registration - created through a POST request to /api/alert/registrations/ specifying a new section (one - that the user wasn't already registered to receive alerts for). Each next element in the chain - is a Registration created by resubscribing to the previous Registration (once that - Registration had triggered an alert to be sent), either manually by the user or - automatically if auto_resubscribe was set to true. Then, it follows that the head of the - resubscribe chain is the most relevant Registration for that user/section combo; if any - of the registrations in the chain are active, it would be the head. And if the head - is active, none of the other registrations in the chain are active. That said, a non-head - Registration may be waiting to send a close notification (if the watched section hasn't closed - yet). - - Note that a Registration will send an alert when the section it is watching opens, if and - only if it hasn't sent one before, it isn't cancelled, and it isn't deleted. If a - registration would send an alert when the section it is watching opens, we call it - "active". 
This rule is encoded in the is_active property. You can also filter - for active registrations using the static is_active_filter() method which returns a - dictionary that you can unpack into the kwargs of the filter method - (you cannot filter on a property). A Registration will send a close notification - when the section it is watching closes, if and only if it has already sent an open alert, - the user has enabled close notifications for that section, the Registration hasn't sent a - close_notification before, and its head registration isn't cancelled or deleted. - If a registration would send a close notification when the section it is watching closes, - we call it "waiting for close". This rule is encoded in the is_waiting_for_close property. - You can also filter for such registrations using the static is_waiting_for_close_filter() - method which returns a dictionary that you can unpack into the kwargs of the filter method - (you cannot filter on a property). - - After the PCA backend refactor in 2019C/2020A, all PCA Registrations have a user field - pointing to the user's Penn Labs Accounts User object. In other words, we implemented a - user/accounts system for PCA which required that - people log in to use the website. Thus, the contact information used in PCA alerts - is taken from the user's User Profile. You can edit this contact information using - a PUT or PATCH request to /accounts/me/. If push_notifications is set to True, then - a push notification will be sent when the user is alerted, but no text notifications will - be sent (as that would be a redundant alert to the user's phone). Otherwise, an email - or a text alert is sent if and only if contact information for that medium exists in - the user's profile. - - Alerts are triggered by webhook requests from the UPenn OpenData API - (https://esb.isc-seo.upenn.edu/8091/documentation/#coursestatuswebhookservice), and accepted - by alert/views.py/accept_webhook. Then if the SEND_FROM_WEBHOOK Option is set to True, - the semester of the webhook request equals courses.util.get_current_semester(), the new - course status is either "O" (meaning open) or "C" (meaning closed), and the current datetime - is less than the current AddDropPeriod's end field (if not null), then the method calls - alert/views.py/alert_for_course for the relevant course. That method then calls - alert/tasks.py/send_course_alerts asynchronously using the Celery delay function. - This allows for alerts to be queued without holding up the response. The send_course_alerts - function then loops through all registrations for the given section and calls - alert/tasks.py/send_alert asynchronously (again using Celery delay) which then calls the - Registration's alert method. In each Registration's alert method, a subclass of the - alert/alerts.py/Alert class is instantiated for each of the appropriate notification - channels (text, email, and/or push notification based on the User's settings). The - notification is then sent with the send_alert method on each Alert object. The send_alert - method calls other functions in alert/alerts.py to actually send out the alerts. - """ - - created_at = models.DateTimeField( - auto_now_add=True, help_text="The datetime at which this registration was created." - ) - original_created_at = models.DateTimeField( - null=True, - help_text=dedent( - """ - The datetime at which the tail of the resubscribe chain to which this registration belongs - was created. 
In other words, the datetime at which the user created the original - registration for this section, before resubscribing some number of times - (0 or more) to reach this registration. - """ - ), - ) - updated_at = models.DateTimeField( - auto_now=True, help_text="The datetime at which this registration was last modified." - ) - - SOURCE_CHOICES = ( - ("PCA", "Penn Course Alert"), - ("API", "3rd Party Integration"), - ("PCP", "Penn Course Plan"), - ("PCR", "Penn Course Review"), - ("PM", "Penn Mobile"), - ("SCRIPT_PCN", "The loadregistrations_pcn shell command"), - ("SCRIPT_PCA", "The loadregistrations_pca shell command"), - ) - - source = models.CharField( - max_length=16, - choices=SOURCE_CHOICES, - help_text="Where did the registration come from? Options and meanings: " - + string_dict_to_html(dict(SOURCE_CHOICES)), - ) - - api_key = models.ForeignKey( - "courses.APIKey", - blank=True, - null=True, - on_delete=models.CASCADE, - help_text=dedent( - """ - An API key for 3rd party alternatives to PCA. This is currently unused now that - Penn Course Notify has fallen, but may be used in the future. - """ - ), - ) - - user = models.ForeignKey( - get_user_model(), - on_delete=models.CASCADE, - blank=True, - null=True, - help_text=dedent( - """ - The User that registered for this alert. This object will be none if registration occurred - before the PCA refresh of Spring 2020 (before the refresh user's were only identified by - their email and phone numbers, which are legacy fields in this model now). This object - might also be none if registration occurred through a 3rd part API such as Penn Course - Notify (now that Notify has fallen this is an unlikely event). - """ - ), - ) - email = models.EmailField( - blank=True, - null=True, - help_text=dedent( - """ - A legacy field that stored the user's email before the Spring 2020 PCA refresh. Currently, - for all new registrations the email and phone fields will be None and contact information - can be found in the User's UserProfile object (related_name is profile, so you can - access the profile from the User object with `.user.profile`). - """ - ), - ) - phone = models.CharField( - blank=True, - null=True, - max_length=100, - help_text=dedent( - """ - A legacy field that stored the user's phone before the Spring 2020 PCA refresh. Currently, - for all new registrations the email and phone fields will be None and contact information - can be found in the User's UserProfile object (related_name is profile, so you can - access the profile from the User object with `.user.profile`). - """ - ), - ) - section = models.ForeignKey( - Section, - on_delete=models.CASCADE, - related_name="registrations", - help_text="The section that the user registered to be notified about.", - ) - cancelled = models.BooleanField( - default=False, - help_text=dedent( - """ - Defaults to False, changed to True if the registration has been cancelled. A cancelled - registration will not trigger any alerts to be sent even if the relevant section opens. - A cancelled section can be resubscribed to (unlike deleted alerts), and will show up - on the manage alerts page on the frontend (also unlike deleted alerts). Note that once - a registration is cancelled, it cannot be uncancelled (resubscribing creates a new - registration which is accessible via the resubscribed_to field, related name of - resubscribed_from). - """ - ), - ) - cancelled_at = models.DateTimeField( - blank=True, - null=True, - help_text="When was the registration cancelled? 
Null if it hasn't been cancelled.", - ) - deleted = models.BooleanField( - default=False, - help_text=dedent( - """ - Defaults to False, changed to True if the registration has been deleted. A deleted - registration will not trigger any alerts to be sent even if the relevant section opens. - A deleted section cannot be resubscribed to or undeleted, and will not show up on the - manage alerts page on the frontend. It is kept in the database for analytics purposes, - even though it serves no immediate functional purpose for its original user. - """ - ), - ) - deleted_at = models.DateTimeField( - blank=True, - null=True, - help_text="When was the registration deleted? Null if it hasn't been deleted.", - ) - auto_resubscribe = models.BooleanField( - default=False, - help_text=dedent( - """ - Defaults to False, in which case a registration will not be automatically resubscribed - after it triggers an alert to be sent (but the user can still resubscribe to a sent alert, - as long as it is not deleted). If set to True, the registration will be automatically - resubscribed to once it triggers an alert to be sent (this is useful in the case of - volatile sections which are opening and closing frequently, often before the user has - time to register). - """ - ), - ) - notification_sent = models.BooleanField( - default=False, help_text="True if an alert has been sent to the user, false otherwise." - ) - notification_sent_at = models.DateTimeField( - blank=True, - null=True, - help_text=dedent( - """ - When was an alert sent to the user as a result of this registration? - Null if an alert was not sent. - """ - ), - ) - close_notification = models.BooleanField( - default=False, - help_text=dedent( - """Defaults to false. If set to true, the user will receive - a close notification (an alert when the section closes after an - alert was sent for it opening). - """ - ), - ) - close_notification_sent = models.BooleanField( - default=False, - help_text="True if a close notification has been sent to the user, false otherwise.", - ) - close_notification_sent_at = models.DateTimeField( - blank=True, - null=True, - help_text=dedent( - """ - When was a close notification sent to the user as a result of this registration? - Null if a close notification was not sent. - """ - ), - ) - - METHOD_CHOICES = ( - ("", "Unsent"), - ("LEG", "[Legacy] Sequence of course API requests"), - ("WEB", "Webhook"), - ("SERV", "Course Status Service"), - ("ADM", "Admin Interface"), - ) - notification_sent_by = models.CharField( - max_length=16, - choices=METHOD_CHOICES, - default="", - blank=True, - help_text="What triggered the alert to be sent? Options and meanings: " - + string_dict_to_html(dict(METHOD_CHOICES)), - ) - close_notification_sent_by = models.CharField( - max_length=16, - choices=METHOD_CHOICES, - default="", - blank=True, - help_text="What triggered the close notification to be sent? Options and meanings: " - + string_dict_to_html(dict(METHOD_CHOICES)), - ) - - # track resubscriptions - resubscribed_from = models.OneToOneField( - "Registration", - blank=True, - null=True, - on_delete=models.CASCADE, - related_name="resubscribed_to", - help_text=dedent( - """ - The registration which was resubscribed to, triggering the creation of this registration. - If this registration is the original registration in its resubscribe chain (the tail), - this field is null. The related field, 'resubscribed_to' only exists as an attribute of - a Registration object if the registration has been resubscribed. 
In that case, - the field resubscribed_to will point to the next element in the resubscribe chain. - If the field does not exist, this registration is the head of its resubscribe chain - (note that an element can be both the head and the tail of its resubscribe chain, - in which case it is the only element in its resubscribe chain). - """ - ), - ) - head_registration = models.ForeignKey( - "Registration", - on_delete=models.CASCADE, - help_text=dedent( - """ - The head of this registration's resubscribe chain (pointing to - itself if this registration is the head of its chain). If you call `.save()` - on a registration without setting its `head_registration` field, the overridden - `Registration.save()` method will automatically set its `head_registration` - to a self-reference. - """ - ), - ) - - @staticmethod - def is_active_filter(): - """ - Returns a dict of filters defining the behavior of the is_active property. - Also used in database filtering of registrations (you cannot filter by a property value); - unpack the filters with two stars. - Example: - Registration.objects.filter(**Registration.is_active_filter()) - """ - return { - "notification_sent": False, - "deleted": False, - "cancelled": False, - } - - @property - def is_active(self): - """ - True if the registration would send an alert when the watched section changes to open, - False otherwise. This is equivalent to - [not(notification_sent or deleted or cancelled) and semester is current]. - """ - return does_object_pass_filter(self, self.is_active_filter()) - - @property - def deactivated_at(self): - """ - The datetime at which this registration was deactivated, if it is not active, - otherwise None. This checks all fields in the is_active definition which have a - corresponding field+"_at" datetime field, such as notification_sent_at, - deleted_at, or cancelled_at, and takes the minimum non-null datetime from these (or - returns null if they are all null). - """ - if self.is_active: - return None - deactivated_dt = None - for field in self.is_active_filter().keys(): - if hasattr(self, field + "_at"): - field_changed_at = getattr(self, field + "_at") - if deactivated_dt is None or ( - field_changed_at is not None and field_changed_at < deactivated_dt - ): - deactivated_dt = field_changed_at - return deactivated_dt - - @staticmethod - def is_waiting_for_close_filter(): - """ - Returns a dict of filters defining the behavior of the is_waiting_for_close property - (defining whether the registration is waiting to send a close notification to the user - once the section closes). - Also used in database filtering of registrations (you cannot filter by a property value); - unpack the filters with two stars. - Example: - Registration.objects.filter(**Registration.is_waiting_for_close_filter()) - """ - return { - "notification_sent": True, - "close_notification": True, - "close_notification_sent": False, - "head_registration__deleted": False, - "head_registration__cancelled": False, - } - - @property - def is_waiting_for_close(self): - """ - True if the registration is waiting to send a close notification to the user - once the section closes. False otherwise. - """ - return does_object_pass_filter(self, self.is_waiting_for_close_filter()) - - @property - def last_notification_sent_at(self): - """ - The last time the user was sent an opening notification for this registration's - section. This property is None (or null in JSON) if no notification has been sent to the - user for this registration's section. 
- - This is used on the frontend to tell the user a last time an alert was sent for - the SECTION of a certain registration in the manage alerts page. Since the idea of - Registration objects and resubscribe chains is completely abstracted out of the User's - understanding, they expect alerts to work by section (so the "LAST NOTIFIED" - column should tell them the last time they were alerted about that section). - """ - return ( - self.section.registrations.filter(user=self.user, notification_sent_at__isnull=False) - .aggregate(max_notification_sent_at=Max("notification_sent_at")) - .get("max_notification_sent_at", None) - ) - - def __str__(self): - return "%s: %s @ %s" % ( - (str(self.user) if self.user is not None else None) or self.email or self.phone, - str(self.section), - str(self.created_at), - ) - - def validate_phone(self): - """ - This method converts the phone field to the E164 format, unless the number is in a form - unparseable by the [phonenumbers library](https://pypi.org/project/phonenumbers/), - in which case it sets it to None. - """ - try: - phone_number = phonenumbers.parse(self.phone, "US") - self.phone = phonenumbers.format_number( - phone_number, phonenumbers.PhoneNumberFormat.E164 - ) - except phonenumbers.phonenumberutil.NumberParseException: - # if the phone number is unparseable, don't include it. - self.phone = None - - def save(self, load_script=False, *args, **kwargs): - """ - This save method enforces the following invariants on the registration: - - The `phone` field is converted to E164 format, or set to `None` if unparseable. - - If the `user` field is not `None`, but either of the legacy `phone` - or `email` fields are not `None`, the contents of the `phone` / `email` fields - are moved to the `profile` of the `user` object (this was only a concern during the - PCA refresh transition process, when we switched away from using these legacy fields). - - If `head_registration` is `None`, it is set to a self-reference. - - Any other registration whose `head_registration` equals `self.resubscribed_from` - are updated to have `self` as their `head_registration`. - - The `original_created_at` field is set to the `created_at` of the tail of the - resubscribe chain. - - If `load_script` is set to False (indicating this registration is being actively - created by a PCA user, rather than being loaded in from an external data source), - and the registration's semester is the current semester, and the registration - has just been created or deactivated, then the `PcaDemandDistributionEstimate` model - and `current_demand_distribution_estimate` cache are asynchronously updated - (via a celery task) to reflect the resulting section demand change. 
- """ - from alert.tasks import section_demand_change - from courses.util import get_set_id, is_fk_set - - # ^ imported here to avoid circular imports - - with transaction.atomic(): - self.validate_phone() - if self.user is not None: - if self.email is not None: - user_data, _ = UserProfile.objects.get_or_create(user=self.user) - user_data.email = self.email - user_data.save() - self.user.profile = user_data - self.user.save() - self.email = None - if self.phone is not None: - user_data, _ = UserProfile.objects.get_or_create(user=self.user) - user_data.phone = self.phone - user_data.save() - self.user.profile = user_data - self.user.save() - self.phone = None - - # Find old registration - old_registration = Registration.objects.get(id=self.id) if self.id else None - was_active = bool(old_registration and old_registration.is_active) - - # Set head_registration to self if not set - if not is_fk_set(self, "head_registration"): - self.head_registration_id = self.id or get_set_id(self) - - super().save(*args, **kwargs) - - if self.resubscribed_from_id: - Registration.objects.filter(head_registration_id=self.resubscribed_from_id).update( - head_registration=self - ) - - if self.original_created_at is None: - self.original_created_at = self.get_original_registration().created_at - super().save() - - if ( - not load_script - and self.section.semester == get_current_semester() - and was_active != self.is_active - ): - section = self.section - volume_change = int(self.is_active) - int(was_active) - if volume_change > 0 or section.registration_volume >= 1: - section.registration_volume += volume_change - section.save() - section_demand_change.delay(section.id, self.updated_at) - - def alert(self, forced=False, sent_by="", close_notification=False): - """ - Returns true iff an alert was successfully sent through at least one medium to the user. 
- """ - - if not forced: - if close_notification and not self.is_waiting_for_close: - return False - if not close_notification and not self.is_active: - return False - - push_notification = ( - self.user and self.user.profile and self.user.profile.push_notifications - ) # specifies whether we should use a push notification instead of a text - text_result = False - if not push_notification and not close_notification: - # never send close notifications by text - text_result = Text(self).send_alert(close_notification=close_notification) - if text_result is None: - logging.debug( - "ERROR OCCURRED WHILE ATTEMPTING TEXT NOTIFICATION FOR " + self.__str__() - ) - email_result = Email(self).send_alert(close_notification=close_notification) - if email_result is None: - logging.debug( - "ERROR OCCURRED WHILE ATTEMPTING EMAIL NOTIFICATION FOR " + self.__str__() - ) - push_notif_result = False - if push_notification: - push_notif_result = PushNotification(self).send_alert( - close_notification=close_notification - ) - if push_notif_result is None: - logging.debug( - "ERROR OCCURRED WHILE ATTEMPTING PUSH NOTIFICATION FOR " + self.__str__() - ) - if not email_result and not text_result and not push_notif_result: - logging.debug("ALERT CALLED BUT NOTIFICATION NOT SENT FOR " + self.__str__()) - return False - if not close_notification: - logging.debug("NOTIFICATION SENT FOR " + self.__str__()) - self.notification_sent = True - self.notification_sent_at = timezone.now() - self.notification_sent_by = sent_by - self.save() - if self.auto_resubscribe: - self.resubscribe() - return True - else: - logging.debug("CLOSE NOTIFICATION SENT FOR " + self.__str__()) - self.close_notification_sent = True - self.close_notification_sent_at = timezone.now() - self.close_notification_sent_by = sent_by - self.save() - return True - - def resubscribe(self): - """ - Resubscribe for notifications. If the head of this registration's resubscribe chain - is active, just return that registration (don't create a new active registration - for no reason). Otherwise, add a new active registration as the new head of the - resubscribe chain. - - Resubscription is idempotent. No matter how many times you call it (without - alert() being called on the registration), only one Registration model will - be created. - :return: Registration object for the resubscription - """ - most_recent_reg = self.get_most_current() - if most_recent_reg.is_active: # if the head of this resub chain is active - return most_recent_reg # don't create duplicate registrations for no reason. - - new_registration = Registration( - user=self.user, - email=self.email, - phone=self.phone, - section=self.section, - auto_resubscribe=self.auto_resubscribe, - close_notification=self.close_notification, - resubscribed_from=most_recent_reg, - original_created_at=self.original_created_at, - ) - new_registration.save() - return new_registration - - def get_resubscribe_group(self): - """ - Return a QuerySet of all the registrations in this registration's resubscribe chain. - """ - return Registration.objects.filter(head_registration=self.head_registration) - - def get_most_current(self): - """ - Returns the head of the resubscribe chain (the most recent registration). - """ - return self.get_resubscribe_group().get(resubscribed_to__isnull=True) - - def get_original_registration(self): - """ - Returns the tail of the resubscribe chain (the original registration). 
- """ - return self.get_resubscribe_group().get(resubscribed_from__isnull=True) - - def get_most_current_iter(self): - """ - An iterative version of get_most_current that doesn't utilize - head_registration relations. - """ - most_current = self - while hasattr(most_current, "resubscribed_to"): - most_current = most_current.resubscribed_to - return most_current - - def get_original_registration_iter(self): - """ - An iterative version of get_original_registration that doesn't utilize - head_registration relations. - """ - original = self - while original.resubscribed_from: - original = original.resubscribed_from - return original - - -def register_for_course( - course_code, - email_address=None, - phone=None, - source=SOURCE_PCA, - api_key=None, - user=None, - auto_resub=False, - close_notification=False, -): - """ - This method is for the PCA 3rd party API (originally planned to service - Penn Course Notify, until Notify's rejection of PCA's help and eventual downfall - (coincidence? we think not...). It still may be used in the future so we are - keeping the code. - Returns RegStatus., section.full_code, registration - or None for the second two when appropriate - """ - if (not user and not email_address and not phone) or ( - user - and not user.profile.email - and not user.profile.phone - and not user.profile.push_notifications - ): - return RegStatus.NO_CONTACT_INFO, None, None - try: - _, section = get_course_and_section(course_code, get_current_semester()) - except (Course.DoesNotExist, Section.DoesNotExist, ValueError): - return RegStatus.COURSE_NOT_FOUND, None, None - - if user is None: - registration = Registration( - section=section, email=email_address, phone=phone, source=source - ) - registration.validate_phone() - if section.registrations.filter( - email=email_address, phone=registration.phone, **Registration.is_active_filter() - ).exists(): - return RegStatus.OPEN_REG_EXISTS, section.full_code, None - else: - if section.registrations.filter(user=user, **Registration.is_active_filter()).exists(): - return RegStatus.OPEN_REG_EXISTS, section.full_code, None - if close_notification and not user.profile.email and not user.profile.push_notifications: - return RegStatus.TEXT_CLOSE_NOTIFICATION, section.full_code, None - registration = Registration(section=section, user=user, source=source) - registration.auto_resubscribe = auto_resub - registration.close_notification = close_notification - - registration.api_key = api_key - registration.save() - - return RegStatus.SUCCESS, section.full_code, registration - - -class PcaDemandDistributionEstimate(models.Model): - """ - This model tracks/estimates changes in the distribution of - raw PCA demand ratios across all sections in a given semester. - Raw PCA demand (as opposed to "Relative PCA demand", - which maps demand values according to an estimated CDF function to a fixed range of [0,1]) - is defined for any given section as (PCA registration volume)/(section capacity). - Note that capacity is not stored as a field, while volume is. We do not track capacity changes, - and for this reason, the recompute_demand_distribution_estimates function (in the recomputestats - management command script) should be run after each run of the registrarimport script, - in case capacity changes affect historical distributions. - """ - - semester = models.CharField( - max_length=5, - db_index=True, - help_text=dedent( - """ - The semester of this demand distribution estimate (of the form YYYYx where x is - A [for spring], B [summer], or C [fall]), e.g. 
`2019C` for fall 2019. - """ - ), - ) - - created_at = models.DateTimeField( - default=timezone.now, - db_index=True, - help_text="The datetime at which the distribution estimates were updated.", - ) - - percent_through_add_drop_period = models.FloatField( - default=0, - help_text=( - "The percentage through the add/drop period at which this demand distribution " - "estimate change occurred. This percentage is constrained within the range [0,1]." - ), - ) # This field is maintained in the save() method of alerts.models.AddDropPeriod, - # and the save() method of PcaDemandDistributionEstimate - - in_add_drop_period = models.BooleanField( - default=False, - help_text="Was this demand distribution estimate created during the add/drop period?", - ) # This field is maintained in the save() method of alerts.models.AddDropPeriod, - # and the save() method of PcaDemandDistributionEstimate - - highest_demand_section = models.ForeignKey( - Section, - on_delete=models.CASCADE, - related_name="highest_demand_distribution_estimates", - help_text="A section with the highest raw demand value at this time.", - ) # It is necessary to define related_name explicitly to avoid related name clash - highest_demand_section_volume = models.IntegerField( - help_text="The registration volume of the highest_demand_section at this time." - ) - lowest_demand_section = models.ForeignKey( - Section, - on_delete=models.CASCADE, - related_name="lowest_demand_distribution_estimates", - help_text="A section with the lowest raw demand value at this time.", - ) # It is necessary to define related_name explicitly to avoid related name clash - lowest_demand_section_volume = models.IntegerField( - help_text="The registration volume of the lowest_demand_section at this time." - ) - - csrdv_frac_zero = models.FloatField( - null=True, - blank=True, - help_text=( - "The fraction of closed sections' raw demand values that are 0 (non-positive), " - "expressed as a float in the range [0,1]. Null if there are no closed sections. " - "The abbreviation 'csrdv' stands for 'closed section raw demand values', not to be " - "confused with 'csprdv', which stands for 'closed section positive raw demand values'." - ), - ) - csprdv_lognorm_param_shape = models.FloatField( - null=True, - blank=True, - help_text=( - "The shape parameter of the fitted log-normal distribution on positive " - "raw demand values from closed sections. Null if there are no closed sections that " - "have positive raw demand values. The abbreviation 'csprdv' stands for " - "'closed section positive raw demand values'." - ), - ) - csprdv_lognorm_param_loc = models.FloatField( - null=True, - blank=True, - help_text=( - "The loc parameter of the fitted log-normal distribution on positive " - "raw demand values from closed sections. Null if there are no closed sections that " - "have positive raw demand values. The abbreviation 'csprdv' stands for " - "'closed section positive raw demand values'." - ), - ) - csprdv_lognorm_param_scale = models.FloatField( - null=True, - blank=True, - help_text=( - "The scale parameter of the fitted log-normal distribution on positive " - "raw demand values from closed sections. Null if there are no closed sections that " - "have positive raw demand values. The abbreviation 'csprdv' stands for " - "'closed section positive raw demand values'." 
- ), - ) - - @property - def highest_raw_demand(self): - if ( - self.highest_demand_section is None - or self.highest_demand_section.capacity is None - or self.highest_demand_section.capacity <= 0 - ): - return None - return float(self.highest_demand_section_volume) / float( - self.highest_demand_section.capacity - ) - - @property - def lowest_raw_demand(self): - if ( - self.lowest_demand_section is None - or self.lowest_demand_section.capacity is None - or self.lowest_demand_section.capacity <= 0 - ): - return None - return float(self.lowest_demand_section_volume) / float(self.lowest_demand_section.capacity) - - def save(self, *args, **kwargs): - """ - This save method first gets the add/drop period object for this - PcaDemandDistributionEstimate object's semester (either by calling the - get_or_create_add_drop_period method or by using a passed-in add_drop_period kwarg, - which can be used for efficiency in bulk operations over PcaDemandDistributionEstimate - objects). - """ - if "add_drop_period" in kwargs: - add_drop_period = kwargs["add_drop_period"] - del kwargs["add_drop_period"] - else: - add_drop_period = get_or_create_add_drop_period(self.semester) - super().save(*args, **kwargs) - created_at = self.created_at - start = add_drop_period.estimated_start - end = add_drop_period.estimated_end - if created_at < start: - self.in_add_drop_period = False - self.percent_through_add_drop_period = 0 - elif created_at > end: - self.in_add_drop_period = False - self.percent_through_add_drop_period = 1 - else: - self.in_add_drop_period = True - self.percent_through_add_drop_period = (created_at - start) / (end - start) - super().save() - - def __str__(self): - return f"PcaDemandDistributionEstimate {self.semester} @ {self.created_at}" - - -def validate_add_drop_semester(semester): - """ - Validate the passed-in string as a fall or spring semester, such as 2020A or 2021C. - """ - if len(semester) != 5: - raise ValidationError( - f"Semester {semester} is invalid; valid semesters contain 5 characters." - ) - if semester[4] not in ["A", "C"]: - raise ValidationError(f"Semester {semester} is invalid; valid semesters end in 'A' or 'C'.") - if not semester[:4].isnumeric(): - raise ValidationError( - f"Semester {semester} is invalid; the 4-letter prefix of a valid semester is numeric." - ) - - -class AddDropPeriod(models.Model): - """ - This model tracks the start and end date of the add drop period corresponding to - a semester (only fall or spring semesters are supported). - """ - - semester = models.CharField( - max_length=5, - db_index=True, - unique=True, - validators=[validate_add_drop_semester], - help_text=dedent( - """ - The semester of this add drop period (of the form YYYYx where x is - A [for spring], or C [fall]), e.g. `2019C` for fall 2019. - """ - ), - ) - start = models.DateTimeField( - null=True, blank=True, help_text="The datetime at which the add drop period started." - ) - end = models.DateTimeField( - null=True, blank=True, help_text="The datetime at which the add drop period ended." - ) - - # estimated_start and estimated_end are filled in automatically in the overridden save method, - # so there is no need to maintain them (they are derivative fields of start and end). - # The only reason why they aren't properties is we sometimes need to use them in database - # filters / aggregations. 
- estimated_start = models.DateTimeField( - null=True, - blank=True, - help_text=dedent( - """ - This field estimates the start of the add/drop period based on the semester - and historical data, even if the start field hasn't been filled in yet. - It equals the start of the add/drop period for this semester if it is explicitly set, - otherwise the most recent non-null start to an add/drop period, otherwise - (if none exist), estimate as April 5 @ 7:00am ET of the same year (for a fall semester), - or November 16 @ 7:00am ET of the previous year (for a spring semester). - """ - ), - ) - estimated_end = models.DateTimeField( - null=True, - blank=True, - help_text=dedent( - """ - This field estimates the end of the add/drop period based on the semester - and historical data, even if the end field hasn't been filled in yet. - The end of the add/drop period for this semester, if it is explicitly set, otherwise - the most recent non-null end to an add/drop period, otherwise (if none exist), - estimate as October 12 @ 11:59pm ET (for a fall semester), - or February 22 @ 11:59pm ET (for a spring semester), - of the same year. - """ - ), - ) - - def get_percent_through_add_drop(self, dt): - """ - The percentage through this add/drop period at which this dt occured. - This percentage is constrained within the range [0,1]." - """ - start = self.estimated_start - end = self.estimated_end - if dt < start: - return 0 - if dt > end: - return 1 - else: - return float((dt - start) / (end - start)) - - def save(self, *args, **kwargs): - """ - This save method invalidates the add_drop_periods cache, sets the estimated_start and - estimated_end fields, updates the in_add_drop_period and percent_through_add_drop_period - fields of StatusUpdates and PcaDemandDistributionEstimates from this semester, and then - calls the overridden save method. - """ - from alert.tasks import recompute_percent_open_async # avoid circular import - - with transaction.atomic(): - super().save(*args, **kwargs) - cache.delete("add_drop_periods") # invalidate add_drop_periods cache - self.estimated_start = self.estimate_start() - self.estimated_end = self.estimate_end() - period = self.estimated_end - self.estimated_start - for model, sem_filter_key in [ - (StatusUpdate, "section__course__semester"), - (PcaDemandDistributionEstimate, "semester"), - ]: - sem_filter = {sem_filter_key: self.semester} - model.objects.filter(**sem_filter).update( - in_add_drop_period=Case( - When( - Q(created_at__gte=self.estimated_start) - & Q(created_at__lte=self.estimated_end), - then=Value(True), - ), - default=Value(False), - output_field=models.BooleanField(), - ), - percent_through_add_drop_period=Case( - When( - Q(created_at__lte=self.estimated_start), - then=Value(0), - ), - When(Q(created_at__gte=self.estimated_end), then=Value(1)), - default=( - Extract(F("created_at"), "epoch") - - Value(self.estimated_start.timestamp()) - ) - / Value(period.total_seconds()), - output_field=models.FloatField(), - ), - ) - super().save() - recompute_percent_open_async.delay(self.semester) - - def estimate_start(self): - """ - The start of the add/drop period for this semester, if it is explicitly set in the - `start` field, otherwise the `estimated_start` field, if it is already set, - otherwise the most recent non-null start to an add/drop period, otherwise (if none exist), - estimate as April 5 @ 7:00am ET of the same year (for a fall semester), - or November 16 @ 7:00am ET of the previous year (for a spring semester). 
- """ - if self.start is not None: - return self.start - if self.estimated_start is not None: - return self.estimated_start - last_start = ( - AddDropPeriod.objects.filter( - start__isnull=False, semester__endswith=str(self.semester)[4] - ) - .order_by("-semester") - .first() - ) - if str(self.semester)[4] == "C": # fall semester - s_year = int(str(self.semester)[:4]) - s_month = 4 - s_day = 5 - else: # spring semester - s_year = int(str(self.semester)[:4]) - 1 - s_month = 11 - s_day = 16 - if last_start is None: - tz = gettz(TIME_ZONE) - return make_aware( - datetime.strptime(f"{s_year}-{s_month}-{s_day} 07:00", "%Y-%m-%d %H:%M"), - timezone=tz, - ) - return last_start.start.replace(year=s_year) - - def estimate_end(self): - """ - The end of the add/drop period for this semester, if it is explicitly set in the - `end` field, otherwise the `estimated_end` field, if it is already set, otherwise - the most recent non-null end to an add/drop period, otherwise (if none exist), - estimate as October 12 @ 11:59pm ET (for a fall semester), - or February 22 @ 11:59pm ET (for a spring semester), - of the same year. - """ - if self.end is not None: - return self.end - if self.estimated_end is not None: - return self.estimated_end - last_end = ( - AddDropPeriod.objects.filter( - end__isnull=False, semester__endswith=str(self.semester)[4] - ) - .order_by("-semester") - .first() - ) - e_year = int(str(self.semester)[:4]) - if last_end is None: - if str(self.semester)[4] == "C": # fall semester - e_month = 10 - e_day = 12 - else: # spring semester - e_month = 2 - e_day = 22 - tz = gettz(TIME_ZONE) - return make_aware( - datetime.strptime(f"{e_year}-{e_month}-{e_day} 23:59", "%Y-%m-%d %H:%M"), - timezone=tz, - ) - return last_end.end.replace(year=e_year) - - def __str__(self): - return f"AddDropPeriod {self.semester}" +import logging +from datetime import datetime +from enum import Enum, auto +from textwrap import dedent + +import phonenumbers # library for parsing and formatting phone numbers. +from dateutil.tz import gettz +from django.contrib.auth import get_user_model +from django.core.cache import cache +from django.core.exceptions import ValidationError +from django.db import models, transaction +from django.db.models import Case, F, Max, Q, Value, When +from django.db.models.functions import Extract +from django.utils import timezone +from django.utils.timezone import make_aware + +from alert.alerts import Email, PushNotification, Text +from courses.models import Course, Section, StatusUpdate, UserProfile, string_dict_to_html +from courses.util import ( + does_object_pass_filter, + get_course_and_section, + get_current_semester, + get_or_create_add_drop_period, +) +from PennCourses.settings.base import TIME_ZONE + + +class RegStatus(Enum): + SUCCESS = auto() + OPEN_REG_EXISTS = auto() + COURSE_OPEN = auto() + COURSE_NOT_FOUND = auto() + NO_CONTACT_INFO = auto() + TEXT_CLOSE_NOTIFICATION = auto() + + +SOURCE_PCA = "PCA" +SOURCE_API = "API" + + +class Registration(models.Model): + """ + A registration for sending an alert to the user upon the opening of a course + during open registration. + + In addition to sending alerts for when a class opens up, we have also implemented + an optionally user-enabled feature called "close notifications". + If a registration has close_notification enabled, it will act normally when the watched + section opens up for the first time (triggering an alert to be sent). 
However, once the + watched section closes, it will send another alert (the email alert will be in the same + chain as the original alert) to let the user know that the section has closed. Thus, + if a user sees a PCA notification on their phone during a class for instance, they won't + need to frantically open up their laptop and check PennInTouch to see if the class is still + open just to find that it is already closed. To avoid spam and wasted money, we DO NOT + send any close notifications over text. So the user must have an email saved or use + push notifications in order to be able to enable close notifications on a registration. + Note that the close_notification setting carries over across resubscriptions, but can be + disabled at any time using a PUT request to /api/alert/registrations/{id}/. + + An important concept for this Model is that of the "resubscribe chain". A resubscribe chain + is a chain of Registration objects where the tail of the chain was the original Registration + created through a POST request to /api/alert/registrations/ specifying a new section (one + that the user wasn't already registered to receive alerts for). Each next element in the chain + is a Registration created by resubscribing to the previous Registration (once that + Registration had triggered an alert to be sent), either manually by the user or + automatically if auto_resubscribe was set to true. Then, it follows that the head of the + resubscribe chain is the most relevant Registration for that user/section combo; if any + of the registrations in the chain are active, it would be the head. And if the head + is active, none of the other registrations in the chain are active. That said, a non-head + Registration may be waiting to send a close notification (if the watched section hasn't closed + yet). + + Note that a Registration will send an alert when the section it is watching opens, if and + only if it hasn't sent one before, it isn't cancelled, and it isn't deleted. If a + registration would send an alert when the section it is watching opens, we call it + "active". This rule is encoded in the is_active property. You can also filter + for active registrations using the static is_active_filter() method which returns a + dictionary that you can unpack into the kwargs of the filter method + (you cannot filter on a property). A Registration will send a close notification + when the section it is watching closes, if and only if it has already sent an open alert, + the user has enabled close notifications for that section, the Registration hasn't sent a + close_notification before, and its head registration isn't cancelled or deleted. + If a registration would send a close notification when the section it is watching closes, + we call it "waiting for close". This rule is encoded in the is_waiting_for_close property. + You can also filter for such registrations using the static is_waiting_for_close_filter() + method which returns a dictionary that you can unpack into the kwargs of the filter method + (you cannot filter on a property). + + After the PCA backend refactor in 2019C/2020A, all PCA Registrations have a user field + pointing to the user's Penn Labs Accounts User object. In other words, we implemented a + user/accounts system for PCA which required that + people log in to use the website. Thus, the contact information used in PCA alerts + is taken from the user's User Profile. You can edit this contact information using + a PUT or PATCH request to /accounts/me/. 
If push_notifications is set to True, then + a push notification will be sent when the user is alerted, but no text notifications will + be sent (as that would be a redundant alert to the user's phone). Otherwise, an email + or a text alert is sent if and only if contact information for that medium exists in + the user's profile. + + Alerts are triggered by webhook requests from the UPenn OpenData API + (https://esb.isc-seo.upenn.edu/8091/documentation/#coursestatuswebhookservice), and accepted + by alert/views.py/accept_webhook. Then if the SEND_FROM_WEBHOOK Option is set to True, + the semester of the webhook request equals courses.util.get_current_semester(), the new + course status is either "O" (meaning open) or "C" (meaning closed), and the current datetime + is less than the current AddDropPeriod's end field (if not null), then the method calls + alert/views.py/alert_for_course for the relevant course. That method then calls + alert/tasks.py/send_course_alerts asynchronously using the Celery delay function. + This allows for alerts to be queued without holding up the response. The send_course_alerts + function then loops through all registrations for the given section and calls + alert/tasks.py/send_alert asynchronously (again using Celery delay) which then calls the + Registration's alert method. In each Registration's alert method, a subclass of the + alert/alerts.py/Alert class is instantiated for each of the appropriate notification + channels (text, email, and/or push notification based on the User's settings). The + notification is then sent with the send_alert method on each Alert object. The send_alert + method calls other functions in alert/alerts.py to actually send out the alerts. + """ + + created_at = models.DateTimeField( + auto_now_add=True, help_text="The datetime at which this registration was created." + ) + original_created_at = models.DateTimeField( + null=True, + help_text=dedent( + """ + The datetime at which the tail of the resubscribe chain to which this registration belongs + was created. In other words, the datetime at which the user created the original + registration for this section, before resubscribing some number of times + (0 or more) to reach this registration. + """ + ), + ) + updated_at = models.DateTimeField( + auto_now=True, help_text="The datetime at which this registration was last modified." + ) + + SOURCE_CHOICES = ( + ("PCA", "Penn Course Alert"), + ("API", "3rd Party Integration"), + ("PCP", "Penn Course Plan"), + ("PCR", "Penn Course Review"), + ("PM", "Penn Mobile"), + ("SCRIPT_PCN", "The loadregistrations_pcn shell command"), + ("SCRIPT_PCA", "The loadregistrations_pca shell command"), + ) + + source = models.CharField( + max_length=16, + choices=SOURCE_CHOICES, + help_text="Where did the registration come from? Options and meanings: " + + string_dict_to_html(dict(SOURCE_CHOICES)), + ) + + api_key = models.ForeignKey( + "courses.APIKey", + blank=True, + null=True, + on_delete=models.CASCADE, + help_text=dedent( + """ + An API key for 3rd party alternatives to PCA. This is currently unused now that + Penn Course Notify has fallen, but may be used in the future. + """ + ), + ) + + user = models.ForeignKey( + get_user_model(), + on_delete=models.CASCADE, + blank=True, + null=True, + help_text=dedent( + """ + The User that registered for this alert. 
This object will be None if registration occurred
+ before the PCA refresh of Spring 2020 (before the refresh, users were only identified by
+ their email and phone numbers, which are legacy fields in this model now). This object
+ might also be None if registration occurred through a 3rd party API such as Penn Course
+ Notify (now that Notify has fallen this is an unlikely event).
+ """
+ ),
+ )
+ email = models.EmailField(
+ blank=True,
+ null=True,
+ help_text=dedent(
+ """
+ A legacy field that stored the user's email before the Spring 2020 PCA refresh. Currently,
+ for all new registrations the email and phone fields will be None and contact information
+ can be found in the User's UserProfile object (related_name is profile, so you can
+ access the profile from the User object with `.user.profile`).
+ """
+ ),
+ )
+ phone = models.CharField(
+ blank=True,
+ null=True,
+ max_length=100,
+ help_text=dedent(
+ """
+ A legacy field that stored the user's phone before the Spring 2020 PCA refresh. Currently,
+ for all new registrations the email and phone fields will be None and contact information
+ can be found in the User's UserProfile object (related_name is profile, so you can
+ access the profile from the User object with `.user.profile`).
+ """
+ ),
+ )
+ section = models.ForeignKey(
+ Section,
+ on_delete=models.CASCADE,
+ related_name="registrations",
+ help_text="The section that the user registered to be notified about.",
+ )
+ cancelled = models.BooleanField(
+ default=False,
+ help_text=dedent(
+ """
+ Defaults to False, changed to True if the registration has been cancelled. A cancelled
+ registration will not trigger any alerts to be sent even if the relevant section opens.
+ A cancelled section can be resubscribed to (unlike deleted alerts), and will show up
+ on the manage alerts page on the frontend (also unlike deleted alerts). Note that once
+ a registration is cancelled, it cannot be uncancelled (resubscribing creates a new
+ registration which is accessible via the resubscribed_to field, related name of
+ resubscribed_from).
+ """
+ ),
+ )
+ cancelled_at = models.DateTimeField(
+ blank=True,
+ null=True,
+ help_text="When was the registration cancelled? Null if it hasn't been cancelled.",
+ )
+ deleted = models.BooleanField(
+ default=False,
+ help_text=dedent(
+ """
+ Defaults to False, changed to True if the registration has been deleted. A deleted
+ registration will not trigger any alerts to be sent even if the relevant section opens.
+ A deleted section cannot be resubscribed to or undeleted, and will not show up on the
+ manage alerts page on the frontend. It is kept in the database for analytics purposes,
+ even though it serves no immediate functional purpose for its original user.
+ """
+ ),
+ )
+ deleted_at = models.DateTimeField(
+ blank=True,
+ null=True,
+ help_text="When was the registration deleted? Null if it hasn't been deleted.",
+ )
+ auto_resubscribe = models.BooleanField(
+ default=False,
+ help_text=dedent(
+ """
+ Defaults to False, in which case a registration will not be automatically resubscribed
+ after it triggers an alert to be sent (but the user can still resubscribe to a sent alert,
+ as long as it is not deleted). If set to True, the registration will be automatically
+ resubscribed to once it triggers an alert to be sent (this is useful in the case of
+ volatile sections which are opening and closing frequently, often before the user has
+ time to register).
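+ Auto-resubscription is performed by `Registration.alert()`, which calls `self.resubscribe()`
+ right after an opening alert is sent (see the `alert` and `resubscribe` methods below).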
+ """ + ), + ) + notification_sent = models.BooleanField( + default=False, help_text="True if an alert has been sent to the user, false otherwise." + ) + notification_sent_at = models.DateTimeField( + blank=True, + null=True, + help_text=dedent( + """ + When was an alert sent to the user as a result of this registration? + Null if an alert was not sent. + """ + ), + ) + close_notification = models.BooleanField( + default=False, + help_text=dedent( + """Defaults to false. If set to true, the user will receive + a close notification (an alert when the section closes after an + alert was sent for it opening). + """ + ), + ) + close_notification_sent = models.BooleanField( + default=False, + help_text="True if a close notification has been sent to the user, false otherwise.", + ) + close_notification_sent_at = models.DateTimeField( + blank=True, + null=True, + help_text=dedent( + """ + When was a close notification sent to the user as a result of this registration? + Null if a close notification was not sent. + """ + ), + ) + + METHOD_CHOICES = ( + ("", "Unsent"), + ("LEG", "[Legacy] Sequence of course API requests"), + ("WEB", "Webhook"), + ("SERV", "Course Status Service"), + ("ADM", "Admin Interface"), + ) + notification_sent_by = models.CharField( + max_length=16, + choices=METHOD_CHOICES, + default="", + blank=True, + help_text="What triggered the alert to be sent? Options and meanings: " + + string_dict_to_html(dict(METHOD_CHOICES)), + ) + close_notification_sent_by = models.CharField( + max_length=16, + choices=METHOD_CHOICES, + default="", + blank=True, + help_text="What triggered the close notification to be sent? Options and meanings: " + + string_dict_to_html(dict(METHOD_CHOICES)), + ) + + # track resubscriptions + resubscribed_from = models.OneToOneField( + "Registration", + blank=True, + null=True, + on_delete=models.CASCADE, + related_name="resubscribed_to", + help_text=dedent( + """ + The registration which was resubscribed to, triggering the creation of this registration. + If this registration is the original registration in its resubscribe chain (the tail), + this field is null. The related field, 'resubscribed_to' only exists as an attribute of + a Registration object if the registration has been resubscribed. In that case, + the field resubscribed_to will point to the next element in the resubscribe chain. + If the field does not exist, this registration is the head of its resubscribe chain + (note that an element can be both the head and the tail of its resubscribe chain, + in which case it is the only element in its resubscribe chain). + """ + ), + ) + head_registration = models.ForeignKey( + "Registration", + on_delete=models.CASCADE, + help_text=dedent( + """ + The head of this registration's resubscribe chain (pointing to + itself if this registration is the head of its chain). If you call `.save()` + on a registration without setting its `head_registration` field, the overridden + `Registration.save()` method will automatically set its `head_registration` + to a self-reference. + """ + ), + ) + + @staticmethod + def is_active_filter(): + """ + Returns a dict of filters defining the behavior of the is_active property. + Also used in database filtering of registrations (you cannot filter by a property value); + unpack the filters with two stars. 
+ Example: + Registration.objects.filter(**Registration.is_active_filter()) + """ + return { + "notification_sent": False, + "deleted": False, + "cancelled": False, + } + + @property + def is_active(self): + """ + True if the registration would send an alert when the watched section changes to open, + False otherwise. This is equivalent to + [not(notification_sent or deleted or cancelled) and semester is current]. + """ + return does_object_pass_filter(self, self.is_active_filter()) + + @property + def deactivated_at(self): + """ + The datetime at which this registration was deactivated, if it is not active, + otherwise None. This checks all fields in the is_active definition which have a + corresponding field+"_at" datetime field, such as notification_sent_at, + deleted_at, or cancelled_at, and takes the minimum non-null datetime from these (or + returns null if they are all null). + """ + if self.is_active: + return None + deactivated_dt = None + for field in self.is_active_filter().keys(): + if hasattr(self, field + "_at"): + field_changed_at = getattr(self, field + "_at") + if deactivated_dt is None or ( + field_changed_at is not None and field_changed_at < deactivated_dt + ): + deactivated_dt = field_changed_at + return deactivated_dt + + @staticmethod + def is_waiting_for_close_filter(): + """ + Returns a dict of filters defining the behavior of the is_waiting_for_close property + (defining whether the registration is waiting to send a close notification to the user + once the section closes). + Also used in database filtering of registrations (you cannot filter by a property value); + unpack the filters with two stars. + Example: + Registration.objects.filter(**Registration.is_waiting_for_close_filter()) + """ + return { + "notification_sent": True, + "close_notification": True, + "close_notification_sent": False, + "head_registration__deleted": False, + "head_registration__cancelled": False, + } + + @property + def is_waiting_for_close(self): + """ + True if the registration is waiting to send a close notification to the user + once the section closes. False otherwise. + """ + return does_object_pass_filter(self, self.is_waiting_for_close_filter()) + + @property + def last_notification_sent_at(self): + """ + The last time the user was sent an opening notification for this registration's + section. This property is None (or null in JSON) if no notification has been sent to the + user for this registration's section. + + This is used on the frontend to tell the user a last time an alert was sent for + the SECTION of a certain registration in the manage alerts page. Since the idea of + Registration objects and resubscribe chains is completely abstracted out of the User's + understanding, they expect alerts to work by section (so the "LAST NOTIFIED" + column should tell them the last time they were alerted about that section). + """ + return ( + self.section.registrations.filter(user=self.user, notification_sent_at__isnull=False) + .aggregate(max_notification_sent_at=Max("notification_sent_at")) + .get("max_notification_sent_at", None) + ) + + def __str__(self): + return "%s: %s @ %s" % ( + (str(self.user) if self.user is not None else None) or self.email or self.phone, + str(self.section), + str(self.created_at), + ) + + def validate_phone(self): + """ + This method converts the phone field to the E164 format, unless the number is in a form + unparseable by the [phonenumbers library](https://pypi.org/project/phonenumbers/), + in which case it sets it to None. 
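+ For example (illustrative values), "215-555-0123" parsed with the "US" default region is
+ normalized to "+12155550123", while an unparseable value such as "n/a" is replaced with None.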
+ """ + try: + phone_number = phonenumbers.parse(self.phone, "US") + self.phone = phonenumbers.format_number( + phone_number, phonenumbers.PhoneNumberFormat.E164 + ) + except phonenumbers.phonenumberutil.NumberParseException: + # if the phone number is unparseable, don't include it. + self.phone = None + + def save(self, load_script=False, *args, **kwargs): + """ + This save method enforces the following invariants on the registration: + - The `phone` field is converted to E164 format, or set to `None` if unparseable. + - If the `user` field is not `None`, but either of the legacy `phone` + or `email` fields are not `None`, the contents of the `phone` / `email` fields + are moved to the `profile` of the `user` object (this was only a concern during the + PCA refresh transition process, when we switched away from using these legacy fields). + - If `head_registration` is `None`, it is set to a self-reference. + - Any other registration whose `head_registration` equals `self.resubscribed_from` + are updated to have `self` as their `head_registration`. + - The `original_created_at` field is set to the `created_at` of the tail of the + resubscribe chain. + + If `load_script` is set to False (indicating this registration is being actively + created by a PCA user, rather than being loaded in from an external data source), + and the registration's semester is the current semester, and the registration + has just been created or deactivated, then the `PcaDemandDistributionEstimate` model + and `current_demand_distribution_estimate` cache are asynchronously updated + (via a celery task) to reflect the resulting section demand change. + """ + from alert.tasks import section_demand_change + from courses.util import get_set_id, is_fk_set + + # ^ imported here to avoid circular imports + + with transaction.atomic(): + self.validate_phone() + if self.user is not None: + if self.email is not None: + user_data, _ = UserProfile.objects.get_or_create(user=self.user) + user_data.email = self.email + user_data.save() + self.user.profile = user_data + self.user.save() + self.email = None + if self.phone is not None: + user_data, _ = UserProfile.objects.get_or_create(user=self.user) + user_data.phone = self.phone + user_data.save() + self.user.profile = user_data + self.user.save() + self.phone = None + + # Find old registration + old_registration = Registration.objects.get(id=self.id) if self.id else None + was_active = bool(old_registration and old_registration.is_active) + + # Set head_registration to self if not set + if not is_fk_set(self, "head_registration"): + self.head_registration_id = self.id or get_set_id(self) + + super().save(*args, **kwargs) + + if self.resubscribed_from_id: + Registration.objects.filter(head_registration_id=self.resubscribed_from_id).update( + head_registration=self + ) + + if self.original_created_at is None: + self.original_created_at = self.get_original_registration().created_at + super().save() + + if ( + not load_script + and self.section.semester == get_current_semester() + and was_active != self.is_active + ): + section = self.section + volume_change = int(self.is_active) - int(was_active) + if volume_change > 0 or section.registration_volume >= 1: + section.registration_volume += volume_change + section.save() + section_demand_change.delay(section.id, self.updated_at) + + def alert(self, forced=False, sent_by="", close_notification=False): + """ + Returns true iff an alert was successfully sent through at least one medium to the user. 
+ """ + + if not forced: + if close_notification and not self.is_waiting_for_close: + return False + if not close_notification and not self.is_active: + return False + + push_notification = ( + self.user and self.user.profile and self.user.profile.push_notifications + ) # specifies whether we should use a push notification instead of a text + text_result = False + if not push_notification and not close_notification: + # never send close notifications by text + text_result = Text(self).send_alert(close_notification=close_notification) + if text_result is None: + logging.debug( + "ERROR OCCURRED WHILE ATTEMPTING TEXT NOTIFICATION FOR " + self.__str__() + ) + email_result = Email(self).send_alert(close_notification=close_notification) + if email_result is None: + logging.debug( + "ERROR OCCURRED WHILE ATTEMPTING EMAIL NOTIFICATION FOR " + self.__str__() + ) + push_notif_result = False + if push_notification: + push_notif_result = PushNotification(self).send_alert( + close_notification=close_notification + ) + if push_notif_result is None: + logging.debug( + "ERROR OCCURRED WHILE ATTEMPTING PUSH NOTIFICATION FOR " + self.__str__() + ) + if not email_result and not text_result and not push_notif_result: + logging.debug("ALERT CALLED BUT NOTIFICATION NOT SENT FOR " + self.__str__()) + return False + if not close_notification: + logging.debug("NOTIFICATION SENT FOR " + self.__str__()) + self.notification_sent = True + self.notification_sent_at = timezone.now() + self.notification_sent_by = sent_by + self.save() + if self.auto_resubscribe: + self.resubscribe() + return True + else: + logging.debug("CLOSE NOTIFICATION SENT FOR " + self.__str__()) + self.close_notification_sent = True + self.close_notification_sent_at = timezone.now() + self.close_notification_sent_by = sent_by + self.save() + return True + + def resubscribe(self): + """ + Resubscribe for notifications. If the head of this registration's resubscribe chain + is active, just return that registration (don't create a new active registration + for no reason). Otherwise, add a new active registration as the new head of the + resubscribe chain. + + Resubscription is idempotent. No matter how many times you call it (without + alert() being called on the registration), only one Registration model will + be created. + :return: Registration object for the resubscription + """ + most_recent_reg = self.get_most_current() + if most_recent_reg.is_active: # if the head of this resub chain is active + return most_recent_reg # don't create duplicate registrations for no reason. + + new_registration = Registration( + user=self.user, + email=self.email, + phone=self.phone, + section=self.section, + auto_resubscribe=self.auto_resubscribe, + close_notification=self.close_notification, + resubscribed_from=most_recent_reg, + original_created_at=self.original_created_at, + ) + new_registration.save() + return new_registration + + def get_resubscribe_group(self): + """ + Return a QuerySet of all the registrations in this registration's resubscribe chain. + """ + return Registration.objects.filter(head_registration=self.head_registration) + + def get_most_current(self): + """ + Returns the head of the resubscribe chain (the most recent registration). + """ + return self.get_resubscribe_group().get(resubscribed_to__isnull=True) + + def get_original_registration(self): + """ + Returns the tail of the resubscribe chain (the original registration). 
+ """ + return self.get_resubscribe_group().get(resubscribed_from__isnull=True) + + def get_most_current_iter(self): + """ + An iterative version of get_most_current that doesn't utilize + head_registration relations. + """ + most_current = self + while hasattr(most_current, "resubscribed_to"): + most_current = most_current.resubscribed_to + return most_current + + def get_original_registration_iter(self): + """ + An iterative version of get_original_registration that doesn't utilize + head_registration relations. + """ + original = self + while original.resubscribed_from: + original = original.resubscribed_from + return original + + +def register_for_course( + course_code, + email_address=None, + phone=None, + source=SOURCE_PCA, + api_key=None, + user=None, + auto_resub=False, + close_notification=False, +): + """ + This method is for the PCA 3rd party API (originally planned to service + Penn Course Notify, until Notify's rejection of PCA's help and eventual downfall; + coincidence? we think not...). It may still be used in the future, so we are + keeping the code. + Returns a tuple (RegStatus, section.full_code, registration), + where the last two elements are None when appropriate. + """ + if (not user and not email_address and not phone) or ( + user + and not user.profile.email + and not user.profile.phone + and not user.profile.push_notifications + ): + return RegStatus.NO_CONTACT_INFO, None, None + try: + _, section = get_course_and_section(course_code, get_current_semester()) + except (Course.DoesNotExist, Section.DoesNotExist, ValueError): + return RegStatus.COURSE_NOT_FOUND, None, None + + if user is None: + registration = Registration( + section=section, email=email_address, phone=phone, source=source + ) + registration.validate_phone() + if section.registrations.filter( + email=email_address, phone=registration.phone, **Registration.is_active_filter() + ).exists(): + return RegStatus.OPEN_REG_EXISTS, section.full_code, None + else: + if section.registrations.filter(user=user, **Registration.is_active_filter()).exists(): + return RegStatus.OPEN_REG_EXISTS, section.full_code, None + if close_notification and not user.profile.email and not user.profile.push_notifications: + return RegStatus.TEXT_CLOSE_NOTIFICATION, section.full_code, None + registration = Registration(section=section, user=user, source=source) + registration.auto_resubscribe = auto_resub + registration.close_notification = close_notification + + registration.api_key = api_key + registration.save() + + return RegStatus.SUCCESS, section.full_code, registration + + +class PcaDemandDistributionEstimate(models.Model): + """ + This model tracks/estimates changes in the distribution of + raw PCA demand ratios across all sections in a given semester. + Raw PCA demand (as opposed to "Relative PCA demand", + which maps demand values according to an estimated CDF function to a fixed range of [0,1]) + is defined for any given section as (PCA registration volume)/(section capacity). + Note that capacity is not stored as a field, while volume is. We do not track capacity changes, + and for this reason, the recompute_demand_distribution_estimates function (in the recomputestats + management command script) should be run after each run of the registrarimport script, + in case capacity changes affect historical distributions. + """ + + semester = models.CharField( + max_length=5, + db_index=True, + help_text=dedent( + """ + The semester of this demand distribution estimate (of the form YYYYx where x is + A [for spring], B [summer], or C [fall]), e.g.
`2019C` for fall 2019. + """ + ), + ) + + created_at = models.DateTimeField( + default=timezone.now, + db_index=True, + help_text="The datetime at which the distribution estimates were updated.", + ) + + percent_through_add_drop_period = models.FloatField( + default=0, + help_text=( + "The percentage through the add/drop period at which this demand distribution " + "estimate change occurred. This percentage is constrained within the range [0,1]." + ), + ) # This field is maintained in the save() method of alerts.models.AddDropPeriod, + # and the save() method of PcaDemandDistributionEstimate + + in_add_drop_period = models.BooleanField( + default=False, + help_text="Was this demand distribution estimate created during the add/drop period?", + ) # This field is maintained in the save() method of alerts.models.AddDropPeriod, + # and the save() method of PcaDemandDistributionEstimate + + highest_demand_section = models.ForeignKey( + Section, + on_delete=models.CASCADE, + related_name="highest_demand_distribution_estimates", + help_text="A section with the highest raw demand value at this time.", + ) # It is necessary to define related_name explicitly to avoid related name clash + highest_demand_section_volume = models.IntegerField( + help_text="The registration volume of the highest_demand_section at this time." + ) + lowest_demand_section = models.ForeignKey( + Section, + on_delete=models.CASCADE, + related_name="lowest_demand_distribution_estimates", + help_text="A section with the lowest raw demand value at this time.", + ) # It is necessary to define related_name explicitly to avoid related name clash + lowest_demand_section_volume = models.IntegerField( + help_text="The registration volume of the lowest_demand_section at this time." + ) + + csrdv_frac_zero = models.FloatField( + null=True, + blank=True, + help_text=( + "The fraction of closed sections' raw demand values that are 0 (non-positive), " + "expressed as a float in the range [0,1]. Null if there are no closed sections. " + "The abbreviation 'csrdv' stands for 'closed section raw demand values', not to be " + "confused with 'csprdv', which stands for 'closed section positive raw demand values'." + ), + ) + csprdv_lognorm_param_shape = models.FloatField( + null=True, + blank=True, + help_text=( + "The shape parameter of the fitted log-normal distribution on positive " + "raw demand values from closed sections. Null if there are no closed sections that " + "have positive raw demand values. The abbreviation 'csprdv' stands for " + "'closed section positive raw demand values'." + ), + ) + csprdv_lognorm_param_loc = models.FloatField( + null=True, + blank=True, + help_text=( + "The loc parameter of the fitted log-normal distribution on positive " + "raw demand values from closed sections. Null if there are no closed sections that " + "have positive raw demand values. The abbreviation 'csprdv' stands for " + "'closed section positive raw demand values'." + ), + ) + csprdv_lognorm_param_scale = models.FloatField( + null=True, + blank=True, + help_text=( + "The scale parameter of the fitted log-normal distribution on positive " + "raw demand values from closed sections. Null if there are no closed sections that " + "have positive raw demand values. The abbreviation 'csprdv' stands for " + "'closed section positive raw demand values'." 
+ ), + ) + + @property + def highest_raw_demand(self): + if ( + self.highest_demand_section is None + or self.highest_demand_section.capacity is None + or self.highest_demand_section.capacity <= 0 + ): + return None + return float(self.highest_demand_section_volume) / float( + self.highest_demand_section.capacity + ) + + @property + def lowest_raw_demand(self): + if ( + self.lowest_demand_section is None + or self.lowest_demand_section.capacity is None + or self.lowest_demand_section.capacity <= 0 + ): + return None + return float(self.lowest_demand_section_volume) / float(self.lowest_demand_section.capacity) + + def save(self, *args, **kwargs): + """ + This save method first gets the add/drop period object for this + PcaDemandDistributionEstimate object's semester (either by calling the + get_or_create_add_drop_period method or by using a passed-in add_drop_period kwarg, + which can be used for efficiency in bulk operations over PcaDemandDistributionEstimate + objects). + """ + if "add_drop_period" in kwargs: + add_drop_period = kwargs["add_drop_period"] + del kwargs["add_drop_period"] + else: + add_drop_period = get_or_create_add_drop_period(self.semester) + super().save(*args, **kwargs) + created_at = self.created_at + start = add_drop_period.estimated_start + end = add_drop_period.estimated_end + if created_at < start: + self.in_add_drop_period = False + self.percent_through_add_drop_period = 0 + elif created_at > end: + self.in_add_drop_period = False + self.percent_through_add_drop_period = 1 + else: + self.in_add_drop_period = True + self.percent_through_add_drop_period = (created_at - start) / (end - start) + super().save() + + def __str__(self): + return f"PcaDemandDistributionEstimate {self.semester} @ {self.created_at}" + + +def validate_add_drop_semester(semester): + """ + Validate the passed-in string as a fall or spring semester, such as 2020A or 2021C. + """ + if len(semester) != 5: + raise ValidationError( + f"Semester {semester} is invalid; valid semesters contain 5 characters." + ) + if semester[4] not in ["A", "C"]: + raise ValidationError(f"Semester {semester} is invalid; valid semesters end in 'A' or 'C'.") + if not semester[:4].isnumeric(): + raise ValidationError( + f"Semester {semester} is invalid; the 4-letter prefix of a valid semester is numeric." + ) + + +class AddDropPeriod(models.Model): + """ + This model tracks the start and end date of the add drop period corresponding to + a semester (only fall or spring semesters are supported). + """ + + semester = models.CharField( + max_length=5, + db_index=True, + unique=True, + validators=[validate_add_drop_semester], + help_text=dedent( + """ + The semester of this add drop period (of the form YYYYx where x is + A [for spring], or C [fall]), e.g. `2019C` for fall 2019. + """ + ), + ) + start = models.DateTimeField( + null=True, blank=True, help_text="The datetime at which the add drop period started." + ) + end = models.DateTimeField( + null=True, blank=True, help_text="The datetime at which the add drop period ended." + ) + + # estimated_start and estimated_end are filled in automatically in the overridden save method, + # so there is no need to maintain them (they are derivative fields of start and end). + # The only reason why they aren't properties is we sometimes need to use them in database + # filters / aggregations. 
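(Editor's note: an illustrative sketch, not part of the original patch. The two quantities the models above maintain, a section's raw demand ratio and the fraction of the add/drop period elapsed, reduce to simple arithmetic; the numbers and dates below are hypothetical.)

# Illustrative only; mirrors highest_raw_demand and get_percent_through_add_drop above.
from datetime import datetime, timedelta

capacity, registration_volume = 120, 150  # hypothetical section
raw_demand = registration_volume / capacity if capacity > 0 else None  # 1.25

start = datetime(2023, 4, 5, 7, 0)  # hypothetical add/drop window
end = start + timedelta(days=190)
now = start + timedelta(days=95)
percent_through = min(max((now - start) / (end - start), 0), 1)  # clamped to [0, 1]
print(raw_demand, round(percent_through, 3))  # 1.25 0.5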
+ estimated_start = models.DateTimeField( + null=True, + blank=True, + help_text=dedent( + """ + This field estimates the start of the add/drop period based on the semester + and historical data, even if the start field hasn't been filled in yet. + It equals the start of the add/drop period for this semester if it is explicitly set, + otherwise the most recent non-null start to an add/drop period, otherwise + (if none exist), estimate as April 5 @ 7:00am ET of the same year (for a fall semester), + or November 16 @ 7:00am ET of the previous year (for a spring semester). + """ + ), + ) + estimated_end = models.DateTimeField( + null=True, + blank=True, + help_text=dedent( + """ + This field estimates the end of the add/drop period based on the semester + and historical data, even if the end field hasn't been filled in yet. + The end of the add/drop period for this semester, if it is explicitly set, otherwise + the most recent non-null end to an add/drop period, otherwise (if none exist), + estimate as October 12 @ 11:59pm ET (for a fall semester), + or February 22 @ 11:59pm ET (for a spring semester), + of the same year. + """ + ), + ) + + def get_percent_through_add_drop(self, dt): + """ + The percentage through this add/drop period at which this dt occured. + This percentage is constrained within the range [0,1]." + """ + start = self.estimated_start + end = self.estimated_end + if dt < start: + return 0 + if dt > end: + return 1 + else: + return float((dt - start) / (end - start)) + + def save(self, *args, **kwargs): + """ + This save method invalidates the add_drop_periods cache, sets the estimated_start and + estimated_end fields, updates the in_add_drop_period and percent_through_add_drop_period + fields of StatusUpdates and PcaDemandDistributionEstimates from this semester, and then + calls the overridden save method. + """ + from alert.tasks import recompute_percent_open_async # avoid circular import + + with transaction.atomic(): + super().save(*args, **kwargs) + cache.delete("add_drop_periods") # invalidate add_drop_periods cache + self.estimated_start = self.estimate_start() + self.estimated_end = self.estimate_end() + period = self.estimated_end - self.estimated_start + for model, sem_filter_key in [ + (StatusUpdate, "section__course__semester"), + (PcaDemandDistributionEstimate, "semester"), + ]: + sem_filter = {sem_filter_key: self.semester} + model.objects.filter(**sem_filter).update( + in_add_drop_period=Case( + When( + Q(created_at__gte=self.estimated_start) + & Q(created_at__lte=self.estimated_end), + then=Value(True), + ), + default=Value(False), + output_field=models.BooleanField(), + ), + percent_through_add_drop_period=Case( + When( + Q(created_at__lte=self.estimated_start), + then=Value(0), + ), + When(Q(created_at__gte=self.estimated_end), then=Value(1)), + default=( + Extract(F("created_at"), "epoch") + - Value(self.estimated_start.timestamp()) + ) + / Value(period.total_seconds()), + output_field=models.FloatField(), + ), + ) + super().save() + recompute_percent_open_async.delay(self.semester) + + def estimate_start(self): + """ + The start of the add/drop period for this semester, if it is explicitly set in the + `start` field, otherwise the `estimated_start` field, if it is already set, + otherwise the most recent non-null start to an add/drop period, otherwise (if none exist), + estimate as April 5 @ 7:00am ET of the same year (for a fall semester), + or November 16 @ 7:00am ET of the previous year (for a spring semester). 
+ """ + if self.start is not None: + return self.start + if self.estimated_start is not None: + return self.estimated_start + last_start = ( + AddDropPeriod.objects.filter( + start__isnull=False, semester__endswith=str(self.semester)[4] + ) + .order_by("-semester") + .first() + ) + if str(self.semester)[4] == "C": # fall semester + s_year = int(str(self.semester)[:4]) + s_month = 4 + s_day = 5 + else: # spring semester + s_year = int(str(self.semester)[:4]) - 1 + s_month = 11 + s_day = 16 + if last_start is None: + tz = gettz(TIME_ZONE) + return make_aware( + datetime.strptime(f"{s_year}-{s_month}-{s_day} 07:00", "%Y-%m-%d %H:%M"), + timezone=tz, + ) + return last_start.start.replace(year=s_year) + + def estimate_end(self): + """ + The end of the add/drop period for this semester, if it is explicitly set in the + `end` field, otherwise the `estimated_end` field, if it is already set, otherwise + the most recent non-null end to an add/drop period, otherwise (if none exist), + estimate as October 12 @ 11:59pm ET (for a fall semester), + or February 22 @ 11:59pm ET (for a spring semester), + of the same year. + """ + if self.end is not None: + return self.end + if self.estimated_end is not None: + return self.estimated_end + last_end = ( + AddDropPeriod.objects.filter( + end__isnull=False, semester__endswith=str(self.semester)[4] + ) + .order_by("-semester") + .first() + ) + e_year = int(str(self.semester)[:4]) + if last_end is None: + if str(self.semester)[4] == "C": # fall semester + e_month = 10 + e_day = 12 + else: # spring semester + e_month = 2 + e_day = 22 + tz = gettz(TIME_ZONE) + return make_aware( + datetime.strptime(f"{e_year}-{e_month}-{e_day} 23:59", "%Y-%m-%d %H:%M"), + timezone=tz, + ) + return last_end.end.replace(year=e_year) + + def __str__(self): + return f"AddDropPeriod {self.semester}" diff --git a/backend/alert/serializers.py b/backend/alert/serializers.py index 40740a80f..5eda9bacd 100644 --- a/backend/alert/serializers.py +++ b/backend/alert/serializers.py @@ -1,133 +1,133 @@ -from textwrap import dedent - -from rest_framework import serializers - -from alert.models import Registration -from courses.models import Section, StatusUpdate, string_dict_to_html - - -registration_fields = [ - "id", - "created_at", - "original_created_at", - "updated_at", - "section", - "user", - "cancelled", - "cancelled_at", - "deleted", - "deleted_at", - "auto_resubscribe", - "notification_sent", - "notification_sent_at", - "last_notification_sent_at", - "close_notification", - "close_notification_sent", - "close_notification_sent_at", - "is_active", - "is_waiting_for_close", -] - - -class RegistrationSerializer(serializers.ModelSerializer): - section = serializers.SlugRelatedField( - slug_field="full_code", - required=False, - queryset=Section.objects.none(), - help_text="The dash-separated full code of the section associated with this Registration.", - ) - user = serializers.SlugRelatedField( - slug_field="username", - read_only=True, - help_text="The Penn Labs Accounts username of the User who owns this Registration.", - ) - section_status = serializers.SerializerMethodField( - read_only=True, - help_text="The current status of the watched section. 
Options and meanings: " - + string_dict_to_html(dict(StatusUpdate.STATUS_CHOICES)), - ) - - def get_section_status(self, registration_object): - return registration_object.section.status - - class Meta: - model = Registration - fields = registration_fields + ["is_active", "section_status"] - read_only_fields = fields - - -class RegistrationCreateSerializer(serializers.ModelSerializer): - section = serializers.CharField( - max_length=16, - help_text="The dash-separated full code of the section associated with this Registration.", - ) - auto_resubscribe = serializers.BooleanField( - required=False, - help_text=dedent( - """ - Set this to true to turn on auto resubscribe (causing the registration to automatically - resubscribe once it sends out a notification). Default is false if not specified. - """ - ), - ) - id = serializers.IntegerField( - read_only=False, - required=False, - help_text="The id of the registration (can optionally be used to customize the " - "id of a new registration, or to update an existing registration).", - ) - - class Meta: - model = Registration - fields = registration_fields - read_only_fields = [ - f - for f in registration_fields - if f not in ["section", "auto_resubscribe", "close_notification", "id"] - ] - - -class RegistrationUpdateSerializer(serializers.ModelSerializer): - resubscribe = serializers.BooleanField( - required=False, - help_text=dedent( - """ - Set this to true to resubscribe to this registration (only works if the registration - has sent a notification and hasn't been deleted). - """ - ), - ) - deleted = serializers.BooleanField( - required=False, - help_text=dedent( - """ - Set this to true to delete this registration (making it inactive and preventing it from - showing up in List Registrations). - """ - ), - ) - cancelled = serializers.BooleanField( - required=False, - help_text=dedent( - """ - Set this to true to cancel to this registration (making it inactive while keeping it - in List Registration). - """ - ), - ) - auto_resubscribe = serializers.BooleanField( - required=False, - help_text=dedent( - """ - Set this to true to turn on auto resubscribe (causing the registration to automatically - resubscribe once it sends out a notification). 
- """ - ), - ) - - class Meta: - model = Registration - fields = registration_fields + ["cancelled", "deleted", "resubscribe"] - read_only_fields = [ - f for f in registration_fields if f not in ["auto_resubscribe", "close_notification"] - ] +from textwrap import dedent + +from rest_framework import serializers + +from alert.models import Registration +from courses.models import Section, StatusUpdate, string_dict_to_html + + +registration_fields = [ + "id", + "created_at", + "original_created_at", + "updated_at", + "section", + "user", + "cancelled", + "cancelled_at", + "deleted", + "deleted_at", + "auto_resubscribe", + "notification_sent", + "notification_sent_at", + "last_notification_sent_at", + "close_notification", + "close_notification_sent", + "close_notification_sent_at", + "is_active", + "is_waiting_for_close", +] + + +class RegistrationSerializer(serializers.ModelSerializer): + section = serializers.SlugRelatedField( + slug_field="full_code", + required=False, + queryset=Section.objects.none(), + help_text="The dash-separated full code of the section associated with this Registration.", + ) + user = serializers.SlugRelatedField( + slug_field="username", + read_only=True, + help_text="The Penn Labs Accounts username of the User who owns this Registration.", + ) + section_status = serializers.SerializerMethodField( + read_only=True, + help_text="The current status of the watched section. Options and meanings: " + + string_dict_to_html(dict(StatusUpdate.STATUS_CHOICES)), + ) + + def get_section_status(self, registration_object): + return registration_object.section.status + + class Meta: + model = Registration + fields = registration_fields + ["is_active", "section_status"] + read_only_fields = fields + + +class RegistrationCreateSerializer(serializers.ModelSerializer): + section = serializers.CharField( + max_length=16, + help_text="The dash-separated full code of the section associated with this Registration.", + ) + auto_resubscribe = serializers.BooleanField( + required=False, + help_text=dedent( + """ + Set this to true to turn on auto resubscribe (causing the registration to automatically + resubscribe once it sends out a notification). Default is false if not specified. + """ + ), + ) + id = serializers.IntegerField( + read_only=False, + required=False, + help_text="The id of the registration (can optionally be used to customize the " + "id of a new registration, or to update an existing registration).", + ) + + class Meta: + model = Registration + fields = registration_fields + read_only_fields = [ + f + for f in registration_fields + if f not in ["section", "auto_resubscribe", "close_notification", "id"] + ] + + +class RegistrationUpdateSerializer(serializers.ModelSerializer): + resubscribe = serializers.BooleanField( + required=False, + help_text=dedent( + """ + Set this to true to resubscribe to this registration (only works if the registration + has sent a notification and hasn't been deleted). + """ + ), + ) + deleted = serializers.BooleanField( + required=False, + help_text=dedent( + """ + Set this to true to delete this registration (making it inactive and preventing it from + showing up in List Registrations). + """ + ), + ) + cancelled = serializers.BooleanField( + required=False, + help_text=dedent( + """ + Set this to true to cancel to this registration (making it inactive while keeping it + in List Registration). 
+ """ + ), + ) + auto_resubscribe = serializers.BooleanField( + required=False, + help_text=dedent( + """ + Set this to true to turn on auto resubscribe (causing the registration to automatically + resubscribe once it sends out a notification). + """ + ), + ) + + class Meta: + model = Registration + fields = registration_fields + ["cancelled", "deleted", "resubscribe"] + read_only_fields = [ + f for f in registration_fields if f not in ["auto_resubscribe", "close_notification"] + ] diff --git a/backend/alert/tasks.py b/backend/alert/tasks.py index ee594106e..ec808c1d4 100644 --- a/backend/alert/tasks.py +++ b/backend/alert/tasks.py @@ -1,180 +1,180 @@ -import logging -from datetime import datetime - -import numpy as np -import redis -import scipy.stats as stats -from celery import shared_task -from django.conf import settings -from django.core.cache import cache -from django.db import models, transaction -from django.db.models import Case, Q, When -from django.db.models.functions import Cast - -from alert.management.commands.recomputestats import recompute_percent_open -from alert.models import PcaDemandDistributionEstimate, Registration -from courses.models import Section, StatusUpdate -from courses.util import ( - get_course_and_section, - get_current_semester, - get_or_create_add_drop_period, - update_course_from_record, -) -from PennCourses.settings.base import ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES -from review.views import extra_metrics_section_filters - - -logger = logging.getLogger(__name__) -r = redis.Redis.from_url(settings.REDIS_URL) - - -@shared_task(name="pca.tasks.run_course_updates") -def run_course_updates(semester=None): - if semester is None: - updates = StatusUpdate.objects.all() - else: - updates = StatusUpdate.objects.filter(section__course__semester=semester) - for u in updates: - update_course_from_record(u) - return {"result": "executed", "name": "pca.tasks.run_course_updates"} - - -@shared_task(name="pca.tasks.send_alert") -def send_alert(reg_id, close_notification, sent_by=""): - result = Registration.objects.get(id=reg_id).alert( - sent_by=sent_by, close_notification=close_notification - ) - return {"result": result, "task": "pca.tasks.send_alert"} - - -def get_registrations_for_alerts(course_code, semester, course_status="O"): - _, section = get_course_and_section(course_code, semester) - if course_status == "O": - return list(section.registrations.filter(**Registration.is_active_filter())) - elif course_status == "C": - return list(section.registrations.filter(**Registration.is_waiting_for_close_filter())) - else: - return [] - - -@shared_task(name="pca.tasks.send_course_alerts") -def send_course_alerts(course_code, course_status, semester=None, sent_by=""): - if semester is None: - semester = get_current_semester() - - for reg in get_registrations_for_alerts(course_code, semester, course_status=course_status): - send_alert.delay(reg.id, close_notification=(course_status == "C"), sent_by=sent_by) - - -@shared_task(name="pca.tasks.recompute_percent_open") -def recompute_percent_open_async(semester): - recompute_percent_open(semesters=[semester], semesters_precomputed=True) - - -@shared_task(name="pca.tasks.registration_update") -def section_demand_change(section_id, updated_at): - """ - This function should be called when a section's demand changes (i.e. the number of - active registrations changes, or the section's status is updated). 
It updates the - `PcaDemandDistributionEstimate` model and `current_demand_distribution_estimate` - cache to reflect the demand change. - - :param: section_id: the id of the section involved in the demand change - :param: updated_at: the datetime at which the demand change occurred - """ - if type(updated_at) is str: - updated_at = datetime.fromisoformat(updated_at.replace("Z", "+00:00")) - elif type(updated_at) is not datetime: - return - - section = Section.objects.get(id=section_id) - semester = section.semester - if semester != get_current_semester(): - return - - with transaction.atomic(): - create_new_distribution_estimate = False - sentinel = object() - current_demand_distribution_estimate = cache.get( - "current_demand_distribution_estimate", sentinel - ) - if ( - current_demand_distribution_estimate == sentinel - or current_demand_distribution_estimate.semester != semester - ): - create_new_distribution_estimate = True - - sections_qs = ( - Section.objects.filter(extra_metrics_section_filters, course__semester=semester) - .select_for_update() - .annotate( - raw_demand=Case( - When( - Q(capacity__gt=0), - then=( - Cast( - "registration_volume", - models.FloatField(), - ) - / Cast("capacity", models.FloatField()) - ), - ), - default=None, - output_field=models.FloatField(), - ), - ) - ) - - try: - lowest_demand_section = sections_qs.order_by("raw_demand")[:1].get() - highest_demand_section = sections_qs.order_by("-raw_demand")[:1].get() - except Section.DoesNotExist: - return # Don't add a PcaDemandDistributionEstimate -- there are no valid sections yet - - if ( - create_new_distribution_estimate - or highest_demand_section.raw_demand - > current_demand_distribution_estimate.highest_raw_demand - or lowest_demand_section.raw_demand - < current_demand_distribution_estimate.lowest_raw_demand - ): - closed_sections_demand_values = np.asarray( - sections_qs.filter(status="C").values_list("raw_demand", flat=True) - ) - # "The term 'closed sections positive raw demand values' is - # sometimes abbreviated as 'csprdv' - csrdv_frac_zero, fit_shape, fit_loc, fit_scale = (None, None, None, None) - if len(closed_sections_demand_values) > 0: - closed_sections_positive_demand_values = closed_sections_demand_values[ - np.where(closed_sections_demand_values > 0) - ] - csrdv_frac_zero = 1 - len(closed_sections_positive_demand_values) / len( - closed_sections_demand_values - ) - if len(closed_sections_positive_demand_values) > 0: - fit_shape, fit_loc, fit_scale = stats.lognorm.fit( - closed_sections_positive_demand_values - ) - new_demand_distribution_estimate = PcaDemandDistributionEstimate( - semester=semester, - highest_demand_section=highest_demand_section, - highest_demand_section_volume=highest_demand_section.registration_volume, - lowest_demand_section=lowest_demand_section, - lowest_demand_section_volume=lowest_demand_section.registration_volume, - csrdv_frac_zero=csrdv_frac_zero, - csprdv_lognorm_param_shape=fit_shape, - csprdv_lognorm_param_loc=fit_loc, - csprdv_lognorm_param_scale=fit_scale, - ) - add_drop_period = get_or_create_add_drop_period(semester) - new_demand_distribution_estimate.save(add_drop_period=add_drop_period) - new_demand_distribution_estimate.created_at = updated_at - new_demand_distribution_estimate.save(add_drop_period=add_drop_period) - cache.set( - "current_demand_distribution_estimate", - new_demand_distribution_estimate, - timeout=( - add_drop_period.estimated_end - add_drop_period.estimated_start - ).total_seconds() - // ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES, 
- ) # set timeout to roughly follow ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES +import logging +from datetime import datetime + +import numpy as np +import redis +import scipy.stats as stats +from celery import shared_task +from django.conf import settings +from django.core.cache import cache +from django.db import models, transaction +from django.db.models import Case, Q, When +from django.db.models.functions import Cast + +from alert.management.commands.recomputestats import recompute_percent_open +from alert.models import PcaDemandDistributionEstimate, Registration +from courses.models import Section, StatusUpdate +from courses.util import ( + get_course_and_section, + get_current_semester, + get_or_create_add_drop_period, + update_course_from_record, +) +from PennCourses.settings.base import ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES +from review.views import extra_metrics_section_filters + + +logger = logging.getLogger(__name__) +r = redis.Redis.from_url(settings.REDIS_URL) + + +@shared_task(name="pca.tasks.run_course_updates") +def run_course_updates(semester=None): + if semester is None: + updates = StatusUpdate.objects.all() + else: + updates = StatusUpdate.objects.filter(section__course__semester=semester) + for u in updates: + update_course_from_record(u) + return {"result": "executed", "name": "pca.tasks.run_course_updates"} + + +@shared_task(name="pca.tasks.send_alert") +def send_alert(reg_id, close_notification, sent_by=""): + result = Registration.objects.get(id=reg_id).alert( + sent_by=sent_by, close_notification=close_notification + ) + return {"result": result, "task": "pca.tasks.send_alert"} + + +def get_registrations_for_alerts(course_code, semester, course_status="O"): + _, section = get_course_and_section(course_code, semester) + if course_status == "O": + return list(section.registrations.filter(**Registration.is_active_filter())) + elif course_status == "C": + return list(section.registrations.filter(**Registration.is_waiting_for_close_filter())) + else: + return [] + + +@shared_task(name="pca.tasks.send_course_alerts") +def send_course_alerts(course_code, course_status, semester=None, sent_by=""): + if semester is None: + semester = get_current_semester() + + for reg in get_registrations_for_alerts(course_code, semester, course_status=course_status): + send_alert.delay(reg.id, close_notification=(course_status == "C"), sent_by=sent_by) + + +@shared_task(name="pca.tasks.recompute_percent_open") +def recompute_percent_open_async(semester): + recompute_percent_open(semesters=[semester], semesters_precomputed=True) + + +@shared_task(name="pca.tasks.registration_update") +def section_demand_change(section_id, updated_at): + """ + This function should be called when a section's demand changes (i.e. the number of + active registrations changes, or the section's status is updated). It updates the + `PcaDemandDistributionEstimate` model and `current_demand_distribution_estimate` + cache to reflect the demand change. 
+ + :param: section_id: the id of the section involved in the demand change + :param: updated_at: the datetime at which the demand change occurred + """ + if type(updated_at) is str: + updated_at = datetime.fromisoformat(updated_at.replace("Z", "+00:00")) + elif type(updated_at) is not datetime: + return + + section = Section.objects.get(id=section_id) + semester = section.semester + if semester != get_current_semester(): + return + + with transaction.atomic(): + create_new_distribution_estimate = False + sentinel = object() + current_demand_distribution_estimate = cache.get( + "current_demand_distribution_estimate", sentinel + ) + if ( + current_demand_distribution_estimate == sentinel + or current_demand_distribution_estimate.semester != semester + ): + create_new_distribution_estimate = True + + sections_qs = ( + Section.objects.filter(extra_metrics_section_filters, course__semester=semester) + .select_for_update() + .annotate( + raw_demand=Case( + When( + Q(capacity__gt=0), + then=( + Cast( + "registration_volume", + models.FloatField(), + ) + / Cast("capacity", models.FloatField()) + ), + ), + default=None, + output_field=models.FloatField(), + ), + ) + ) + + try: + lowest_demand_section = sections_qs.order_by("raw_demand")[:1].get() + highest_demand_section = sections_qs.order_by("-raw_demand")[:1].get() + except Section.DoesNotExist: + return # Don't add a PcaDemandDistributionEstimate -- there are no valid sections yet + + if ( + create_new_distribution_estimate + or highest_demand_section.raw_demand + > current_demand_distribution_estimate.highest_raw_demand + or lowest_demand_section.raw_demand + < current_demand_distribution_estimate.lowest_raw_demand + ): + closed_sections_demand_values = np.asarray( + sections_qs.filter(status="C").values_list("raw_demand", flat=True) + ) + # "The term 'closed sections positive raw demand values' is + # sometimes abbreviated as 'csprdv' + csrdv_frac_zero, fit_shape, fit_loc, fit_scale = (None, None, None, None) + if len(closed_sections_demand_values) > 0: + closed_sections_positive_demand_values = closed_sections_demand_values[ + np.where(closed_sections_demand_values > 0) + ] + csrdv_frac_zero = 1 - len(closed_sections_positive_demand_values) / len( + closed_sections_demand_values + ) + if len(closed_sections_positive_demand_values) > 0: + fit_shape, fit_loc, fit_scale = stats.lognorm.fit( + closed_sections_positive_demand_values + ) + new_demand_distribution_estimate = PcaDemandDistributionEstimate( + semester=semester, + highest_demand_section=highest_demand_section, + highest_demand_section_volume=highest_demand_section.registration_volume, + lowest_demand_section=lowest_demand_section, + lowest_demand_section_volume=lowest_demand_section.registration_volume, + csrdv_frac_zero=csrdv_frac_zero, + csprdv_lognorm_param_shape=fit_shape, + csprdv_lognorm_param_loc=fit_loc, + csprdv_lognorm_param_scale=fit_scale, + ) + add_drop_period = get_or_create_add_drop_period(semester) + new_demand_distribution_estimate.save(add_drop_period=add_drop_period) + new_demand_distribution_estimate.created_at = updated_at + new_demand_distribution_estimate.save(add_drop_period=add_drop_period) + cache.set( + "current_demand_distribution_estimate", + new_demand_distribution_estimate, + timeout=( + add_drop_period.estimated_end - add_drop_period.estimated_start + ).total_seconds() + // ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES, + ) # set timeout to roughly follow ROUGH_MINIMUM_DEMAND_DISTRIBUTION_ESTIMATES diff --git 
a/backend/alert/templates/alert/email_alert.html b/backend/alert/templates/alert/email_alert.html index 1af6099e7..bd866cc38 100644 --- a/backend/alert/templates/alert/email_alert.html +++ b/backend/alert/templates/alert/email_alert.html @@ -1,33 +1,33 @@ - - - -
- - Penn Course Alert Logo - -
-

{{ course }} just {% block course_event %}opened up!{% endblock %}

-

- {% block email_body %} - - Register now on Path@Penn - {% endblock %} -

- -

- {% if auto_resubscribe %} - Don't need more alerts? - - Cancel here - {% else %} - Unlucky? - - Resubscribe here - {% endif %} -

-
- -
- Made with by Penn Labs -
+ + + +
+ + Penn Course Alert Logo + +
+

{{ course }} just {% block course_event %}opened up!{% endblock %}

+

+ {% block email_body %} + + Register now on Path@Penn + {% endblock %} +

+ +

+ {% if auto_resubscribe %} + Don't need more alerts? + + Cancel here + {% else %} + Unlucky? + + Resubscribe here + {% endif %} +

+
+ +
+ Made with by Penn Labs +
\ No newline at end of file diff --git a/backend/alert/templates/alert/email_alert_close.html b/backend/alert/templates/alert/email_alert_close.html index e618f06c9..358a06e2f 100644 --- a/backend/alert/templates/alert/email_alert_close.html +++ b/backend/alert/templates/alert/email_alert_close.html @@ -1,2 +1,2 @@ -{% extends "alert/email_alert.html" %} {% block course_event %}closed.{% endblock %} {% block email_body %} No need to check Path@Penn, if you haven't already.
(You are receiving this message because you enabled +{% extends "alert/email_alert.html" %} {% block course_event %}closed.{% endblock %} {% block email_body %} No need to check Path@Penn, if you haven't already.
(You are receiving this message because you enabled close notifications for this alert.) {% endblock %} \ No newline at end of file diff --git a/backend/alert/templates/alert/index.html b/backend/alert/templates/alert/index.html index c898836ea..4bcafeb55 100644 --- a/backend/alert/templates/alert/index.html +++ b/backend/alert/templates/alert/index.html @@ -1,344 +1,344 @@ -{% load staticfiles %} - - - - - - - - - - Penn Course Alert - - - - - - - - - - -
- {% if recruiting %} -
- Want to be part of the team that makes products like this? Penn Labs is recruiting. - Apply here! - -
- {% endif %} - {% for n in notifications%} -
- {% if n.closeable %} - - {% endif %} - {{ n.text }} -
- {% endfor %} -
-
-
-
-
- -
-

- Penn Course Alert -

-
-
-

- Get alerted when a course opens up, by text and email -

-
- -
-
-
-
- -
-
- -
-
- -
-
-
-
- -
-
- {% csrf_token %} -
- -
- -
-
-
- - -
- - - - - - - - - - - - - - - +{% load staticfiles %} + + + + + + + + + + Penn Course Alert + + + + + + + + + + +
+ {% if recruiting %} +
+ Want to be part of the team that makes products like this? Penn Labs is recruiting. + Apply here! + +
+ {% endif %} + {% for n in notifications%} +
+ {% if n.closeable %} + + {% endif %} + {{ n.text }} +
+ {% endfor %} +
+
+
+
+
+ +
+

+ Penn Course Alert +

+
+
+

+ Get alerted when a course opens up, by text and email +

+
+ +
+
+
+
+ +
+
+ +
+
+ +
+
+
+
+ +
+
+ {% csrf_token %} +
+ +
+ +
+
+
+ + +
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/backend/alert/templates/alert/push_notif.txt b/backend/alert/templates/alert/push_notif.txt index 6e9da7f19..1f71f76c3 100644 --- a/backend/alert/templates/alert/push_notif.txt +++ b/backend/alert/templates/alert/push_notif.txt @@ -1,2 +1,2 @@ -{{course}} is now open! Register on Path@Penn: http://bit.ly/pathpenn. {% if auto_resubscribe %}To cancel alerts, visit{% else %}Resubscribe at{% endif %} http://penncoursealert.com#manage - - Thanks for using Penn Course Alert! +{{course}} is now open! Register on Path@Penn: http://bit.ly/pathpenn. {% if auto_resubscribe %}To cancel alerts, visit{% else %}Resubscribe at{% endif %} http://penncoursealert.com#manage + - Thanks for using Penn Course Alert! diff --git a/backend/alert/templates/alert/push_notif_close.txt b/backend/alert/templates/alert/push_notif_close.txt index aa6a33ab3..1a2aade3b 100644 --- a/backend/alert/templates/alert/push_notif_close.txt +++ b/backend/alert/templates/alert/push_notif_close.txt @@ -1,2 +1,2 @@ -{{course}} just closed. {% if auto_resubscribe %}To cancel alerts, visit{% else %}Resubscribe at{% endif %} http://penncoursealert.com#manage - (You are receiving this message because you enabled close notifications for this registration.) +{{course}} just closed. {% if auto_resubscribe %}To cancel alerts, visit{% else %}Resubscribe at{% endif %} http://penncoursealert.com#manage + (You are receiving this message because you enabled close notifications for this registration.) diff --git a/backend/alert/templates/alert/text_alert.txt b/backend/alert/templates/alert/text_alert.txt index 5b9733a00..099218575 100644 --- a/backend/alert/templates/alert/text_alert.txt +++ b/backend/alert/templates/alert/text_alert.txt @@ -1,3 +1,3 @@ -{{course}} is open! Register on Path@Penn: https://courses.upenn.edu/ -{% if auto_resubscribe %}To cancel alerts, visit{% else %}Resubscribe at{% endif %} http://u.pennlabs.org/pca -- Thanks for using Penn Course Alert! +{{course}} is open! Register on Path@Penn: https://courses.upenn.edu/ +{% if auto_resubscribe %}To cancel alerts, visit{% else %}Resubscribe at{% endif %} http://u.pennlabs.org/pca +- Thanks for using Penn Course Alert! 
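(Editor's note: a hedged usage sketch, not part of the original patch. The alert templates above are presumably rendered via Django's template loader by the Text/Email/PushNotification helpers in alert/alerts.py, which this hunk does not show; the context keys below, `course` and `auto_resubscribe`, come from the templates themselves, and the section code is a made-up example.)

from django.template.loader import render_to_string

# Hypothetical rendering of the plain-text alert body shown above.
body = render_to_string(
    "alert/text_alert.txt",
    {"course": "CIS-1200-001", "auto_resubscribe": True},
)
print(body)  # "CIS-1200-001 is open! Register on Path@Penn: ..."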
diff --git a/backend/alert/urls.py b/backend/alert/urls.py index 9636203bb..c10ee9d22 100644 --- a/backend/alert/urls.py +++ b/backend/alert/urls.py @@ -1,15 +1,15 @@ -from django.urls import include, path -from rest_framework import routers - -from alert import views -from alert.views import RegistrationHistoryViewSet, RegistrationViewSet - - -router = routers.DefaultRouter() -router.register(r"registrations", RegistrationViewSet, basename="registrations") -router.register(r"registrationhistory", RegistrationHistoryViewSet, basename="registrationhistory") - -urlpatterns = [ - path("webhook", views.accept_webhook, name="webhook"), - path("", include(router.urls)), -] +from django.urls import include, path +from rest_framework import routers + +from alert import views +from alert.views import RegistrationHistoryViewSet, RegistrationViewSet + + +router = routers.DefaultRouter() +router.register(r"registrations", RegistrationViewSet, basename="registrations") +router.register(r"registrationhistory", RegistrationHistoryViewSet, basename="registrationhistory") + +urlpatterns = [ + path("webhook", views.accept_webhook, name="webhook"), + path("", include(router.urls)), +] diff --git a/backend/alert/util.py b/backend/alert/util.py index 4401e0b4e..e85e9a0a7 100644 --- a/backend/alert/util.py +++ b/backend/alert/util.py @@ -1,32 +1,32 @@ -from datetime import datetime - -from dateutil.tz.tz import gettz -from options.models import get_bool - -from courses.util import get_current_semester, get_or_create_add_drop_period -from PennCourses.settings.base import TIME_ZONE - - -def pca_registration_open(): - """ - Returns True iff PCA should be accepting new registrations. - """ - current_adp = get_or_create_add_drop_period(semester=get_current_semester()) - return get_bool("REGISTRATION_OPEN", True) and ( - current_adp.end is None - or datetime.utcnow().replace(tzinfo=gettz(TIME_ZONE)) < current_adp.end - ) - - -def should_send_pca_alert(course_term, course_status): - if get_current_semester() != course_term: - return False - add_drop_period = get_or_create_add_drop_period(course_term) - return ( - get_bool("SEND_FROM_WEBHOOK", False) - and (course_status == "O" or course_status == "C") - and ( - add_drop_period.end is None - or datetime.utcnow().replace(tzinfo=gettz(TIME_ZONE)) < add_drop_period.end - ) - ) +from datetime import datetime + +from dateutil.tz.tz import gettz +from options.models import get_bool + +from courses.util import get_current_semester, get_or_create_add_drop_period +from PennCourses.settings.base import TIME_ZONE + + +def pca_registration_open(): + """ + Returns True iff PCA should be accepting new registrations. 
+ """ + current_adp = get_or_create_add_drop_period(semester=get_current_semester()) + return get_bool("REGISTRATION_OPEN", True) and ( + current_adp.end is None + or datetime.utcnow().replace(tzinfo=gettz(TIME_ZONE)) < current_adp.end + ) + + +def should_send_pca_alert(course_term, course_status): + if get_current_semester() != course_term: + return False + add_drop_period = get_or_create_add_drop_period(course_term) + return ( + get_bool("SEND_FROM_WEBHOOK", False) + and (course_status == "O" or course_status == "C") + and ( + add_drop_period.end is None + or datetime.utcnow().replace(tzinfo=gettz(TIME_ZONE)) < add_drop_period.end + ) + ) diff --git a/backend/alert/views.py b/backend/alert/views.py index 3afa36575..c5a71e15f 100644 --- a/backend/alert/views.py +++ b/backend/alert/views.py @@ -1,603 +1,603 @@ -import base64 -import json -import logging - -from django.conf import settings -from django.core.exceptions import ValidationError -from django.db import IntegrityError, transaction -from django.db.models import Max -from django.http import HttpResponse, JsonResponse -from django.utils import timezone -from django.views.decorators.csrf import csrf_exempt -from django_auto_prefetching import AutoPrefetchViewSetMixin -from rest_framework import status, viewsets -from rest_framework.permissions import IsAuthenticated -from rest_framework.response import Response - -from alert.models import Registration, RegStatus, register_for_course -from alert.serializers import ( - RegistrationCreateSerializer, - RegistrationSerializer, - RegistrationUpdateSerializer, -) -from alert.tasks import send_course_alerts -from alert.util import pca_registration_open, should_send_pca_alert -from courses.util import ( - get_current_semester, - get_or_create_course_and_section, - record_update, - translate_semester_inv, - update_course_from_record, -) -from PennCourses.docs_settings import PcxAutoSchema - - -logger = logging.getLogger(__name__) - - -def alert_for_course(c_id, semester, sent_by, course_status): - send_course_alerts.delay(c_id, course_status=course_status, semester=semester, sent_by=sent_by) - - -def extract_basic_auth(auth_header): - """ - extract username and password from a basic auth header - :param auth_header: content of the Authorization HTTP header - :return: username and password extracted from the header - """ - parts = auth_header.split(" ") - if parts[0] != "Basic" or len(parts) < 2: - return None, None - - auth_parts = base64.b64decode(parts[1]).split(b":") - if len(auth_parts) < 2: - return None, None - return auth_parts[0].decode(), auth_parts[1].decode() - - -@csrf_exempt -def accept_webhook(request): - auth_header = request.META.get("Authorization", request.META.get("HTTP_AUTHORIZATION", "")) - - username, password = extract_basic_auth(auth_header) - if username != settings.WEBHOOK_USERNAME or password != settings.WEBHOOK_PASSWORD: - return HttpResponse( - """Your credentials cannot be verified. 
- They should be placed in the header as "Authorization-Bearer", - YOUR_APP_ID and "Authorization-Token" , YOUR_TOKEN""", - status=401, - ) - - if request.method != "POST": - return HttpResponse("Methods other than POST are not allowed", status=405) - - if "json" not in request.content_type.lower(): - return HttpResponse("Request expected in JSON", status=415) - - try: - data = json.loads(request.body) - except json.JSONDecodeError: - return HttpResponse("Error decoding JSON body", status=400) - - course_id = data.get("section_id_normalized", None) - if course_id is None: - return HttpResponse("Course ID could not be extracted from response", status=400) - - course_status = data.get("status", None) - if course_status is None: - return HttpResponse("Course Status could not be extracted from response", status=400) - - prev_status = data.get("previous_status", None) or "" - - try: - course_term = data.get("term", None) - if course_term is None: - return HttpResponse("Course Term could not be extracted from response", status=400) - if any(course_term.endswith(s) for s in ["10", "20", "30"]): - course_term = translate_semester_inv(course_term) - if course_term.upper().endswith("B"): - return JsonResponse({"message": "webhook ignored (summer class)"}) - - _, section, _, _ = get_or_create_course_and_section(course_id, course_term) - - # Ignore duplicate updates - last_status_update = section.last_status_update - if last_status_update and last_status_update.new_status == course_status: - raise ValidationError( - f"Status update received changing section {section} from " - f"{prev_status} to {course_status}, " - f"after previous status update from {last_status_update.old_status} " - f"to {last_status_update.new_status} (duplicate or erroneous).", - ) - - alert_for_course_called = False - if should_send_pca_alert(course_term, course_status): - try: - alert_for_course( - course_id, semester=course_term, sent_by="WEB", course_status=course_status - ) - alert_for_course_called = True - response = JsonResponse({"message": "webhook recieved, alerts sent"}) - except ValueError: - response = JsonResponse({"message": "course code could not be parsed"}) - else: - response = JsonResponse({"message": "webhook recieved"}) - - u = record_update( - section, - course_term, - prev_status, - course_status, - alert_for_course_called, - request.body, - ) - update_course_from_record(u) - except (ValidationError, ValueError) as e: - logger.error(e, extra={"request": request}) - response = JsonResponse( - {"message": "We got an error but webhook should ignore it"}, status=200 - ) - - return response - - -class RegistrationViewSet(AutoPrefetchViewSetMixin, viewsets.ModelViewSet): - """ - retrieve: Get one of the logged-in user's PCA registrations for the current semester, using - the registration's ID. Note that if a registration with the specified ID exists, but that - registration is not at the head of its resubscribe chain (i.e. there is a more recent - registration which was created by resubscribing to the specified registration), the - HEAD of the resubscribe chain will be returned. This means the same registration could be - returned from a GET request to 2 distinct IDs (if they are in the same resubscribe chain). - If a registration with the specified ID exists, a 200 response code is returned, along - with the head registration object. If no registration with the given id exists, - a 404 is returned. - - list: Returns all registrations which are not deleted or made obsolete by resubscription. 
Put - another way, this endpoint will return a superset of all active registrations: all - active registrations (meaning registrations which would trigger an alert to be sent if their - section were to open up), IN ADDITION TO all inactive registrations from the current semester - which are at the head of their resubscribe chains and not deleted. However, one extra - modification is made: if multiple registrations for the same section are included in the - above-specified set, then all but the most recent (latest `created_at` value) are removed from - the list. This ensures that at most 1 registration is returned for each section. Note that this - is still a superset of all active registrations. If a registration is active, its `created_at` - value will be greater than any other registrations for the same section (our code ensures no - registration can be created or resubscribed to when an active registration exists for the same - section). This extra modification is actually made to prevent the user from being able to - resubscribe to an older registration after creating a new one (which would cause the backend to - return a 409 error). - - Each object in the returned list of registrations is of the same form as the object returned - by Retrieve Registration. - - Tip: if you sort this list by `original_created_at` (the `created_at` value of the tail of - a registration's resubscribe chain), cancelling or resubscribing to registrations will - not cause the registration to jump to a different place in the list (which makes for - more intuitive/understandable behavior for the user if the registrations are displayed - in that order). This is what PCA currently does on the manage alerts page. - - create: Use this route to create a PCA registration for a certain section. A PCA registration - represents a "subscription" to receive alerts for that section. The body of the request must - include a section field (with the dash-separated full code of the section) and optionally - can contain an auto_resubscribe field (defaults to false) which sets whether the registration - will automatically create a new registration once it triggers an alert (i.e. whether it will - automatically resubscribe the user to receive alerts for that section). It can also optionally - contain a "close_notification" field, to enable close notifications on the registration. - Note that close notifications CANNOT be sent by text, so you shouldn't allow the user to - enable close notifications for any registration unless they have an email set in their - User Profile or have push notifications enabled. If you try to create a registration with - close_notification enabled and the user only has texts enabled, a 406 will be returned - and the registration will not be created. - Note that if you include the "id" field in the body of your POST request, and that id - does not already exist, the id of the created registration will be set to the given value. - However, if the given id does exist, the request will instead be treated as a PUT request for - the registration (see the Update Registration docs for more info on how - PUT requests are handled).
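(Editor's note: a hedged request sketch, not part of the original patch. The body fields come from the create docstring above; the domain appears in the alert templates, but the URL prefix and the session cookie are assumptions, since only the router registration in alert/urls.py is shown in this diff.)

import requests

session_cookies = {"sessionid": "<authenticated-session-cookie>"}  # assumption
resp = requests.post(
    "https://penncoursealert.com/api/alert/registrations/",  # assumed mount point
    json={
        "section": "CIS-1200-001",    # dash-separated full section code (example)
        "auto_resubscribe": True,     # optional; defaults to false
        "close_notification": False,  # optional; needs email or push notifications enabled
    },
    cookies=session_cookies,
)
print(resp.status_code, resp.json())  # expect 201 with {"message": ..., "id": ...} on success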
- - This route returns a 201 if the registration is successfully created, a 400 if the input is - invalid (for instance if a null section is given), a 404 if the given section is not found in - the database, a 406 if the authenticated user does not have either a phone or an email set - in their profile (thus making it impossible for them to receive alerts), and a 409 if the - user is already currently registered to receive alerts for the given section. If the request - is redirected to update (when the passed in id is already associated with a registration), - other response codes may be returned (see the Update Registration docs for more info). If - registration is not currently open on PCA, a 503 is returned. - - update: Use this route to update existing PCA Registrations. Note that the provided id does - not always strictly specify which Registration gets modified. In fact, the actual Registration - that would get modified by a PUT request would be the head of the resubscribe chain of the - Registration with the specified id (so if you try to update an outdated Registration it will - instead update the most recent Registration). The parameters which can be - included in the request body are `resubscribe`, `auto_resubscribe`, 'close_notification', - `cancelled`, and `deleted`. If you include multiple parameters, the order of precedence in - choosing what action to take is `resubscribe` > `deleted` > `cancelled` > - [`auto_resubscribe` and `close_notification`] (so if you include multiple - parameters only the action associated with the highest priority parameter will be executed, - except both auto_resubscribe and close_notification can be updated in the same request). - Note that close notifications CANNOT be sent by text so you shouldn't allow the user to - enable close notifications for any registration unless they have an email set in their - User Profile or have push notifications enabled. If you try to update a registration to - enable close_notification and the user only has texts enabled, a 406 will be returned - and the registration will not be created. - Note that a registration will send an alert when the section it is watching opens, if and only - if it hasn't sent one before, it isn't cancelled, and it isn't deleted. If a registration would - send an alert when the section it is watching opens, we call it "active". Registrations which - have triggered an alert can be resubscribed to (which creates a new registration with the same - settings and adds that to the end of the original registration's resubscribe chain). Triggered - registrations would show up in List Registrations (as long as they are at the head of - their resubscribe chain), even though they aren't active. Cancelled registrations can also - be resubscribed to (in effect uncancelling), and also show up in List Registrations, despite - not being active. A user might cancel an alert rather than delete it if they want to keep it - in their PCA manage console but don't want to receive alerts for it. Deleted registrations - are not active, do not show up in List Registrations, and cannot be resubscribed to. You can - think of deleted registrations as effectively nonexistent; they are only kept on the backend - for analytics purposes. Note that while you can cancel a registration by setting the cancelled - parameter to true in your PUT request (and the same for delete/deleted), you cannot uncancel - or undelete (cancelled registrations can be resubscribed to and deleted registrations - are effectively nonexistent). 
- - If the update is successful, a 200 is returned. If there is some issue with the request, - a 400 is returned. This could be caused by trying to update a registration from a different - semester, trying to resubscribe to a deleted registration, resubscribing to a registration - which is not cancelled or hasn't yet triggered a notification, cancelling a deleted or - triggered registration, trying to make changes to a deleted registration, or otherwise - breaking rules. Look in the detail field of the response object for more detail on what - exactly went wrong if you encounter a 400. If no registration with the given id is found, - a 404 is returned. If registration is not currently open on PCA, a 503 is returned. - """ - - schema = PcxAutoSchema( - response_codes={ - "registrations-list": { - "POST": { - 201: "[DESCRIBE_RESPONSE_SCHEMA]Registration successfully created.", - 400: "Bad request (e.g. given null section).", - 404: "Given section not found in database.", - 406: "No contact information (phone or email) set for user.", - 409: "Registration for given section already exists.", - 503: "Registration not currently open.", - }, - "GET": {200: "[DESCRIBE_RESPONSE_SCHEMA]Registrations successfully listed."}, - }, - "registrations-detail": { - "PUT": { - 200: "Registration successfully updated (or no changes necessary).", - 400: "Bad request (see route description).", - 404: "Registration not found with given id.", - 503: "Registration not currently open.", - }, - "GET": { - 200: "[DESCRIBE_RESPONSE_SCHEMA]Registration detail successfully retrieved.", - 404: "Registration not found with given id.", - }, - }, - }, - override_response_schema={ - "registrations-list": { - "POST": { - 201: {"properties": {"message": {"type": "string"}, "id": {"type": "integer"}}}, - } - } - }, - ) - http_method_names = ["get", "post", "put"] - permission_classes = [IsAuthenticated] - - def get_serializer_class(self): - if self.action == "create": - return RegistrationCreateSerializer - elif self.action == "update": - return RegistrationUpdateSerializer - else: - return RegistrationSerializer - - @staticmethod - def handle_registration(request): - if not pca_registration_open(): - return Response( - {"message": "Registration is not open."}, - status=status.HTTP_503_SERVICE_UNAVAILABLE, - ) - - section_code = request.data.get("section", None) - - if section_code is None: - return Response( - {"message": "You must include a not null section"}, - status=status.HTTP_400_BAD_REQUEST, - ) - - res, normalized_course_code, reg = register_for_course( - course_code=section_code, - source="PCA", - user=request.user, - auto_resub=request.data.get("auto_resubscribe", False), - close_notification=request.data.get("close_notification", False), - ) - - if res == RegStatus.SUCCESS: - return Response( - { - "message": "Your registration for %s was successful!" % normalized_course_code, - "id": reg.pk, - }, - status=status.HTTP_201_CREATED, - ) - elif res == RegStatus.OPEN_REG_EXISTS: - return Response( - { - "message": "You've already registered to get alerts for %s!" - % normalized_course_code - }, - status=status.HTTP_409_CONFLICT, - ) - elif res == RegStatus.COURSE_NOT_FOUND: - return Response( - { - "message": "%s did not match any course in our database. Please try again!" - % section_code - }, - status=status.HTTP_404_NOT_FOUND, - ) - elif res == RegStatus.NO_CONTACT_INFO: - return Response( - { - "message": "You must set a phone number and/or an email address to " - "register for an alert." 
- }, - status=status.HTTP_406_NOT_ACCEPTABLE, - ) - elif res == RegStatus.TEXT_CLOSE_NOTIFICATION: - return Response( - { - "message": "You can only enable close notifications on a registration if the " - "user enables some form of communication other than just texts (we don't " - "send any close notifications by text)." - }, - status=status.HTTP_406_NOT_ACCEPTABLE, - ) - else: - return Response( - {"message": "There was an error on our end. Please try again!"}, - status=status.HTTP_500_INTERNAL_SERVER_ERROR, - ) - - def list(self, request, *args, **kwargs): - queryset = self.filter_queryset(self.get_queryset_current()) - - page = self.paginate_queryset(queryset) - if page is not None: - serializer = self.get_serializer(page, many=True) - return self.get_paginated_response(serializer.data) - - serializer = self.get_serializer(queryset, many=True) - return Response(serializer.data) - - def retrieve(self, request, *args, **kwargs): - instance = self.get_object().get_most_current() - serializer = self.get_serializer(instance) - return Response(serializer.data) - - def update(self, request, pk=None): - if not Registration.objects.filter(id=pk).exists(): - return Response({"detail": "Not found."}, status=status.HTTP_404_NOT_FOUND) - with transaction.atomic(): - try: - registration = self.get_queryset().select_for_update().get(id=pk) - except Registration.DoesNotExist: - return Response( - {"detail": "You do not have access to the specified registration."}, - status=status.HTTP_403_FORBIDDEN, - ) - - registration = registration.get_most_current() - - if registration.section.semester != get_current_semester(): - return Response( - {"detail": "You can only update registrations from the current semester."}, - status=status.HTTP_400_BAD_REQUEST, - ) - try: - if request.data.get("resubscribe", False): - if not pca_registration_open(): - return Response( - {"message": "Registration is not open."}, - status=status.HTTP_503_SERVICE_UNAVAILABLE, - ) - if registration.deleted: - return Response( - {"detail": "You cannot resubscribe to a deleted registration."}, - status=status.HTTP_400_BAD_REQUEST, - ) - if not registration.notification_sent and not registration.cancelled: - return Response( - { - "detail": "You can only resubscribe to a registration that " - "has already been sent or has been cancelled." - }, - status=status.HTTP_400_BAD_REQUEST, - ) - if registration.section.registrations.filter( - user=registration.user, **Registration.is_active_filter() - ).exists(): - # An active registration for this section already exists - return Response( - { - "message": "You've already registered to get alerts for %s!" 
- % registration.section.full_code - }, - status=status.HTTP_409_CONFLICT, - ) - resub = registration.resubscribe() - return Response( - {"detail": "Resubscribed successfully", "id": resub.id}, - status=status.HTTP_200_OK, - ) - elif request.data.get("deleted", False): - changed = not registration.deleted - registration.deleted = True - registration.save() - if changed: # else taken care of in generic return statement - registration.deleted_at = timezone.now() - registration.save() - return Response( - {"detail": "Registration deleted"}, status=status.HTTP_200_OK - ) - elif request.data.get("cancelled", False): - if registration.deleted: - return Response( - {"detail": "You cannot cancel a deleted registration."}, - status=status.HTTP_400_BAD_REQUEST, - ) - if registration.notification_sent: - return Response( - {"detail": "You cannot cancel a sent registration."}, - status=status.HTTP_400_BAD_REQUEST, - ) - changed = not registration.cancelled - registration.cancelled = True - registration.save() - if changed: # else taken care of in generic return statement - registration.cancelled_at = timezone.now() - registration.save() - return Response( - {"detail": "Registration cancelled"}, status=status.HTTP_200_OK - ) - elif "auto_resubscribe" in request.data or "close_notification" in request.data: - if registration.deleted: - return Response( - {"detail": "You cannot make changes to a deleted registration."}, - status=status.HTTP_400_BAD_REQUEST, - ) - auto_resubscribe_changed = registration.auto_resubscribe != request.data.get( - "auto_resubscribe", registration.auto_resubscribe - ) - close_notification_changed = ( - registration.close_notification - != request.data.get("close_notification", registration.close_notification) - ) - if ( - request.data.get("close_notification", registration.close_notification) - and not request.user.profile.email - and not request.user.profile.push_notifications - ): - return Response( - { - "detail": "You cannot enable close_notifications with only " - "your phone number saved in your user profile." 
- }, - status=status.HTTP_406_NOT_ACCEPTABLE, - ) - changed = auto_resubscribe_changed or close_notification_changed - registration.auto_resubscribe = request.data.get( - "auto_resubscribe", registration.auto_resubscribe - ) - registration.close_notification = request.data.get( - "close_notification", registration.close_notification - ) - registration.save() - if changed: # else taken care of in generic return statement - return Response( - { - "detail": ", ".join( - ( - [ - "auto_resubscribe updated to " - + str(registration.auto_resubscribe) - ] - if auto_resubscribe_changed - else [] - ) - + ( - [ - "close_notification updated to " - + str(registration.close_notification) - ] - if close_notification_changed - else [] - ) - ) - }, - status=status.HTTP_200_OK, - ) - except IntegrityError as e: - return Response( - { - "detail": "IntegrityError encountered while trying to update: " - + str(e.__cause__) - }, - status=status.HTTP_400_BAD_REQUEST, - ) - return Response({"detail": "no changes made"}, status=status.HTTP_200_OK) - - def create(self, request, *args, **kwargs): - if Registration.objects.filter(id=request.data.get("id")).exists(): - return self.update(request, request.data.get("id")) - return self.handle_registration(request) - - queryset = Registration.objects.none() # included redundantly for docs - - def get_queryset(self): - return Registration.objects.filter(user=self.request.user) - - def get_queryset_current(self): - """ - Returns a superset of all active registrations (also includes cancelled registrations - from the current semester at the head of their resubscribe chains). Returns at most 1 - registration per section (if multiple candidate registrations for a certain section exist, - the registration with the later created_at value is chosen). - """ - registrations = Registration.objects.filter( - user=self.request.user, - deleted=False, - resubscribed_to__isnull=True, - section__course__semester=get_current_semester(), - ) - # Now resolve conflicts where multiple registrations exist for the same section - # (by taking the registration with the later created_at date) - return registrations.filter( - created_at__in=registrations.values("section") - .annotate(max_created_at=Max("created_at")) - .values_list("max_created_at", flat=True) - ) - - -class RegistrationHistoryViewSet(AutoPrefetchViewSetMixin, viewsets.ReadOnlyModelViewSet): - """ - list: - List all of the user's registrations for the current semester, regardless of whether - they are active, obsolete (not at the head of their resubscribe chains), or deleted. Note - that this is not appropriate to use for listing registrations and presenting them to the user; - for that you should use List Registrations (GET `/api/alert/registrations/`). - retrieve: - Get the detail of a specific registration from the current semester. All registrations are - accessible via this endpoint, regardless of whether they are active, - obsolete (not at the head of their resubscribe chains), or deleted. Unless you need to access - inactive and obsolete registrations, you should probably use Retrieve Registration - (GET `/api/alert/registrations/{id}/`) rather than this endpoint. 
- """ - - schema = PcxAutoSchema( - response_codes={ - "registrationhistory-list": { - "GET": {200: "[DESCRIBE_RESPONSE_SCHEMA]Registration history successfully listed."} - }, - "registrationhistory-detail": { - "GET": { - 200: "[DESCRIBE_RESPONSE_SCHEMA]Historic registration detail " - "successfully retrieved.", - 404: "Historic registration not found with given id.", - } - }, - }, - ) - serializer_class = RegistrationSerializer - permission_classes = [IsAuthenticated] - - queryset = Registration.objects.none() # included redundantly for docs - - def get_queryset(self): - return Registration.objects.filter( - user=self.request.user, section__course__semester=get_current_semester() - ).prefetch_related("section") +import base64 +import json +import logging + +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db import IntegrityError, transaction +from django.db.models import Max +from django.http import HttpResponse, JsonResponse +from django.utils import timezone +from django.views.decorators.csrf import csrf_exempt +from django_auto_prefetching import AutoPrefetchViewSetMixin +from rest_framework import status, viewsets +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response + +from alert.models import Registration, RegStatus, register_for_course +from alert.serializers import ( + RegistrationCreateSerializer, + RegistrationSerializer, + RegistrationUpdateSerializer, +) +from alert.tasks import send_course_alerts +from alert.util import pca_registration_open, should_send_pca_alert +from courses.util import ( + get_current_semester, + get_or_create_course_and_section, + record_update, + translate_semester_inv, + update_course_from_record, +) +from PennCourses.docs_settings import PcxAutoSchema + + +logger = logging.getLogger(__name__) + + +def alert_for_course(c_id, semester, sent_by, course_status): + send_course_alerts.delay(c_id, course_status=course_status, semester=semester, sent_by=sent_by) + + +def extract_basic_auth(auth_header): + """ + extract username and password from a basic auth header + :param auth_header: content of the Authorization HTTP header + :return: username and password extracted from the header + """ + parts = auth_header.split(" ") + if parts[0] != "Basic" or len(parts) < 2: + return None, None + + auth_parts = base64.b64decode(parts[1]).split(b":") + if len(auth_parts) < 2: + return None, None + return auth_parts[0].decode(), auth_parts[1].decode() + + +@csrf_exempt +def accept_webhook(request): + auth_header = request.META.get("Authorization", request.META.get("HTTP_AUTHORIZATION", "")) + + username, password = extract_basic_auth(auth_header) + if username != settings.WEBHOOK_USERNAME or password != settings.WEBHOOK_PASSWORD: + return HttpResponse( + """Your credentials cannot be verified. 
+ They should be placed in the header as "Authorization-Bearer", + YOUR_APP_ID and "Authorization-Token" , YOUR_TOKEN""", + status=401, + ) + + if request.method != "POST": + return HttpResponse("Methods other than POST are not allowed", status=405) + + if "json" not in request.content_type.lower(): + return HttpResponse("Request expected in JSON", status=415) + + try: + data = json.loads(request.body) + except json.JSONDecodeError: + return HttpResponse("Error decoding JSON body", status=400) + + course_id = data.get("section_id_normalized", None) + if course_id is None: + return HttpResponse("Course ID could not be extracted from response", status=400) + + course_status = data.get("status", None) + if course_status is None: + return HttpResponse("Course Status could not be extracted from response", status=400) + + prev_status = data.get("previous_status", None) or "" + + try: + course_term = data.get("term", None) + if course_term is None: + return HttpResponse("Course Term could not be extracted from response", status=400) + if any(course_term.endswith(s) for s in ["10", "20", "30"]): + course_term = translate_semester_inv(course_term) + if course_term.upper().endswith("B"): + return JsonResponse({"message": "webhook ignored (summer class)"}) + + _, section, _, _ = get_or_create_course_and_section(course_id, course_term) + + # Ignore duplicate updates + last_status_update = section.last_status_update + if last_status_update and last_status_update.new_status == course_status: + raise ValidationError( + f"Status update received changing section {section} from " + f"{prev_status} to {course_status}, " + f"after previous status update from {last_status_update.old_status} " + f"to {last_status_update.new_status} (duplicate or erroneous).", + ) + + alert_for_course_called = False + if should_send_pca_alert(course_term, course_status): + try: + alert_for_course( + course_id, semester=course_term, sent_by="WEB", course_status=course_status + ) + alert_for_course_called = True + response = JsonResponse({"message": "webhook recieved, alerts sent"}) + except ValueError: + response = JsonResponse({"message": "course code could not be parsed"}) + else: + response = JsonResponse({"message": "webhook recieved"}) + + u = record_update( + section, + course_term, + prev_status, + course_status, + alert_for_course_called, + request.body, + ) + update_course_from_record(u) + except (ValidationError, ValueError) as e: + logger.error(e, extra={"request": request}) + response = JsonResponse( + {"message": "We got an error but webhook should ignore it"}, status=200 + ) + + return response + + +class RegistrationViewSet(AutoPrefetchViewSetMixin, viewsets.ModelViewSet): + """ + retrieve: Get one of the logged-in user's PCA registrations for the current semester, using + the registration's ID. Note that if a registration with the specified ID exists, but that + registration is not at the head of its resubscribe chain (i.e. there is a more recent + registration which was created by resubscribing to the specified registration), the + HEAD of the resubscribe chain will be returned. This means the same registration could be + returned from a GET request to 2 distinct IDs (if they are in the same resubscribe chain). + If a registration with the specified ID exists, a 200 response code is returned, along + with the head registration object. If no registration with the given id exists, + a 404 is returned. + + list: Returns all registrations which are not deleted or made obsolete by resubscription. 
Put
+ another way, this endpoint will return a superset of all active registrations: all
+ active registrations (meaning registrations which would trigger an alert to be sent if their
+ section were to open up), IN ADDITION TO all inactive registrations from the current semester
+ which are at the head of their resubscribe chains and not deleted. However, one extra
+ modification is made: if multiple registrations for the same section are included in the
+ above-specified set, then all but the most recent (latest `created_at` value) are removed from
+ the list. This ensures that at most 1 registration is returned for each section. Note that this
+ is still a superset of all active registrations. If a registration is active, its `created_at`
+ value will be greater than any other registrations for the same section (our code ensures no
+ registration can be created or resubscribed to when an active registration exists for the same
+ section). This extra modification is actually made to prevent the user from being able to
+ resubscribe to an older registration after creating a new one (which would cause the backend to
+ return a 409 error).
+
+ Each object in the returned list of registrations is of the same form as the object returned
+ by Retrieve Registration.
+
+ Tip: if you sort this list by `original_created_at` (the `created_at` value of the tail of
+ a registration's resubscribe chain), cancelling or resubscribing to registrations will
+ not cause the registration to jump to a different place in the list (which makes for
+ more intuitive/understandable behavior for the user if the registrations are displayed
+ in that order). This is what PCA currently does on the manage alerts page.
+
+ create: Use this route to create a PCA registration for a certain section. A PCA registration
+ represents a "subscription" to receive alerts for that section. The body of the request must
+ include a section field (with the dash-separated full code of the section) and optionally
+ can contain an auto_resubscribe field (defaults to false) which sets whether the registration
+ will automatically create a new registration once it triggers an alert (i.e. whether it will
+ automatically resubscribe the user to receive alerts for that section). It can also optionally
+ contain a "close_notification" field, to enable close notifications on the registration.
+ Note that close notifications CANNOT be sent by text so you shouldn't allow the user to
+ enable close notifications for any registration unless they have an email set in their
+ User Profile or have push notifications enabled. If you try to create a registration with
+ close_notification enabled and the user only has texts enabled, a 406 will be returned
+ and the registration will not be created.
+ Note that if you include the "id" field in the body of your POST request, and that id
+ does not already exist, the id of the created registration will be set to the given value.
+ However, if the given id does exist, the request will instead be treated as a PUT request for
+ the registration (see the Update Registration docs for more info on how
+ PUT requests are handled). 
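For illustration, a hedged client-side sketch of the create call. The `/api/alert/registrations/` route and the request/response fields come from these docs; the host, the `requests` library, the example section code, and the authentication handling are assumptions, not prescribed by this view.

import requests

BASE = "https://example.com"  # placeholder host (assumption)

# Hypothetical pre-authenticated session (login/CSRF handling omitted in this sketch).
session = requests.Session()

resp = session.post(
    f"{BASE}/api/alert/registrations/",
    json={
        "section": "CIS-1200-001",    # dash-separated full section code (example value)
        "auto_resubscribe": True,     # optional; defaults to false
        "close_notification": False,  # optional; needs email or push notifications if true
    },
)

if resp.status_code == 201:
    registration_id = resp.json()["id"]  # response body: {"message": ..., "id": ...}
elif resp.status_code == 409:
    print("An active registration for this section already exists")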
+
+ This route returns a 201 if the registration is successfully created, a 400 if the input is
+ invalid (for instance if a null section is given), a 404 if the given section is not found in
+ the database, a 406 if the authenticated user does not have either a phone or an email set
+ in their profile (thus making it impossible for them to receive alerts), and a 409 if the
+ user is already currently registered to receive alerts for the given section. If the request
+ is redirected to update (when the passed in id is already associated with a registration),
+ other response codes may be returned (see the Update Registration docs for more info). If
+ registration is not currently open on PCA, a 503 is returned.
+
+ update: Use this route to update existing PCA Registrations. Note that the provided id does
+ not always strictly specify which Registration gets modified. In fact, the actual Registration
+ that would get modified by a PUT request would be the head of the resubscribe chain of the
+ Registration with the specified id (so if you try to update an outdated Registration it will
+ instead update the most recent Registration). The parameters which can be
+ included in the request body are `resubscribe`, `auto_resubscribe`, `close_notification`,
+ `cancelled`, and `deleted`. If you include multiple parameters, the order of precedence in
+ choosing what action to take is `resubscribe` > `deleted` > `cancelled` >
+ [`auto_resubscribe` and `close_notification`] (so if you include multiple
+ parameters only the action associated with the highest priority parameter will be executed,
+ except both auto_resubscribe and close_notification can be updated in the same request).
+ Note that close notifications CANNOT be sent by text so you shouldn't allow the user to
+ enable close notifications for any registration unless they have an email set in their
+ User Profile or have push notifications enabled. If you try to update a registration to
+ enable close_notification and the user only has texts enabled, a 406 will be returned
+ and the registration will not be updated.
+ Note that a registration will send an alert when the section it is watching opens, if and only
+ if it hasn't sent one before, it isn't cancelled, and it isn't deleted. If a registration would
+ send an alert when the section it is watching opens, we call it "active". Registrations which
+ have triggered an alert can be resubscribed to (which creates a new registration with the same
+ settings and adds that to the end of the original registration's resubscribe chain). Triggered
+ registrations would show up in List Registrations (as long as they are at the head of
+ their resubscribe chain), even though they aren't active. Cancelled registrations can also
+ be resubscribed to (in effect uncancelling), and also show up in List Registrations, despite
+ not being active. A user might cancel an alert rather than delete it if they want to keep it
+ in their PCA manage console but don't want to receive alerts for it. Deleted registrations
+ are not active, do not show up in List Registrations, and cannot be resubscribed to. You can
+ think of deleted registrations as effectively nonexistent; they are only kept on the backend
+ for analytics purposes. Note that while you can cancel a registration by setting the cancelled
+ parameter to true in your PUT request (and the same for delete/deleted), you cannot uncancel
+ or undelete (cancelled registrations can be resubscribed to and deleted registrations
+ are effectively nonexistent). 
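As a companion sketch (same hypothetical `session`, `BASE`, and `registration_id` as in the create example above), this is how a client might resubscribe and then adjust settings via PUT on the `/api/alert/registrations/{id}/` route referenced in the Retrieve Registration docs. Per the precedence rules above, a body containing `resubscribe` ignores any lower-priority keys.

# Resubscribe to a triggered or cancelled registration.
resp = session.put(
    f"{BASE}/api/alert/registrations/{registration_id}/",
    json={"resubscribe": True},  # takes precedence over any other keys in the body
)

if resp.status_code == 200:
    new_id = resp.json()["id"]  # id of the new head of the resubscribe chain
elif resp.status_code == 503:
    print("PCA registration is not currently open")

# Settings-only update: auto_resubscribe and close_notification may change in one request.
# The old id still works because the view resolves it to the head of the resubscribe chain.
# This can return a 406 if the user has only texts enabled (see above).
session.put(
    f"{BASE}/api/alert/registrations/{registration_id}/",
    json={"auto_resubscribe": False, "close_notification": True},
)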
+ + If the update is successful, a 200 is returned. If there is some issue with the request, + a 400 is returned. This could be caused by trying to update a registration from a different + semester, trying to resubscribe to a deleted registration, resubscribing to a registration + which is not cancelled or hasn't yet triggered a notification, cancelling a deleted or + triggered registration, trying to make changes to a deleted registration, or otherwise + breaking rules. Look in the detail field of the response object for more detail on what + exactly went wrong if you encounter a 400. If no registration with the given id is found, + a 404 is returned. If registration is not currently open on PCA, a 503 is returned. + """ + + schema = PcxAutoSchema( + response_codes={ + "registrations-list": { + "POST": { + 201: "[DESCRIBE_RESPONSE_SCHEMA]Registration successfully created.", + 400: "Bad request (e.g. given null section).", + 404: "Given section not found in database.", + 406: "No contact information (phone or email) set for user.", + 409: "Registration for given section already exists.", + 503: "Registration not currently open.", + }, + "GET": {200: "[DESCRIBE_RESPONSE_SCHEMA]Registrations successfully listed."}, + }, + "registrations-detail": { + "PUT": { + 200: "Registration successfully updated (or no changes necessary).", + 400: "Bad request (see route description).", + 404: "Registration not found with given id.", + 503: "Registration not currently open.", + }, + "GET": { + 200: "[DESCRIBE_RESPONSE_SCHEMA]Registration detail successfully retrieved.", + 404: "Registration not found with given id.", + }, + }, + }, + override_response_schema={ + "registrations-list": { + "POST": { + 201: {"properties": {"message": {"type": "string"}, "id": {"type": "integer"}}}, + } + } + }, + ) + http_method_names = ["get", "post", "put"] + permission_classes = [IsAuthenticated] + + def get_serializer_class(self): + if self.action == "create": + return RegistrationCreateSerializer + elif self.action == "update": + return RegistrationUpdateSerializer + else: + return RegistrationSerializer + + @staticmethod + def handle_registration(request): + if not pca_registration_open(): + return Response( + {"message": "Registration is not open."}, + status=status.HTTP_503_SERVICE_UNAVAILABLE, + ) + + section_code = request.data.get("section", None) + + if section_code is None: + return Response( + {"message": "You must include a not null section"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + res, normalized_course_code, reg = register_for_course( + course_code=section_code, + source="PCA", + user=request.user, + auto_resub=request.data.get("auto_resubscribe", False), + close_notification=request.data.get("close_notification", False), + ) + + if res == RegStatus.SUCCESS: + return Response( + { + "message": "Your registration for %s was successful!" % normalized_course_code, + "id": reg.pk, + }, + status=status.HTTP_201_CREATED, + ) + elif res == RegStatus.OPEN_REG_EXISTS: + return Response( + { + "message": "You've already registered to get alerts for %s!" + % normalized_course_code + }, + status=status.HTTP_409_CONFLICT, + ) + elif res == RegStatus.COURSE_NOT_FOUND: + return Response( + { + "message": "%s did not match any course in our database. Please try again!" + % section_code + }, + status=status.HTTP_404_NOT_FOUND, + ) + elif res == RegStatus.NO_CONTACT_INFO: + return Response( + { + "message": "You must set a phone number and/or an email address to " + "register for an alert." 
+ }, + status=status.HTTP_406_NOT_ACCEPTABLE, + ) + elif res == RegStatus.TEXT_CLOSE_NOTIFICATION: + return Response( + { + "message": "You can only enable close notifications on a registration if the " + "user enables some form of communication other than just texts (we don't " + "send any close notifications by text)." + }, + status=status.HTTP_406_NOT_ACCEPTABLE, + ) + else: + return Response( + {"message": "There was an error on our end. Please try again!"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + def list(self, request, *args, **kwargs): + queryset = self.filter_queryset(self.get_queryset_current()) + + page = self.paginate_queryset(queryset) + if page is not None: + serializer = self.get_serializer(page, many=True) + return self.get_paginated_response(serializer.data) + + serializer = self.get_serializer(queryset, many=True) + return Response(serializer.data) + + def retrieve(self, request, *args, **kwargs): + instance = self.get_object().get_most_current() + serializer = self.get_serializer(instance) + return Response(serializer.data) + + def update(self, request, pk=None): + if not Registration.objects.filter(id=pk).exists(): + return Response({"detail": "Not found."}, status=status.HTTP_404_NOT_FOUND) + with transaction.atomic(): + try: + registration = self.get_queryset().select_for_update().get(id=pk) + except Registration.DoesNotExist: + return Response( + {"detail": "You do not have access to the specified registration."}, + status=status.HTTP_403_FORBIDDEN, + ) + + registration = registration.get_most_current() + + if registration.section.semester != get_current_semester(): + return Response( + {"detail": "You can only update registrations from the current semester."}, + status=status.HTTP_400_BAD_REQUEST, + ) + try: + if request.data.get("resubscribe", False): + if not pca_registration_open(): + return Response( + {"message": "Registration is not open."}, + status=status.HTTP_503_SERVICE_UNAVAILABLE, + ) + if registration.deleted: + return Response( + {"detail": "You cannot resubscribe to a deleted registration."}, + status=status.HTTP_400_BAD_REQUEST, + ) + if not registration.notification_sent and not registration.cancelled: + return Response( + { + "detail": "You can only resubscribe to a registration that " + "has already been sent or has been cancelled." + }, + status=status.HTTP_400_BAD_REQUEST, + ) + if registration.section.registrations.filter( + user=registration.user, **Registration.is_active_filter() + ).exists(): + # An active registration for this section already exists + return Response( + { + "message": "You've already registered to get alerts for %s!" 
+ % registration.section.full_code + }, + status=status.HTTP_409_CONFLICT, + ) + resub = registration.resubscribe() + return Response( + {"detail": "Resubscribed successfully", "id": resub.id}, + status=status.HTTP_200_OK, + ) + elif request.data.get("deleted", False): + changed = not registration.deleted + registration.deleted = True + registration.save() + if changed: # else taken care of in generic return statement + registration.deleted_at = timezone.now() + registration.save() + return Response( + {"detail": "Registration deleted"}, status=status.HTTP_200_OK + ) + elif request.data.get("cancelled", False): + if registration.deleted: + return Response( + {"detail": "You cannot cancel a deleted registration."}, + status=status.HTTP_400_BAD_REQUEST, + ) + if registration.notification_sent: + return Response( + {"detail": "You cannot cancel a sent registration."}, + status=status.HTTP_400_BAD_REQUEST, + ) + changed = not registration.cancelled + registration.cancelled = True + registration.save() + if changed: # else taken care of in generic return statement + registration.cancelled_at = timezone.now() + registration.save() + return Response( + {"detail": "Registration cancelled"}, status=status.HTTP_200_OK + ) + elif "auto_resubscribe" in request.data or "close_notification" in request.data: + if registration.deleted: + return Response( + {"detail": "You cannot make changes to a deleted registration."}, + status=status.HTTP_400_BAD_REQUEST, + ) + auto_resubscribe_changed = registration.auto_resubscribe != request.data.get( + "auto_resubscribe", registration.auto_resubscribe + ) + close_notification_changed = ( + registration.close_notification + != request.data.get("close_notification", registration.close_notification) + ) + if ( + request.data.get("close_notification", registration.close_notification) + and not request.user.profile.email + and not request.user.profile.push_notifications + ): + return Response( + { + "detail": "You cannot enable close_notifications with only " + "your phone number saved in your user profile." 
+ }, + status=status.HTTP_406_NOT_ACCEPTABLE, + ) + changed = auto_resubscribe_changed or close_notification_changed + registration.auto_resubscribe = request.data.get( + "auto_resubscribe", registration.auto_resubscribe + ) + registration.close_notification = request.data.get( + "close_notification", registration.close_notification + ) + registration.save() + if changed: # else taken care of in generic return statement + return Response( + { + "detail": ", ".join( + ( + [ + "auto_resubscribe updated to " + + str(registration.auto_resubscribe) + ] + if auto_resubscribe_changed + else [] + ) + + ( + [ + "close_notification updated to " + + str(registration.close_notification) + ] + if close_notification_changed + else [] + ) + ) + }, + status=status.HTTP_200_OK, + ) + except IntegrityError as e: + return Response( + { + "detail": "IntegrityError encountered while trying to update: " + + str(e.__cause__) + }, + status=status.HTTP_400_BAD_REQUEST, + ) + return Response({"detail": "no changes made"}, status=status.HTTP_200_OK) + + def create(self, request, *args, **kwargs): + if Registration.objects.filter(id=request.data.get("id")).exists(): + return self.update(request, request.data.get("id")) + return self.handle_registration(request) + + queryset = Registration.objects.none() # included redundantly for docs + + def get_queryset(self): + return Registration.objects.filter(user=self.request.user) + + def get_queryset_current(self): + """ + Returns a superset of all active registrations (also includes cancelled registrations + from the current semester at the head of their resubscribe chains). Returns at most 1 + registration per section (if multiple candidate registrations for a certain section exist, + the registration with the later created_at value is chosen). + """ + registrations = Registration.objects.filter( + user=self.request.user, + deleted=False, + resubscribed_to__isnull=True, + section__course__semester=get_current_semester(), + ) + # Now resolve conflicts where multiple registrations exist for the same section + # (by taking the registration with the later created_at date) + return registrations.filter( + created_at__in=registrations.values("section") + .annotate(max_created_at=Max("created_at")) + .values_list("max_created_at", flat=True) + ) + + +class RegistrationHistoryViewSet(AutoPrefetchViewSetMixin, viewsets.ReadOnlyModelViewSet): + """ + list: + List all of the user's registrations for the current semester, regardless of whether + they are active, obsolete (not at the head of their resubscribe chains), or deleted. Note + that this is not appropriate to use for listing registrations and presenting them to the user; + for that you should use List Registrations (GET `/api/alert/registrations/`). + retrieve: + Get the detail of a specific registration from the current semester. All registrations are + accessible via this endpoint, regardless of whether they are active, + obsolete (not at the head of their resubscribe chains), or deleted. Unless you need to access + inactive and obsolete registrations, you should probably use Retrieve Registration + (GET `/api/alert/registrations/{id}/`) rather than this endpoint. 
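Unlike this history endpoint, which returns every registration for the semester, List Registrations keeps at most one registration per section; `get_queryset_current` above implements that rule with the `Max("created_at")` annotation. The following plain-Python analogue (a sketch over toy dicts with hypothetical values, not the ORM query itself) shows the rule it applies.

# Toy stand-ins for candidate registrations (already filtered to the current
# semester, not deleted, and at the head of their resubscribe chains).
candidates = [
    {"id": 1, "section": "CIS-1200-001", "created_at": "2023-09-01T10:00:00Z"},
    {"id": 2, "section": "CIS-1200-001", "created_at": "2023-10-15T08:30:00Z"},
    {"id": 3, "section": "MATH-1400-002", "created_at": "2023-09-20T12:00:00Z"},
]

# Keep only the registration with the greatest created_at for each section.
latest_per_section = {}
for reg in candidates:
    best = latest_per_section.get(reg["section"])
    if best is None or reg["created_at"] > best["created_at"]:
        latest_per_section[reg["section"]] = reg

# Mirrors the Max("created_at") filter above: ids 2 and 3 survive, id 1 is dropped.
deduplicated = sorted(latest_per_section.values(), key=lambda r: r["id"])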
+ """ + + schema = PcxAutoSchema( + response_codes={ + "registrationhistory-list": { + "GET": {200: "[DESCRIBE_RESPONSE_SCHEMA]Registration history successfully listed."} + }, + "registrationhistory-detail": { + "GET": { + 200: "[DESCRIBE_RESPONSE_SCHEMA]Historic registration detail " + "successfully retrieved.", + 404: "Historic registration not found with given id.", + } + }, + }, + ) + serializer_class = RegistrationSerializer + permission_classes = [IsAuthenticated] + + queryset = Registration.objects.none() # included redundantly for docs + + def get_queryset(self): + return Registration.objects.filter( + user=self.request.user, section__course__semester=get_current_semester() + ).prefetch_related("section") diff --git a/backend/courses/admin.py b/backend/courses/admin.py index 169bfc348..b6a27410d 100644 --- a/backend/courses/admin.py +++ b/backend/courses/admin.py @@ -1,224 +1,224 @@ -from django.contrib import admin -from django.contrib.auth.admin import UserAdmin -from django.contrib.auth.models import User -from django.template import loader -from django.urls import reverse -from django.utils.html import format_html, format_html_join - -from courses.models import ( - APIKey, - APIPrivilege, - Attribute, - Building, - Course, - Department, - Instructor, - Meeting, - NGSSRestriction, - PreNGSSRequirement, - PreNGSSRestriction, - Room, - Section, - StatusUpdate, - Topic, - UserProfile, -) - - -# User Profile: https://github.com/sibtc/django-admin-user-profile -class ProfileInline(admin.StackedInline): - model = UserProfile - can_delete = False - verbose_name_plural = "Profile" - fk_name = "user" - - -class CustomUserAdmin(UserAdmin): - inlines = (ProfileInline,) - - def get_inline_instances(self, request, obj=None): - if not obj: - return list() - return super(CustomUserAdmin, self).get_inline_instances(request, obj) - - -class DepartmentAdmin(admin.ModelAdmin): - search_fields = ("code",) - - -class InstructorAdmin(admin.ModelAdmin): - search_fields = ("name",) - - -class AttributeAdmin(admin.ModelAdmin): - search_fields = ( - "code", - "description", - ) - list_display = ("code", "school", "description") - exclude = ("courses",) - - -class NGSSRestrictionAdmin(admin.ModelAdmin): - search_fields = ( - "code", - "restriction_type", - "description", - ) - list_display = ("code", "restriction_type", "inclusive", "description") - exclude = ("courses",) - - -class CourseAdmin(admin.ModelAdmin): - search_fields = ("full_code", "department__code", "code", "semester", "title") - autocomplete_fields = ("department", "primary_listing") - readonly_fields = ("topic", "crosslistings", "course_attributes") - exclude = ("attributes",) - list_filter = ("semester",) - list_display = ("full_code", "semester", "title") - - list_select_related = ("department", "topic") - - def crosslistings(self, instance): - return format_html_join( - "\n", - '
  • {}
  • ', - ( - ( - reverse("admin:courses_course_change", args=[c.id]), - str(c), - ) - for c in instance.crosslistings.all() - ), - ) - - def course_attributes(self, instance): - return format_html_join( - "\n", - '
  • {}
  • ', - ( - ( - reverse("admin:courses_attribute_change", args=[a.id]), - str(a), - ) - for a in instance.attributes.all() - ), - ) - - -class TopicAdmin(admin.ModelAdmin): - readonly_fields = ( - "courses", - "branched_from", - ) - search_fields = ( - "id", - "most_recent__full_code", - ) - list_select_related = ("most_recent",) - - def get_queryset(self, request): - return super().get_queryset(request).prefetch_related("courses") - - def formfield_for_foreignkey(self, db_field, request, **kwargs): - # Hack to limit most_recent choices to courses of the same Topic - if db_field.name == "most_recent" and request.resolver_match.kwargs.get("object_id"): - topic_id = request.resolver_match.kwargs["object_id"] - kwargs["queryset"] = Course.objects.filter(topic_id=topic_id) - return super().formfield_for_foreignkey(db_field, request, **kwargs) - - def courses(self, instance): - t = loader.get_template("topic_courses_admin.html") - courses = instance.courses.all() - for course in courses: - course.a_link = reverse("admin:courses_course_change", args=[course.id]) - return t.render({"courses": instance.courses.all()}) - - def branched_from_id(self, instance): - """ - The original topic from which this topic branched. - """ - if instance.branched_from_id is None: - return "None" - link = reverse("admin:courses_topic_change", args=[instance.branched_from_id]) - return format_html('{}', link, str(instance.branched_from_id)) - - -class SectionAdmin(admin.ModelAdmin): - search_fields = ( - "full_code", - "course__department__code", - "course__code", - "code", - "course__semester", - ) - readonly_fields = ("course_link",) - autocomplete_fields = ( - "instructors", - "course", - "associated_sections", - ) - list_filter = ("course__semester",) - - list_display = ["full_code", "semester", "status"] - - list_select_related = ("course", "course__department") - - def get_object(self, request, object_id, from_field=None): - # Hook obj for use in formfield_for_manytomany - self.obj = super().get_object(request, object_id, from_field) - return self.obj - - def formfield_for_manytomany(self, db_field, request, **kwargs): - # Filter displayed restrictions by whether this section has that restriction - if db_field.name == "pre_ngss_restrictions": - kwargs["queryset"] = PreNGSSRestriction.objects.filter(sections__id=self.obj.id) - return super().formfield_for_manytomany(db_field, request, **kwargs) - - def course_link(self, instance): - link = reverse("admin:courses_course_change", args=[instance.course.id]) - return format_html('{}', link, instance.course.__str__()) - - -class MeetingAdmin(admin.ModelAdmin): - list_select_related = ( - "section", - "room", - "room__building", - "section__course", - "section__course__department", - ) - autocomplete_fields = ["section"] - - -class PreNGSSRequirementAdmin(admin.ModelAdmin): - autocomplete_fields = ("departments", "courses", "overrides") - - -class StatusUpdateAdmin(admin.ModelAdmin): - autocomplete_fields = ("section",) - readonly_fields = ("created_at",) - list_filter = ("section__course__semester",) - list_select_related = ["section", "section__course", "section__course__department"] - search_fields = ("section__full_code",) - - -admin.site.register(APIKey) -admin.site.register(APIPrivilege) -admin.site.register(Department, DepartmentAdmin) -admin.site.register(Course, CourseAdmin) -admin.site.register(Topic, TopicAdmin) -admin.site.register(Section, SectionAdmin) -admin.site.register(Building) -admin.site.register(Room) -admin.site.register(PreNGSSRequirement, 
PreNGSSRequirementAdmin) -admin.site.register(PreNGSSRestriction) -admin.site.register(Instructor, InstructorAdmin) -admin.site.register(Meeting, MeetingAdmin) -admin.site.register(StatusUpdate, StatusUpdateAdmin) -admin.site.register(Attribute, AttributeAdmin) -admin.site.register(NGSSRestriction, NGSSRestrictionAdmin) - -# https://github.com/sibtc/django-admin-user-profile -admin.site.unregister(User) -admin.site.register(User, CustomUserAdmin) +from django.contrib import admin +from django.contrib.auth.admin import UserAdmin +from django.contrib.auth.models import User +from django.template import loader +from django.urls import reverse +from django.utils.html import format_html, format_html_join + +from courses.models import ( + APIKey, + APIPrivilege, + Attribute, + Building, + Course, + Department, + Instructor, + Meeting, + NGSSRestriction, + PreNGSSRequirement, + PreNGSSRestriction, + Room, + Section, + StatusUpdate, + Topic, + UserProfile, +) + + +# User Profile: https://github.com/sibtc/django-admin-user-profile +class ProfileInline(admin.StackedInline): + model = UserProfile + can_delete = False + verbose_name_plural = "Profile" + fk_name = "user" + + +class CustomUserAdmin(UserAdmin): + inlines = (ProfileInline,) + + def get_inline_instances(self, request, obj=None): + if not obj: + return list() + return super(CustomUserAdmin, self).get_inline_instances(request, obj) + + +class DepartmentAdmin(admin.ModelAdmin): + search_fields = ("code",) + + +class InstructorAdmin(admin.ModelAdmin): + search_fields = ("name",) + + +class AttributeAdmin(admin.ModelAdmin): + search_fields = ( + "code", + "description", + ) + list_display = ("code", "school", "description") + exclude = ("courses",) + + +class NGSSRestrictionAdmin(admin.ModelAdmin): + search_fields = ( + "code", + "restriction_type", + "description", + ) + list_display = ("code", "restriction_type", "inclusive", "description") + exclude = ("courses",) + + +class CourseAdmin(admin.ModelAdmin): + search_fields = ("full_code", "department__code", "code", "semester", "title") + autocomplete_fields = ("department", "primary_listing") + readonly_fields = ("topic", "crosslistings", "course_attributes") + exclude = ("attributes",) + list_filter = ("semester",) + list_display = ("full_code", "semester", "title") + + list_select_related = ("department", "topic") + + def crosslistings(self, instance): + return format_html_join( + "\n", + '
  • {}
  • ', + ( + ( + reverse("admin:courses_course_change", args=[c.id]), + str(c), + ) + for c in instance.crosslistings.all() + ), + ) + + def course_attributes(self, instance): + return format_html_join( + "\n", + '
  • {}
  • ', + ( + ( + reverse("admin:courses_attribute_change", args=[a.id]), + str(a), + ) + for a in instance.attributes.all() + ), + ) + + +class TopicAdmin(admin.ModelAdmin): + readonly_fields = ( + "courses", + "branched_from", + ) + search_fields = ( + "id", + "most_recent__full_code", + ) + list_select_related = ("most_recent",) + + def get_queryset(self, request): + return super().get_queryset(request).prefetch_related("courses") + + def formfield_for_foreignkey(self, db_field, request, **kwargs): + # Hack to limit most_recent choices to courses of the same Topic + if db_field.name == "most_recent" and request.resolver_match.kwargs.get("object_id"): + topic_id = request.resolver_match.kwargs["object_id"] + kwargs["queryset"] = Course.objects.filter(topic_id=topic_id) + return super().formfield_for_foreignkey(db_field, request, **kwargs) + + def courses(self, instance): + t = loader.get_template("topic_courses_admin.html") + courses = instance.courses.all() + for course in courses: + course.a_link = reverse("admin:courses_course_change", args=[course.id]) + return t.render({"courses": instance.courses.all()}) + + def branched_from_id(self, instance): + """ + The original topic from which this topic branched. + """ + if instance.branched_from_id is None: + return "None" + link = reverse("admin:courses_topic_change", args=[instance.branched_from_id]) + return format_html('{}', link, str(instance.branched_from_id)) + + +class SectionAdmin(admin.ModelAdmin): + search_fields = ( + "full_code", + "course__department__code", + "course__code", + "code", + "course__semester", + ) + readonly_fields = ("course_link",) + autocomplete_fields = ( + "instructors", + "course", + "associated_sections", + ) + list_filter = ("course__semester",) + + list_display = ["full_code", "semester", "status"] + + list_select_related = ("course", "course__department") + + def get_object(self, request, object_id, from_field=None): + # Hook obj for use in formfield_for_manytomany + self.obj = super().get_object(request, object_id, from_field) + return self.obj + + def formfield_for_manytomany(self, db_field, request, **kwargs): + # Filter displayed restrictions by whether this section has that restriction + if db_field.name == "pre_ngss_restrictions": + kwargs["queryset"] = PreNGSSRestriction.objects.filter(sections__id=self.obj.id) + return super().formfield_for_manytomany(db_field, request, **kwargs) + + def course_link(self, instance): + link = reverse("admin:courses_course_change", args=[instance.course.id]) + return format_html('{}', link, instance.course.__str__()) + + +class MeetingAdmin(admin.ModelAdmin): + list_select_related = ( + "section", + "room", + "room__building", + "section__course", + "section__course__department", + ) + autocomplete_fields = ["section"] + + +class PreNGSSRequirementAdmin(admin.ModelAdmin): + autocomplete_fields = ("departments", "courses", "overrides") + + +class StatusUpdateAdmin(admin.ModelAdmin): + autocomplete_fields = ("section",) + readonly_fields = ("created_at",) + list_filter = ("section__course__semester",) + list_select_related = ["section", "section__course", "section__course__department"] + search_fields = ("section__full_code",) + + +admin.site.register(APIKey) +admin.site.register(APIPrivilege) +admin.site.register(Department, DepartmentAdmin) +admin.site.register(Course, CourseAdmin) +admin.site.register(Topic, TopicAdmin) +admin.site.register(Section, SectionAdmin) +admin.site.register(Building) +admin.site.register(Room) +admin.site.register(PreNGSSRequirement, 
PreNGSSRequirementAdmin) +admin.site.register(PreNGSSRestriction) +admin.site.register(Instructor, InstructorAdmin) +admin.site.register(Meeting, MeetingAdmin) +admin.site.register(StatusUpdate, StatusUpdateAdmin) +admin.site.register(Attribute, AttributeAdmin) +admin.site.register(NGSSRestriction, NGSSRestrictionAdmin) + +# https://github.com/sibtc/django-admin-user-profile +admin.site.unregister(User) +admin.site.register(User, CustomUserAdmin) diff --git a/backend/courses/apps.py b/backend/courses/apps.py index 463a06257..2085a1065 100644 --- a/backend/courses/apps.py +++ b/backend/courses/apps.py @@ -1,5 +1,5 @@ -from django.apps import AppConfig - - -class CoursesConfig(AppConfig): - name = "courses" +from django.apps import AppConfig + + +class CoursesConfig(AppConfig): + name = "courses" diff --git a/backend/courses/course_similarity/heuristics.py b/backend/courses/course_similarity/heuristics.py index 07e21835f..72daef114 100644 --- a/backend/courses/course_similarity/heuristics.py +++ b/backend/courses/course_similarity/heuristics.py @@ -1,87 +1,87 @@ -import re - - -def title_rejection_heuristics(title_a, title_b): - """ - Handle special cases indicating dissimilarity and return True if they occur, False otherwise. - 0. At least one string is only whitespace - 1. The title equals "dissertation" - 2. Identify if a course title is different only by a single roman numeral or digit: - ie CIS-120 is "Programming Languages and Techniques I" and CIS-121 is - "Programming Languages and Techniques II". The specific means of doing - this is to check if the segment directly preceding a roman numeral or - number is identical. If it is then the title falls into this case. - 3. Identify if a course differs by "beginner, intermediate, or advanced" at - the start of the title (or synonyms for each respective word). Note - additional levels like "Advanced intermediate" only have their first - word (e.g., "Advanced") considered. Note also that if one title doesn't - have such a first word, but the other does, False is returned. - """ - # Case 0 - if title_a == "" or title_b == "": - return True - - # Case 1 - if title_a == "dissertation" or title_b == "dissertation": - return True - - # Case 2 - sequels_regex = re.compile(r"\s+(\d+|ix|iv|v?i{0,3})$") - match_a, match_b = sequels_regex.search(title_a), sequels_regex.search(title_b) - if (match_a is not None) != (match_b is not None): - return True - if match_a and match_b and match_a.group(1) != match_b.group(1): - return True - - # Case 3 - levels = { - "introductory": 0, - "intruduction": 0, - "beginner": 0, - "elementary": 0, - "intermediate": 1, - "advanced": 2, - } - - def get_level(title): - level = -1 - for keyword, lev in levels.items(): - if keyword in title: - level = lev - return level - - if get_level(title_a) != get_level(title_b): - return True - - return False - - -def description_rejection_heuristics(desc_a, desc_b): - """ - Handle special cases (specifically when the description is non-informative because it does not - contain course-specific content) and return True if they occur, False otherwise. - 0. At least one string is only whitespace - 1. Identify if either description is of the form "topics may vary" (or some variation) - 2. 
Identify if either description is of the form "see department website" (or some variation) - """ - # Case 0 - if desc_a == "" or desc_b == "": - return True - - # Case 1 - topics_vary_regex = re.compile(r"topics\s*.*vary") - - # Case 2 - exclude_strings = [ - "department website for a current course description", - "complete description of the current offerings", - "department website for current description", - ] - for exclude_string in exclude_strings: - if exclude_string in desc_a or exclude_string in desc_b: - return True - for regex in [topics_vary_regex]: - if regex.search(desc_a) or regex.search(desc_b): - return True - - return False +import re + + +def title_rejection_heuristics(title_a, title_b): + """ + Handle special cases indicating dissimilarity and return True if they occur, False otherwise. + 0. At least one string is only whitespace + 1. The title equals "dissertation" + 2. Identify if a course title is different only by a single roman numeral or digit: + ie CIS-120 is "Programming Languages and Techniques I" and CIS-121 is + "Programming Languages and Techniques II". The specific means of doing + this is to check if the segment directly preceding a roman numeral or + number is identical. If it is then the title falls into this case. + 3. Identify if a course differs by "beginner, intermediate, or advanced" at + the start of the title (or synonyms for each respective word). Note + additional levels like "Advanced intermediate" only have their first + word (e.g., "Advanced") considered. Note also that if one title doesn't + have such a first word, but the other does, False is returned. + """ + # Case 0 + if title_a == "" or title_b == "": + return True + + # Case 1 + if title_a == "dissertation" or title_b == "dissertation": + return True + + # Case 2 + sequels_regex = re.compile(r"\s+(\d+|ix|iv|v?i{0,3})$") + match_a, match_b = sequels_regex.search(title_a), sequels_regex.search(title_b) + if (match_a is not None) != (match_b is not None): + return True + if match_a and match_b and match_a.group(1) != match_b.group(1): + return True + + # Case 3 + levels = { + "introductory": 0, + "intruduction": 0, + "beginner": 0, + "elementary": 0, + "intermediate": 1, + "advanced": 2, + } + + def get_level(title): + level = -1 + for keyword, lev in levels.items(): + if keyword in title: + level = lev + return level + + if get_level(title_a) != get_level(title_b): + return True + + return False + + +def description_rejection_heuristics(desc_a, desc_b): + """ + Handle special cases (specifically when the description is non-informative because it does not + contain course-specific content) and return True if they occur, False otherwise. + 0. At least one string is only whitespace + 1. Identify if either description is of the form "topics may vary" (or some variation) + 2. 
Identify if either description is of the form "see department website" (or some variation) + """ + # Case 0 + if desc_a == "" or desc_b == "": + return True + + # Case 1 + topics_vary_regex = re.compile(r"topics\s*.*vary") + + # Case 2 + exclude_strings = [ + "department website for a current course description", + "complete description of the current offerings", + "department website for current description", + ] + for exclude_string in exclude_strings: + if exclude_string in desc_a or exclude_string in desc_b: + return True + for regex in [topics_vary_regex]: + if regex.search(desc_a) or regex.search(desc_b): + return True + + return False diff --git a/backend/courses/filters.py b/backend/courses/filters.py index 0df2627d4..3f5a01120 100644 --- a/backend/courses/filters.py +++ b/backend/courses/filters.py @@ -1,513 +1,513 @@ -from decimal import Decimal - -from django.core.exceptions import BadRequest -from django.db.models import Count, Exists, OuterRef, Q -from django.db.models.expressions import F, Subquery -from lark import Lark, Transformer, Tree -from lark.exceptions import UnexpectedInput -from rest_framework import filters - -from courses.models import Course, Meeting, PreNGSSRequirement, Section -from courses.util import get_current_semester -from plan.models import Schedule - - -def section_ids_by_meeting_query(meeting_query): - """ - Returns a queryset of the ids of sections for which all meetings pass the - given meeting query. - """ - return ( - Meeting.objects.filter(meeting_query) - .values("section") - .annotate(num_matching_meetings=Count("id")) - .order_by() - .filter(section__num_meetings=F("num_matching_meetings")) - .values("section_id") - .distinct() - ) - - -def course_ids_by_section_query(section_query): - """ - Returns a queryset of the ids of courses for which at least one section - of each activity type passes the given section query. - """ - return ( - Section.objects.filter(section_query) - .values("course") - .annotate(num_matching_activities=Count("activity", distinct=True)) - .order_by() - .filter(course__num_activities=F("num_matching_activities")) - .values("course_id") - .distinct() - ) - - -def meeting_filter(queryset, meeting_query): - """ - Filters the given queryset of courses by the following condition: - include a course only if the specified meeting filter - (meeting_query, represented as a Q() query object) - does not limit the set of section activities we can participate in for the course. - For instance, if meeting_query=Q(day__in={"T","W","R"}), - then we would include a course with lecture and recitation sections only if - we could enroll in some lecture section and some recitation section and - only have to attend meetings on Tuesdays, Wednesdays, and/or Thursdays. - However, if the course had a lab section that only met on Fridays, - we would no longer include the course (since we cannot attend the meetings of the - lab section, and thus the set of course activities available to us is incomplete). - """ - return queryset.filter( - id__in=course_ids_by_section_query( - Q(num_meetings=0) | Q(id__in=section_ids_by_meeting_query(meeting_query)) - ) - ) - - -def is_open_filter(queryset, *args): - """ - Filters the given queryset of courses by the following condition: - include a course only if filtering its sections by `status="O"` does - not does not limit the set of section activities we can participate in for the course. - In other words, include only courses for which all activities have open sections. 
- Note that for compatibility, this function can take additional positional - arguments, but these are ignored. - """ - return queryset.filter(id__in=course_ids_by_section_query(Q(status="O"))) - - -def day_filter(days): - """ - Constructs a Q() query object for filtering meetings by day, - based on the given days filter string. - """ - days = set(days) - if not days.issubset({"M", "T", "W", "R", "F", "S", "U"}): - return Q() - return Q(day__isnull=True) | Q(day__in=set(days)) - - -def time_filter(time_range): - """ - Constructs a Q() query object for filtering meetings by start/end time, - based on the given time_range filter string. - """ - if not time_range: - return Q() - times = time_range.split("-") - if len(times) != 2: - return Q() - times = [t.strip() for t in times] - for time in times: - if time and not time.replace(".", "", 1).isdigit(): - return Q() - start_time, end_time = times - query = Q() - if start_time: - query &= Q(start__isnull=True) | Q(start__gte=Decimal(start_time)) - if end_time: - query &= Q(end__isnull=True) | Q(end__lte=Decimal(end_time)) - return query - - -def gen_schedule_filter(request): - """ - Generates a schedule filter function that checks for proper - authentication in the given request. - """ - - def schedule_filter(schedule_id): - """ - Constructs a Q() query object for filtering meetings by - whether they fit into the specified schedule. - """ - if not schedule_id: - return Q() - if not schedule_id.isdigit(): - return Q() - if not request.user.is_authenticated: - return Q() - meetings = Meeting.objects.filter( - section_id__in=Subquery( - Schedule.objects.filter(id=int(schedule_id), person_id=request.user.id).values( - "sections__id" - ) - ) - ) - query = Q() - for meeting in meetings: - query &= meeting.no_conflict_query - return query - - return schedule_filter - - -def pre_ngss_requirement_filter(queryset, req_ids): - if not req_ids: - return queryset - query = Q() - for req_id in req_ids.split(","): - code, school = req_id.split("@") - try: - requirement = PreNGSSRequirement.objects.get( - code=code, school=school, semester=get_current_semester() - ) - except PreNGSSRequirement.DoesNotExist: - continue - query &= Q(id__in=requirement.satisfying_courses.all()) - - return queryset.filter(query) - - -# See the attribute_filter docstring for an explanation of this grammar -# https://lark-parser.readthedocs.io/en/latest/examples/calc.html -attribute_query_parser = Lark( - r""" - ?expr : or_expr - - ?or_expr : and_expr - | and_expr "|" or_expr -> disjunction - - ?and_expr : atom - | atom "*" and_expr -> conjunction - - ?atom : attribute - | "~" atom -> negation - | "(" or_expr ")" - - attribute : WORD - - %import common.WORD - %import common.WS - %ignore WS - """, - start="expr", -) - - -class AttributeQueryTreeToCourseQ(Transformer): - """ - Each transformation step returns a tuple of the form `(is_leaf, q)`, - where `is_leaf` is a boolean indicating if that query expression - is a leaf-level attribute code filter, and `q` is the query expression. - """ - - def attribute(self, children): - (code,) = children - return True, Q(attributes__code=code.upper()) - - def disjunction(self, children): - (c1_leaf, c1), (c2_leaf, c2) = children - return (c1_leaf or c2_leaf), c1 | c2 - - def lift_exists(self, q): - """ - 'Lifts' the given `q` query object from a leaf-level attribute - filter (e.g. `Q(attributes__code="WUOM")`) to an 'exists' subquery, - e.g. `Q(Exists(Course.objects.filter(attributes__code="WUOM", id=OuterRef("id"))))`. 
- This is required for conjunction and negation operations, as `Q(attributes__code="WUOM")` - simply performs a join between the `Course` and `Attribute` tables and filters the joined - rows, so `Q(attributes__code="WUOM") & Q(attributes__code="EMCI")` - would filter out all rows, (as no row can have code equal to both "WUOM" and "EMCI"), and - `~Q(attributes__code="WUOM")` would filter for courses that contain some attribute - other than WUOM (not the desired behavior). Lifing these conditions with an exists subquery - before combining with the relevant logical connectives fixes this issue. - """ - return Q(Exists(Course.objects.filter(q, id=OuterRef("id")))) - - def conjunction(self, children): - children = [self.lift_exists(c) if c_leaf else c for c_leaf, c in children] - c1, c2 = children - return False, c1 & c2 - - def negation(self, children): - ((c_leaf, c),) = children - if c_leaf: - c = self.lift_exists(c) - return False, ~c - - -def attribute_filter(queryset, attr_query): - """ - :param queryset: initial Course object queryset - :param attr_query: the attribute query string; see the description - of the attributes query param below for an explanation of the - syntax/semantics of this filter - :return: filtered queryset - """ - if not attr_query: - return queryset - - expr = None - try: - expr = attribute_query_parser.parse(attr_query) - except UnexpectedInput as e: - raise BadRequest(e) - - def lift_demorgan(t): - """ - Optimization: Given a Lark parse tree t, tries to - convert `*` to leaf-level `|` operators as much as possible, - using DeMorgan's laws (for query performance). - """ - if t.data == "attribute": - return t - t.children = [lift_demorgan(c) for c in t.children] - if t.data == "conjunction": - c1, c2 = t.children - if c1.data == "negation" and c2.data == "negation": - (c1c,) = c1.children - (c2c,) = c2.children - return Tree( - data="negation", - children=[Tree(data="disjunction", children=[c1c, c2c])], - ) - return t - - expr = lift_demorgan(expr) - - _, query = AttributeQueryTreeToCourseQ().transform(expr) - - return queryset.filter(query).distinct() - - -def bound_filter(field): - def filter_bounds(queryset, bounds): - if not bounds: - return queryset - bound_arr = bounds.split("-") - if len(bound_arr) != 2: - return queryset - bound_arr = [b.strip() for b in bound_arr] - for bound in bound_arr: - if bound and not bound.replace(".", "", 1).isdigit(): - return queryset - lower_bound, upper_bound = bound_arr - lower_bound = Decimal(lower_bound) - upper_bound = Decimal(upper_bound) - - return queryset.filter( - Q(**{f"{field}__isnull": True}) - | Q( - **{ - f"{field}__gte": lower_bound, - f"{field}__lte": upper_bound, - } - ) - ) - - return filter_bounds - - -def choice_filter(field): - def filter_choices(queryset, choices): - if not choices: - return queryset - query = Q() - for choice in choices.split(","): - query = query | Q(**{field: choice}) - - return queryset.filter(query) - - return filter_choices - - -class CourseSearchFilterBackend(filters.BaseFilterBackend): - def filter_queryset(self, request, queryset, view): - filters = { - "attributes": attribute_filter, - "pre_ngss_requirements": pre_ngss_requirement_filter, - "cu": choice_filter("sections__credits"), - "activity": choice_filter("sections__activity"), - "course_quality": bound_filter("course_quality"), - "instructor_quality": bound_filter("instructor_quality"), - "difficulty": bound_filter("difficulty"), - "is_open": is_open_filter, - } - for field, filter_func in filters.items(): - param = 
request.query_params.get(field) - if param is not None: - queryset = filter_func(queryset, param) - - # Combine meeting filter queries for efficiency - meeting_filters = { - "days": day_filter, - "time": time_filter, - "schedule-fit": gen_schedule_filter(request), - } - meeting_query = Q() - for field, filter_func in meeting_filters.items(): - param = request.query_params.get(field) - if param is not None: - meeting_query &= filter_func(param) - if len(meeting_query) > 0: - queryset = meeting_filter(queryset, meeting_query) - - return queryset.distinct() - - def get_schema_operation_parameters(self, view): - return [ - { - "name": "type", - "required": False, - "in": "query", - "description": ( - "Can specify what kind of query to run. Course queries are faster, " - "keyword queries look against professor name and course title." - ), - "schema": { - "type": "string", - "default": "auto", - "enum": ["auto", "course", "keyword"], - }, - }, - { - "name": "pre_ngss_requirements", - "required": False, - "in": "query", - "description": ( - "Deprecated since 2022C. Filter courses by comma-separated pre " - "ngss requirements, ANDed together. Use the " - "[List Requirements](/api/documentation/#operation/List%20Pre-Ngss%20Requirements) " # noqa: E501 - "endpoint to get requirement IDs." - ), - "schema": {"type": "string"}, - "example": "SS@SEAS,H@SEAS", - }, - { - "name": "attributes", - "required": False, - "in": "query", - "description": ( - "This query parameter accepts a logical expression of attribute codes " - "separated by `*` (AND) or `|` (OR) connectives, optionally grouped " - "into clauses by parentheses and arbitrarily nested (we avoid using " - "`&` for the AND connective so the query string doesn't have to be escaped). " - "You can negate an individual attribute code or a clause with the `~` operator " - "(this will filter for courses that do NOT have that attribute or do not " - "satisfy that clause). Binary operators are left-associative, " - "and operator precedence is as follows: `~ > * > |`. " - "Whitespace is ignored. " - "A syntax error will cause a 400 response to be returned. " - "Example: `(EUHS|EUSS)*(QP|QS)` would filter for courses that " - "satisfy the EAS humanities or social science requirements " - "and also have a standard grade type or a pass/fail grade type. Use the " - "[List Attributes](/api/documentation/#operation/List%20Attributes) endpoint " - "to get a list of valid attribute codes and descriptions." - ), - "schema": {"type": "string"}, - "example": "WUOM|WUGA", - }, - { - "name": "cu", - "required": False, - "in": "query", - "description": "Filter course units to be within the given range.", - "schema": {"type": "string"}, - "example": "0-0.5", - }, - { - "name": "difficulty", - "required": False, - "in": "query", - "description": ( - "Filter course difficulty (average across all reviews) to be within " - "the given range." - ), - "schema": {"type": "string"}, - "example": "1-2.5", - }, - { - "name": "course_quality", - "required": False, - "in": "query", - "description": ( - "Filter course quality (average across all reviews) to be within " - "the given range." - ), - "schema": {"type": "string"}, - "example": "2.5-4", - }, - { - "name": "instructor_quality", - "required": False, - "in": "query", - "description": ( - "Filter instructor quality (average across all reviews) to be " - "within the given range." 
- ), - "schema": {"type": "string"}, - "example": "2.5-4", - }, - { - "name": "days", - "required": False, - "in": "query", - "description": ( - "Filter meetings to be within the specified set of days. " - "The set of days should be specified as a string containing some " - "combination of the characters [M, T, W, R, F, S, U]. " - "This filters courses by the following condition: " - "include a course only if the specified day filter " - "does not limit the set of section activities we can participate in " - "for the course. " - "Passing an empty string will return only asynchronous classes " - "or classes with meeting days TBD." - ), - "schema": {"type": "string"}, - "example": "TWR", - }, - { - "name": "time", - "required": False, - "in": "query", - "description": ( - "Filter meeting times to be within the specified range. " - "The start and end time of the filter should be dash-separated. " - "Times should be specified as decimal numbers of the form `h+mm/100` " - "where h is the hour `[0..23]` and mm is the minute `[0,60)`, in ET. " - "You can omit either the start or end time to leave that side unbounded, " - "e.g. '11.30-'. " - "This filters courses by the following condition: " - "include a course only if the specified time filter " - "does not limit the set of section activities we can participate in " - "for the course." - ), - "schema": {"type": "string"}, - "example": "11.30-18", - }, - { - "name": "schedule-fit", - "required": False, - "in": "query", - "description": ( - "Filter meeting times to fit into the schedule with the specified integer id. " - "You must be authenticated with the account owning the specified schedule, " - "or this filter will be ignored. " - "This filters courses by the following condition: " - "include a course only if the specified schedule-fit filter " - "does not limit the set of section activities we can participate in " - "for the course." - ), - "schema": {"type": "integer"}, - "example": "242", - }, - { - "name": "is_open", - "required": False, - "in": "query", - "description": ( - "Filter courses to only those that are open. " - "A boolean of true should be included if you want to apply the filter. " - "By default (ie when the `is_open` is not supplied, the filter is not applied. " - "This filters courses by the following condition: " - "include a course only if the specification that a section is open " - "does not limit the set of section activities we can participate in " - "for the course." - "In other words, filter to courses for which all activities have open sections." - ), - "schema": {"type": "boolean"}, - "example": "true", - }, - ] +from decimal import Decimal + +from django.core.exceptions import BadRequest +from django.db.models import Count, Exists, OuterRef, Q +from django.db.models.expressions import F, Subquery +from lark import Lark, Transformer, Tree +from lark.exceptions import UnexpectedInput +from rest_framework import filters + +from courses.models import Course, Meeting, PreNGSSRequirement, Section +from courses.util import get_current_semester +from plan.models import Schedule + + +def section_ids_by_meeting_query(meeting_query): + """ + Returns a queryset of the ids of sections for which all meetings pass the + given meeting query. 
+ """ + return ( + Meeting.objects.filter(meeting_query) + .values("section") + .annotate(num_matching_meetings=Count("id")) + .order_by() + .filter(section__num_meetings=F("num_matching_meetings")) + .values("section_id") + .distinct() + ) + + +def course_ids_by_section_query(section_query): + """ + Returns a queryset of the ids of courses for which at least one section + of each activity type passes the given section query. + """ + return ( + Section.objects.filter(section_query) + .values("course") + .annotate(num_matching_activities=Count("activity", distinct=True)) + .order_by() + .filter(course__num_activities=F("num_matching_activities")) + .values("course_id") + .distinct() + ) + + +def meeting_filter(queryset, meeting_query): + """ + Filters the given queryset of courses by the following condition: + include a course only if the specified meeting filter + (meeting_query, represented as a Q() query object) + does not limit the set of section activities we can participate in for the course. + For instance, if meeting_query=Q(day__in={"T","W","R"}), + then we would include a course with lecture and recitation sections only if + we could enroll in some lecture section and some recitation section and + only have to attend meetings on Tuesdays, Wednesdays, and/or Thursdays. + However, if the course had a lab section that only met on Fridays, + we would no longer include the course (since we cannot attend the meetings of the + lab section, and thus the set of course activities available to us is incomplete). + """ + return queryset.filter( + id__in=course_ids_by_section_query( + Q(num_meetings=0) | Q(id__in=section_ids_by_meeting_query(meeting_query)) + ) + ) + + +def is_open_filter(queryset, *args): + """ + Filters the given queryset of courses by the following condition: + include a course only if filtering its sections by `status="O"` does + not does not limit the set of section activities we can participate in for the course. + In other words, include only courses for which all activities have open sections. + Note that for compatibility, this function can take additional positional + arguments, but these are ignored. + """ + return queryset.filter(id__in=course_ids_by_section_query(Q(status="O"))) + + +def day_filter(days): + """ + Constructs a Q() query object for filtering meetings by day, + based on the given days filter string. + """ + days = set(days) + if not days.issubset({"M", "T", "W", "R", "F", "S", "U"}): + return Q() + return Q(day__isnull=True) | Q(day__in=set(days)) + + +def time_filter(time_range): + """ + Constructs a Q() query object for filtering meetings by start/end time, + based on the given time_range filter string. + """ + if not time_range: + return Q() + times = time_range.split("-") + if len(times) != 2: + return Q() + times = [t.strip() for t in times] + for time in times: + if time and not time.replace(".", "", 1).isdigit(): + return Q() + start_time, end_time = times + query = Q() + if start_time: + query &= Q(start__isnull=True) | Q(start__gte=Decimal(start_time)) + if end_time: + query &= Q(end__isnull=True) | Q(end__lte=Decimal(end_time)) + return query + + +def gen_schedule_filter(request): + """ + Generates a schedule filter function that checks for proper + authentication in the given request. + """ + + def schedule_filter(schedule_id): + """ + Constructs a Q() query object for filtering meetings by + whether they fit into the specified schedule. 
+ """ + if not schedule_id: + return Q() + if not schedule_id.isdigit(): + return Q() + if not request.user.is_authenticated: + return Q() + meetings = Meeting.objects.filter( + section_id__in=Subquery( + Schedule.objects.filter(id=int(schedule_id), person_id=request.user.id).values( + "sections__id" + ) + ) + ) + query = Q() + for meeting in meetings: + query &= meeting.no_conflict_query + return query + + return schedule_filter + + +def pre_ngss_requirement_filter(queryset, req_ids): + if not req_ids: + return queryset + query = Q() + for req_id in req_ids.split(","): + code, school = req_id.split("@") + try: + requirement = PreNGSSRequirement.objects.get( + code=code, school=school, semester=get_current_semester() + ) + except PreNGSSRequirement.DoesNotExist: + continue + query &= Q(id__in=requirement.satisfying_courses.all()) + + return queryset.filter(query) + + +# See the attribute_filter docstring for an explanation of this grammar +# https://lark-parser.readthedocs.io/en/latest/examples/calc.html +attribute_query_parser = Lark( + r""" + ?expr : or_expr + + ?or_expr : and_expr + | and_expr "|" or_expr -> disjunction + + ?and_expr : atom + | atom "*" and_expr -> conjunction + + ?atom : attribute + | "~" atom -> negation + | "(" or_expr ")" + + attribute : WORD + + %import common.WORD + %import common.WS + %ignore WS + """, + start="expr", +) + + +class AttributeQueryTreeToCourseQ(Transformer): + """ + Each transformation step returns a tuple of the form `(is_leaf, q)`, + where `is_leaf` is a boolean indicating if that query expression + is a leaf-level attribute code filter, and `q` is the query expression. + """ + + def attribute(self, children): + (code,) = children + return True, Q(attributes__code=code.upper()) + + def disjunction(self, children): + (c1_leaf, c1), (c2_leaf, c2) = children + return (c1_leaf or c2_leaf), c1 | c2 + + def lift_exists(self, q): + """ + 'Lifts' the given `q` query object from a leaf-level attribute + filter (e.g. `Q(attributes__code="WUOM")`) to an 'exists' subquery, + e.g. `Q(Exists(Course.objects.filter(attributes__code="WUOM", id=OuterRef("id"))))`. + This is required for conjunction and negation operations, as `Q(attributes__code="WUOM")` + simply performs a join between the `Course` and `Attribute` tables and filters the joined + rows, so `Q(attributes__code="WUOM") & Q(attributes__code="EMCI")` + would filter out all rows, (as no row can have code equal to both "WUOM" and "EMCI"), and + `~Q(attributes__code="WUOM")` would filter for courses that contain some attribute + other than WUOM (not the desired behavior). Lifing these conditions with an exists subquery + before combining with the relevant logical connectives fixes this issue. 
+ """ + return Q(Exists(Course.objects.filter(q, id=OuterRef("id")))) + + def conjunction(self, children): + children = [self.lift_exists(c) if c_leaf else c for c_leaf, c in children] + c1, c2 = children + return False, c1 & c2 + + def negation(self, children): + ((c_leaf, c),) = children + if c_leaf: + c = self.lift_exists(c) + return False, ~c + + +def attribute_filter(queryset, attr_query): + """ + :param queryset: initial Course object queryset + :param attr_query: the attribute query string; see the description + of the attributes query param below for an explanation of the + syntax/semantics of this filter + :return: filtered queryset + """ + if not attr_query: + return queryset + + expr = None + try: + expr = attribute_query_parser.parse(attr_query) + except UnexpectedInput as e: + raise BadRequest(e) + + def lift_demorgan(t): + """ + Optimization: Given a Lark parse tree t, tries to + convert `*` to leaf-level `|` operators as much as possible, + using DeMorgan's laws (for query performance). + """ + if t.data == "attribute": + return t + t.children = [lift_demorgan(c) for c in t.children] + if t.data == "conjunction": + c1, c2 = t.children + if c1.data == "negation" and c2.data == "negation": + (c1c,) = c1.children + (c2c,) = c2.children + return Tree( + data="negation", + children=[Tree(data="disjunction", children=[c1c, c2c])], + ) + return t + + expr = lift_demorgan(expr) + + _, query = AttributeQueryTreeToCourseQ().transform(expr) + + return queryset.filter(query).distinct() + + +def bound_filter(field): + def filter_bounds(queryset, bounds): + if not bounds: + return queryset + bound_arr = bounds.split("-") + if len(bound_arr) != 2: + return queryset + bound_arr = [b.strip() for b in bound_arr] + for bound in bound_arr: + if bound and not bound.replace(".", "", 1).isdigit(): + return queryset + lower_bound, upper_bound = bound_arr + lower_bound = Decimal(lower_bound) + upper_bound = Decimal(upper_bound) + + return queryset.filter( + Q(**{f"{field}__isnull": True}) + | Q( + **{ + f"{field}__gte": lower_bound, + f"{field}__lte": upper_bound, + } + ) + ) + + return filter_bounds + + +def choice_filter(field): + def filter_choices(queryset, choices): + if not choices: + return queryset + query = Q() + for choice in choices.split(","): + query = query | Q(**{field: choice}) + + return queryset.filter(query) + + return filter_choices + + +class CourseSearchFilterBackend(filters.BaseFilterBackend): + def filter_queryset(self, request, queryset, view): + filters = { + "attributes": attribute_filter, + "pre_ngss_requirements": pre_ngss_requirement_filter, + "cu": choice_filter("sections__credits"), + "activity": choice_filter("sections__activity"), + "course_quality": bound_filter("course_quality"), + "instructor_quality": bound_filter("instructor_quality"), + "difficulty": bound_filter("difficulty"), + "is_open": is_open_filter, + } + for field, filter_func in filters.items(): + param = request.query_params.get(field) + if param is not None: + queryset = filter_func(queryset, param) + + # Combine meeting filter queries for efficiency + meeting_filters = { + "days": day_filter, + "time": time_filter, + "schedule-fit": gen_schedule_filter(request), + } + meeting_query = Q() + for field, filter_func in meeting_filters.items(): + param = request.query_params.get(field) + if param is not None: + meeting_query &= filter_func(param) + if len(meeting_query) > 0: + queryset = meeting_filter(queryset, meeting_query) + + return queryset.distinct() + + def get_schema_operation_parameters(self, 
view): + return [ + { + "name": "type", + "required": False, + "in": "query", + "description": ( + "Can specify what kind of query to run. Course queries are faster, " + "keyword queries look against professor name and course title." + ), + "schema": { + "type": "string", + "default": "auto", + "enum": ["auto", "course", "keyword"], + }, + }, + { + "name": "pre_ngss_requirements", + "required": False, + "in": "query", + "description": ( + "Deprecated since 2022C. Filter courses by comma-separated pre " + "ngss requirements, ANDed together. Use the " + "[List Requirements](/api/documentation/#operation/List%20Pre-Ngss%20Requirements) " # noqa: E501 + "endpoint to get requirement IDs." + ), + "schema": {"type": "string"}, + "example": "SS@SEAS,H@SEAS", + }, + { + "name": "attributes", + "required": False, + "in": "query", + "description": ( + "This query parameter accepts a logical expression of attribute codes " + "separated by `*` (AND) or `|` (OR) connectives, optionally grouped " + "into clauses by parentheses and arbitrarily nested (we avoid using " + "`&` for the AND connective so the query string doesn't have to be escaped). " + "You can negate an individual attribute code or a clause with the `~` operator " + "(this will filter for courses that do NOT have that attribute or do not " + "satisfy that clause). Binary operators are left-associative, " + "and operator precedence is as follows: `~ > * > |`. " + "Whitespace is ignored. " + "A syntax error will cause a 400 response to be returned. " + "Example: `(EUHS|EUSS)*(QP|QS)` would filter for courses that " + "satisfy the EAS humanities or social science requirements " + "and also have a standard grade type or a pass/fail grade type. Use the " + "[List Attributes](/api/documentation/#operation/List%20Attributes) endpoint " + "to get a list of valid attribute codes and descriptions." + ), + "schema": {"type": "string"}, + "example": "WUOM|WUGA", + }, + { + "name": "cu", + "required": False, + "in": "query", + "description": "Filter course units to be within the given range.", + "schema": {"type": "string"}, + "example": "0-0.5", + }, + { + "name": "difficulty", + "required": False, + "in": "query", + "description": ( + "Filter course difficulty (average across all reviews) to be within " + "the given range." + ), + "schema": {"type": "string"}, + "example": "1-2.5", + }, + { + "name": "course_quality", + "required": False, + "in": "query", + "description": ( + "Filter course quality (average across all reviews) to be within " + "the given range." + ), + "schema": {"type": "string"}, + "example": "2.5-4", + }, + { + "name": "instructor_quality", + "required": False, + "in": "query", + "description": ( + "Filter instructor quality (average across all reviews) to be " + "within the given range." + ), + "schema": {"type": "string"}, + "example": "2.5-4", + }, + { + "name": "days", + "required": False, + "in": "query", + "description": ( + "Filter meetings to be within the specified set of days. " + "The set of days should be specified as a string containing some " + "combination of the characters [M, T, W, R, F, S, U]. " + "This filters courses by the following condition: " + "include a course only if the specified day filter " + "does not limit the set of section activities we can participate in " + "for the course. " + "Passing an empty string will return only asynchronous classes " + "or classes with meeting days TBD." 
+ ), + "schema": {"type": "string"}, + "example": "TWR", + }, + { + "name": "time", + "required": False, + "in": "query", + "description": ( + "Filter meeting times to be within the specified range. " + "The start and end time of the filter should be dash-separated. " + "Times should be specified as decimal numbers of the form `h+mm/100` " + "where h is the hour `[0..23]` and mm is the minute `[0,60)`, in ET. " + "You can omit either the start or end time to leave that side unbounded, " + "e.g. '11.30-'. " + "This filters courses by the following condition: " + "include a course only if the specified time filter " + "does not limit the set of section activities we can participate in " + "for the course." + ), + "schema": {"type": "string"}, + "example": "11.30-18", + }, + { + "name": "schedule-fit", + "required": False, + "in": "query", + "description": ( + "Filter meeting times to fit into the schedule with the specified integer id. " + "You must be authenticated with the account owning the specified schedule, " + "or this filter will be ignored. " + "This filters courses by the following condition: " + "include a course only if the specified schedule-fit filter " + "does not limit the set of section activities we can participate in " + "for the course." + ), + "schema": {"type": "integer"}, + "example": "242", + }, + { + "name": "is_open", + "required": False, + "in": "query", + "description": ( + "Filter courses to only those that are open. " + "A boolean of true should be included if you want to apply the filter. " + "By default (ie when the `is_open` is not supplied, the filter is not applied. " + "This filters courses by the following condition: " + "include a course only if the specification that a section is open " + "does not limit the set of section activities we can participate in " + "for the course." + "In other words, filter to courses for which all activities have open sections." + ), + "schema": {"type": "boolean"}, + "example": "true", + }, + ] diff --git a/backend/courses/management/commands/deduplicate_status_updates.py b/backend/courses/management/commands/deduplicate_status_updates.py index 2c2f4a8b3..a41de3101 100644 --- a/backend/courses/management/commands/deduplicate_status_updates.py +++ b/backend/courses/management/commands/deduplicate_status_updates.py @@ -1,30 +1,30 @@ -from textwrap import dedent - -from django.core.management.base import BaseCommand - -from alert.management.commands.recomputestats import deduplicate_status_updates - - -class Command(BaseCommand): - help = "Remove duplicate/redundant status updates from the given semesters." - - def add_arguments(self, parser): - parser.add_argument( - "--semesters", - type=str, - help=dedent( - """ - The semesters argument should be a comma-separated list of semesters - corresponding to the semesters for which you want to remove duplicate/redundant - status updates, i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. - If this argument is omitted, stats are only recomputed for the current semester. - If you pass "all" to this argument, this script will remove duplicate/redundant - status updates for all semesters found in Courses in the db. 
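# --- Illustrative sketch (not part of this patch): applying the filters above to a
# Course queryset by hand, mirroring CourseSearchFilterBackend.filter_queryset.
# Assumes the num_meetings / num_activities counts referenced by these helpers are
# populated, as they are for the course search queryset in this codebase.
qs = Course.objects.all()
qs = attribute_filter(qs, "WUOM|WUGA")                                # attribute expression
qs = is_open_filter(qs, "true")                                       # all activities have open sections
qs = meeting_filter(qs, day_filter("TWR") & time_filter("11.30-18"))  # day + time window
results = qs.distinct()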
- """ - ), - nargs="?", - default=None, - ) - - def handle(self, *args, **kwargs): - deduplicate_status_updates(semesters=kwargs["semesters"], verbose=True) +from textwrap import dedent + +from django.core.management.base import BaseCommand + +from alert.management.commands.recomputestats import deduplicate_status_updates + + +class Command(BaseCommand): + help = "Remove duplicate/redundant status updates from the given semesters." + + def add_arguments(self, parser): + parser.add_argument( + "--semesters", + type=str, + help=dedent( + """ + The semesters argument should be a comma-separated list of semesters + corresponding to the semesters for which you want to remove duplicate/redundant + status updates, i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. + If this argument is omitted, stats are only recomputed for the current semester. + If you pass "all" to this argument, this script will remove duplicate/redundant + status updates for all semesters found in Courses in the db. + """ + ), + nargs="?", + default=None, + ) + + def handle(self, *args, **kwargs): + deduplicate_status_updates(semesters=kwargs["semesters"], verbose=True) diff --git a/backend/courses/management/commands/export_status_history.py b/backend/courses/management/commands/export_status_history.py index 93201b186..1548415a8 100644 --- a/backend/courses/management/commands/export_status_history.py +++ b/backend/courses/management/commands/export_status_history.py @@ -1,100 +1,100 @@ -import csv -import os -from textwrap import dedent - -from django.core.management.base import BaseCommand -from tqdm import tqdm - -from courses.models import StatusUpdate -from courses.util import get_semesters -from PennCourses.settings.base import S3_resource - - -class Command(BaseCommand): - help = ( - "Export Status Updates by semester with the 6 columns:\n" - "full_code, semester, created_at (%Y-%m-%d %H:%M:%S.%f %Z), old_status, new_status, " - "alert_sent" - ) - - def add_arguments(self, parser): - parser.add_argument( - "--path", - type=str, - help="The path (local or in S3) you want to export to (must be a .csv file).", - ) - parser.add_argument( - "--upload-to-s3", - default=False, - action="store_true", - help=( - "Enable this argument to upload the output of this script to the penn.courses " - "S3 bucket, at the path specified by the path argument. " - ), - ) - parser.add_argument( - "--courses-query", - default="", - type=str, - help=( - "A prefix of the course full_code (e.g. CIS-120) to filter exported updates by. " - "Omit this argument to export all updates from the given semesters." - ), - ) - parser.add_argument( - "--semesters", - type=str, - help=dedent( - """ - The semesters argument should be a comma-separated list of semesters - corresponding to the semesters from which you want to export PCA registrations, - i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. - If you pass "all" to this argument, this script will export all status updates. 
- """ - ), - default="", - ) - - def handle(self, *args, **kwargs): - path = kwargs["path"] - upload_to_s3 = kwargs["upload_to_s3"] - semesters = get_semesters(kwargs["semesters"], verbose=True) - if len(semesters) == 0: - raise ValueError("No semesters provided for status update export.") - assert path.endswith(".csv") or path == os.devnull - script_print_path = ("s3://penn.courses/" if upload_to_s3 else "") + path - print(f"Generating {script_print_path} with status updates from semesters {semesters}...") - rows = 0 - output_file_path = "/tmp/export_status_history_output.csv" if upload_to_s3 else path - with open(output_file_path, "w") as output_file: - csv_writer = csv.writer( - output_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL - ) - for update in tqdm( - StatusUpdate.objects.filter( - section__course__semester__in=semesters, - section__course__full_code__startswith=kwargs["courses_query"], - ).select_related("section") - ): - rows += 1 - csv_writer.writerow( - [ - str(field) - for field in [ - update.section.full_code, - update.section.semester, - update.created_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z"), - update.old_status, - update.new_status, - update.alert_sent, - ] - ] - ) - if rows % 5000 == 0: - output_file.flush() - if upload_to_s3: - S3_resource.meta.client.upload_file( - "/tmp/export_status_history_output.csv", "penn.courses", path - ) - os.remove("/tmp/export_status_history_output.csv") - print(f"Generated {script_print_path} with {rows} rows.") +import csv +import os +from textwrap import dedent + +from django.core.management.base import BaseCommand +from tqdm import tqdm + +from courses.models import StatusUpdate +from courses.util import get_semesters +from PennCourses.settings.base import S3_resource + + +class Command(BaseCommand): + help = ( + "Export Status Updates by semester with the 6 columns:\n" + "full_code, semester, created_at (%Y-%m-%d %H:%M:%S.%f %Z), old_status, new_status, " + "alert_sent" + ) + + def add_arguments(self, parser): + parser.add_argument( + "--path", + type=str, + help="The path (local or in S3) you want to export to (must be a .csv file).", + ) + parser.add_argument( + "--upload-to-s3", + default=False, + action="store_true", + help=( + "Enable this argument to upload the output of this script to the penn.courses " + "S3 bucket, at the path specified by the path argument. " + ), + ) + parser.add_argument( + "--courses-query", + default="", + type=str, + help=( + "A prefix of the course full_code (e.g. CIS-120) to filter exported updates by. " + "Omit this argument to export all updates from the given semesters." + ), + ) + parser.add_argument( + "--semesters", + type=str, + help=dedent( + """ + The semesters argument should be a comma-separated list of semesters + corresponding to the semesters from which you want to export PCA registrations, + i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. + If you pass "all" to this argument, this script will export all status updates. 
+ """ + ), + default="", + ) + + def handle(self, *args, **kwargs): + path = kwargs["path"] + upload_to_s3 = kwargs["upload_to_s3"] + semesters = get_semesters(kwargs["semesters"], verbose=True) + if len(semesters) == 0: + raise ValueError("No semesters provided for status update export.") + assert path.endswith(".csv") or path == os.devnull + script_print_path = ("s3://penn.courses/" if upload_to_s3 else "") + path + print(f"Generating {script_print_path} with status updates from semesters {semesters}...") + rows = 0 + output_file_path = "/tmp/export_status_history_output.csv" if upload_to_s3 else path + with open(output_file_path, "w") as output_file: + csv_writer = csv.writer( + output_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + for update in tqdm( + StatusUpdate.objects.filter( + section__course__semester__in=semesters, + section__course__full_code__startswith=kwargs["courses_query"], + ).select_related("section") + ): + rows += 1 + csv_writer.writerow( + [ + str(field) + for field in [ + update.section.full_code, + update.section.semester, + update.created_at.strftime("%Y-%m-%d %H:%M:%S.%f %Z"), + update.old_status, + update.new_status, + update.alert_sent, + ] + ] + ) + if rows % 5000 == 0: + output_file.flush() + if upload_to_s3: + S3_resource.meta.client.upload_file( + "/tmp/export_status_history_output.csv", "penn.courses", path + ) + os.remove("/tmp/export_status_history_output.csv") + print(f"Generated {script_print_path} with {rows} rows.") diff --git a/backend/courses/management/commands/export_test_courses_data.py b/backend/courses/management/commands/export_test_courses_data.py index b961628c8..e1a8bb829 100644 --- a/backend/courses/management/commands/export_test_courses_data.py +++ b/backend/courses/management/commands/export_test_courses_data.py @@ -1,266 +1,266 @@ -import contextlib -import csv -import os -from textwrap import dedent - -from django.core.management.base import BaseCommand -from django.db.models import Q, Subquery -from tqdm import tqdm - -from courses.models import Course, Department, Instructor, Section -from courses.util import get_semesters -from PennCourses.settings.base import S3_resource -from review.models import Review, ReviewBit - - -test_data_fields = { - "departments": ["id", "code", "name"], - "courses": [ - "id", - "semester", - "department_id", - "topic_id", - "code", - "title", - "description", - "full_code", - "prerequisites", - "primary_listing_id", - ], - "sections": [ - "id", - "code", - "course_id", - "full_code", - "crn", - "status", - "capacity", - "activity", - "meeting_times", - "credits", - ], - "instructors": ["id", "name"], - "sections_instructors_m2mfield": [ - "sections", - "id", - "instructors", - "instructor_id", - "instructors", - ], # _m2mfield schema: from model, from id, through field, to id, to model - "sections_associated_sections_m2mfield": [ - "sections", - "id", - "associated_sections", - "associated_section_id", - "sections", - ], - "reviews": [ - "id", - "section_id", - "instructor_id", - "enrollment", - "responses", - "form_type", - "comments", - ], - "review_bits": [ - "id", - "review_id", - "field", - "average", - "median", - "stddev", - "rating0", - "rating1", - "rating2", - "rating3", - "rating4", - ], -} # define fields to export from each data type - -cross_semester_data_types = ["departments", "instructors"] - -related_id_fields = { - "courses": { - "department_id": "departments", - }, - "sections": {"course_id": "courses"}, - "reviews": { - "section_id": "sections", - 
"instructor_id": "instructors", - }, - "review_bits": { - "review_id": "reviews", - }, -} # specify fields which represent foreign key relationships to a strictly other model, -# and the pointed-to model - -self_related_id_fields = { - "courses": ["primary_listing_id"] -} # specify fields which represent foreign key relationships to the same model - -models = { - "departments": Department, - "courses": Course, - "sections": Section, - "instructors": Instructor, - "reviews": Review, - "review_bits": ReviewBit, -} # maps data type to corresponding model (if the data type represents a model) - -unique_identifying_fields = { - "departments": ["code"], - "courses": ["full_code", "semester"], - "sections": ["course_id", "code"], - "instructors": ["name"], - "reviews": ["section_id", "instructor_id"], - "review_bits": ["review_id", "field"], -} # maps data type to its identifying fields other than id (if the data type represents a model) - -semester_filter = { - "courses": "semester", - "sections": "course__semester", - "reviews": "section__course__semester", - "review_bits": "review__section__course__semester", -} # maps data type to the query for its semester - - -class Command(BaseCommand): - help = "Export test courses, sections, instructors, and reviews data from the given semesters." - - def add_arguments(self, parser): - parser.add_argument( - "--courses-query", - default="", - type=str, - help=( - "A prefix of the course full_code (e.g. CIS-120) to filter exported courses by. " - "Omit this argument to export all courses from the given semesters." - ), - ) - parser.add_argument( - "--path", - type=str, - help="The path (local or in S3) you want to export test data to (must be a .csv file).", - ) - parser.add_argument( - "--upload-to-s3", - default=False, - action="store_true", - help=( - "Enable this argument to upload the output of this script to the penn.courses " - "S3 bucket, at the path specified by the path argument. " - ), - ) - parser.add_argument( - "--semesters", - type=str, - help=dedent( - """ - The semesters argument should be a comma-separated list of semesters - corresponding to the semesters from which you want to export PCA registrations, - i.e. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. - If you pass "all" to this argument, this script will export all status updates. 
- """ - ), - default="", - ) - - def handle(self, *args, **kwargs): - upload_to_s3 = kwargs["upload_to_s3"] - semesters = get_semesters(kwargs["semesters"], verbose=True) - if len(semesters) == 0: - raise ValueError("No semesters provided for status update export.") - - path = kwargs["path"] - assert path.endswith(".csv") or path == os.devnull - script_print_path = ("s3://penn.courses/" if upload_to_s3 else "") + path - print(f"Exporting test data from semesters {semesters} to {script_print_path}...") - - querysets = dict() # will map datatype to the queryset generated for that datatype - fields = test_data_fields - data_types = fields.keys() - - rows = 0 - output_file_path = "/tmp/export_test_data_output.csv" if upload_to_s3 else path - if output_file_path != os.devnull: - with contextlib.suppress(FileNotFoundError): - os.remove(output_file_path) - - for i, semester in enumerate(semesters): - print(f"Processing semester {semester} ({i+1}/{len(semesters)})...") - with open(output_file_path, "a") as output_file: - csv_writer = csv.writer( - output_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL - ) - - for data_type in data_types: - if i > 0 and data_type in cross_semester_data_types: - continue - print(f"Processing {data_type}...") - - if data_type.endswith("_m2mfield"): - for object in tqdm(querysets[fields[data_type][0]]): - for related_object in getattr(object, fields[data_type][2]).all(): - rows += 1 - # _m2mfield schema: - # from model, from id, through field, to id, to model - csv_writer.writerow( - [data_type] - + [ - fields[data_type][0], - object.id, - fields[data_type][2], - str(related_object.id), - fields[data_type][4], - ] - ) - if rows % 5000 == 0: - output_file.flush() - continue - - if data_type == "departments": - queryset = Department.objects.all() - elif data_type == "courses": - queryset = Course.objects.filter( - Q(full_code__startswith=kwargs["courses_query"]) - | Q( - id__in=Subquery( - Course.objects.filter( - full_code__startswith=kwargs["courses_query"], - semester=semester, - ).values_list("primary_listing_id", flat=True) - ) - ), - semester=semester, - ) - querysets["courses"] = queryset - elif data_type == "sections": - queryset = Section.objects.filter( - course__in=querysets["courses"] - ).prefetch_related("associated_sections", "instructors") - querysets["sections"] = queryset - elif data_type == "instructors": - queryset = Instructor.objects.all() - querysets["instructors"] = queryset - elif data_type == "reviews": - queryset = Review.objects.filter(section__in=querysets["sections"]) - querysets["reviews"] = queryset - elif data_type == "review_bits": - queryset = ReviewBit.objects.filter(review__in=querysets["reviews"]) - querysets["review_bits"] = queryset - - for object in tqdm(queryset): - rows += 1 - csv_writer.writerow( - [data_type] - + [str(getattr(object, field)) for field in fields[data_type]] - ) - if rows % 5000 == 0: - output_file.flush() - - if upload_to_s3: - S3_resource.meta.client.upload_file(output_file_path, "penn.courses", path) - os.remove(output_file_path) - - print(f"Exported {rows} of test data from semesters {semesters} to {script_print_path}.") +import contextlib +import csv +import os +from textwrap import dedent + +from django.core.management.base import BaseCommand +from django.db.models import Q, Subquery +from tqdm import tqdm + +from courses.models import Course, Department, Instructor, Section +from courses.util import get_semesters +from PennCourses.settings.base import S3_resource +from review.models import 
Review, ReviewBit + + +test_data_fields = { + "departments": ["id", "code", "name"], + "courses": [ + "id", + "semester", + "department_id", + "topic_id", + "code", + "title", + "description", + "full_code", + "prerequisites", + "primary_listing_id", + ], + "sections": [ + "id", + "code", + "course_id", + "full_code", + "crn", + "status", + "capacity", + "activity", + "meeting_times", + "credits", + ], + "instructors": ["id", "name"], + "sections_instructors_m2mfield": [ + "sections", + "id", + "instructors", + "instructor_id", + "instructors", + ], # _m2mfield schema: from model, from id, through field, to id, to model + "sections_associated_sections_m2mfield": [ + "sections", + "id", + "associated_sections", + "associated_section_id", + "sections", + ], + "reviews": [ + "id", + "section_id", + "instructor_id", + "enrollment", + "responses", + "form_type", + "comments", + ], + "review_bits": [ + "id", + "review_id", + "field", + "average", + "median", + "stddev", + "rating0", + "rating1", + "rating2", + "rating3", + "rating4", + ], +} # define fields to export from each data type + +cross_semester_data_types = ["departments", "instructors"] + +related_id_fields = { + "courses": { + "department_id": "departments", + }, + "sections": {"course_id": "courses"}, + "reviews": { + "section_id": "sections", + "instructor_id": "instructors", + }, + "review_bits": { + "review_id": "reviews", + }, +} # specify fields which represent foreign key relationships to a strictly other model, +# and the pointed-to model + +self_related_id_fields = { + "courses": ["primary_listing_id"] +} # specify fields which represent foreign key relationships to the same model + +models = { + "departments": Department, + "courses": Course, + "sections": Section, + "instructors": Instructor, + "reviews": Review, + "review_bits": ReviewBit, +} # maps data type to corresponding model (if the data type represents a model) + +unique_identifying_fields = { + "departments": ["code"], + "courses": ["full_code", "semester"], + "sections": ["course_id", "code"], + "instructors": ["name"], + "reviews": ["section_id", "instructor_id"], + "review_bits": ["review_id", "field"], +} # maps data type to its identifying fields other than id (if the data type represents a model) + +semester_filter = { + "courses": "semester", + "sections": "course__semester", + "reviews": "section__course__semester", + "review_bits": "review__section__course__semester", +} # maps data type to the query for its semester + + +class Command(BaseCommand): + help = "Export test courses, sections, instructors, and reviews data from the given semesters." + + def add_arguments(self, parser): + parser.add_argument( + "--courses-query", + default="", + type=str, + help=( + "A prefix of the course full_code (e.g. CIS-120) to filter exported courses by. " + "Omit this argument to export all courses from the given semesters." + ), + ) + parser.add_argument( + "--path", + type=str, + help="The path (local or in S3) you want to export test data to (must be a .csv file).", + ) + parser.add_argument( + "--upload-to-s3", + default=False, + action="store_true", + help=( + "Enable this argument to upload the output of this script to the penn.courses " + "S3 bucket, at the path specified by the path argument. " + ), + ) + parser.add_argument( + "--semesters", + type=str, + help=dedent( + """ + The semesters argument should be a comma-separated list of semesters + corresponding to the semesters from which you want to export PCA registrations, + i.e. 
"2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. + If you pass "all" to this argument, this script will export all status updates. + """ + ), + default="", + ) + + def handle(self, *args, **kwargs): + upload_to_s3 = kwargs["upload_to_s3"] + semesters = get_semesters(kwargs["semesters"], verbose=True) + if len(semesters) == 0: + raise ValueError("No semesters provided for status update export.") + + path = kwargs["path"] + assert path.endswith(".csv") or path == os.devnull + script_print_path = ("s3://penn.courses/" if upload_to_s3 else "") + path + print(f"Exporting test data from semesters {semesters} to {script_print_path}...") + + querysets = dict() # will map datatype to the queryset generated for that datatype + fields = test_data_fields + data_types = fields.keys() + + rows = 0 + output_file_path = "/tmp/export_test_data_output.csv" if upload_to_s3 else path + if output_file_path != os.devnull: + with contextlib.suppress(FileNotFoundError): + os.remove(output_file_path) + + for i, semester in enumerate(semesters): + print(f"Processing semester {semester} ({i+1}/{len(semesters)})...") + with open(output_file_path, "a") as output_file: + csv_writer = csv.writer( + output_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + + for data_type in data_types: + if i > 0 and data_type in cross_semester_data_types: + continue + print(f"Processing {data_type}...") + + if data_type.endswith("_m2mfield"): + for object in tqdm(querysets[fields[data_type][0]]): + for related_object in getattr(object, fields[data_type][2]).all(): + rows += 1 + # _m2mfield schema: + # from model, from id, through field, to id, to model + csv_writer.writerow( + [data_type] + + [ + fields[data_type][0], + object.id, + fields[data_type][2], + str(related_object.id), + fields[data_type][4], + ] + ) + if rows % 5000 == 0: + output_file.flush() + continue + + if data_type == "departments": + queryset = Department.objects.all() + elif data_type == "courses": + queryset = Course.objects.filter( + Q(full_code__startswith=kwargs["courses_query"]) + | Q( + id__in=Subquery( + Course.objects.filter( + full_code__startswith=kwargs["courses_query"], + semester=semester, + ).values_list("primary_listing_id", flat=True) + ) + ), + semester=semester, + ) + querysets["courses"] = queryset + elif data_type == "sections": + queryset = Section.objects.filter( + course__in=querysets["courses"] + ).prefetch_related("associated_sections", "instructors") + querysets["sections"] = queryset + elif data_type == "instructors": + queryset = Instructor.objects.all() + querysets["instructors"] = queryset + elif data_type == "reviews": + queryset = Review.objects.filter(section__in=querysets["sections"]) + querysets["reviews"] = queryset + elif data_type == "review_bits": + queryset = ReviewBit.objects.filter(review__in=querysets["reviews"]) + querysets["review_bits"] = queryset + + for object in tqdm(queryset): + rows += 1 + csv_writer.writerow( + [data_type] + + [str(getattr(object, field)) for field in fields[data_type]] + ) + if rows % 5000 == 0: + output_file.flush() + + if upload_to_s3: + S3_resource.meta.client.upload_file(output_file_path, "penn.courses", path) + os.remove(output_file_path) + + print(f"Exported {rows} of test data from semesters {semesters} to {script_print_path}.") diff --git a/backend/courses/management/commands/fillprofiles.py b/backend/courses/management/commands/fillprofiles.py index e4aee9303..fd0f74b22 100644 --- a/backend/courses/management/commands/fillprofiles.py +++ 
b/backend/courses/management/commands/fillprofiles.py @@ -1,22 +1,22 @@ -import logging - -from django.core.management.base import BaseCommand - -from courses.models import UserProfile - - -class Command(BaseCommand): - help = "Ensure that user profiles begin with a user's school email if one is on file." - - def handle(self, *args, **kwargs): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - - num = 0 - for prof in UserProfile.objects.all(): - if prof.email is None and prof.user.email != "": - prof.email = prof.user.email - prof.save() - num += 1 - - print(f"filled in {num} email addresses.") +import logging + +from django.core.management.base import BaseCommand + +from courses.models import UserProfile + + +class Command(BaseCommand): + help = "Ensure that user profiles begin with a user's school email if one is on file." + + def handle(self, *args, **kwargs): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + + num = 0 + for prof in UserProfile.objects.all(): + if prof.email is None and prof.user.email != "": + prof.email = prof.user.email + prof.save() + num += 1 + + print(f"filled in {num} email addresses.") diff --git a/backend/courses/management/commands/form_simple_topics.py b/backend/courses/management/commands/form_simple_topics.py index 4e28eb48a..cf014637f 100644 --- a/backend/courses/management/commands/form_simple_topics.py +++ b/backend/courses/management/commands/form_simple_topics.py @@ -1,81 +1,81 @@ -from django.core.management.base import BaseCommand -from django.db import transaction -from django.db.models import Exists, F, OuterRef - -from alert.management.commands.recomputestats import garbage_collect_topics -from courses.management.commands.load_crosswalk import get_crosswalk_s3 -from courses.models import Course, Topic - - -def form_simple_topics(): - Course.objects.all().update(topic=None) - garbage_collect_topics() - - print("Cleared topics.") - - # create topics for each primary listing - primary_listings = Course.objects.filter(primary_listing_id=F("id")) - for primary_listing in primary_listings: - topic = Topic.objects.create(most_recent=primary_listing) - primary_listing.listing_set.all().update(topic=topic) - - print("Created new topics (one per course).") - - # merge topics based on full_code of primary_listing - for full_code in primary_listings.values_list("full_code", flat=True): - Topic.merge_all( - list( - set( - course.topic - for course in ( - primary_listings.filter(full_code=full_code).select_related("topic") - ) - ) - ) - ) - - print("Merged topics based on full_code") - - # use crosswalk - crosswalk = get_crosswalk_s3(verbose=True) - for old_code, new_codes in crosswalk.items(): - old_topic = Topic.objects.filter(most_recent__listing_set__full_code=old_code).first() - new_course = ( - Course.objects.filter( - full_code__in=new_codes, - ) - .annotate( - title_matches=Exists( # Prefer Course.objects.all().with matching title - Course.objects.all().filter(full_code=old_code, title=OuterRef("title")) - ), - ) - .order_by("-title_matches") - .select_related("topic") - .first() - ) - if old_topic and new_course: - new_course.topic.merge_with(old_topic) - - print("Merged topics based on crosswalk") - - garbage_collect_topics() - - print("Done!") - - -class Command(BaseCommand): - help = ( - "This script deletes all existing topics and re-creates them " - "so that courses with the same full_code are in the same topic. " - "this script also uses the crosswalk. 
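# --- Illustrative sketch (not part of this patch): shape of the crosswalk consumed by
# form_simple_topics above, a mapping from an old course full_code to the new
# full_code(s) it became (codes hypothetical).
crosswalk = {"CIS-121": ["CIS-1210"]}
for old_code, new_codes in crosswalk.items():
    ...  # merge the old code's topic into the best-matching new course's topic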
" - ) - - def handle(self, *args, **kwargs): - print( - "This script is atomic, meaning either all Topic changes will be comitted to the " - "database, or otherwise if an error is encountered, all changes will be rolled back " - "and the database will remain as it was before the script was run." - ) - - with transaction.atomic(): - form_simple_topics() +from django.core.management.base import BaseCommand +from django.db import transaction +from django.db.models import Exists, F, OuterRef + +from alert.management.commands.recomputestats import garbage_collect_topics +from courses.management.commands.load_crosswalk import get_crosswalk_s3 +from courses.models import Course, Topic + + +def form_simple_topics(): + Course.objects.all().update(topic=None) + garbage_collect_topics() + + print("Cleared topics.") + + # create topics for each primary listing + primary_listings = Course.objects.filter(primary_listing_id=F("id")) + for primary_listing in primary_listings: + topic = Topic.objects.create(most_recent=primary_listing) + primary_listing.listing_set.all().update(topic=topic) + + print("Created new topics (one per course).") + + # merge topics based on full_code of primary_listing + for full_code in primary_listings.values_list("full_code", flat=True): + Topic.merge_all( + list( + set( + course.topic + for course in ( + primary_listings.filter(full_code=full_code).select_related("topic") + ) + ) + ) + ) + + print("Merged topics based on full_code") + + # use crosswalk + crosswalk = get_crosswalk_s3(verbose=True) + for old_code, new_codes in crosswalk.items(): + old_topic = Topic.objects.filter(most_recent__listing_set__full_code=old_code).first() + new_course = ( + Course.objects.filter( + full_code__in=new_codes, + ) + .annotate( + title_matches=Exists( # Prefer Course.objects.all().with matching title + Course.objects.all().filter(full_code=old_code, title=OuterRef("title")) + ), + ) + .order_by("-title_matches") + .select_related("topic") + .first() + ) + if old_topic and new_course: + new_course.topic.merge_with(old_topic) + + print("Merged topics based on crosswalk") + + garbage_collect_topics() + + print("Done!") + + +class Command(BaseCommand): + help = ( + "This script deletes all existing topics and re-creates them " + "so that courses with the same full_code are in the same topic. " + "this script also uses the crosswalk. " + ) + + def handle(self, *args, **kwargs): + print( + "This script is atomic, meaning either all Topic changes will be comitted to the " + "database, or otherwise if an error is encountered, all changes will be rolled back " + "and the database will remain as it was before the script was run." 
+ ) + + with transaction.atomic(): + form_simple_topics() diff --git a/backend/courses/management/commands/load_add_drop_dates.py b/backend/courses/management/commands/load_add_drop_dates.py index e5663e954..0e981777f 100644 --- a/backend/courses/management/commands/load_add_drop_dates.py +++ b/backend/courses/management/commands/load_add_drop_dates.py @@ -1,163 +1,163 @@ -import logging -from datetime import datetime, timedelta - -import requests -from bs4 import BeautifulSoup -from dateutil.tz import gettz -from django.core.exceptions import ValidationError -from django.core.management.base import BaseCommand -from django.db import transaction -from django.utils.timezone import make_aware - -from alert.models import AddDropPeriod, validate_add_drop_semester -from courses.models import Course -from courses.util import get_current_semester, get_or_create_add_drop_period -from PennCourses.settings.base import TIME_ZONE - - -def fill_in_add_drop_periods(verbose=False): - all_semesters = set(Course.objects.values_list("semester", flat=True).distinct()) - adp_semesters = set(AddDropPeriod.objects.values_list("semester", flat=True).distinct()) - missing_semesters = set() - for candidate in all_semesters - adp_semesters: - try: - validate_add_drop_semester(candidate) - missing_semesters.add(candidate) - except ValidationError: - pass - if verbose and len(missing_semesters) > 0: - print(f"Filling in AddDropPeriod objects for semesters {missing_semesters}") - for semester in missing_semesters: - AddDropPeriod(semester=semester).save() - return adp_semesters.union(missing_semesters) - - -def load_add_drop_dates(verbose=False): - semester = get_current_semester() - validate_add_drop_semester(semester) - - if verbose: - print(f"Loading course selection period dates for semester {semester} from the Almanac") - with transaction.atomic(): - adp = get_or_create_add_drop_period(semester) - start_date = adp.start - end_date = adp.end - html = requests.get("https://almanac.upenn.edu/penn-academic-calendar").content - soup = BeautifulSoup(html, "html.parser") - if semester[4] == "C": - start_sem = semester[:4] + " spring" - end_sem = semester[:4] + " fall" - elif semester[4] == "A": - start_sem = str(int(semester[:4]) - 1) + " fall" - end_sem = semester[:4] + " spring" - else: - raise ValueError( - "This script currently only supports fall or spring semesters; " - f"{semester} is invalid" - ) - tz = gettz(TIME_ZONE) - - s_year, s_month, s_day, e_year, e_month, e_day = (None,) * 6 - start_mode = 0 # 0 if start semester hasn't been found, 1 if it has, 2 if finished sem - end_mode = 0 # 0 if end semester hasn't been found, 1 if it has, 2 if finished sem - all_th_parents = {el.parent for el in soup.find_all("th")} - months = [ - "january", - "february", - "march", - "april", - "may", - "june", - "july", - "august", - "september", - "october", - "november", - "december", - ] - for tr_el in soup.find_all("tr"): - if tr_el in all_th_parents: - sem_name = tr_el.th.get_text().lower() - if start_sem in sem_name: - start_mode = 1 - elif start_mode == 1: - start_mode = 2 - if end_sem in sem_name: - end_mode = 1 - elif end_mode == 1: - end_mode = 2 - else: - children = list(tr_el.findChildren("td", recursive=False)) - title = children[0] - date_string = children[1].get_text() - if title is not None and "advance registration" in title.get_text().lower(): - if start_mode == 1: - dates = date_string.split("-") - ar_begin_month = None - for month in months: - if month in dates[0].lower(): - ar_begin_month = month - 
ar_end_month = None - for month in months: - if month in dates[0].lower(): - ar_end_month = month - if ar_end_month is None: - ar_end_month = ar_begin_month - s_year = int(start_sem[:4]) - if ar_end_month is not None: - s_month = months.index(ar_end_month) + 1 - day_candidates = [int(s) for s in dates[1].split() if s.isdigit()] - if len(day_candidates) > 0: - s_day = day_candidates[0] - if title is not None and "course selection period ends" in title.get_text().lower(): - if end_mode == 1: - course_sel_end_month = None - for month in months: - if month in date_string.lower(): - course_sel_end_month = month - e_year = int(end_sem[:4]) - if course_sel_end_month is not None: - e_month = months.index(course_sel_end_month) + 1 - day_candidates = [int(s) for s in date_string.split() if s.isdigit()] - if len(day_candidates) > 0: - e_day = day_candidates[0] - if None not in [s_year, s_month, s_day] and start_date is None: - start_date = make_aware( - datetime.strptime(f"{s_year}-{s_month}-{s_day} 07:00", "%Y-%m-%d %H:%M") - + timedelta(days=1), - timezone=tz, - ) - if verbose: - print( - "NOTE: Add/drop date start was estimated as the end of the advanced " - "registration period. Replace this date with the actual start of the " - "add/drop period through the Django admin console when it is announced " - "to students each semester." - ) - if None not in [e_year, e_month, e_day]: - end_date = make_aware( - datetime.strptime(f"{e_year}-{e_month}-{e_day} 11:59", "%Y-%m-%d %H:%M"), - timezone=tz, - ) - adp.estimated_start, adp.end = start_date, end_date - adp.save() - if verbose: - print("Done!") - - -class Command(BaseCommand): - help = ( - "Load in the start and end date of the current semester's open registration period " - "from the Penn Almanac. If an AddDropPeriod object from the given semester " - "already exists and its start field is not null, this script will continue " - "to use that same start date rather than estimating the start date based on " - "the end of the advanced registration period. The end date will always " - "be updated if it is found in the almanac, since the almanac explicitly posts " - "the date for the end of the course selection period." 
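For readers unfamiliar with the semester encoding the scraper above depends on: a code like "2023C" is a four-digit year plus a letter, and the command treats "A" as spring and "C" as fall when deciding which Almanac tables bracket the semester. Below is a minimal sketch (not part of the patch) of that mapping and of the same month-name / first-integer date heuristic used in the parser; the sample date string is invented for illustration.

import calendar

MONTHS = [calendar.month_name[i].lower() for i in range(1, 13)]

def almanac_sections(semester):
    # "2023C" (fall) is bracketed by the "2023 spring" and "2023 fall" tables;
    # "2024A" (spring) by "2023 fall" and "2024 spring".
    if semester[4] == "C":
        return semester[:4] + " spring", semester[:4] + " fall"
    if semester[4] == "A":
        return str(int(semester[:4]) - 1) + " fall", semester[:4] + " spring"
    raise ValueError(f"only fall/spring supported: {semester}")

def parse_month_day(date_string):
    # Same heuristic as the command: first month name found, first integer token.
    month = next((i + 1 for i, m in enumerate(MONTHS) if m in date_string.lower()), None)
    day = next((int(tok) for tok in date_string.split() if tok.isdigit()), None)
    return month, day

# parse_month_day("November 13") -> (11, 13)   (illustrative input)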
- ) - - def handle(self, *args, **kwargs): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - fill_in_add_drop_periods(verbose=True) - load_add_drop_dates(verbose=True) +import logging +from datetime import datetime, timedelta + +import requests +from bs4 import BeautifulSoup +from dateutil.tz import gettz +from django.core.exceptions import ValidationError +from django.core.management.base import BaseCommand +from django.db import transaction +from django.utils.timezone import make_aware + +from alert.models import AddDropPeriod, validate_add_drop_semester +from courses.models import Course +from courses.util import get_current_semester, get_or_create_add_drop_period +from PennCourses.settings.base import TIME_ZONE + + +def fill_in_add_drop_periods(verbose=False): + all_semesters = set(Course.objects.values_list("semester", flat=True).distinct()) + adp_semesters = set(AddDropPeriod.objects.values_list("semester", flat=True).distinct()) + missing_semesters = set() + for candidate in all_semesters - adp_semesters: + try: + validate_add_drop_semester(candidate) + missing_semesters.add(candidate) + except ValidationError: + pass + if verbose and len(missing_semesters) > 0: + print(f"Filling in AddDropPeriod objects for semesters {missing_semesters}") + for semester in missing_semesters: + AddDropPeriod(semester=semester).save() + return adp_semesters.union(missing_semesters) + + +def load_add_drop_dates(verbose=False): + semester = get_current_semester() + validate_add_drop_semester(semester) + + if verbose: + print(f"Loading course selection period dates for semester {semester} from the Almanac") + with transaction.atomic(): + adp = get_or_create_add_drop_period(semester) + start_date = adp.start + end_date = adp.end + html = requests.get("https://almanac.upenn.edu/penn-academic-calendar").content + soup = BeautifulSoup(html, "html.parser") + if semester[4] == "C": + start_sem = semester[:4] + " spring" + end_sem = semester[:4] + " fall" + elif semester[4] == "A": + start_sem = str(int(semester[:4]) - 1) + " fall" + end_sem = semester[:4] + " spring" + else: + raise ValueError( + "This script currently only supports fall or spring semesters; " + f"{semester} is invalid" + ) + tz = gettz(TIME_ZONE) + + s_year, s_month, s_day, e_year, e_month, e_day = (None,) * 6 + start_mode = 0 # 0 if start semester hasn't been found, 1 if it has, 2 if finished sem + end_mode = 0 # 0 if end semester hasn't been found, 1 if it has, 2 if finished sem + all_th_parents = {el.parent for el in soup.find_all("th")} + months = [ + "january", + "february", + "march", + "april", + "may", + "june", + "july", + "august", + "september", + "october", + "november", + "december", + ] + for tr_el in soup.find_all("tr"): + if tr_el in all_th_parents: + sem_name = tr_el.th.get_text().lower() + if start_sem in sem_name: + start_mode = 1 + elif start_mode == 1: + start_mode = 2 + if end_sem in sem_name: + end_mode = 1 + elif end_mode == 1: + end_mode = 2 + else: + children = list(tr_el.findChildren("td", recursive=False)) + title = children[0] + date_string = children[1].get_text() + if title is not None and "advance registration" in title.get_text().lower(): + if start_mode == 1: + dates = date_string.split("-") + ar_begin_month = None + for month in months: + if month in dates[0].lower(): + ar_begin_month = month + ar_end_month = None + for month in months: + if month in dates[0].lower(): + ar_end_month = month + if ar_end_month is None: + ar_end_month = ar_begin_month + s_year = int(start_sem[:4]) + 
if ar_end_month is not None: + s_month = months.index(ar_end_month) + 1 + day_candidates = [int(s) for s in dates[1].split() if s.isdigit()] + if len(day_candidates) > 0: + s_day = day_candidates[0] + if title is not None and "course selection period ends" in title.get_text().lower(): + if end_mode == 1: + course_sel_end_month = None + for month in months: + if month in date_string.lower(): + course_sel_end_month = month + e_year = int(end_sem[:4]) + if course_sel_end_month is not None: + e_month = months.index(course_sel_end_month) + 1 + day_candidates = [int(s) for s in date_string.split() if s.isdigit()] + if len(day_candidates) > 0: + e_day = day_candidates[0] + if None not in [s_year, s_month, s_day] and start_date is None: + start_date = make_aware( + datetime.strptime(f"{s_year}-{s_month}-{s_day} 07:00", "%Y-%m-%d %H:%M") + + timedelta(days=1), + timezone=tz, + ) + if verbose: + print( + "NOTE: Add/drop date start was estimated as the end of the advanced " + "registration period. Replace this date with the actual start of the " + "add/drop period through the Django admin console when it is announced " + "to students each semester." + ) + if None not in [e_year, e_month, e_day]: + end_date = make_aware( + datetime.strptime(f"{e_year}-{e_month}-{e_day} 11:59", "%Y-%m-%d %H:%M"), + timezone=tz, + ) + adp.estimated_start, adp.end = start_date, end_date + adp.save() + if verbose: + print("Done!") + + +class Command(BaseCommand): + help = ( + "Load in the start and end date of the current semester's open registration period " + "from the Penn Almanac. If an AddDropPeriod object from the given semester " + "already exists and its start field is not null, this script will continue " + "to use that same start date rather than estimating the start date based on " + "the end of the advanced registration period. The end date will always " + "be updated if it is found in the almanac, since the almanac explicitly posts " + "the date for the end of the course selection period." + ) + + def handle(self, *args, **kwargs): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + fill_in_add_drop_periods(verbose=True) + load_add_drop_dates(verbose=True) diff --git a/backend/courses/management/commands/load_crosswalk.py b/backend/courses/management/commands/load_crosswalk.py index 53b6f646c..47f48c4e9 100644 --- a/backend/courses/management/commands/load_crosswalk.py +++ b/backend/courses/management/commands/load_crosswalk.py @@ -1,144 +1,144 @@ -import os -from collections import defaultdict - -import pandas as pd -from django.core.management.base import BaseCommand -from django.db import transaction -from tqdm import tqdm - -from courses.models import Course, Topic -from PennCourses.settings.base import XWALK_S3_BUCKET, XWALK_SRC, S3_client -from review.management.commands.clearcache import clear_cache - - -def get_crosswalk(cross_walk): - """ - From a given crosswalk csv path, generate a dict mapping old_full_code to - a list of the new codes originating from that source. 
- """ - links = defaultdict(list) - cross_walk = pd.read_csv(cross_walk, delimiter="|", encoding="unicode_escape", dtype=str) - for _, r in cross_walk.iterrows(): - old_full_code = f"{r['SRS_SUBJ_CODE']}-{r['SRS_COURSE_NUMBER']}" - new_full_code = f"{r['NGSS_SUBJECT']}-{r['NGSS_COURSE_NUMBER']}" - links[old_full_code].append(new_full_code) - return links - - -def get_crosswalk_s3(verbose=False): - """ - From the crosswalk crosswalk from settings/base.py, generate a dict mapping - old_full_code to a list of the new codes originating from that source. - """ - fp = "/tmp/" + XWALK_SRC - if verbose: - print(f"downloading crosswalk from s3://{XWALK_S3_BUCKET}/{XWALK_SRC}") - S3_client.download_file(XWALK_S3_BUCKET, XWALK_SRC, fp) - - crosswalk = get_crosswalk(fp) - - # Remove temporary file - os.remove(fp) - - return crosswalk - - -def load_crosswalk(print_missing=False, verbose=False): - """ - Loads the crosswalk from settings/base.py, updating branched_from fields - and merging Topics as appropriate. - - :param print_missing: If True, prints courses involved in crosswalk links that were - not found in the database. - :param verbose: A flag indicating whether this script should print its progress. - """ - crosswalk = get_crosswalk_s3(verbose=verbose) - - if verbose: - print("Loading crosswalk.") - - num_merges = 0 - num_branch_updates = 0 - num_missing_roots = 0 - num_missing_children = 0 - - with transaction.atomic(): - Topic.objects.all().update(branched_from=None) - for root_course_code, children_codes in tqdm(crosswalk.items()): - root_course = ( - Course.objects.filter(full_code=root_course_code) - .order_by("-semester") - .select_related("topic") - .first() - ) - if not root_course: - num_missing_roots += 1 - if print_missing: - print(f"Root course {root_course} not found in db") - continue - root_topic = root_course.topic - assert root_topic, f"Root course {root_course} has no topic" - - children = ( - Course.objects.filter( - full_code__in=children_codes, semester__gt=root_course.semester - ) - .order_by("-semester") - .select_related("topic") - ) - # Take minimum semester course (after root course semester) matching child code - child_to_topic = {child.full_code: child.topic for child in children} - for child in {child.full_code: child for child in children}.values(): - assert child.topic, f"Child course {child} of root {root_course} has no topic" - child_topics = set(child_to_topic.values()) - missing_codes = set(children_codes) - set(child_to_topic.keys()) - - for child_code in missing_codes: - num_missing_children += 1 - if print_missing: - print(f"Child course {child_code} not found in db") - - if len(child_topics) == 1 and not missing_codes: - child_topic = child_topics.pop() - if child_topic.branched_from: - child_topic.branched_from = None - child_topic.save() - if root_topic != child_topic: - root_topic.merge_with(child_topic) - num_merges += 1 - else: - for child_topic in child_topics: - if root_topic not in [child_topic, child_topic.branched_from]: - num_branch_updates += 1 - child_topic.branched_from = root_topic - child_topic.save() - - if verbose: - print(f"Performed {num_merges} Topic merges.") - print(f"Added branches, updating the branched_from field of {num_branch_updates} Topics.") - print(f"{num_missing_roots}/{len(crosswalk)} roots not found in db") - print( - f"{num_missing_children}/{sum(len(c) for c in crosswalk.values())} " - "children not found in db" - ) - - -class Command(BaseCommand): - help = ( - "This script loads the crosswalk from settings/base.py, 
updating " - "branched_from fields and merging Topics as appropriate." - ) - - def add_arguments(self, parser): - parser.add_argument( - "--print_missing", - action="store_true", - help="Print out all missing roots and children.", - ) - - def handle(self, *args, **kwargs): - load_crosswalk(print_missing=kwargs["print_missing"], verbose=True) - - print("Clearing cache") - del_count = clear_cache() - print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") +import os +from collections import defaultdict + +import pandas as pd +from django.core.management.base import BaseCommand +from django.db import transaction +from tqdm import tqdm + +from courses.models import Course, Topic +from PennCourses.settings.base import XWALK_S3_BUCKET, XWALK_SRC, S3_client +from review.management.commands.clearcache import clear_cache + + +def get_crosswalk(cross_walk): + """ + From a given crosswalk csv path, generate a dict mapping old_full_code to + a list of the new codes originating from that source. + """ + links = defaultdict(list) + cross_walk = pd.read_csv(cross_walk, delimiter="|", encoding="unicode_escape", dtype=str) + for _, r in cross_walk.iterrows(): + old_full_code = f"{r['SRS_SUBJ_CODE']}-{r['SRS_COURSE_NUMBER']}" + new_full_code = f"{r['NGSS_SUBJECT']}-{r['NGSS_COURSE_NUMBER']}" + links[old_full_code].append(new_full_code) + return links + + +def get_crosswalk_s3(verbose=False): + """ + From the crosswalk crosswalk from settings/base.py, generate a dict mapping + old_full_code to a list of the new codes originating from that source. + """ + fp = "/tmp/" + XWALK_SRC + if verbose: + print(f"downloading crosswalk from s3://{XWALK_S3_BUCKET}/{XWALK_SRC}") + S3_client.download_file(XWALK_S3_BUCKET, XWALK_SRC, fp) + + crosswalk = get_crosswalk(fp) + + # Remove temporary file + os.remove(fp) + + return crosswalk + + +def load_crosswalk(print_missing=False, verbose=False): + """ + Loads the crosswalk from settings/base.py, updating branched_from fields + and merging Topics as appropriate. + + :param print_missing: If True, prints courses involved in crosswalk links that were + not found in the database. + :param verbose: A flag indicating whether this script should print its progress. 
+ """ + crosswalk = get_crosswalk_s3(verbose=verbose) + + if verbose: + print("Loading crosswalk.") + + num_merges = 0 + num_branch_updates = 0 + num_missing_roots = 0 + num_missing_children = 0 + + with transaction.atomic(): + Topic.objects.all().update(branched_from=None) + for root_course_code, children_codes in tqdm(crosswalk.items()): + root_course = ( + Course.objects.filter(full_code=root_course_code) + .order_by("-semester") + .select_related("topic") + .first() + ) + if not root_course: + num_missing_roots += 1 + if print_missing: + print(f"Root course {root_course} not found in db") + continue + root_topic = root_course.topic + assert root_topic, f"Root course {root_course} has no topic" + + children = ( + Course.objects.filter( + full_code__in=children_codes, semester__gt=root_course.semester + ) + .order_by("-semester") + .select_related("topic") + ) + # Take minimum semester course (after root course semester) matching child code + child_to_topic = {child.full_code: child.topic for child in children} + for child in {child.full_code: child for child in children}.values(): + assert child.topic, f"Child course {child} of root {root_course} has no topic" + child_topics = set(child_to_topic.values()) + missing_codes = set(children_codes) - set(child_to_topic.keys()) + + for child_code in missing_codes: + num_missing_children += 1 + if print_missing: + print(f"Child course {child_code} not found in db") + + if len(child_topics) == 1 and not missing_codes: + child_topic = child_topics.pop() + if child_topic.branched_from: + child_topic.branched_from = None + child_topic.save() + if root_topic != child_topic: + root_topic.merge_with(child_topic) + num_merges += 1 + else: + for child_topic in child_topics: + if root_topic not in [child_topic, child_topic.branched_from]: + num_branch_updates += 1 + child_topic.branched_from = root_topic + child_topic.save() + + if verbose: + print(f"Performed {num_merges} Topic merges.") + print(f"Added branches, updating the branched_from field of {num_branch_updates} Topics.") + print(f"{num_missing_roots}/{len(crosswalk)} roots not found in db") + print( + f"{num_missing_children}/{sum(len(c) for c in crosswalk.values())} " + "children not found in db" + ) + + +class Command(BaseCommand): + help = ( + "This script loads the crosswalk from settings/base.py, updating " + "branched_from fields and merging Topics as appropriate." 
+ ) + + def add_arguments(self, parser): + parser.add_argument( + "--print_missing", + action="store_true", + help="Print out all missing roots and children.", + ) + + def handle(self, *args, **kwargs): + load_crosswalk(print_missing=kwargs["print_missing"], verbose=True) + + print("Clearing cache") + del_count = clear_cache() + print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") diff --git a/backend/courses/management/commands/load_status_history.py b/backend/courses/management/commands/load_status_history.py index 25de333c4..d9ab82c02 100644 --- a/backend/courses/management/commands/load_status_history.py +++ b/backend/courses/management/commands/load_status_history.py @@ -1,101 +1,101 @@ -import csv -import os -from datetime import datetime - -from dateutil.tz import gettz -from django.core.management.base import BaseCommand -from django.db import transaction -from django.db.models import F -from django.utils.timezone import make_aware -from tqdm import tqdm - -from alert.management.commands.recomputestats import recompute_stats -from alert.models import AddDropPeriod -from courses.models import Section, StatusUpdate -from courses.util import get_or_create_add_drop_period -from PennCourses.settings.base import TIME_ZONE - - -class Command(BaseCommand): - help = ( - "Load course status history into the database from a CSV file with the 6 columns:\n" - "full_code, semester, created_at (%Y-%m-%d %H:%M:%S.%f %Z), old_status, new_status, " - "alert_sent\n" - "You should load all sections referenced by this StatusUpdate dataset using the " - "load_test_courses_data script, before running this import script." - ) - - def add_arguments(self, parser): - parser.add_argument( - "--src", - type=str, - default="", - help="The file path of the .csv file containing the status update " - "data you want to import", - ) - - def handle(self, *args, **kwargs): - src = os.path.abspath(kwargs["src"]) - _, file_extension = os.path.splitext(kwargs["src"]) - if not os.path.exists(src): - return "File does not exist." - if file_extension != ".csv": - return "File is not a csv." - sections_map = dict() # maps (full_code, semester) to section id - row_count = 0 - with open(src) as history_file: - history_reader = csv.reader(history_file) - sections_to_fetch = set() - for row in history_reader: - sections_to_fetch.add((row[0], row[1])) - row_count += 1 - full_codes = list(set([sec[0] for sec in sections_to_fetch])) - semesters = list(set([sec[1] for sec in sections_to_fetch])) - section_obs = Section.objects.filter( - full_code__in=full_codes, course__semester__in=semesters - ).annotate(efficient_semester=F("course__semester")) - for section_ob in section_obs: - sections_map[section_ob.full_code, section_ob.efficient_semester] = section_ob.id - add_drop_periods = dict() # maps semester to AddDropPeriod object - for adp in AddDropPeriod.objects.filter(semester__in=semesters): - add_drop_periods[adp.semester] = adp - print( - "This script is atomic, meaning either all the status updates from the given " - "CSV will be loaded into the database, or otherwise if an error is encountered, " - "all changes will be rolled back and the database will remain as it was " - "before the script was run." 
- ) - with transaction.atomic(): - with open(src) as history_file: - print(f"Beginning to load status history from {src}") - history_reader = csv.reader(history_file) - for row in tqdm(history_reader, total=row_count): - full_code = row[0] - semester = row[1] - created_at = datetime.strptime(row[2], "%Y-%m-%d %H:%M:%S.%f %Z") - created_at = make_aware(created_at, timezone=gettz(TIME_ZONE), is_dst=None) - old_status = row[3] - new_status = row[4] - alert_sent = row[5] - if old_status != "O" and old_status != "C" and old_status != "X": - old_status = "" - if new_status != "O" and new_status != "C" and new_status != "X": - new_status = "" - if (full_code, semester) not in sections_map.keys(): - raise ValueError(f"Section {full_code} {semester} not found in db.") - section_id = sections_map[full_code, semester] - status_update = StatusUpdate( - section_id=section_id, - old_status=old_status, - new_status=new_status, - created_at=created_at, - alert_sent=alert_sent, - ) - if semester not in add_drop_periods: - add_drop_periods[semester] = get_or_create_add_drop_period(semester) - status_update.save(add_drop_period=add_drop_periods[semester]) - - print(f"Finished loading status history from {src}... processed {row_count} rows. ") - - print(f"Recomputing PCA Stats for {len(semesters)} semesters...") - recompute_stats(semesters=",".join(semesters), verbose=True) +import csv +import os +from datetime import datetime + +from dateutil.tz import gettz +from django.core.management.base import BaseCommand +from django.db import transaction +from django.db.models import F +from django.utils.timezone import make_aware +from tqdm import tqdm + +from alert.management.commands.recomputestats import recompute_stats +from alert.models import AddDropPeriod +from courses.models import Section, StatusUpdate +from courses.util import get_or_create_add_drop_period +from PennCourses.settings.base import TIME_ZONE + + +class Command(BaseCommand): + help = ( + "Load course status history into the database from a CSV file with the 6 columns:\n" + "full_code, semester, created_at (%Y-%m-%d %H:%M:%S.%f %Z), old_status, new_status, " + "alert_sent\n" + "You should load all sections referenced by this StatusUpdate dataset using the " + "load_test_courses_data script, before running this import script." + ) + + def add_arguments(self, parser): + parser.add_argument( + "--src", + type=str, + default="", + help="The file path of the .csv file containing the status update " + "data you want to import", + ) + + def handle(self, *args, **kwargs): + src = os.path.abspath(kwargs["src"]) + _, file_extension = os.path.splitext(kwargs["src"]) + if not os.path.exists(src): + return "File does not exist." + if file_extension != ".csv": + return "File is not a csv." 
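To make the expected input concrete, here is one plausible row for the six-column CSV described in the help text above (column order and timestamp format follow the code; the specific values are invented):

# full_code, semester, created_at, old_status, new_status, alert_sent
sample_row = [
    "CIS-1200-001",                    # section full_code (invented)
    "2022C",                           # semester
    "2022-09-01 12:00:00.000000 EDT",  # created_at, "%Y-%m-%d %H:%M:%S.%f %Z"
    "O",                               # old_status: one of O / C / X (else coerced to "")
    "C",                               # new_status
    "True",                            # alert_sent
]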
+ sections_map = dict() # maps (full_code, semester) to section id + row_count = 0 + with open(src) as history_file: + history_reader = csv.reader(history_file) + sections_to_fetch = set() + for row in history_reader: + sections_to_fetch.add((row[0], row[1])) + row_count += 1 + full_codes = list(set([sec[0] for sec in sections_to_fetch])) + semesters = list(set([sec[1] for sec in sections_to_fetch])) + section_obs = Section.objects.filter( + full_code__in=full_codes, course__semester__in=semesters + ).annotate(efficient_semester=F("course__semester")) + for section_ob in section_obs: + sections_map[section_ob.full_code, section_ob.efficient_semester] = section_ob.id + add_drop_periods = dict() # maps semester to AddDropPeriod object + for adp in AddDropPeriod.objects.filter(semester__in=semesters): + add_drop_periods[adp.semester] = adp + print( + "This script is atomic, meaning either all the status updates from the given " + "CSV will be loaded into the database, or otherwise if an error is encountered, " + "all changes will be rolled back and the database will remain as it was " + "before the script was run." + ) + with transaction.atomic(): + with open(src) as history_file: + print(f"Beginning to load status history from {src}") + history_reader = csv.reader(history_file) + for row in tqdm(history_reader, total=row_count): + full_code = row[0] + semester = row[1] + created_at = datetime.strptime(row[2], "%Y-%m-%d %H:%M:%S.%f %Z") + created_at = make_aware(created_at, timezone=gettz(TIME_ZONE), is_dst=None) + old_status = row[3] + new_status = row[4] + alert_sent = row[5] + if old_status != "O" and old_status != "C" and old_status != "X": + old_status = "" + if new_status != "O" and new_status != "C" and new_status != "X": + new_status = "" + if (full_code, semester) not in sections_map.keys(): + raise ValueError(f"Section {full_code} {semester} not found in db.") + section_id = sections_map[full_code, semester] + status_update = StatusUpdate( + section_id=section_id, + old_status=old_status, + new_status=new_status, + created_at=created_at, + alert_sent=alert_sent, + ) + if semester not in add_drop_periods: + add_drop_periods[semester] = get_or_create_add_drop_period(semester) + status_update.save(add_drop_period=add_drop_periods[semester]) + + print(f"Finished loading status history from {src}... processed {row_count} rows. ") + + print(f"Recomputing PCA Stats for {len(semesters)} semesters...") + recompute_stats(semesters=",".join(semesters), verbose=True) diff --git a/backend/courses/management/commands/load_test_courses_data.py b/backend/courses/management/commands/load_test_courses_data.py index 1929bcff3..ba76c81b5 100644 --- a/backend/courses/management/commands/load_test_courses_data.py +++ b/backend/courses/management/commands/load_test_courses_data.py @@ -1,278 +1,278 @@ -import csv -import os -from collections import defaultdict - -from django.core.management.base import BaseCommand -from django.db import transaction -from tqdm import tqdm - -from alert.management.commands.recomputestats import recompute_stats -from courses.management.commands.export_test_courses_data import ( - models, - related_id_fields, - self_related_id_fields, - semester_filter, - test_data_fields, - unique_identifying_fields, -) -from courses.models import Course, Topic -from courses.util import get_set_id, in_dev - - -class Command(BaseCommand): - help = ( - "Import test data (courses, sections, instructors, and reviews data) from the given csv. 
" - "WARNING: this script will delete all pre-existing objects of the following datatypes " - "from all semesters represented in the given csv (except for departments and instructors) " - "if the import is successful:" - f"\n{str([f for f in test_data_fields.keys() if f != 'instructors'])}." - "\nIf an error is encountered at any point, you will be alerted and the database will " - "remain as it was before this script was run.\n" - "This script cannot be run in production." - ) - - def add_arguments(self, parser): - parser.add_argument( - "--src", - type=str, - help="The file path of the .csv file containing the test data you want to import.", - ) - - def handle(self, *args, **kwargs): - if not in_dev(): - raise ValueError("This script cannot be run in a non-development environment.") - src = os.path.abspath(kwargs["src"]) - _, file_extension = os.path.splitext(kwargs["src"]) - if not os.path.exists(src): - return "File does not exist." - if file_extension != ".csv": - return "File is not a csv." - - fields = test_data_fields - data_types = fields.keys() - - row_count = 0 - rows_map = {data_type: [] for data_type in data_types} - # rows_map: maps datatype to list of rows for that datatype - semesters = set() # set of semesters represented in the given csv - with open(src) as data_file: - data_reader = csv.reader(data_file, delimiter=",", quotechar='"') - for row in data_reader: - data_type = row[0] - if data_type in "courses": - semesters.add(row[2]) - assert data_type in data_types, ( - f"Datatype {data_type} in the given csv is not valid for this version " - f"of the import script. Valid datatypes: {data_types}" - ) - should_be = 6 if data_type.endswith("_m2mfield") else (1 + len(fields[data_type])) - assert len(row) == should_be, ( - f"The row {row} in the given csv is not valid for this version of the import " - f"script. Contains {len(row)} columns, while valid " - f"is {should_be}." 
- ) - rows_map[data_type].append(row) - row_count += 1 - objects = dict() # maps datatype to object id to object - to_save = {data_type: [] for data_type in data_types} - # to_save: maps datatype to list of objects to save - - identify_id_map = {data_type: dict() for data_type in data_types} - # identify_id_map: maps datatype to unique identification str to old id - id_change_map = {data_type: dict() for data_type in data_types} - # id_change_map: maps datatype to old id to new id - self_related_ids = {data_type: defaultdict(dict) for data_type in data_types} - # self_related_ids: maps datatype to field to object id to self-related object id - deferred_related_ids = {data_type: defaultdict(dict) for data_type in data_types} - # deferred_related_ids: maps datatype to field to object id to deferred related object id - topic_id_to_course_uid_strs = defaultdict(set) - # topic_id_to_course_uid_strs: maps old topic id to a set of course unique id strs - - def generate_unique_id_str_from_row(data_type, row): - """ - Given a datatype and a row, generates a unique identification str - """ - components = [] - for field in unique_identifying_fields[data_type]: - field_value = row[1 + fields[data_type].index(field)] - if data_type in related_id_fields and field in related_id_fields[data_type]: - field_value = id_change_map[related_id_fields[data_type][field]][field_value] - components.append(field_value) - return tuple(components) - - def generate_unique_id_str_from_object(data_type, object): - """ - Given a datatype and an object, generates a unique identification str - """ - components = [] - for field in unique_identifying_fields[data_type]: - field_value = getattr(object, field) - components.append(field_value) - return tuple(components) - - print( - "This script is atomic, meaning either all the test data from the given " - "CSV will be loaded into the database, or otherwise if an error is encountered, " - "all changes will be rolled back and the database will remain as it was " - "before the script was run." 
- ) - with transaction.atomic(): - print(f"Deleting existing objects from semesters {semesters}...") - for data_type in data_types: - if data_type not in semester_filter.keys(): - continue - models[data_type].objects.filter( - **{semester_filter[data_type] + "__in": list(semesters)} - ).delete() - for i, data_type in enumerate(data_types): - print(f"Loading {data_type} data ({i+1}/{len(data_types)})...") - for row in tqdm(rows_map[data_type]): - if data_type.endswith("_m2mfield"): - dtype = row[1] - object_id = id_change_map[dtype][row[2]] - object = objects[dtype][object_id] - related_dtype = row[5] - getattr(object, row[3]).add(id_change_map[related_dtype][row[4]]) - continue - unique_str = generate_unique_id_str_from_row(data_type, row) - identify_id_map[data_type][unique_str] = row[1] - field_to_index = {field: (1 + i) for i, field in enumerate(fields[data_type])} - to_save_dict = dict() # this will be unpacked into the model initialization - for field in fields[data_type]: - if row[field_to_index[field]] is None or ( - row[field_to_index[field]] == "None" and field != "prerequisites" - ): - to_save_dict[field] = None - continue - if field == "id": - continue - if data_type in related_id_fields and field in related_id_fields[data_type]: - related_data_type = related_id_fields[data_type][field] - if related_data_type in id_change_map: - # Related object has already been loaded - to_save_dict[field] = id_change_map[related_data_type][ - row[field_to_index[field]] - ] - else: - deferred_related_ids[data_type][field][ - row[field_to_index["id"]] - ] = row[field_to_index[field]] - elif ( - data_type in self_related_id_fields - and field in self_related_id_fields[data_type] - ): - self_related_ids[data_type][field][row[field_to_index["id"]]] = row[ - field_to_index[field] - ] - elif data_type == "courses" and field == "topic_id": - topic_id_to_course_uid_strs[row[field_to_index[field]]].add(unique_str) - else: - to_save_dict[field] = row[field_to_index[field]] - to_save[data_type].append(models[data_type](**to_save_dict)) - ob = to_save[data_type][-1] - self_id = get_set_id(ob) - if data_type in self_related_id_fields: - for field in self_related_id_fields[data_type]: - # This self-related id will be changed later to the correct value - setattr(ob, field, self_id) - - if data_type not in semester_filter.keys() and data_type in models: - existing_objects = set( - generate_unique_id_str_from_object(data_type, m) - for m in models[data_type].objects.all() - ) - to_save[data_type] = [ - ob - for ob in to_save[data_type] - if generate_unique_id_str_from_object(data_type, ob) not in existing_objects - ] - if data_type.endswith("_m2mfield"): - continue - - objects[data_type] = dict() - print(f"Saving {data_type} (this might take a while)...") - models[data_type].objects.bulk_create(to_save[data_type]) - if data_type not in semester_filter.keys(): - queryset = models[data_type].objects.all() - else: - queryset = models[data_type].objects.filter( - **{semester_filter[data_type] + "__in": list(semesters)} - ) - for obj in queryset: - if ( - generate_unique_id_str_from_object(data_type, obj) - not in identify_id_map[data_type] - ): - continue - objects[data_type][obj.id] = obj - id_change_map[data_type][ - identify_id_map[data_type][ - generate_unique_id_str_from_object(data_type, obj) - ] - ] = obj.id - if data_type in self_related_ids.keys(): - for field in self_related_ids[data_type].keys(): - to_update = [] - for self_id, other_id in self_related_ids[data_type][field].items(): - self_new_id = 
id_change_map[data_type][self_id] - self_other_id = id_change_map[data_type][other_id] - obj = objects[data_type][self_new_id] - setattr(obj, field, self_other_id) - to_update.append(obj) - print(f"Updating {data_type} (this might take a while)...") - models[data_type].objects.bulk_update(to_update, [field]) - - for data_type in deferred_related_ids.keys(): - if not deferred_related_ids[data_type]: - continue - print(f"Loading deferred related fields for {data_type}...") - for field in deferred_related_ids[data_type].keys(): - related_data_type = related_id_fields[data_type][field] - to_update = [] - for obj_id, related_id in deferred_related_ids[data_type][field].items(): - obj_new_id = id_change_map[data_type][obj_id] - related_new_id = id_change_map[related_data_type][related_id] - obj = objects[data_type][obj_new_id] - setattr(obj, field, related_new_id) - to_update.append(obj) - print(f"Updating {data_type} (this might take a while)...") - models[data_type].objects.bulk_update(to_update, [field]) - - print("Manually loading Topics...") - # Assumes topics are only ever merged, not split - for course_uid_strs in tqdm(topic_id_to_course_uid_strs.values()): - course_ids = { - id_change_map["courses"][identify_id_map["courses"][uid_str]] - for uid_str in course_uid_strs - } - topics = list( - Topic.objects.filter(courses__id__in=course_ids) - .select_related("most_recent") - .distinct() - ) - - courses = Course.objects.filter(id__in=course_ids).select_related("primary_listing") - most_recent = None - for course in courses: - course = course.primary_listing - if not most_recent or course.semester > most_recent.semester: - most_recent = course - - if not topics: - topic = Topic(most_recent=most_recent) - topic.save() - Course.objects.filter(id__in=course_ids).update(topic=topic) - continue - - topic = Topic.merge_all(topics) - - Course.objects.filter(id__in=course_ids).update(topic=topic) - if topic.most_recent != most_recent: - topic.most_recent = most_recent - topic.save() - - recompute_stats( - semesters=sorted(list(semesters)), semesters_precomputed=True, verbose=True - ) - - print(f"Finished loading test data {src}... processed {row_count} rows. ") +import csv +import os +from collections import defaultdict + +from django.core.management.base import BaseCommand +from django.db import transaction +from tqdm import tqdm + +from alert.management.commands.recomputestats import recompute_stats +from courses.management.commands.export_test_courses_data import ( + models, + related_id_fields, + self_related_id_fields, + semester_filter, + test_data_fields, + unique_identifying_fields, +) +from courses.models import Course, Topic +from courses.util import get_set_id, in_dev + + +class Command(BaseCommand): + help = ( + "Import test data (courses, sections, instructors, and reviews data) from the given csv. " + "WARNING: this script will delete all pre-existing objects of the following datatypes " + "from all semesters represented in the given csv (except for departments and instructors) " + "if the import is successful:" + f"\n{str([f for f in test_data_fields.keys() if f != 'instructors'])}." + "\nIf an error is encountered at any point, you will be alerted and the database will " + "remain as it was before this script was run.\n" + "This script cannot be run in production." 
+ ) + + def add_arguments(self, parser): + parser.add_argument( + "--src", + type=str, + help="The file path of the .csv file containing the test data you want to import.", + ) + + def handle(self, *args, **kwargs): + if not in_dev(): + raise ValueError("This script cannot be run in a non-development environment.") + src = os.path.abspath(kwargs["src"]) + _, file_extension = os.path.splitext(kwargs["src"]) + if not os.path.exists(src): + return "File does not exist." + if file_extension != ".csv": + return "File is not a csv." + + fields = test_data_fields + data_types = fields.keys() + + row_count = 0 + rows_map = {data_type: [] for data_type in data_types} + # rows_map: maps datatype to list of rows for that datatype + semesters = set() # set of semesters represented in the given csv + with open(src) as data_file: + data_reader = csv.reader(data_file, delimiter=",", quotechar='"') + for row in data_reader: + data_type = row[0] + if data_type in "courses": + semesters.add(row[2]) + assert data_type in data_types, ( + f"Datatype {data_type} in the given csv is not valid for this version " + f"of the import script. Valid datatypes: {data_types}" + ) + should_be = 6 if data_type.endswith("_m2mfield") else (1 + len(fields[data_type])) + assert len(row) == should_be, ( + f"The row {row} in the given csv is not valid for this version of the import " + f"script. Contains {len(row)} columns, while valid " + f"is {should_be}." + ) + rows_map[data_type].append(row) + row_count += 1 + objects = dict() # maps datatype to object id to object + to_save = {data_type: [] for data_type in data_types} + # to_save: maps datatype to list of objects to save + + identify_id_map = {data_type: dict() for data_type in data_types} + # identify_id_map: maps datatype to unique identification str to old id + id_change_map = {data_type: dict() for data_type in data_types} + # id_change_map: maps datatype to old id to new id + self_related_ids = {data_type: defaultdict(dict) for data_type in data_types} + # self_related_ids: maps datatype to field to object id to self-related object id + deferred_related_ids = {data_type: defaultdict(dict) for data_type in data_types} + # deferred_related_ids: maps datatype to field to object id to deferred related object id + topic_id_to_course_uid_strs = defaultdict(set) + # topic_id_to_course_uid_strs: maps old topic id to a set of course unique id strs + + def generate_unique_id_str_from_row(data_type, row): + """ + Given a datatype and a row, generates a unique identification str + """ + components = [] + for field in unique_identifying_fields[data_type]: + field_value = row[1 + fields[data_type].index(field)] + if data_type in related_id_fields and field in related_id_fields[data_type]: + field_value = id_change_map[related_id_fields[data_type][field]][field_value] + components.append(field_value) + return tuple(components) + + def generate_unique_id_str_from_object(data_type, object): + """ + Given a datatype and an object, generates a unique identification str + """ + components = [] + for field in unique_identifying_fields[data_type]: + field_value = getattr(object, field) + components.append(field_value) + return tuple(components) + + print( + "This script is atomic, meaning either all the test data from the given " + "CSV will be loaded into the database, or otherwise if an error is encountered, " + "all changes will be rolled back and the database will remain as it was " + "before the script was run." 
+ ) + with transaction.atomic(): + print(f"Deleting existing objects from semesters {semesters}...") + for data_type in data_types: + if data_type not in semester_filter.keys(): + continue + models[data_type].objects.filter( + **{semester_filter[data_type] + "__in": list(semesters)} + ).delete() + for i, data_type in enumerate(data_types): + print(f"Loading {data_type} data ({i+1}/{len(data_types)})...") + for row in tqdm(rows_map[data_type]): + if data_type.endswith("_m2mfield"): + dtype = row[1] + object_id = id_change_map[dtype][row[2]] + object = objects[dtype][object_id] + related_dtype = row[5] + getattr(object, row[3]).add(id_change_map[related_dtype][row[4]]) + continue + unique_str = generate_unique_id_str_from_row(data_type, row) + identify_id_map[data_type][unique_str] = row[1] + field_to_index = {field: (1 + i) for i, field in enumerate(fields[data_type])} + to_save_dict = dict() # this will be unpacked into the model initialization + for field in fields[data_type]: + if row[field_to_index[field]] is None or ( + row[field_to_index[field]] == "None" and field != "prerequisites" + ): + to_save_dict[field] = None + continue + if field == "id": + continue + if data_type in related_id_fields and field in related_id_fields[data_type]: + related_data_type = related_id_fields[data_type][field] + if related_data_type in id_change_map: + # Related object has already been loaded + to_save_dict[field] = id_change_map[related_data_type][ + row[field_to_index[field]] + ] + else: + deferred_related_ids[data_type][field][ + row[field_to_index["id"]] + ] = row[field_to_index[field]] + elif ( + data_type in self_related_id_fields + and field in self_related_id_fields[data_type] + ): + self_related_ids[data_type][field][row[field_to_index["id"]]] = row[ + field_to_index[field] + ] + elif data_type == "courses" and field == "topic_id": + topic_id_to_course_uid_strs[row[field_to_index[field]]].add(unique_str) + else: + to_save_dict[field] = row[field_to_index[field]] + to_save[data_type].append(models[data_type](**to_save_dict)) + ob = to_save[data_type][-1] + self_id = get_set_id(ob) + if data_type in self_related_id_fields: + for field in self_related_id_fields[data_type]: + # This self-related id will be changed later to the correct value + setattr(ob, field, self_id) + + if data_type not in semester_filter.keys() and data_type in models: + existing_objects = set( + generate_unique_id_str_from_object(data_type, m) + for m in models[data_type].objects.all() + ) + to_save[data_type] = [ + ob + for ob in to_save[data_type] + if generate_unique_id_str_from_object(data_type, ob) not in existing_objects + ] + if data_type.endswith("_m2mfield"): + continue + + objects[data_type] = dict() + print(f"Saving {data_type} (this might take a while)...") + models[data_type].objects.bulk_create(to_save[data_type]) + if data_type not in semester_filter.keys(): + queryset = models[data_type].objects.all() + else: + queryset = models[data_type].objects.filter( + **{semester_filter[data_type] + "__in": list(semesters)} + ) + for obj in queryset: + if ( + generate_unique_id_str_from_object(data_type, obj) + not in identify_id_map[data_type] + ): + continue + objects[data_type][obj.id] = obj + id_change_map[data_type][ + identify_id_map[data_type][ + generate_unique_id_str_from_object(data_type, obj) + ] + ] = obj.id + if data_type in self_related_ids.keys(): + for field in self_related_ids[data_type].keys(): + to_update = [] + for self_id, other_id in self_related_ids[data_type][field].items(): + self_new_id = 
id_change_map[data_type][self_id] + self_other_id = id_change_map[data_type][other_id] + obj = objects[data_type][self_new_id] + setattr(obj, field, self_other_id) + to_update.append(obj) + print(f"Updating {data_type} (this might take a while)...") + models[data_type].objects.bulk_update(to_update, [field]) + + for data_type in deferred_related_ids.keys(): + if not deferred_related_ids[data_type]: + continue + print(f"Loading deferred related fields for {data_type}...") + for field in deferred_related_ids[data_type].keys(): + related_data_type = related_id_fields[data_type][field] + to_update = [] + for obj_id, related_id in deferred_related_ids[data_type][field].items(): + obj_new_id = id_change_map[data_type][obj_id] + related_new_id = id_change_map[related_data_type][related_id] + obj = objects[data_type][obj_new_id] + setattr(obj, field, related_new_id) + to_update.append(obj) + print(f"Updating {data_type} (this might take a while)...") + models[data_type].objects.bulk_update(to_update, [field]) + + print("Manually loading Topics...") + # Assumes topics are only ever merged, not split + for course_uid_strs in tqdm(topic_id_to_course_uid_strs.values()): + course_ids = { + id_change_map["courses"][identify_id_map["courses"][uid_str]] + for uid_str in course_uid_strs + } + topics = list( + Topic.objects.filter(courses__id__in=course_ids) + .select_related("most_recent") + .distinct() + ) + + courses = Course.objects.filter(id__in=course_ids).select_related("primary_listing") + most_recent = None + for course in courses: + course = course.primary_listing + if not most_recent or course.semester > most_recent.semester: + most_recent = course + + if not topics: + topic = Topic(most_recent=most_recent) + topic.save() + Course.objects.filter(id__in=course_ids).update(topic=topic) + continue + + topic = Topic.merge_all(topics) + + Course.objects.filter(id__in=course_ids).update(topic=topic) + if topic.most_recent != most_recent: + topic.most_recent = most_recent + topic.save() + + recompute_stats( + semesters=sorted(list(semesters)), semesters_precomputed=True, verbose=True + ) + + print(f"Finished loading test data {src}... processed {row_count} rows. 
") diff --git a/backend/courses/management/commands/loadstatus.py b/backend/courses/management/commands/loadstatus.py index 2deddcbd6..cc9f77c0f 100644 --- a/backend/courses/management/commands/loadstatus.py +++ b/backend/courses/management/commands/loadstatus.py @@ -1,38 +1,38 @@ -import logging - -from django.core.management.base import BaseCommand -from tqdm import tqdm - -from courses import registrar -from courses.models import Course, Section -from courses.util import get_course_and_section, get_current_semester - - -def set_all_status(semester=None): - if semester is None: - semester = get_current_semester() - statuses = registrar.get_all_course_status(semester) - for status in tqdm(statuses): - section_code = status.get("section_id_normalized") - if section_code is None: - continue - - try: - _, section = get_course_and_section(section_code, semester) - except (Section.DoesNotExist, Course.DoesNotExist): - continue - section.status = status["status"] - section.save() - - -class Command(BaseCommand): - help = "Load course status for courses in the DB" - - def add_arguments(self, parser): - parser.add_argument("--semester", nargs="?", type=str) - - def handle(self, *args, **kwargs): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - - set_all_status(semester=kwargs["semester"]) +import logging + +from django.core.management.base import BaseCommand +from tqdm import tqdm + +from courses import registrar +from courses.models import Course, Section +from courses.util import get_course_and_section, get_current_semester + + +def set_all_status(semester=None): + if semester is None: + semester = get_current_semester() + statuses = registrar.get_all_course_status(semester) + for status in tqdm(statuses): + section_code = status.get("section_id_normalized") + if section_code is None: + continue + + try: + _, section = get_course_and_section(section_code, semester) + except (Section.DoesNotExist, Course.DoesNotExist): + continue + section.status = status["status"] + section.save() + + +class Command(BaseCommand): + help = "Load course status for courses in the DB" + + def add_arguments(self, parser): + parser.add_argument("--semester", nargs="?", type=str) + + def handle(self, *args, **kwargs): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + + set_all_status(semester=kwargs["semester"]) diff --git a/backend/courses/management/commands/merge_topics.py b/backend/courses/management/commands/merge_topics.py index f53b75907..a5cfffb54 100644 --- a/backend/courses/management/commands/merge_topics.py +++ b/backend/courses/management/commands/merge_topics.py @@ -1,254 +1,254 @@ -import logging -from enum import Enum, auto -from textwrap import dedent - -from django.core.management.base import BaseCommand -from django.db import transaction -from tqdm import tqdm - -from courses.course_similarity.heuristics import ( - description_rejection_heuristics, - title_rejection_heuristics, -) -from courses.management.commands.load_crosswalk import load_crosswalk -from courses.management.commands.reset_topics import fill_topics -from courses.models import Topic -from review.management.commands.clearcache import clear_cache - - -def prompt_for_link_topics(topics): - """ - Prompts the user to confirm or reject a merge of topics. - Returns a boolean representing whether the topics should be merged. 
- """ - for topic in topics: - print(f"\n============> {topic}:\n") - print("\n------\n".join(course.full_str() for course in topic.courses.all())) - print("\n<============") - prompt = input(f"Should the above {len(topics)} topics be merged? (y/N) ") - return prompt.strip().upper() == "Y" - - -def prompt_for_link(course1, course2): - """ - Prompts the user to confirm or reject a possible link between courses. - Returns a boolean representing whether the courses should be linked. - """ - print("\n\n============>\n") - course1.full_str() - print("------") - course2.full_str() - print("\n<============") - prompt = input("Should the above 2 courses be linked? (y/N) ") - print("\n\n") - return prompt.strip().upper() == "Y" - - -def same_course(course_a, course_b): - return any( - course_bc.full_code == course_a.primary_listing.full_code - for course_bc in course_b.primary_listing.listing_set.all() - ) - - -def similar_courses(course_a, course_b): - title_a, title_b = course_a.title.strip().lower(), course_b.title.strip().lower() - if not title_rejection_heuristics(title_a, title_b): - return True - desc_a, desc_b = course_a.description.strip().lower(), course_b.description.strip().lower() - if not description_rejection_heuristics(desc_a, desc_b): - return True - return False - - -class ShouldLinkCoursesResponse(Enum): - DEFINITELY = auto() - MAYBE = auto() - NO = auto() - - -def should_link_courses(course_a, course_b, verbose=True, ignore_inexact=False): - """ - Checks if the two courses should be linked, based on information about those - courses stored in our database. Prompts for user input in the case of possible links, - if in verbose mode (otherwise just logs possible links). If in `ignore_inexact` mode, - completely skips any course merges that are inexact (ie, rely on `similar_courses`), - and therefore will neither prompt for user input nor log. - Returns a response in the form of a ShouldLinkCoursesResponse enum. - """ - if same_course(course_a, course_b): - return ShouldLinkCoursesResponse.DEFINITELY - elif course_a.semester == course_b.semester: - return ShouldLinkCoursesResponse.NO - elif (course_a.code < "5000") != (course_b.code < "5000"): - return ShouldLinkCoursesResponse.NO - elif (not ignore_inexact) and similar_courses(course_a, course_b): - if verbose: - return ( - ShouldLinkCoursesResponse.DEFINITELY - if prompt_for_link(course_a, course_b) - else ShouldLinkCoursesResponse.NO - ) - else: - # Log possible link - logging.info(f"Found possible link between {course_a} and {course_b}") - return ShouldLinkCoursesResponse.MAYBE - return ShouldLinkCoursesResponse.NO - - -def merge_topics(verbose=False, ignore_inexact=False): - """ - Finds and merges Topics that should be merged. - - :param verbose: If verbose=True, this script will print its progress and prompt for user input - upon finding possible (but not definite) links. Otherwise it will run silently and - log found possible links to Sentry (more appropriate if this function is called - from an automated cron job like registrarimport). - :param ignore_inexact: If ignore_inexact=True, will only ever merge if two courses - are exactly matching as judged by `same_course`. `ignore_inexact` means - the user will not be prompted and that there will never be logging. - Corresponds to never checking the similarity of two courses using `similar_courses`. 
- """ - if verbose: - print("Merging topics") - topics = set( - Topic.objects.select_related("most_recent") - .prefetch_related( - "courses", - "courses__primary_listing", - "courses__primary_listing__listing_set", - ) - .all() - ) - dont_link = set() - merge_count = 0 - - for topic in tqdm(list(topics), disable=(not verbose)): - if topic not in topics: - continue - keep_linking = True - while keep_linking: - keep_linking = False - for topic2 in topics: - if topic == topic2: - continue - if topic.most_recent.semester == topic2.most_recent.semester: - continue - merged_courses = list(topic.courses.all()) + list(topic2.courses.all()) - merged_courses.sort(key=lambda c: (c.semester, c.topic_id)) - course_links = [] - last = merged_courses[0] - for course in merged_courses[1:]: - if last.topic_id != course.topic_id: - course_links.append((last, course)) - last = course - if any( - course_a.semester == course_b.semester and not same_course(course_a, course_b) - for course_a, course_b in course_links - ): - continue - should_link = True - for last, course in course_links: - if (last, course) in dont_link or ( - should_link_courses( - last, course, verbose=verbose, ignore_inexact=ignore_inexact - ) - != ShouldLinkCoursesResponse.DEFINITELY - ): - dont_link.add((last, course)) - should_link = False - break - if should_link: - topics.remove(topic) - topics.remove(topic2) - topic = topic.merge_with(topic2) - topics.add(topic) - merge_count += 1 - keep_linking = True - break - - if verbose: - print(f"Finished merging topics (performed {merge_count} merges).") - - -def manual_merge(topic_ids): - invalid_ids = [i for i in topic_ids if not i.isdigit()] - if invalid_ids: - print( - f"The following topic IDs are invalid (non-integer):\n{invalid_ids}\n" "Aborting merge." - ) - return - topic_ids = [int(i) for i in topic_ids] - topics = ( - Topic.objects.filter(id__in=topic_ids) - .select_related("most_recent") - .prefetch_related("courses") - ) - found_ids = topics.values_list("id", flat=True) - not_found_ids = list(set(topic_ids) - set(found_ids)) - if not_found_ids: - print(f"The following topic IDs were not found:\n{not_found_ids}\nAborting merge.") - return - if not prompt_for_link_topics(topics): - print("Aborting merge.") - return - topic = Topic.merge_all(topics) - print(f"Successfully merged {len(topics)} topics into: {topic}.") - - -class Command(BaseCommand): - help = ( - "This script uses a combination of heuristics and user input " - "to merge Topics in the database." - ) - - def add_arguments(self, parser): - parser.add_argument( - "-t", - "--topic-ids", - nargs="*", - help=dedent( - """ - Optionally, specify a (space-separated) list of Topic IDs to merge into a single topic. - You can find Topic IDs from the django admin interface (either by searching through - Topics or by following the topic field from a course entry). - If this argument is omitted, the script will automatically detect merge opportunities - among all Topics, prompting the user for confirmation before merging in each case. - """ - ), - required=False, - ) - parser.add_argument( - "--ignore-inexact", - action="store_true", - help=dedent( - """ - Optionally, ignore inexact matches between courses (ie where there is no match - between course a's code and the codes of all cross listings of course b (including - course b) AND there is no cross walk entry. Corresponds to never checking - the similarity of two courses using `similar_courses`. 
- """ - ), - ) - - def handle(self, *args, **kwargs): - topic_ids = set(kwargs["topic_ids"] or []) - ignore_inexact = kwargs["ignore_inexact"] - - print( - "This script is atomic, meaning either all Topic merges will be comitted to the " - "database, or otherwise if an error is encountered, all changes will be rolled back " - "and the database will remain as it was before the script was run." - ) - - if topic_ids: - manual_merge(topic_ids) - else: - with transaction.atomic(): - fill_topics(verbose=True) - merge_topics(verbose=True, ignore_inexact=ignore_inexact) - load_crosswalk(verbose=True) - - print("Clearing cache") - del_count = clear_cache() - print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") +import logging +from enum import Enum, auto +from textwrap import dedent + +from django.core.management.base import BaseCommand +from django.db import transaction +from tqdm import tqdm + +from courses.course_similarity.heuristics import ( + description_rejection_heuristics, + title_rejection_heuristics, +) +from courses.management.commands.load_crosswalk import load_crosswalk +from courses.management.commands.reset_topics import fill_topics +from courses.models import Topic +from review.management.commands.clearcache import clear_cache + + +def prompt_for_link_topics(topics): + """ + Prompts the user to confirm or reject a merge of topics. + Returns a boolean representing whether the topics should be merged. + """ + for topic in topics: + print(f"\n============> {topic}:\n") + print("\n------\n".join(course.full_str() for course in topic.courses.all())) + print("\n<============") + prompt = input(f"Should the above {len(topics)} topics be merged? (y/N) ") + return prompt.strip().upper() == "Y" + + +def prompt_for_link(course1, course2): + """ + Prompts the user to confirm or reject a possible link between courses. + Returns a boolean representing whether the courses should be linked. + """ + print("\n\n============>\n") + course1.full_str() + print("------") + course2.full_str() + print("\n<============") + prompt = input("Should the above 2 courses be linked? (y/N) ") + print("\n\n") + return prompt.strip().upper() == "Y" + + +def same_course(course_a, course_b): + return any( + course_bc.full_code == course_a.primary_listing.full_code + for course_bc in course_b.primary_listing.listing_set.all() + ) + + +def similar_courses(course_a, course_b): + title_a, title_b = course_a.title.strip().lower(), course_b.title.strip().lower() + if not title_rejection_heuristics(title_a, title_b): + return True + desc_a, desc_b = course_a.description.strip().lower(), course_b.description.strip().lower() + if not description_rejection_heuristics(desc_a, desc_b): + return True + return False + + +class ShouldLinkCoursesResponse(Enum): + DEFINITELY = auto() + MAYBE = auto() + NO = auto() + + +def should_link_courses(course_a, course_b, verbose=True, ignore_inexact=False): + """ + Checks if the two courses should be linked, based on information about those + courses stored in our database. Prompts for user input in the case of possible links, + if in verbose mode (otherwise just logs possible links). If in `ignore_inexact` mode, + completely skips any course merges that are inexact (ie, rely on `similar_courses`), + and therefore will neither prompt for user input nor log. + Returns a response in the form of a ShouldLinkCoursesResponse enum. 
+ """ + if same_course(course_a, course_b): + return ShouldLinkCoursesResponse.DEFINITELY + elif course_a.semester == course_b.semester: + return ShouldLinkCoursesResponse.NO + elif (course_a.code < "5000") != (course_b.code < "5000"): + return ShouldLinkCoursesResponse.NO + elif (not ignore_inexact) and similar_courses(course_a, course_b): + if verbose: + return ( + ShouldLinkCoursesResponse.DEFINITELY + if prompt_for_link(course_a, course_b) + else ShouldLinkCoursesResponse.NO + ) + else: + # Log possible link + logging.info(f"Found possible link between {course_a} and {course_b}") + return ShouldLinkCoursesResponse.MAYBE + return ShouldLinkCoursesResponse.NO + + +def merge_topics(verbose=False, ignore_inexact=False): + """ + Finds and merges Topics that should be merged. + + :param verbose: If verbose=True, this script will print its progress and prompt for user input + upon finding possible (but not definite) links. Otherwise it will run silently and + log found possible links to Sentry (more appropriate if this function is called + from an automated cron job like registrarimport). + :param ignore_inexact: If ignore_inexact=True, will only ever merge if two courses + are exactly matching as judged by `same_course`. `ignore_inexact` means + the user will not be prompted and that there will never be logging. + Corresponds to never checking the similarity of two courses using `similar_courses`. + """ + if verbose: + print("Merging topics") + topics = set( + Topic.objects.select_related("most_recent") + .prefetch_related( + "courses", + "courses__primary_listing", + "courses__primary_listing__listing_set", + ) + .all() + ) + dont_link = set() + merge_count = 0 + + for topic in tqdm(list(topics), disable=(not verbose)): + if topic not in topics: + continue + keep_linking = True + while keep_linking: + keep_linking = False + for topic2 in topics: + if topic == topic2: + continue + if topic.most_recent.semester == topic2.most_recent.semester: + continue + merged_courses = list(topic.courses.all()) + list(topic2.courses.all()) + merged_courses.sort(key=lambda c: (c.semester, c.topic_id)) + course_links = [] + last = merged_courses[0] + for course in merged_courses[1:]: + if last.topic_id != course.topic_id: + course_links.append((last, course)) + last = course + if any( + course_a.semester == course_b.semester and not same_course(course_a, course_b) + for course_a, course_b in course_links + ): + continue + should_link = True + for last, course in course_links: + if (last, course) in dont_link or ( + should_link_courses( + last, course, verbose=verbose, ignore_inexact=ignore_inexact + ) + != ShouldLinkCoursesResponse.DEFINITELY + ): + dont_link.add((last, course)) + should_link = False + break + if should_link: + topics.remove(topic) + topics.remove(topic2) + topic = topic.merge_with(topic2) + topics.add(topic) + merge_count += 1 + keep_linking = True + break + + if verbose: + print(f"Finished merging topics (performed {merge_count} merges).") + + +def manual_merge(topic_ids): + invalid_ids = [i for i in topic_ids if not i.isdigit()] + if invalid_ids: + print( + f"The following topic IDs are invalid (non-integer):\n{invalid_ids}\n" "Aborting merge." 
+ ) + return + topic_ids = [int(i) for i in topic_ids] + topics = ( + Topic.objects.filter(id__in=topic_ids) + .select_related("most_recent") + .prefetch_related("courses") + ) + found_ids = topics.values_list("id", flat=True) + not_found_ids = list(set(topic_ids) - set(found_ids)) + if not_found_ids: + print(f"The following topic IDs were not found:\n{not_found_ids}\nAborting merge.") + return + if not prompt_for_link_topics(topics): + print("Aborting merge.") + return + topic = Topic.merge_all(topics) + print(f"Successfully merged {len(topics)} topics into: {topic}.") + + +class Command(BaseCommand): + help = ( + "This script uses a combination of heuristics and user input " + "to merge Topics in the database." + ) + + def add_arguments(self, parser): + parser.add_argument( + "-t", + "--topic-ids", + nargs="*", + help=dedent( + """ + Optionally, specify a (space-separated) list of Topic IDs to merge into a single topic. + You can find Topic IDs from the django admin interface (either by searching through + Topics or by following the topic field from a course entry). + If this argument is omitted, the script will automatically detect merge opportunities + among all Topics, prompting the user for confirmation before merging in each case. + """ + ), + required=False, + ) + parser.add_argument( + "--ignore-inexact", + action="store_true", + help=dedent( + """ + Optionally, ignore inexact matches between courses (ie where there is no match + between course a's code and the codes of all cross listings of course b (including + course b) AND there is no cross walk entry. Corresponds to never checking + the similarity of two courses using `similar_courses`. + """ + ), + ) + + def handle(self, *args, **kwargs): + topic_ids = set(kwargs["topic_ids"] or []) + ignore_inexact = kwargs["ignore_inexact"] + + print( + "This script is atomic, meaning either all Topic merges will be comitted to the " + "database, or otherwise if an error is encountered, all changes will be rolled back " + "and the database will remain as it was before the script was run." + ) + + if topic_ids: + manual_merge(topic_ids) + else: + with transaction.atomic(): + fill_topics(verbose=True) + merge_topics(verbose=True, ignore_inexact=ignore_inexact) + load_crosswalk(verbose=True) + + print("Clearing cache") + del_count = clear_cache() + print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") diff --git a/backend/courses/management/commands/registrarimport.py b/backend/courses/management/commands/registrarimport.py index 99084e963..a609b2a46 100644 --- a/backend/courses/management/commands/registrarimport.py +++ b/backend/courses/management/commands/registrarimport.py @@ -1,70 +1,70 @@ -import logging - -from botocore.exceptions import NoCredentialsError -from django.core.management.base import BaseCommand -from tqdm import tqdm - -from alert.management.commands.recomputestats import recompute_stats -from courses import registrar -from courses.management.commands.load_crosswalk import load_crosswalk -from courses.management.commands.loadstatus import set_all_status -from courses.management.commands.reset_topics import fill_topics -from courses.models import Department, Section -from courses.util import get_current_semester, in_dev, upsert_course_from_opendata -from review.management.commands.clearcache import clear_cache - - -def registrar_import(semester=None, query=""): - if semester is None: - semester = get_current_semester() - - print("Loading in courses with prefix %s from %s..." 
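# Minimal usage sketch for the merge_topics command above, assuming Django's
# standard call_command API; the Topic IDs shown are placeholders, not values
# taken from this patch.
from django.core.management import call_command

# Automatic mode: fill missing topics, detect and merge Topics across
# semesters (exact matches only when ignore_inexact=True, so no prompting),
# then reload the crosswalk, all inside one transaction.
call_command("merge_topics", ignore_inexact=True)

# Manual mode: merge an explicit list of Topic IDs after a confirmation
# prompt (IDs are validated and looked up before anything is merged).
call_command("merge_topics", topic_ids=["123", "456"])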
% (query, semester)) - results = registrar.get_courses(query, semester) - - missing_sections = set( - Section.objects.filter(course__semester=semester).values_list("full_code", flat=True) - ) - for info in tqdm(results): - upsert_course_from_opendata(info, semester, missing_sections) - Section.objects.filter(full_code__in=missing_sections).update(status="X") - - print("Updating department names...") - departments = registrar.get_departments() - for dept_code, dept_name in tqdm(departments.items()): - dept, _ = Department.objects.get_or_create(code=dept_code) - dept.name = dept_name - dept.save() - - print("Loading course statuses from registrar...") - set_all_status(semester=semester) - - recompute_stats(semesters=semester, verbose=True) - - fill_topics(verbose=True) - try: - load_crosswalk(print_missing=False, verbose=True) - except NoCredentialsError as e: - if not in_dev(): - raise e - print("NOTE: load_crosswalk skipped due to missing AWS credentials.") - - -class Command(BaseCommand): - help = "Load in courses, sections and associated models from the Penn registrar and requirements data sources." # noqa: E501 - - def add_arguments(self, parser): - parser.add_argument("--semester", nargs="?", type=str) - parser.add_argument("--query", nargs="?", default="") - - def handle(self, *args, **kwargs): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - - semester = kwargs.get("semester") - query = kwargs.get("query") - - registrar_import(semester, query) - - print("Clearing cache") - del_count = clear_cache() - print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") +import logging + +from botocore.exceptions import NoCredentialsError +from django.core.management.base import BaseCommand +from tqdm import tqdm + +from alert.management.commands.recomputestats import recompute_stats +from courses import registrar +from courses.management.commands.load_crosswalk import load_crosswalk +from courses.management.commands.loadstatus import set_all_status +from courses.management.commands.reset_topics import fill_topics +from courses.models import Department, Section +from courses.util import get_current_semester, in_dev, upsert_course_from_opendata +from review.management.commands.clearcache import clear_cache + + +def registrar_import(semester=None, query=""): + if semester is None: + semester = get_current_semester() + + print("Loading in courses with prefix %s from %s..." 
% (query, semester)) + results = registrar.get_courses(query, semester) + + missing_sections = set( + Section.objects.filter(course__semester=semester).values_list("full_code", flat=True) + ) + for info in tqdm(results): + upsert_course_from_opendata(info, semester, missing_sections) + Section.objects.filter(full_code__in=missing_sections).update(status="X") + + print("Updating department names...") + departments = registrar.get_departments() + for dept_code, dept_name in tqdm(departments.items()): + dept, _ = Department.objects.get_or_create(code=dept_code) + dept.name = dept_name + dept.save() + + print("Loading course statuses from registrar...") + set_all_status(semester=semester) + + recompute_stats(semesters=semester, verbose=True) + + fill_topics(verbose=True) + try: + load_crosswalk(print_missing=False, verbose=True) + except NoCredentialsError as e: + if not in_dev(): + raise e + print("NOTE: load_crosswalk skipped due to missing AWS credentials.") + + +class Command(BaseCommand): + help = "Load in courses, sections and associated models from the Penn registrar and requirements data sources." # noqa: E501 + + def add_arguments(self, parser): + parser.add_argument("--semester", nargs="?", type=str) + parser.add_argument("--query", nargs="?", default="") + + def handle(self, *args, **kwargs): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + + semester = kwargs.get("semester") + query = kwargs.get("query") + + registrar_import(semester, query) + + print("Clearing cache") + del_count = clear_cache() + print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") diff --git a/backend/courses/management/commands/reset_topics.py b/backend/courses/management/commands/reset_topics.py index f6d6ac8a8..f04dff481 100644 --- a/backend/courses/management/commands/reset_topics.py +++ b/backend/courses/management/commands/reset_topics.py @@ -1,71 +1,71 @@ -import gc -from textwrap import dedent - -from django.core.management.base import BaseCommand -from django.db import transaction -from tqdm import tqdm - -from alert.management.commands.recomputestats import garbage_collect_topics -from courses.management.commands.load_crosswalk import load_crosswalk -from courses.models import Course -from courses.util import get_semesters -from review.management.commands.clearcache import clear_cache - - -def fill_topics(verbose=False): - if verbose: - print("Filling courses without topics...") - filled = 0 - for course in tqdm(Course.objects.filter(topic__isnull=True).order_by("semester")): - if not course.topic: - filled += 1 - course.save() - gc.collect() - if verbose: - print(f"Filled the topic field of {filled} courses.") - - -class Command(BaseCommand): - help = ( - "This script remakes Topics by saving courses in chronological order " - "(relying on the behavior of `Course.save()`), and then runs " - "`load_crosswalk` (all in a single transaction)." - ) - - def add_arguments(self, parser): - parser.add_argument( - "--semesters", - type=str, - help=dedent( - """ - A comma-separated list of semesters for which you want to reset courses' topics, - e.g. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. - If this argument is omitted, no topics will be deleted (topics will only be - computed/linked for courses not already linked to a topic). - If you pass "all" to this argument, this script will delete/recompute all topics. 
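# Sketch of the missing-section bookkeeping in the registrar_import flow
# above: start from every section code already stored for the semester, let
# the upsert step discard each code it still sees in the registrar feed, and
# mark whatever survives as cancelled ("X"). upsert_section below is a
# simplified stand-in for upsert_course_from_opendata, which is assumed to
# remove the codes it processes from the set; the codes and dict shape are
# placeholders.
existing = {"CIS-1200-001", "CIS-1200-002", "CIS-1600-001"}
feed = [{"section_id": "CIS-1200-001"}, {"section_id": "CIS-1600-001"}]

def upsert_section(info, missing):
    missing.discard(info["section_id"])  # seen in the feed, so not missing

for info in feed:
    upsert_section(info, existing)

# Anything left was absent from this run's feed -> would be marked status "X".
assert existing == {"CIS-1200-002"}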
- """ - ), - nargs="?", - default=None, - ) - - def handle(self, *args, **kwargs): - print( - "This script is atomic, meaning either all Topic links will be added to the database, " - "or otherwise if an error is encountered, all changes will be rolled back and the " - "database will remain as it was before the script was run." - ) - - semesters = kwargs["semesters"] and get_semesters(semesters=kwargs["semesters"]) - - with transaction.atomic(): - if semesters: - Course.objects.filter(semester__in=semesters).update(topic=None) - - garbage_collect_topics() - fill_topics(verbose=True) - load_crosswalk(print_missing=False, verbose=True) - - print("Clearing cache") - del_count = clear_cache() - print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") +import gc +from textwrap import dedent + +from django.core.management.base import BaseCommand +from django.db import transaction +from tqdm import tqdm + +from alert.management.commands.recomputestats import garbage_collect_topics +from courses.management.commands.load_crosswalk import load_crosswalk +from courses.models import Course +from courses.util import get_semesters +from review.management.commands.clearcache import clear_cache + + +def fill_topics(verbose=False): + if verbose: + print("Filling courses without topics...") + filled = 0 + for course in tqdm(Course.objects.filter(topic__isnull=True).order_by("semester")): + if not course.topic: + filled += 1 + course.save() + gc.collect() + if verbose: + print(f"Filled the topic field of {filled} courses.") + + +class Command(BaseCommand): + help = ( + "This script remakes Topics by saving courses in chronological order " + "(relying on the behavior of `Course.save()`), and then runs " + "`load_crosswalk` (all in a single transaction)." + ) + + def add_arguments(self, parser): + parser.add_argument( + "--semesters", + type=str, + help=dedent( + """ + A comma-separated list of semesters for which you want to reset courses' topics, + e.g. "2019C,2020A,2020C" for fall 2019, spring 2020, and fall 2020. + If this argument is omitted, no topics will be deleted (topics will only be + computed/linked for courses not already linked to a topic). + If you pass "all" to this argument, this script will delete/recompute all topics. + """ + ), + nargs="?", + default=None, + ) + + def handle(self, *args, **kwargs): + print( + "This script is atomic, meaning either all Topic links will be added to the database, " + "or otherwise if an error is encountered, all changes will be rolled back and the " + "database will remain as it was before the script was run." 
+ ) + + semesters = kwargs["semesters"] and get_semesters(semesters=kwargs["semesters"]) + + with transaction.atomic(): + if semesters: + Course.objects.filter(semester__in=semesters).update(topic=None) + + garbage_collect_topics() + fill_topics(verbose=True) + load_crosswalk(print_missing=False, verbose=True) + + print("Clearing cache") + del_count = clear_cache() + print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") diff --git a/backend/courses/migrations/0001_initial.py b/backend/courses/migrations/0001_initial.py index 3668d2866..e1aeed9a6 100644 --- a/backend/courses/migrations/0001_initial.py +++ b/backend/courses/migrations/0001_initial.py @@ -1,129 +1,129 @@ -# Generated by Django 2.2 on 2019-04-11 02:09 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - initial = True - - dependencies = [] - - operations = [ - migrations.CreateModel( - name="Course", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("created_at", models.DateTimeField(auto_now_add=True)), - ("updated_at", models.DateTimeField(auto_now=True)), - ("department", models.CharField(max_length=8)), - ("code", models.CharField(max_length=8)), - ("semester", models.CharField(max_length=5)), - ("title", models.TextField()), - ("description", models.TextField(blank=True)), - ], - ), - migrations.CreateModel( - name="Instructor", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("created_at", models.DateTimeField(auto_now_add=True)), - ("updated_at", models.DateTimeField(auto_now=True)), - ("name", models.TextField()), - ], - ), - migrations.CreateModel( - name="Section", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("created_at", models.DateTimeField(auto_now_add=True)), - ("updated_at", models.DateTimeField(auto_now=True)), - ("code", models.CharField(max_length=16)), - ( - "status", - models.CharField( - choices=[ - ("O", "Open"), - ("C", "Closed"), - ("X", "Cancelled"), - ("", "Unlisted"), - ], - max_length=4, - ), - ), - ("capacity", models.IntegerField(default=0)), - ("activity", models.CharField(blank=True, max_length=50, null=True)), - ("meeting_times", models.TextField(blank=True)), - ( - "course", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="courses.Course" - ), - ), - ("instructors", models.ManyToManyField(to="courses.Instructor")), - ], - options={ - "unique_together": {("code", "course")}, - }, - ), - migrations.CreateModel( - name="StatusUpdate", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ( - "old_status", - models.CharField( - choices=[ - ("O", "Open"), - ("C", "Closed"), - ("X", "Cancelled"), - ("", "Unlisted"), - ], - max_length=16, - ), - ), - ( - "new_status", - models.CharField( - choices=[ - ("O", "Open"), - ("C", "Closed"), - ("X", "Cancelled"), - ("", "Unlisted"), - ], - max_length=16, - ), - ), - ("created_at", models.DateTimeField(auto_now_add=True)), - ("alert_sent", models.BooleanField()), - ("request_body", models.TextField()), - ( - "section", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="courses.Section" - ), - ), - ], - ), - ] +# Generated by Django 2.2 on 2019-04-11 02:09 + +import django.db.models.deletion +from django.db 
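# Minimal usage sketch for the reset_topics command above, again assuming
# Django's call_command API. The semester codes are placeholders in the
# "<year><term>" format described in the help text (A = spring, C = fall).
from django.core.management import call_command

# Only compute/link topics for courses not yet linked to a Topic:
call_command("reset_topics")

# Delete and recompute topics for specific semesters (or pass "all"):
call_command("reset_topics", semesters="2022A,2022C")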
import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [] + + operations = [ + migrations.CreateModel( + name="Course", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("department", models.CharField(max_length=8)), + ("code", models.CharField(max_length=8)), + ("semester", models.CharField(max_length=5)), + ("title", models.TextField()), + ("description", models.TextField(blank=True)), + ], + ), + migrations.CreateModel( + name="Instructor", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("name", models.TextField()), + ], + ), + migrations.CreateModel( + name="Section", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("code", models.CharField(max_length=16)), + ( + "status", + models.CharField( + choices=[ + ("O", "Open"), + ("C", "Closed"), + ("X", "Cancelled"), + ("", "Unlisted"), + ], + max_length=4, + ), + ), + ("capacity", models.IntegerField(default=0)), + ("activity", models.CharField(blank=True, max_length=50, null=True)), + ("meeting_times", models.TextField(blank=True)), + ( + "course", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="courses.Course" + ), + ), + ("instructors", models.ManyToManyField(to="courses.Instructor")), + ], + options={ + "unique_together": {("code", "course")}, + }, + ), + migrations.CreateModel( + name="StatusUpdate", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ( + "old_status", + models.CharField( + choices=[ + ("O", "Open"), + ("C", "Closed"), + ("X", "Cancelled"), + ("", "Unlisted"), + ], + max_length=16, + ), + ), + ( + "new_status", + models.CharField( + choices=[ + ("O", "Open"), + ("C", "Closed"), + ("X", "Cancelled"), + ("", "Unlisted"), + ], + max_length=16, + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("alert_sent", models.BooleanField()), + ("request_body", models.TextField()), + ( + "section", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="courses.Section" + ), + ), + ], + ), + ] diff --git a/backend/courses/migrations/0002_auto_20190426_2158.py b/backend/courses/migrations/0002_auto_20190426_2158.py index b1561d6a5..ff59c24d1 100644 --- a/backend/courses/migrations/0002_auto_20190426_2158.py +++ b/backend/courses/migrations/0002_auto_20190426_2158.py @@ -1,116 +1,116 @@ -# Generated by Django 2.2 on 2019-04-26 21:58 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0001_initial"), - ] - - operations = [ - migrations.CreateModel( - name="Building", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("code", models.CharField(max_length=4)), - ("name", models.CharField(max_length=80)), - ("latitude", models.FloatField()), - ("longitude", models.FloatField()), - ], - ), - 
migrations.AddField( - model_name="course", - name="primary_listing", - field=models.ForeignKey( - default=None, - on_delete=django.db.models.deletion.CASCADE, - related_name="listing_set", - to="courses.Course", - ), - preserve_default=False, - ), - migrations.AlterField( - model_name="section", - name="activity", - field=models.CharField( - choices=[ - ("CLN", "Clinic"), - ("DIS", "Dissertation"), - ("IND", "Independent Study"), - ("LAB", "Lab"), - ("LEC", "Lecture"), - ("MST", "Masters Thesis"), - ("REC", "Recitation"), - ("SEM", "Seminar"), - ("SRT", "Senior Thesis"), - ("STU", "Studio"), - ("***", "Undefined"), - ], - default="***", - max_length=50, - ), - preserve_default=False, - ), - migrations.AlterUniqueTogether( - name="course", - unique_together={("department", "code", "semester")}, - ), - migrations.CreateModel( - name="Room", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("roomnum", models.CharField(max_length=5)), - ("name", models.CharField(max_length=80)), - ( - "building", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="courses.Building" - ), - ), - ], - options={ - "unique_together": {("building", "roomnum")}, - }, - ), - migrations.CreateModel( - name="Meeting", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("day", models.CharField(max_length=1)), - ("start", models.IntegerField()), - ("end", models.IntegerField()), - ( - "room", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="courses.Room" - ), - ), - ( - "section", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, - related_name="meetings", - to="courses.Section", - ), - ), - ], - ), - ] +# Generated by Django 2.2 on 2019-04-26 21:58 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0001_initial"), + ] + + operations = [ + migrations.CreateModel( + name="Building", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("code", models.CharField(max_length=4)), + ("name", models.CharField(max_length=80)), + ("latitude", models.FloatField()), + ("longitude", models.FloatField()), + ], + ), + migrations.AddField( + model_name="course", + name="primary_listing", + field=models.ForeignKey( + default=None, + on_delete=django.db.models.deletion.CASCADE, + related_name="listing_set", + to="courses.Course", + ), + preserve_default=False, + ), + migrations.AlterField( + model_name="section", + name="activity", + field=models.CharField( + choices=[ + ("CLN", "Clinic"), + ("DIS", "Dissertation"), + ("IND", "Independent Study"), + ("LAB", "Lab"), + ("LEC", "Lecture"), + ("MST", "Masters Thesis"), + ("REC", "Recitation"), + ("SEM", "Seminar"), + ("SRT", "Senior Thesis"), + ("STU", "Studio"), + ("***", "Undefined"), + ], + default="***", + max_length=50, + ), + preserve_default=False, + ), + migrations.AlterUniqueTogether( + name="course", + unique_together={("department", "code", "semester")}, + ), + migrations.CreateModel( + name="Room", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("roomnum", models.CharField(max_length=5)), + ("name", models.CharField(max_length=80)), + ( + "building", + models.ForeignKey( + 
on_delete=django.db.models.deletion.CASCADE, to="courses.Building" + ), + ), + ], + options={ + "unique_together": {("building", "roomnum")}, + }, + ), + migrations.CreateModel( + name="Meeting", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("day", models.CharField(max_length=1)), + ("start", models.IntegerField()), + ("end", models.IntegerField()), + ( + "room", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="courses.Room" + ), + ), + ( + "section", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="meetings", + to="courses.Section", + ), + ), + ], + ), + ] diff --git a/backend/courses/migrations/0003_auto_20190428_1707.py b/backend/courses/migrations/0003_auto_20190428_1707.py index b533df5d4..e58a26f2e 100644 --- a/backend/courses/migrations/0003_auto_20190428_1707.py +++ b/backend/courses/migrations/0003_auto_20190428_1707.py @@ -1,28 +1,28 @@ -# Generated by Django 2.2 on 2019-04-28 17:07 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0002_auto_20190426_2158"), - ] - - operations = [ - migrations.AlterField( - model_name="building", - name="latitude", - field=models.FloatField(blank=True, null=True), - ), - migrations.AlterField( - model_name="building", - name="longitude", - field=models.FloatField(blank=True, null=True), - ), - migrations.AlterField( - model_name="building", - name="name", - field=models.CharField(blank=True, max_length=80), - ), - ] +# Generated by Django 2.2 on 2019-04-28 17:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0002_auto_20190426_2158"), + ] + + operations = [ + migrations.AlterField( + model_name="building", + name="latitude", + field=models.FloatField(blank=True, null=True), + ), + migrations.AlterField( + model_name="building", + name="longitude", + field=models.FloatField(blank=True, null=True), + ), + migrations.AlterField( + model_name="building", + name="name", + field=models.CharField(blank=True, max_length=80), + ), + ] diff --git a/backend/courses/migrations/0004_auto_20190428_1710.py b/backend/courses/migrations/0004_auto_20190428_1710.py index 9e303a407..c90f779d9 100644 --- a/backend/courses/migrations/0004_auto_20190428_1710.py +++ b/backend/courses/migrations/0004_auto_20190428_1710.py @@ -1,27 +1,27 @@ -# Generated by Django 2.2 on 2019-04-28 17:10 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0003_auto_20190428_1707"), - ] - - operations = [ - migrations.RenameField( - model_name="room", - old_name="roomnum", - new_name="number", - ), - migrations.AlterField( - model_name="building", - name="code", - field=models.CharField(max_length=4, unique=True), - ), - migrations.AlterUniqueTogether( - name="room", - unique_together={("building", "number")}, - ), - ] +# Generated by Django 2.2 on 2019-04-28 17:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0003_auto_20190428_1707"), + ] + + operations = [ + migrations.RenameField( + model_name="room", + old_name="roomnum", + new_name="number", + ), + migrations.AlterField( + model_name="building", + name="code", + field=models.CharField(max_length=4, unique=True), + ), + migrations.AlterUniqueTogether( + name="room", + unique_together={("building", 
"number")}, + ), + ] diff --git a/backend/courses/migrations/0005_auto_20190428_1845.py b/backend/courses/migrations/0005_auto_20190428_1845.py index d65de009f..d0e107b16 100644 --- a/backend/courses/migrations/0005_auto_20190428_1845.py +++ b/backend/courses/migrations/0005_auto_20190428_1845.py @@ -1,24 +1,24 @@ -# Generated by Django 2.2 on 2019-04-28 18:45 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0004_auto_20190428_1710"), - ] - - operations = [ - migrations.AddField( - model_name="section", - name="associated_sections", - field=models.ManyToManyField(to="courses.Section"), - ), - migrations.AddField( - model_name="section", - name="credits", - field=models.DecimalField(decimal_places=2, default=1.0, max_digits=3), - preserve_default=False, - ), - ] +# Generated by Django 2.2 on 2019-04-28 18:45 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0004_auto_20190428_1710"), + ] + + operations = [ + migrations.AddField( + model_name="section", + name="associated_sections", + field=models.ManyToManyField(to="courses.Section"), + ), + migrations.AddField( + model_name="section", + name="credits", + field=models.DecimalField(decimal_places=2, default=1.0, max_digits=3), + preserve_default=False, + ), + ] diff --git a/backend/courses/migrations/0006_auto_20190508_0200.py b/backend/courses/migrations/0006_auto_20190508_0200.py index cf246aa58..c20baf419 100644 --- a/backend/courses/migrations/0006_auto_20190508_0200.py +++ b/backend/courses/migrations/0006_auto_20190508_0200.py @@ -1,25 +1,25 @@ -# Generated by Django 2.2 on 2019-05-08 02:00 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0005_auto_20190428_1845"), - ] - - operations = [ - migrations.AlterField( - model_name="course", - name="primary_listing", - field=models.ForeignKey( - blank=True, - null=True, - on_delete=django.db.models.deletion.CASCADE, - related_name="listing_set", - to="courses.Course", - ), - ), - ] +# Generated by Django 2.2 on 2019-05-08 02:00 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0005_auto_20190428_1845"), + ] + + operations = [ + migrations.AlterField( + model_name="course", + name="primary_listing", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="listing_set", + to="courses.Course", + ), + ), + ] diff --git a/backend/courses/migrations/0007_auto_20190508_0202.py b/backend/courses/migrations/0007_auto_20190508_0202.py index 03c08f929..2d5cbc317 100644 --- a/backend/courses/migrations/0007_auto_20190508_0202.py +++ b/backend/courses/migrations/0007_auto_20190508_0202.py @@ -1,18 +1,18 @@ -# Generated by Django 2.2 on 2019-05-08 02:02 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0006_auto_20190508_0200"), - ] - - operations = [ - migrations.AlterField( - model_name="section", - name="credits", - field=models.DecimalField(blank=True, decimal_places=2, max_digits=3, null=True), - ), - ] +# Generated by Django 2.2 on 2019-05-08 02:02 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0006_auto_20190508_0200"), + ] + + 
operations = [ + migrations.AlterField( + model_name="section", + name="credits", + field=models.DecimalField(blank=True, decimal_places=2, max_digits=3, null=True), + ), + ] diff --git a/backend/courses/migrations/0008_auto_20190510_0114.py b/backend/courses/migrations/0008_auto_20190510_0114.py index 18878cdd3..3e197d1c1 100644 --- a/backend/courses/migrations/0008_auto_20190510_0114.py +++ b/backend/courses/migrations/0008_auto_20190510_0114.py @@ -1,59 +1,59 @@ -# Generated by Django 2.2 on 2019-05-10 01:14 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0007_auto_20190508_0202"), - ] - - operations = [ - migrations.CreateModel( - name="Department", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("code", models.CharField(max_length=8, unique=True)), - ("name", models.CharField(max_length=255)), - ], - ), - migrations.CreateModel( - name="Restriction", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("code", models.CharField(max_length=10, unique=True)), - ("description", models.TextField()), - ], - ), - migrations.AddField( - model_name="section", - name="prereq_notes", - field=models.TextField(blank=True), - ), - migrations.AlterField( - model_name="course", - name="department", - field=models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, - related_name="courses", - to="courses.Department", - ), - ), - migrations.AddField( - model_name="section", - name="restrictions", - field=models.ManyToManyField(to="courses.Restriction"), - ), - ] +# Generated by Django 2.2 on 2019-05-10 01:14 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0007_auto_20190508_0202"), + ] + + operations = [ + migrations.CreateModel( + name="Department", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("code", models.CharField(max_length=8, unique=True)), + ("name", models.CharField(max_length=255)), + ], + ), + migrations.CreateModel( + name="Restriction", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("code", models.CharField(max_length=10, unique=True)), + ("description", models.TextField()), + ], + ), + migrations.AddField( + model_name="section", + name="prereq_notes", + field=models.TextField(blank=True), + ), + migrations.AlterField( + model_name="course", + name="department", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="courses", + to="courses.Department", + ), + ), + migrations.AddField( + model_name="section", + name="restrictions", + field=models.ManyToManyField(to="courses.Restriction"), + ), + ] diff --git a/backend/courses/migrations/0009_requirement.py b/backend/courses/migrations/0009_requirement.py index 523851363..94b012237 100644 --- a/backend/courses/migrations/0009_requirement.py +++ b/backend/courses/migrations/0009_requirement.py @@ -1,48 +1,48 @@ -# Generated by Django 2.2 on 2019-05-10 01:46 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0008_auto_20190510_0114"), - ] - - operations = [ - migrations.CreateModel( - 
name="Requirement", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("semester", models.CharField(max_length=5)), - ("code", models.CharField(max_length=10)), - ( - "school", - models.CharField( - choices=[ - ("SEAS", "Engineering"), - ("WH17-", "Wharton 2017-"), - ("WH17+", "Wharton 2017+"), - ("SAS", "College"), - ], - max_length=5, - ), - ), - ("satisfies", models.BooleanField()), - ("name", models.CharField(max_length=255)), - ("courses", models.ManyToManyField(related_name="overrides", to="courses.Course")), - ( - "departments", - models.ManyToManyField(related_name="requirements", to="courses.Department"), - ), - ], - options={ - "unique_together": {("semester", "code", "satisfies")}, - }, - ), - ] +# Generated by Django 2.2 on 2019-05-10 01:46 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0008_auto_20190510_0114"), + ] + + operations = [ + migrations.CreateModel( + name="Requirement", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("semester", models.CharField(max_length=5)), + ("code", models.CharField(max_length=10)), + ( + "school", + models.CharField( + choices=[ + ("SEAS", "Engineering"), + ("WH17-", "Wharton 2017-"), + ("WH17+", "Wharton 2017+"), + ("SAS", "College"), + ], + max_length=5, + ), + ), + ("satisfies", models.BooleanField()), + ("name", models.CharField(max_length=255)), + ("courses", models.ManyToManyField(related_name="overrides", to="courses.Course")), + ( + "departments", + models.ManyToManyField(related_name="requirements", to="courses.Department"), + ), + ], + options={ + "unique_together": {("semester", "code", "satisfies")}, + }, + ), + ] diff --git a/backend/courses/migrations/0010_auto_20190510_0454.py b/backend/courses/migrations/0010_auto_20190510_0454.py index 8e4ff87ae..033f13380 100644 --- a/backend/courses/migrations/0010_auto_20190510_0454.py +++ b/backend/courses/migrations/0010_auto_20190510_0454.py @@ -1,17 +1,17 @@ -# Generated by Django 2.2.1 on 2019-05-10 04:54 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0009_requirement"), - ] - - operations = [ - migrations.AlterUniqueTogether( - name="meeting", - unique_together={("section", "day", "start", "end", "room")}, - ), - ] +# Generated by Django 2.2.1 on 2019-05-10 04:54 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0009_requirement"), + ] + + operations = [ + migrations.AlterUniqueTogether( + name="meeting", + unique_together={("section", "day", "start", "end", "room")}, + ), + ] diff --git a/backend/courses/migrations/0011_auto_20190510_0504.py b/backend/courses/migrations/0011_auto_20190510_0504.py index 391fdff17..d7360453b 100644 --- a/backend/courses/migrations/0011_auto_20190510_0504.py +++ b/backend/courses/migrations/0011_auto_20190510_0504.py @@ -1,23 +1,23 @@ -# Generated by Django 2.2.1 on 2019-05-10 05:04 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0010_auto_20190510_0454"), - ] - - operations = [ - migrations.AlterField( - model_name="meeting", - name="end", - field=models.DecimalField(decimal_places=2, max_digits=4), - ), - migrations.AlterField( - model_name="meeting", - name="start", - 
field=models.DecimalField(decimal_places=2, max_digits=4), - ), - ] +# Generated by Django 2.2.1 on 2019-05-10 05:04 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0010_auto_20190510_0454"), + ] + + operations = [ + migrations.AlterField( + model_name="meeting", + name="end", + field=models.DecimalField(decimal_places=2, max_digits=4), + ), + migrations.AlterField( + model_name="meeting", + name="start", + field=models.DecimalField(decimal_places=2, max_digits=4), + ), + ] diff --git a/backend/courses/migrations/0012_auto_20190510_0559.py b/backend/courses/migrations/0012_auto_20190510_0559.py index 1518f1e08..0c5fd74c2 100644 --- a/backend/courses/migrations/0012_auto_20190510_0559.py +++ b/backend/courses/migrations/0012_auto_20190510_0559.py @@ -1,23 +1,23 @@ -# Generated by Django 2.2.1 on 2019-05-10 05:59 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0011_auto_20190510_0504"), - ] - - operations = [ - migrations.AlterField( - model_name="section", - name="course", - field=models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, - related_name="sections", - to="courses.Course", - ), - ), - ] +# Generated by Django 2.2.1 on 2019-05-10 05:59 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0011_auto_20190510_0504"), + ] + + operations = [ + migrations.AlterField( + model_name="section", + name="course", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="sections", + to="courses.Course", + ), + ), + ] diff --git a/backend/courses/migrations/0013_auto_20190517_0313.py b/backend/courses/migrations/0013_auto_20190517_0313.py index 1909ee7db..67e96fdfb 100644 --- a/backend/courses/migrations/0013_auto_20190517_0313.py +++ b/backend/courses/migrations/0013_auto_20190517_0313.py @@ -1,39 +1,39 @@ -# Generated by Django 2.2.1 on 2019-05-17 03:13 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0012_auto_20190510_0559"), - ] - - operations = [ - migrations.AddField( - model_name="requirement", - name="overrides", - field=models.ManyToManyField(related_name="nonrequirement_set", to="courses.Course"), - ), - migrations.AlterField( - model_name="requirement", - name="courses", - field=models.ManyToManyField(related_name="requirement_set", to="courses.Course"), - ), - migrations.AlterField( - model_name="requirement", - name="school", - field=models.CharField( - choices=[("SEAS", "Engineering"), ("WH+", "Wharton"), ("SAS", "College")], - max_length=5, - ), - ), - migrations.AlterUniqueTogether( - name="requirement", - unique_together={("semester", "code")}, - ), - migrations.RemoveField( - model_name="requirement", - name="satisfies", - ), - ] +# Generated by Django 2.2.1 on 2019-05-17 03:13 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0012_auto_20190510_0559"), + ] + + operations = [ + migrations.AddField( + model_name="requirement", + name="overrides", + field=models.ManyToManyField(related_name="nonrequirement_set", to="courses.Course"), + ), + migrations.AlterField( + model_name="requirement", + name="courses", + field=models.ManyToManyField(related_name="requirement_set", to="courses.Course"), + ), + 
migrations.AlterField( + model_name="requirement", + name="school", + field=models.CharField( + choices=[("SEAS", "Engineering"), ("WH+", "Wharton"), ("SAS", "College")], + max_length=5, + ), + ), + migrations.AlterUniqueTogether( + name="requirement", + unique_together={("semester", "code")}, + ), + migrations.RemoveField( + model_name="requirement", + name="satisfies", + ), + ] diff --git a/backend/courses/migrations/0013_course_full_code.py b/backend/courses/migrations/0013_course_full_code.py index b52469fc1..2e85c8263 100644 --- a/backend/courses/migrations/0013_course_full_code.py +++ b/backend/courses/migrations/0013_course_full_code.py @@ -1,18 +1,18 @@ -# Generated by Django 2.2.1 on 2019-05-12 02:29 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0012_auto_20190510_0559"), - ] - - operations = [ - migrations.AddField( - model_name="course", - name="full_code", - field=models.CharField(blank=True, max_length=16, null=True), - ), - ] +# Generated by Django 2.2.1 on 2019-05-12 02:29 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0012_auto_20190510_0559"), + ] + + operations = [ + migrations.AddField( + model_name="course", + name="full_code", + field=models.CharField(blank=True, max_length=16, null=True), + ), + ] diff --git a/backend/courses/migrations/0014_auto_20190518_1641.py b/backend/courses/migrations/0014_auto_20190518_1641.py index b967e27c9..18444967d 100644 --- a/backend/courses/migrations/0014_auto_20190518_1641.py +++ b/backend/courses/migrations/0014_auto_20190518_1641.py @@ -1,25 +1,25 @@ -# Generated by Django 2.2.1 on 2019-05-18 16:41 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0013_auto_20190517_0313"), - ] - - operations = [ - migrations.AlterField( - model_name="requirement", - name="school", - field=models.CharField( - choices=[("SEAS", "Engineering"), ("WH", "Wharton"), ("SAS", "College")], - max_length=5, - ), - ), - migrations.AlterUniqueTogether( - name="requirement", - unique_together={("semester", "code", "school")}, - ), - ] +# Generated by Django 2.2.1 on 2019-05-18 16:41 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0013_auto_20190517_0313"), + ] + + operations = [ + migrations.AlterField( + model_name="requirement", + name="school", + field=models.CharField( + choices=[("SEAS", "Engineering"), ("WH", "Wharton"), ("SAS", "College")], + max_length=5, + ), + ), + migrations.AlterUniqueTogether( + name="requirement", + unique_together={("semester", "code", "school")}, + ), + ] diff --git a/backend/courses/migrations/0015_merge_20190518_2155.py b/backend/courses/migrations/0015_merge_20190518_2155.py index be36f7da2..bc9f97818 100644 --- a/backend/courses/migrations/0015_merge_20190518_2155.py +++ b/backend/courses/migrations/0015_merge_20190518_2155.py @@ -1,13 +1,13 @@ -# Generated by Django 2.2.1 on 2019-05-18 21:55 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0013_course_full_code"), - ("courses", "0014_auto_20190518_1641"), - ] - - operations = [] +# Generated by Django 2.2.1 on 2019-05-18 21:55 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0013_course_full_code"), + ("courses", "0014_auto_20190518_1641"), + 
] + + operations = [] diff --git a/backend/courses/migrations/0016_auto_20190523_1554.py b/backend/courses/migrations/0016_auto_20190523_1554.py index a4ba89a3a..d843f0d17 100644 --- a/backend/courses/migrations/0016_auto_20190523_1554.py +++ b/backend/courses/migrations/0016_auto_20190523_1554.py @@ -1,18 +1,18 @@ -# Generated by Django 2.2.1 on 2019-05-23 15:54 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0015_merge_20190518_2155"), - ] - - operations = [ - migrations.AlterField( - model_name="instructor", - name="name", - field=models.CharField(max_length=255, unique=True), - ), - ] +# Generated by Django 2.2.1 on 2019-05-23 15:54 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0015_merge_20190518_2155"), + ] + + operations = [ + migrations.AlterField( + model_name="instructor", + name="name", + field=models.CharField(max_length=255, unique=True), + ), + ] diff --git a/backend/courses/migrations/0017_auto_20190525_2235.py b/backend/courses/migrations/0017_auto_20190525_2235.py index 63441ea95..d1f60ed7c 100644 --- a/backend/courses/migrations/0017_auto_20190525_2235.py +++ b/backend/courses/migrations/0017_auto_20190525_2235.py @@ -1,22 +1,22 @@ -# Generated by Django 2.2.1 on 2019-05-25 22:35 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0016_auto_20190523_1554"), - ] - - operations = [ - migrations.AlterField( - model_name="course", - name="full_code", - field=models.CharField(blank=True, max_length=16), - ), - migrations.AlterUniqueTogether( - name="course", - unique_together={("full_code", "semester"), ("department", "code", "semester")}, - ), - ] +# Generated by Django 2.2.1 on 2019-05-25 22:35 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0016_auto_20190523_1554"), + ] + + operations = [ + migrations.AlterField( + model_name="course", + name="full_code", + field=models.CharField(blank=True, max_length=16), + ), + migrations.AlterUniqueTogether( + name="course", + unique_together={("full_code", "semester"), ("department", "code", "semester")}, + ), + ] diff --git a/backend/courses/migrations/0017_auto_20190526_1655.py b/backend/courses/migrations/0017_auto_20190526_1655.py index 6dd727d5f..4d623df65 100644 --- a/backend/courses/migrations/0017_auto_20190526_1655.py +++ b/backend/courses/migrations/0017_auto_20190526_1655.py @@ -1,22 +1,22 @@ -# Generated by Django 2.2.1 on 2019-05-26 16:55 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0016_auto_20190523_1554"), - ] - - operations = [ - migrations.RemoveField( - model_name="section", - name="prereq_notes", - ), - migrations.AddField( - model_name="course", - name="prerequisites", - field=models.TextField(blank=True), - ), - ] +# Generated by Django 2.2.1 on 2019-05-26 16:55 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0016_auto_20190523_1554"), + ] + + operations = [ + migrations.RemoveField( + model_name="section", + name="prereq_notes", + ), + migrations.AddField( + model_name="course", + name="prerequisites", + field=models.TextField(blank=True), + ), + ] diff --git a/backend/courses/migrations/0018_merge_20190526_1901.py 
b/backend/courses/migrations/0018_merge_20190526_1901.py index 50fb25a57..afafdddb8 100644 --- a/backend/courses/migrations/0018_merge_20190526_1901.py +++ b/backend/courses/migrations/0018_merge_20190526_1901.py @@ -1,13 +1,13 @@ -# Generated by Django 2.2.1 on 2019-05-26 19:01 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0017_auto_20190526_1655"), - ("courses", "0017_auto_20190525_2235"), - ] - - operations = [] +# Generated by Django 2.2.1 on 2019-05-26 19:01 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0017_auto_20190526_1655"), + ("courses", "0017_auto_20190525_2235"), + ] + + operations = [] diff --git a/backend/courses/migrations/0019_apikey_apiprivilege.py b/backend/courses/migrations/0019_apikey_apiprivilege.py index 2a7d7a134..5fedc6d05 100644 --- a/backend/courses/migrations/0019_apikey_apiprivilege.py +++ b/backend/courses/migrations/0019_apikey_apiprivilege.py @@ -1,49 +1,49 @@ -# Generated by Django 2.2.5 on 2019-09-26 05:49 - -import uuid - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0018_merge_20190526_1901"), - ] - - operations = [ - migrations.CreateModel( - name="APIPrivilege", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("code", models.CharField(max_length=255, unique=True)), - ("description", models.TextField(blank=True)), - ], - ), - migrations.CreateModel( - name="APIKey", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("email", models.EmailField(max_length=254)), - ( - "code", - models.CharField(blank=True, default=uuid.uuid4, max_length=100, unique=True), - ), - ("active", models.BooleanField(blank=True, default=True)), - ( - "privileges", - models.ManyToManyField(related_name="key_set", to="courses.APIPrivilege"), - ), - ], - ), - ] +# Generated by Django 2.2.5 on 2019-09-26 05:49 + +import uuid + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0018_merge_20190526_1901"), + ] + + operations = [ + migrations.CreateModel( + name="APIPrivilege", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("code", models.CharField(max_length=255, unique=True)), + ("description", models.TextField(blank=True)), + ], + ), + migrations.CreateModel( + name="APIKey", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("email", models.EmailField(max_length=254)), + ( + "code", + models.CharField(blank=True, default=uuid.uuid4, max_length=100, unique=True), + ), + ("active", models.BooleanField(blank=True, default=True)), + ( + "privileges", + models.ManyToManyField(related_name="key_set", to="courses.APIPrivilege"), + ), + ], + ), + ] diff --git a/backend/courses/migrations/0020_auto_20190928_0046.py b/backend/courses/migrations/0020_auto_20190928_0046.py index 538ae9057..9e09e071a 100644 --- a/backend/courses/migrations/0020_auto_20190928_0046.py +++ b/backend/courses/migrations/0020_auto_20190928_0046.py @@ -1,20 +1,20 @@ -# Generated by Django 2.2.5 on 2019-09-28 00:46 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ 
- ("courses", "0019_apikey_apiprivilege"), - ] - - operations = [ - migrations.AlterField( - model_name="apikey", - name="privileges", - field=models.ManyToManyField( - blank=True, related_name="key_set", to="courses.APIPrivilege" - ), - ), - ] +# Generated by Django 2.2.5 on 2019-09-28 00:46 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0019_apikey_apiprivilege"), + ] + + operations = [ + migrations.AlterField( + model_name="apikey", + name="privileges", + field=models.ManyToManyField( + blank=True, related_name="key_set", to="courses.APIPrivilege" + ), + ), + ] diff --git a/backend/courses/migrations/0021_auto_20191019_2140.py b/backend/courses/migrations/0021_auto_20191019_2140.py index 9ad044ddd..042a6a456 100644 --- a/backend/courses/migrations/0021_auto_20191019_2140.py +++ b/backend/courses/migrations/0021_auto_20191019_2140.py @@ -1,39 +1,39 @@ -# Generated by Django 2.2.6 on 2019-10-19 21:40 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0020_auto_20190928_0046"), - ] - - operations = [ - migrations.AlterField( - model_name="requirement", - name="courses", - field=models.ManyToManyField( - blank=True, related_name="requirement_set", to="courses.Course" - ), - ), - migrations.AlterField( - model_name="requirement", - name="departments", - field=models.ManyToManyField( - blank=True, related_name="requirements", to="courses.Department" - ), - ), - migrations.AlterField( - model_name="requirement", - name="overrides", - field=models.ManyToManyField( - blank=True, related_name="nonrequirement_set", to="courses.Course" - ), - ), - migrations.AlterField( - model_name="section", - name="restrictions", - field=models.ManyToManyField(blank=True, to="courses.Restriction"), - ), - ] +# Generated by Django 2.2.6 on 2019-10-19 21:40 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0020_auto_20190928_0046"), + ] + + operations = [ + migrations.AlterField( + model_name="requirement", + name="courses", + field=models.ManyToManyField( + blank=True, related_name="requirement_set", to="courses.Course" + ), + ), + migrations.AlterField( + model_name="requirement", + name="departments", + field=models.ManyToManyField( + blank=True, related_name="requirements", to="courses.Department" + ), + ), + migrations.AlterField( + model_name="requirement", + name="overrides", + field=models.ManyToManyField( + blank=True, related_name="nonrequirement_set", to="courses.Course" + ), + ), + migrations.AlterField( + model_name="section", + name="restrictions", + field=models.ManyToManyField(blank=True, to="courses.Restriction"), + ), + ] diff --git a/backend/courses/migrations/0022_auto_20191029_1927.py b/backend/courses/migrations/0022_auto_20191029_1927.py index ea6977e53..80c42315b 100644 --- a/backend/courses/migrations/0022_auto_20191029_1927.py +++ b/backend/courses/migrations/0022_auto_20191029_1927.py @@ -1,89 +1,89 @@ -# Generated by Django 2.2.6 on 2019-10-29 19:27 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0021_auto_20191019_2140"), - ] - - operations = [ - migrations.AlterField( - model_name="course", - name="full_code", - field=models.CharField(blank=True, db_index=True, max_length=16), - ), - migrations.AlterField( - model_name="course", - name="semester", - field=models.CharField(db_index=True, 
max_length=5), - ), - migrations.AlterField( - model_name="department", - name="code", - field=models.CharField(db_index=True, max_length=8, unique=True), - ), - migrations.AlterField( - model_name="instructor", - name="name", - field=models.CharField(db_index=True, max_length=255, unique=True), - ), - migrations.AlterField( - model_name="requirement", - name="code", - field=models.CharField(db_index=True, max_length=10), - ), - migrations.AlterField( - model_name="requirement", - name="school", - field=models.CharField( - choices=[("SEAS", "Engineering"), ("WH", "Wharton"), ("SAS", "College")], - db_index=True, - max_length=5, - ), - ), - migrations.AlterField( - model_name="requirement", - name="semester", - field=models.CharField(db_index=True, max_length=5), - ), - migrations.AlterField( - model_name="section", - name="activity", - field=models.CharField( - choices=[ - ("CLN", "Clinic"), - ("DIS", "Dissertation"), - ("IND", "Independent Study"), - ("LAB", "Lab"), - ("LEC", "Lecture"), - ("MST", "Masters Thesis"), - ("REC", "Recitation"), - ("SEM", "Seminar"), - ("SRT", "Senior Thesis"), - ("STU", "Studio"), - ("***", "Undefined"), - ], - db_index=True, - max_length=50, - ), - ), - migrations.AlterField( - model_name="section", - name="credits", - field=models.DecimalField( - blank=True, db_index=True, decimal_places=2, max_digits=3, null=True - ), - ), - migrations.AlterField( - model_name="section", - name="status", - field=models.CharField( - choices=[("O", "Open"), ("C", "Closed"), ("X", "Cancelled"), ("", "Unlisted")], - db_index=True, - max_length=4, - ), - ), - ] +# Generated by Django 2.2.6 on 2019-10-29 19:27 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0021_auto_20191019_2140"), + ] + + operations = [ + migrations.AlterField( + model_name="course", + name="full_code", + field=models.CharField(blank=True, db_index=True, max_length=16), + ), + migrations.AlterField( + model_name="course", + name="semester", + field=models.CharField(db_index=True, max_length=5), + ), + migrations.AlterField( + model_name="department", + name="code", + field=models.CharField(db_index=True, max_length=8, unique=True), + ), + migrations.AlterField( + model_name="instructor", + name="name", + field=models.CharField(db_index=True, max_length=255, unique=True), + ), + migrations.AlterField( + model_name="requirement", + name="code", + field=models.CharField(db_index=True, max_length=10), + ), + migrations.AlterField( + model_name="requirement", + name="school", + field=models.CharField( + choices=[("SEAS", "Engineering"), ("WH", "Wharton"), ("SAS", "College")], + db_index=True, + max_length=5, + ), + ), + migrations.AlterField( + model_name="requirement", + name="semester", + field=models.CharField(db_index=True, max_length=5), + ), + migrations.AlterField( + model_name="section", + name="activity", + field=models.CharField( + choices=[ + ("CLN", "Clinic"), + ("DIS", "Dissertation"), + ("IND", "Independent Study"), + ("LAB", "Lab"), + ("LEC", "Lecture"), + ("MST", "Masters Thesis"), + ("REC", "Recitation"), + ("SEM", "Seminar"), + ("SRT", "Senior Thesis"), + ("STU", "Studio"), + ("***", "Undefined"), + ], + db_index=True, + max_length=50, + ), + ), + migrations.AlterField( + model_name="section", + name="credits", + field=models.DecimalField( + blank=True, db_index=True, decimal_places=2, max_digits=3, null=True + ), + ), + migrations.AlterField( + model_name="section", + name="status", + field=models.CharField( + 
choices=[("O", "Open"), ("C", "Closed"), ("X", "Cancelled"), ("", "Unlisted")], + db_index=True, + max_length=4, + ), + ), + ] diff --git a/backend/courses/migrations/0022_section_full_code.py b/backend/courses/migrations/0022_section_full_code.py index 8186d27d4..9c98a231a 100644 --- a/backend/courses/migrations/0022_section_full_code.py +++ b/backend/courses/migrations/0022_section_full_code.py @@ -1,18 +1,18 @@ -# Generated by Django 2.2.6 on 2019-11-01 17:03 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0021_auto_20191019_2140"), - ] - - operations = [ - migrations.AddField( - model_name="section", - name="full_code", - field=models.CharField(blank=True, max_length=32), - ), - ] +# Generated by Django 2.2.6 on 2019-11-01 17:03 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0021_auto_20191019_2140"), + ] + + operations = [ + migrations.AddField( + model_name="section", + name="full_code", + field=models.CharField(blank=True, max_length=32), + ), + ] diff --git a/backend/courses/migrations/0023_auto_20191101_1717.py b/backend/courses/migrations/0023_auto_20191101_1717.py index 19c59d3af..9e34744a5 100644 --- a/backend/courses/migrations/0023_auto_20191101_1717.py +++ b/backend/courses/migrations/0023_auto_20191101_1717.py @@ -1,18 +1,18 @@ -# Generated by Django 2.2.6 on 2019-11-01 17:17 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0022_section_full_code"), - ] - - operations = [ - migrations.AlterField( - model_name="section", - name="full_code", - field=models.CharField(blank=True, db_index=True, max_length=32), - ), - ] +# Generated by Django 2.2.6 on 2019-11-01 17:17 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0022_section_full_code"), + ] + + operations = [ + migrations.AlterField( + model_name="section", + name="full_code", + field=models.CharField(blank=True, db_index=True, max_length=32), + ), + ] diff --git a/backend/courses/migrations/0024_merge_20191103_1941.py b/backend/courses/migrations/0024_merge_20191103_1941.py index ab1bbbf63..eda551d25 100644 --- a/backend/courses/migrations/0024_merge_20191103_1941.py +++ b/backend/courses/migrations/0024_merge_20191103_1941.py @@ -1,13 +1,13 @@ -# Generated by Django 2.2.6 on 2019-11-03 19:41 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0022_auto_20191029_1927"), - ("courses", "0023_auto_20191101_1717"), - ] - - operations = [] +# Generated by Django 2.2.6 on 2019-11-03 19:41 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0022_auto_20191029_1927"), + ("courses", "0023_auto_20191101_1717"), + ] + + operations = [] diff --git a/backend/courses/migrations/0024_userdata.py b/backend/courses/migrations/0024_userdata.py index a7172ddd7..4618404a8 100644 --- a/backend/courses/migrations/0024_userdata.py +++ b/backend/courses/migrations/0024_userdata.py @@ -1,35 +1,35 @@ -# Generated by Django 2.2.5 on 2019-11-10 18:57 - -import django.db.models.deletion -from django.conf import settings -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ("courses", "0023_auto_20191101_1717"), - 
] - - operations = [ - migrations.CreateModel( - name="UserData", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("email", models.EmailField(blank=True, max_length=254, null=True)), - ("phone", models.CharField(blank=True, max_length=100, null=True)), - ( - "user", - models.OneToOneField( - on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL - ), - ), - ], - ), - ] +# Generated by Django 2.2.5 on 2019-11-10 18:57 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("courses", "0023_auto_20191101_1717"), + ] + + operations = [ + migrations.CreateModel( + name="UserData", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("email", models.EmailField(blank=True, max_length=254, null=True)), + ("phone", models.CharField(blank=True, max_length=100, null=True)), + ( + "user", + models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL + ), + ), + ], + ), + ] diff --git a/backend/courses/migrations/0025_auto_20191117_1309.py b/backend/courses/migrations/0025_auto_20191117_1309.py index 268265260..dd137fbec 100644 --- a/backend/courses/migrations/0025_auto_20191117_1309.py +++ b/backend/courses/migrations/0025_auto_20191117_1309.py @@ -1,24 +1,24 @@ -# Generated by Django 2.2.5 on 2019-11-17 18:09 - -import django.db.models.deletion -from django.conf import settings -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0024_userdata"), - ] - - operations = [ - migrations.AlterField( - model_name="userdata", - name="user", - field=models.OneToOneField( - on_delete=django.db.models.deletion.CASCADE, - related_name="user", - to=settings.AUTH_USER_MODEL, - ), - ), - ] +# Generated by Django 2.2.5 on 2019-11-17 18:09 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0024_userdata"), + ] + + operations = [ + migrations.AlterField( + model_name="userdata", + name="user", + field=models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, + related_name="user", + to=settings.AUTH_USER_MODEL, + ), + ), + ] diff --git a/backend/courses/migrations/0026_merge_20191117_1420.py b/backend/courses/migrations/0026_merge_20191117_1420.py index 400a62cca..03cdc602e 100644 --- a/backend/courses/migrations/0026_merge_20191117_1420.py +++ b/backend/courses/migrations/0026_merge_20191117_1420.py @@ -1,13 +1,13 @@ -# Generated by Django 2.2.5 on 2019-11-17 19:20 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0025_auto_20191117_1309"), - ("courses", "0024_merge_20191103_1941"), - ] - - operations = [] +# Generated by Django 2.2.5 on 2019-11-17 19:20 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0025_auto_20191117_1309"), + ("courses", "0024_merge_20191103_1941"), + ] + + operations = [] diff --git a/backend/courses/migrations/0027_auto_20191227_1213.py b/backend/courses/migrations/0027_auto_20191227_1213.py index 7dd937b55..1d395ec48 100644 --- 
a/backend/courses/migrations/0027_auto_20191227_1213.py +++ b/backend/courses/migrations/0027_auto_20191227_1213.py @@ -1,56 +1,56 @@ -# Generated by Django 2.2.9 on 2019-12-27 11:13 - -import django.db.models.deletion -import django.utils.timezone -from django.conf import settings -from django.db import migrations, models - -import courses.models - - -class Migration(migrations.Migration): - - dependencies = [ - migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ("courses", "0026_merge_20191117_1420"), - ] - - operations = [ - migrations.CreateModel( - name="UserProfile", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("email", models.EmailField(blank=True, max_length=254, null=True)), - ( - "phone", - models.CharField( - blank=True, - max_length=100, - null=True, - validators=[courses.models.UserProfile.validate_phone], - ), - ), - ( - "user", - models.OneToOneField( - on_delete=django.db.models.deletion.CASCADE, - related_name="user", - to=settings.AUTH_USER_MODEL, - ), - ), - ], - ), - migrations.AlterField( - model_name="statusupdate", - name="created_at", - field=models.DateTimeField(default=django.utils.timezone.now), - ), - migrations.DeleteModel( - name="UserData", - ), - ] +# Generated by Django 2.2.9 on 2019-12-27 11:13 + +import django.db.models.deletion +import django.utils.timezone +from django.conf import settings +from django.db import migrations, models + +import courses.models + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("courses", "0026_merge_20191117_1420"), + ] + + operations = [ + migrations.CreateModel( + name="UserProfile", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("email", models.EmailField(blank=True, max_length=254, null=True)), + ( + "phone", + models.CharField( + blank=True, + max_length=100, + null=True, + validators=[courses.models.UserProfile.validate_phone], + ), + ), + ( + "user", + models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, + related_name="user", + to=settings.AUTH_USER_MODEL, + ), + ), + ], + ), + migrations.AlterField( + model_name="statusupdate", + name="created_at", + field=models.DateTimeField(default=django.utils.timezone.now), + ), + migrations.DeleteModel( + name="UserData", + ), + ] diff --git a/backend/courses/migrations/0028_auto_20200131_1619.py b/backend/courses/migrations/0028_auto_20200131_1619.py index d5c70a4f4..9e98f3426 100644 --- a/backend/courses/migrations/0028_auto_20200131_1619.py +++ b/backend/courses/migrations/0028_auto_20200131_1619.py @@ -1,24 +1,24 @@ -# Generated by Django 2.2.9 on 2020-01-31 21:19 - -import django.db.models.deletion -from django.conf import settings -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0027_auto_20191227_1213"), - ] - - operations = [ - migrations.AlterField( - model_name="userprofile", - name="user", - field=models.OneToOneField( - on_delete=django.db.models.deletion.CASCADE, - related_name="profile", - to=settings.AUTH_USER_MODEL, - ), - ), - ] +# Generated by Django 2.2.9 on 2020-01-31 21:19 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0027_auto_20191227_1213"), + ] + + operations = [ + 
migrations.AlterField( + model_name="userprofile", + name="user", + field=models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, + related_name="profile", + to=settings.AUTH_USER_MODEL, + ), + ), + ] diff --git a/backend/courses/migrations/0029_auto_20200512_1525.py b/backend/courses/migrations/0029_auto_20200512_1525.py index c0115cde0..71f89912b 100644 --- a/backend/courses/migrations/0029_auto_20200512_1525.py +++ b/backend/courses/migrations/0029_auto_20200512_1525.py @@ -1,36 +1,36 @@ -# Generated by Django 3.0.6 on 2020-05-12 19:25 - -import django.db.models.deletion -from django.conf import settings -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ("courses", "0028_auto_20200131_1619"), - ] - - operations = [ - migrations.AddField( - model_name="instructor", - name="user", - field=models.ForeignKey( - blank=True, - null=True, - on_delete=django.db.models.deletion.SET_NULL, - to=settings.AUTH_USER_MODEL, - ), - ), - migrations.AlterField( - model_name="course", - name="code", - field=models.CharField(db_index=True, max_length=8), - ), - migrations.AlterField( - model_name="section", - name="code", - field=models.CharField(db_index=True, max_length=16), - ), - ] +# Generated by Django 3.0.6 on 2020-05-12 19:25 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("courses", "0028_auto_20200131_1619"), + ] + + operations = [ + migrations.AddField( + model_name="instructor", + name="user", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to=settings.AUTH_USER_MODEL, + ), + ), + migrations.AlterField( + model_name="course", + name="code", + field=models.CharField(db_index=True, max_length=8), + ), + migrations.AlterField( + model_name="section", + name="code", + field=models.CharField(db_index=True, max_length=16), + ), + ] diff --git a/backend/courses/migrations/0030_auto_20201002_0714.py b/backend/courses/migrations/0030_auto_20201002_0714.py index 00b1d6d3c..3612140a7 100644 --- a/backend/courses/migrations/0030_auto_20201002_0714.py +++ b/backend/courses/migrations/0030_auto_20201002_0714.py @@ -1,495 +1,495 @@ -# Generated by Django 3.1.1 on 2020-10-02 11:14 - -import django.db.models.deletion -from django.conf import settings -from django.db import migrations, models - -import courses.models - - -class Migration(migrations.Migration): - - dependencies = [ - migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ("courses", "0029_auto_20200512_1525"), - ] - - operations = [ - migrations.AlterField( - model_name="building", - name="code", - field=models.CharField( - help_text="\nThe building code, for instance 570 for the Towne Building. To find the building code\nof a certain building, visit the [Penn Facilities Website](https://bit.ly/2BfE2FE).\n", - max_length=4, - unique=True, - ), - ), - migrations.AlterField( - model_name="building", - name="latitude", - field=models.FloatField( - blank=True, - help_text="\nThe latitude of the building, in the signed decimal degrees format (global range of\n[-90.0, 90.0]), e.g. 
39.961380 for the Towne Building.\n", - null=True, - ), - ), - migrations.AlterField( - model_name="building", - name="longitude", - field=models.FloatField( - blank=True, - help_text="\nThe longitude of the building, in the signed decimal degrees format (global range of\n[-180.0, 180.0]), e.g. -75.176773 for the Towne Building.\n", - null=True, - ), - ), - migrations.AlterField( - model_name="building", - name="name", - field=models.CharField( - blank=True, - help_text="\nThe name of the building, for instance 'Towne Building' for the Towne Building. For a\nlist of building names, visit the [Penn Facilities Website](https://bit.ly/2BfE2FE).\n", - max_length=80, - ), - ), - migrations.AlterField( - model_name="course", - name="code", - field=models.CharField( - db_index=True, help_text="The course code, e.g. '120' for CIS-120.", max_length=8 - ), - ), - migrations.AlterField( - model_name="course", - name="department", - field=models.ForeignKey( - help_text="\nThe Department object to which the course belongs, e.g. the CIS Department object\nfor CIS-120.\n", - on_delete=django.db.models.deletion.CASCADE, - related_name="courses", - to="courses.department", - ), - ), - migrations.AlterField( - model_name="course", - name="description", - field=models.TextField( - blank=True, - help_text="\nThe description of the course, e.g. 'A fast-paced introduction to the fundamental concepts\nof programming... [etc.]' for CIS-120.\n", - ), - ), - migrations.AlterField( - model_name="course", - name="full_code", - field=models.CharField( - blank=True, - db_index=True, - help_text="The dash-joined department and code of the course, e.g. 'CIS-120' for CIS-120.", - max_length=16, - ), - ), - migrations.AlterField( - model_name="course", - name="prerequisites", - field=models.TextField( - blank=True, - help_text="Text describing the prereqs for a course, e.g. 'CIS 120, 160' for CIS-121.", - ), - ), - migrations.AlterField( - model_name="course", - name="primary_listing", - field=models.ForeignKey( - blank=True, - help_text="\nThe primary Course object with which this course is crosslisted (all crosslisted courses\nhave a primary listing). The set of crosslisted courses to which this course belongs can\nthus be accessed with the related field listing_set on the primary_listing course.\n", - null=True, - on_delete=django.db.models.deletion.CASCADE, - related_name="listing_set", - to="courses.course", - ), - ), - migrations.AlterField( - model_name="course", - name="semester", - field=models.CharField( - db_index=True, - help_text="\nThe semester of the course (of the form YYYYx where x is A [for spring],\nB [summer], or C [fall]), e.g. 2019C for fall 2019.\n", - max_length=5, - ), - ), - migrations.AlterField( - model_name="course", - name="title", - field=models.TextField( - help_text="\nThe title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.\n" - ), - ), - migrations.AlterField( - model_name="department", - name="code", - field=models.CharField( - db_index=True, - help_text="The department code, e.g. 'CIS' for the CIS department.", - max_length=8, - unique=True, - ), - ), - migrations.AlterField( - model_name="department", - name="name", - field=models.CharField( - help_text="\nThe name of the department, e.g. 
'Computer and Information Sci' for the CIS department.\n", - max_length=255, - ), - ), - migrations.AlterField( - model_name="instructor", - name="name", - field=models.CharField( - db_index=True, - help_text="The full name of the instructor.", - max_length=255, - unique=True, - ), - ), - migrations.AlterField( - model_name="instructor", - name="user", - field=models.ForeignKey( - blank=True, - help_text="The instructor's Penn Labs Accounts User object.", - null=True, - on_delete=django.db.models.deletion.SET_NULL, - to=settings.AUTH_USER_MODEL, - ), - ), - migrations.AlterField( - model_name="meeting", - name="day", - field=models.CharField( - help_text="The single day on which the meeting takes place (one of M, T, W, R, or F).", - max_length=1, - ), - ), - migrations.AlterField( - model_name="meeting", - name="end", - field=models.DecimalField( - decimal_places=2, - help_text="The end time of the meeting; hh:mm is formatted as hh.mm = h+mm/100.", - max_digits=4, - ), - ), - migrations.AlterField( - model_name="meeting", - name="room", - field=models.ForeignKey( - help_text="The Room object in which the meeting is taking place.", - on_delete=django.db.models.deletion.CASCADE, - to="courses.room", - ), - ), - migrations.AlterField( - model_name="meeting", - name="section", - field=models.ForeignKey( - help_text="The Section object to which this class meeting belongs.", - on_delete=django.db.models.deletion.CASCADE, - related_name="meetings", - to="courses.section", - ), - ), - migrations.AlterField( - model_name="meeting", - name="start", - field=models.DecimalField( - decimal_places=2, - help_text="The start time of the meeting; hh:mm is formatted as hh.mm = h+mm/100.", - max_digits=4, - ), - ), - migrations.AlterField( - model_name="requirement", - name="code", - field=models.CharField( - db_index=True, - help_text="\nThe code identifying this requirement, e.g. 'MFR' for 'Formal Reasoning Course',\nan SAS requirement satisfied by CIS-120.\n", - max_length=10, - ), - ), - migrations.AlterField( - model_name="requirement", - name="courses", - field=models.ManyToManyField( - blank=True, - help_text="\n Individual Course objects which satisfy this requirement (not necessarily\n comprehensive, as often entire departments will satisfy the requirement, but not\n every course in the department will necessarily be added to this set). For example,\n CIS 398 would be in the courses set for the NATSCI engineering requirement, since\n it is the only CIS class that satisfies that requirement.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", - related_name="requirement_set", - to="courses.Course", - ), - ), - migrations.AlterField( - model_name="requirement", - name="departments", - field=models.ManyToManyField( - blank=True, - help_text="\n All the Department objects for which any course in that department\n (if not in overrides) would satisfy this requirement. Usually if a whole department\n satisfies a requirement, individual courses from that department will not be added to\n the courses set. 
Also, to specify specific courses which do not satisfy the requirement\n (even if their department is in the departments set), the overrides set is used.\n For example, CIS classes count as engineering (ENG) courses, but CIS-125 is NOT an\n engineering class, so for the ENG requirement, CIS-125 would be in the overrides\n set even though the CIS Department object would be in the departments set.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", - related_name="requirements", - to="courses.Department", - ), - ), - migrations.AlterField( - model_name="requirement", - name="name", - field=models.CharField( - help_text="\nThe name of the requirement, e.g. 'Formal Reasoning Course', an SAS requirement\nsatisfied by CIS-120.\n", - max_length=255, - ), - ), - migrations.AlterField( - model_name="requirement", - name="overrides", - field=models.ManyToManyField( - blank=True, - help_text="\n Individual Course objects which do not satisfy this requirement. This set\n is usually used to add exceptions to departments which satisfy requirements.\n For example, CIS classes count as engineering (ENG) courses, but CIS-125 is NOT an\n engineering class, so for the ENG requirement, CIS-125 would be in the overrides\n set even though the CIS Department would be in the departments set.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", - related_name="nonrequirement_set", - to="courses.Course", - ), - ), - migrations.AlterField( - model_name="requirement", - name="school", - field=models.CharField( - choices=[("SEAS", "Engineering"), ("WH", "Wharton"), ("SAS", "College")], - db_index=True, - help_text='\nWhat school this requirement belongs to, e.g. \'SAS\' for the SAS \'Formal Reasoning Course\'\nrequirement satisfied by CIS-120. Options and meanings:\n
    "SEAS""Engineering"
    "WH""Wharton"
    "SAS""College"
    ', - max_length=5, - ), - ), - migrations.AlterField( - model_name="requirement", - name="semester", - field=models.CharField( - db_index=True, - help_text="\nThe semester of the requirement (of the form YYYYx where x is A [for spring], B [summer],\nor C [fall]), e.g. 2019C for fall 2019. We organize requirements by semester so that we\ndon't get huge related sets which don't give particularly good info.\n", - max_length=5, - ), - ), - migrations.AlterField( - model_name="restriction", - name="code", - field=models.CharField( - help_text="\nA registration restriction control code, for instance 'PDP' for CIS-121 (permission\nrequired from dept for registration). See [bit.ly/3eu17m2](https://bit.ly/3eu17m2)\nfor all options.\n", - max_length=10, - unique=True, - ), - ), - migrations.AlterField( - model_name="restriction", - name="description", - field=models.TextField( - help_text="\nThe registration restriction description, e.g. 'Permission Needed From Department'\nfor the PDP restriction (on CIS-121, for example). See\n[bit.ly/3eu17m2](https://bit.ly/3eu17m2) for all options.\n" - ), - ), - migrations.AlterField( - model_name="room", - name="building", - field=models.ForeignKey( - help_text="\nThe Building object in which the room is located, e.g. the Levine Hall Building\nobject for Wu and Chen Auditorium (rm 101).\n", - on_delete=django.db.models.deletion.CASCADE, - to="courses.building", - ), - ), - migrations.AlterField( - model_name="room", - name="name", - field=models.CharField( - help_text="The room name (optional, empty string if none), e.g. 'Wu and Chen Auditorium'.", - max_length=80, - ), - ), - migrations.AlterField( - model_name="room", - name="number", - field=models.CharField( - help_text="The room number, e.g. 101 for Wu and Chen Auditorium in Levine.", - max_length=5, - ), - ), - migrations.AlterField( - model_name="section", - name="activity", - field=models.CharField( - choices=[ - ("CLN", "Clinic"), - ("DIS", "Dissertation"), - ("IND", "Independent Study"), - ("LAB", "Lab"), - ("LEC", "Lecture"), - ("MST", "Masters Thesis"), - ("REC", "Recitation"), - ("SEM", "Seminar"), - ("SRT", "Senior Thesis"), - ("STU", "Studio"), - ("***", "Undefined"), - ], - db_index=True, - help_text='The section activity, e.g. \'LEC\' for CIS-120-001 (2020A). Options and meanings:
    "CLN""Clinic"
    "DIS""Dissertation"
    "IND""Independent Study"
    "LAB""Lab"
    "LEC""Lecture"
    "MST""Masters Thesis"
    "REC""Recitation"
    "SEM""Seminar"
    "SRT""Senior Thesis"
    "STU""Studio"
    "***""Undefined"
    ', - max_length=50, - ), - ), - migrations.AlterField( - model_name="section", - name="associated_sections", - field=models.ManyToManyField( - help_text="\nA list of all sections associated with the Course which this section belongs to; e.g. for\nCIS-120-001, all of the lecture and recitation sections for CIS-120 (including CIS-120-001)\nin the same semester.\n", - to="courses.Section", - ), - ), - migrations.AlterField( - model_name="section", - name="capacity", - field=models.IntegerField( - default=0, - help_text="The number of allowed registrations for this section, e.g. 220 for CIS-120-001 (2020A).", - ), - ), - migrations.AlterField( - model_name="section", - name="code", - field=models.CharField( - db_index=True, - help_text="The section code, e.g. '001' for the section CIS-120-001.", - max_length=16, - ), - ), - migrations.AlterField( - model_name="section", - name="course", - field=models.ForeignKey( - help_text="\nThe Course object to which this section belongs, e.g. the CIS-120 Course object for\nCIS-120-001.\n", - on_delete=django.db.models.deletion.CASCADE, - related_name="sections", - to="courses.course", - ), - ), - migrations.AlterField( - model_name="section", - name="credits", - field=models.DecimalField( - blank=True, - db_index=True, - decimal_places=2, - help_text="The number of credits this section is worth.", - max_digits=3, - null=True, - ), - ), - migrations.AlterField( - model_name="section", - name="full_code", - field=models.CharField( - blank=True, - db_index=True, - help_text="\nThe full code of the section, in the form '{dept code}-{course code}-{section code}',\ne.g. 'CIS-120-001' for the 001 section of CIS-120.\n", - max_length=32, - ), - ), - migrations.AlterField( - model_name="section", - name="instructors", - field=models.ManyToManyField( - help_text="The Instructor object(s) of the instructor(s) teaching the section.", - to="courses.Instructor", - ), - ), - migrations.AlterField( - model_name="section", - name="meeting_times", - field=models.TextField( - blank=True, - help_text='\nA JSON-stringified list of meeting times of the form\n\'{days code} {start time} - {end time}\', e.g.\n\'["MWF 09:00 AM - 10:00 AM","F 11:00 AM - 12:00 PM","T 05:00 PM - 06:00 PM"]\' for\nPHYS-151-001 (2020A). Each letter of the days code is of the form M, T, W, R, F for each\nday of the work week, respectively (and multiple days are combined with concatenation).\nTo access the Meeting objects for this section, the related field `meetings` can be used.\n', - ), - ), - migrations.AlterField( - model_name="section", - name="restrictions", - field=models.ManyToManyField( - blank=True, - help_text="All registration Restriction objects to which this section is subject.", - to="courses.Restriction", - ), - ), - migrations.AlterField( - model_name="section", - name="status", - field=models.CharField( - choices=[("O", "Open"), ("C", "Closed"), ("X", "Cancelled"), ("", "Unlisted")], - db_index=True, - help_text='The registration status of the section. Options and meanings:
    "O""Open"
    "C""Closed"
    "X""Cancelled"
    """Unlisted"
    ', - max_length=4, - ), - ), - migrations.AlterField( - model_name="statusupdate", - name="alert_sent", - field=models.BooleanField( - help_text="Was an alert was sent to a User as a result of this status update?" - ), - ), - migrations.AlterField( - model_name="statusupdate", - name="new_status", - field=models.CharField( - choices=[("O", "Open"), ("C", "Closed"), ("X", "Cancelled"), ("", "Unlisted")], - help_text='The new status code (to which the section changed). Options and meanings:
    "O""Open"
    "C""Closed"
    "X""Cancelled"
    """Unlisted"
    ', - max_length=16, - ), - ), - migrations.AlterField( - model_name="statusupdate", - name="old_status", - field=models.CharField( - choices=[("O", "Open"), ("C", "Closed"), ("X", "Cancelled"), ("", "Unlisted")], - help_text='The old status code (from which the section changed). Options and meanings:
    "O""Open"
    "C""Closed"
    "X""Cancelled"
    """Unlisted"
    ', - max_length=16, - ), - ), - migrations.AlterField( - model_name="statusupdate", - name="section", - field=models.ForeignKey( - help_text="The section which this status update applies to.", - on_delete=django.db.models.deletion.CASCADE, - to="courses.section", - ), - ), - migrations.AlterField( - model_name="userprofile", - name="email", - field=models.EmailField( - blank=True, - help_text="The email of the User. Defaults to null.", - max_length=254, - null=True, - ), - ), - migrations.AlterField( - model_name="userprofile", - name="phone", - field=models.CharField( - blank=True, - help_text="\nThe phone number of the user. Defaults to null.\nThe phone number will be stored in the E164 format, but any form parseable by the\n[phonenumbers library](https://pypi.org/project/phonenumbers/)\nwill be accepted and converted to E164 format automatically upon saving.\n", - max_length=100, - null=True, - validators=[courses.models.UserProfile.validate_phone], - ), - ), - migrations.AlterField( - model_name="userprofile", - name="user", - field=models.OneToOneField( - help_text="The User object to which this User Profile object belongs.", - on_delete=django.db.models.deletion.CASCADE, - related_name="profile", - to=settings.AUTH_USER_MODEL, - ), - ), - ] +# Generated by Django 3.1.1 on 2020-10-02 11:14 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + +import courses.models + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("courses", "0029_auto_20200512_1525"), + ] + + operations = [ + migrations.AlterField( + model_name="building", + name="code", + field=models.CharField( + help_text="\nThe building code, for instance 570 for the Towne Building. To find the building code\nof a certain building, visit the [Penn Facilities Website](https://bit.ly/2BfE2FE).\n", + max_length=4, + unique=True, + ), + ), + migrations.AlterField( + model_name="building", + name="latitude", + field=models.FloatField( + blank=True, + help_text="\nThe latitude of the building, in the signed decimal degrees format (global range of\n[-90.0, 90.0]), e.g. 39.961380 for the Towne Building.\n", + null=True, + ), + ), + migrations.AlterField( + model_name="building", + name="longitude", + field=models.FloatField( + blank=True, + help_text="\nThe longitude of the building, in the signed decimal degrees format (global range of\n[-180.0, 180.0]), e.g. -75.176773 for the Towne Building.\n", + null=True, + ), + ), + migrations.AlterField( + model_name="building", + name="name", + field=models.CharField( + blank=True, + help_text="\nThe name of the building, for instance 'Towne Building' for the Towne Building. For a\nlist of building names, visit the [Penn Facilities Website](https://bit.ly/2BfE2FE).\n", + max_length=80, + ), + ), + migrations.AlterField( + model_name="course", + name="code", + field=models.CharField( + db_index=True, help_text="The course code, e.g. '120' for CIS-120.", max_length=8 + ), + ), + migrations.AlterField( + model_name="course", + name="department", + field=models.ForeignKey( + help_text="\nThe Department object to which the course belongs, e.g. the CIS Department object\nfor CIS-120.\n", + on_delete=django.db.models.deletion.CASCADE, + related_name="courses", + to="courses.department", + ), + ), + migrations.AlterField( + model_name="course", + name="description", + field=models.TextField( + blank=True, + help_text="\nThe description of the course, e.g. 
'A fast-paced introduction to the fundamental concepts\nof programming... [etc.]' for CIS-120.\n", + ), + ), + migrations.AlterField( + model_name="course", + name="full_code", + field=models.CharField( + blank=True, + db_index=True, + help_text="The dash-joined department and code of the course, e.g. 'CIS-120' for CIS-120.", + max_length=16, + ), + ), + migrations.AlterField( + model_name="course", + name="prerequisites", + field=models.TextField( + blank=True, + help_text="Text describing the prereqs for a course, e.g. 'CIS 120, 160' for CIS-121.", + ), + ), + migrations.AlterField( + model_name="course", + name="primary_listing", + field=models.ForeignKey( + blank=True, + help_text="\nThe primary Course object with which this course is crosslisted (all crosslisted courses\nhave a primary listing). The set of crosslisted courses to which this course belongs can\nthus be accessed with the related field listing_set on the primary_listing course.\n", + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="listing_set", + to="courses.course", + ), + ), + migrations.AlterField( + model_name="course", + name="semester", + field=models.CharField( + db_index=True, + help_text="\nThe semester of the course (of the form YYYYx where x is A [for spring],\nB [summer], or C [fall]), e.g. 2019C for fall 2019.\n", + max_length=5, + ), + ), + migrations.AlterField( + model_name="course", + name="title", + field=models.TextField( + help_text="\nThe title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.\n" + ), + ), + migrations.AlterField( + model_name="department", + name="code", + field=models.CharField( + db_index=True, + help_text="The department code, e.g. 'CIS' for the CIS department.", + max_length=8, + unique=True, + ), + ), + migrations.AlterField( + model_name="department", + name="name", + field=models.CharField( + help_text="\nThe name of the department, e.g. 
'Computer and Information Sci' for the CIS department.\n", + max_length=255, + ), + ), + migrations.AlterField( + model_name="instructor", + name="name", + field=models.CharField( + db_index=True, + help_text="The full name of the instructor.", + max_length=255, + unique=True, + ), + ), + migrations.AlterField( + model_name="instructor", + name="user", + field=models.ForeignKey( + blank=True, + help_text="The instructor's Penn Labs Accounts User object.", + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to=settings.AUTH_USER_MODEL, + ), + ), + migrations.AlterField( + model_name="meeting", + name="day", + field=models.CharField( + help_text="The single day on which the meeting takes place (one of M, T, W, R, or F).", + max_length=1, + ), + ), + migrations.AlterField( + model_name="meeting", + name="end", + field=models.DecimalField( + decimal_places=2, + help_text="The end time of the meeting; hh:mm is formatted as hh.mm = h+mm/100.", + max_digits=4, + ), + ), + migrations.AlterField( + model_name="meeting", + name="room", + field=models.ForeignKey( + help_text="The Room object in which the meeting is taking place.", + on_delete=django.db.models.deletion.CASCADE, + to="courses.room", + ), + ), + migrations.AlterField( + model_name="meeting", + name="section", + field=models.ForeignKey( + help_text="The Section object to which this class meeting belongs.", + on_delete=django.db.models.deletion.CASCADE, + related_name="meetings", + to="courses.section", + ), + ), + migrations.AlterField( + model_name="meeting", + name="start", + field=models.DecimalField( + decimal_places=2, + help_text="The start time of the meeting; hh:mm is formatted as hh.mm = h+mm/100.", + max_digits=4, + ), + ), + migrations.AlterField( + model_name="requirement", + name="code", + field=models.CharField( + db_index=True, + help_text="\nThe code identifying this requirement, e.g. 'MFR' for 'Formal Reasoning Course',\nan SAS requirement satisfied by CIS-120.\n", + max_length=10, + ), + ), + migrations.AlterField( + model_name="requirement", + name="courses", + field=models.ManyToManyField( + blank=True, + help_text="\n Individual Course objects which satisfy this requirement (not necessarily\n comprehensive, as often entire departments will satisfy the requirement, but not\n every course in the department will necessarily be added to this set). For example,\n CIS 398 would be in the courses set for the NATSCI engineering requirement, since\n it is the only CIS class that satisfies that requirement.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", + related_name="requirement_set", + to="courses.Course", + ), + ), + migrations.AlterField( + model_name="requirement", + name="departments", + field=models.ManyToManyField( + blank=True, + help_text="\n All the Department objects for which any course in that department\n (if not in overrides) would satisfy this requirement. Usually if a whole department\n satisfies a requirement, individual courses from that department will not be added to\n the courses set. 
Also, to specify specific courses which do not satisfy the requirement\n (even if their department is in the departments set), the overrides set is used.\n For example, CIS classes count as engineering (ENG) courses, but CIS-125 is NOT an\n engineering class, so for the ENG requirement, CIS-125 would be in the overrides\n set even though the CIS Department object would be in the departments set.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", + related_name="requirements", + to="courses.Department", + ), + ), + migrations.AlterField( + model_name="requirement", + name="name", + field=models.CharField( + help_text="\nThe name of the requirement, e.g. 'Formal Reasoning Course', an SAS requirement\nsatisfied by CIS-120.\n", + max_length=255, + ), + ), + migrations.AlterField( + model_name="requirement", + name="overrides", + field=models.ManyToManyField( + blank=True, + help_text="\n Individual Course objects which do not satisfy this requirement. This set\n is usually used to add exceptions to departments which satisfy requirements.\n For example, CIS classes count as engineering (ENG) courses, but CIS-125 is NOT an\n engineering class, so for the ENG requirement, CIS-125 would be in the overrides\n set even though the CIS Department would be in the departments set.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", + related_name="nonrequirement_set", + to="courses.Course", + ), + ), + migrations.AlterField( + model_name="requirement", + name="school", + field=models.CharField( + choices=[("SEAS", "Engineering"), ("WH", "Wharton"), ("SAS", "College")], + db_index=True, + help_text='\nWhat school this requirement belongs to, e.g. \'SAS\' for the SAS \'Formal Reasoning Course\'\nrequirement satisfied by CIS-120. Options and meanings:\n
    "SEAS""Engineering"
    "WH""Wharton"
    "SAS""College"
    ', + max_length=5, + ), + ), + migrations.AlterField( + model_name="requirement", + name="semester", + field=models.CharField( + db_index=True, + help_text="\nThe semester of the requirement (of the form YYYYx where x is A [for spring], B [summer],\nor C [fall]), e.g. 2019C for fall 2019. We organize requirements by semester so that we\ndon't get huge related sets which don't give particularly good info.\n", + max_length=5, + ), + ), + migrations.AlterField( + model_name="restriction", + name="code", + field=models.CharField( + help_text="\nA registration restriction control code, for instance 'PDP' for CIS-121 (permission\nrequired from dept for registration). See [bit.ly/3eu17m2](https://bit.ly/3eu17m2)\nfor all options.\n", + max_length=10, + unique=True, + ), + ), + migrations.AlterField( + model_name="restriction", + name="description", + field=models.TextField( + help_text="\nThe registration restriction description, e.g. 'Permission Needed From Department'\nfor the PDP restriction (on CIS-121, for example). See\n[bit.ly/3eu17m2](https://bit.ly/3eu17m2) for all options.\n" + ), + ), + migrations.AlterField( + model_name="room", + name="building", + field=models.ForeignKey( + help_text="\nThe Building object in which the room is located, e.g. the Levine Hall Building\nobject for Wu and Chen Auditorium (rm 101).\n", + on_delete=django.db.models.deletion.CASCADE, + to="courses.building", + ), + ), + migrations.AlterField( + model_name="room", + name="name", + field=models.CharField( + help_text="The room name (optional, empty string if none), e.g. 'Wu and Chen Auditorium'.", + max_length=80, + ), + ), + migrations.AlterField( + model_name="room", + name="number", + field=models.CharField( + help_text="The room number, e.g. 101 for Wu and Chen Auditorium in Levine.", + max_length=5, + ), + ), + migrations.AlterField( + model_name="section", + name="activity", + field=models.CharField( + choices=[ + ("CLN", "Clinic"), + ("DIS", "Dissertation"), + ("IND", "Independent Study"), + ("LAB", "Lab"), + ("LEC", "Lecture"), + ("MST", "Masters Thesis"), + ("REC", "Recitation"), + ("SEM", "Seminar"), + ("SRT", "Senior Thesis"), + ("STU", "Studio"), + ("***", "Undefined"), + ], + db_index=True, + help_text='The section activity, e.g. \'LEC\' for CIS-120-001 (2020A). Options and meanings:
    "CLN""Clinic"
    "DIS""Dissertation"
    "IND""Independent Study"
    "LAB""Lab"
    "LEC""Lecture"
    "MST""Masters Thesis"
    "REC""Recitation"
    "SEM""Seminar"
    "SRT""Senior Thesis"
    "STU""Studio"
    "***""Undefined"
    ', + max_length=50, + ), + ), + migrations.AlterField( + model_name="section", + name="associated_sections", + field=models.ManyToManyField( + help_text="\nA list of all sections associated with the Course which this section belongs to; e.g. for\nCIS-120-001, all of the lecture and recitation sections for CIS-120 (including CIS-120-001)\nin the same semester.\n", + to="courses.Section", + ), + ), + migrations.AlterField( + model_name="section", + name="capacity", + field=models.IntegerField( + default=0, + help_text="The number of allowed registrations for this section, e.g. 220 for CIS-120-001 (2020A).", + ), + ), + migrations.AlterField( + model_name="section", + name="code", + field=models.CharField( + db_index=True, + help_text="The section code, e.g. '001' for the section CIS-120-001.", + max_length=16, + ), + ), + migrations.AlterField( + model_name="section", + name="course", + field=models.ForeignKey( + help_text="\nThe Course object to which this section belongs, e.g. the CIS-120 Course object for\nCIS-120-001.\n", + on_delete=django.db.models.deletion.CASCADE, + related_name="sections", + to="courses.course", + ), + ), + migrations.AlterField( + model_name="section", + name="credits", + field=models.DecimalField( + blank=True, + db_index=True, + decimal_places=2, + help_text="The number of credits this section is worth.", + max_digits=3, + null=True, + ), + ), + migrations.AlterField( + model_name="section", + name="full_code", + field=models.CharField( + blank=True, + db_index=True, + help_text="\nThe full code of the section, in the form '{dept code}-{course code}-{section code}',\ne.g. 'CIS-120-001' for the 001 section of CIS-120.\n", + max_length=32, + ), + ), + migrations.AlterField( + model_name="section", + name="instructors", + field=models.ManyToManyField( + help_text="The Instructor object(s) of the instructor(s) teaching the section.", + to="courses.Instructor", + ), + ), + migrations.AlterField( + model_name="section", + name="meeting_times", + field=models.TextField( + blank=True, + help_text='\nA JSON-stringified list of meeting times of the form\n\'{days code} {start time} - {end time}\', e.g.\n\'["MWF 09:00 AM - 10:00 AM","F 11:00 AM - 12:00 PM","T 05:00 PM - 06:00 PM"]\' for\nPHYS-151-001 (2020A). Each letter of the days code is of the form M, T, W, R, F for each\nday of the work week, respectively (and multiple days are combined with concatenation).\nTo access the Meeting objects for this section, the related field `meetings` can be used.\n', + ), + ), + migrations.AlterField( + model_name="section", + name="restrictions", + field=models.ManyToManyField( + blank=True, + help_text="All registration Restriction objects to which this section is subject.", + to="courses.Restriction", + ), + ), + migrations.AlterField( + model_name="section", + name="status", + field=models.CharField( + choices=[("O", "Open"), ("C", "Closed"), ("X", "Cancelled"), ("", "Unlisted")], + db_index=True, + help_text='The registration status of the section. Options and meanings:
    "O""Open"
    "C""Closed"
    "X""Cancelled"
    """Unlisted"
    ', + max_length=4, + ), + ), + migrations.AlterField( + model_name="statusupdate", + name="alert_sent", + field=models.BooleanField( + help_text="Was an alert was sent to a User as a result of this status update?" + ), + ), + migrations.AlterField( + model_name="statusupdate", + name="new_status", + field=models.CharField( + choices=[("O", "Open"), ("C", "Closed"), ("X", "Cancelled"), ("", "Unlisted")], + help_text='The new status code (to which the section changed). Options and meanings:
    "O""Open"
    "C""Closed"
    "X""Cancelled"
    """Unlisted"
    ', + max_length=16, + ), + ), + migrations.AlterField( + model_name="statusupdate", + name="old_status", + field=models.CharField( + choices=[("O", "Open"), ("C", "Closed"), ("X", "Cancelled"), ("", "Unlisted")], + help_text='The old status code (from which the section changed). Options and meanings:
    "O""Open"
    "C""Closed"
    "X""Cancelled"
    """Unlisted"
    ', + max_length=16, + ), + ), + migrations.AlterField( + model_name="statusupdate", + name="section", + field=models.ForeignKey( + help_text="The section which this status update applies to.", + on_delete=django.db.models.deletion.CASCADE, + to="courses.section", + ), + ), + migrations.AlterField( + model_name="userprofile", + name="email", + field=models.EmailField( + blank=True, + help_text="The email of the User. Defaults to null.", + max_length=254, + null=True, + ), + ), + migrations.AlterField( + model_name="userprofile", + name="phone", + field=models.CharField( + blank=True, + help_text="\nThe phone number of the user. Defaults to null.\nThe phone number will be stored in the E164 format, but any form parseable by the\n[phonenumbers library](https://pypi.org/project/phonenumbers/)\nwill be accepted and converted to E164 format automatically upon saving.\n", + max_length=100, + null=True, + validators=[courses.models.UserProfile.validate_phone], + ), + ), + migrations.AlterField( + model_name="userprofile", + name="user", + field=models.OneToOneField( + help_text="The User object to which this User Profile object belongs.", + on_delete=django.db.models.deletion.CASCADE, + related_name="profile", + to=settings.AUTH_USER_MODEL, + ), + ), + ] diff --git a/backend/courses/migrations/0031_userprofile_push_notifications.py b/backend/courses/migrations/0031_userprofile_push_notifications.py index f85ceb940..5908c8920 100644 --- a/backend/courses/migrations/0031_userprofile_push_notifications.py +++ b/backend/courses/migrations/0031_userprofile_push_notifications.py @@ -1,21 +1,21 @@ -# Generated by Django 3.1.1 on 2020-11-08 20:35 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0030_auto_20201002_0714"), - ] - - operations = [ - migrations.AddField( - model_name="userprofile", - name="push_notifications", - field=models.BooleanField( - default=False, - help_text="\nDefaults to False, changed to True if the User enables mobile push notifications\nfor PCA, rather than text notifications.\n", - ), - ), - ] +# Generated by Django 3.1.1 on 2020-11-08 20:35 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0030_auto_20201002_0714"), + ] + + operations = [ + migrations.AddField( + model_name="userprofile", + name="push_notifications", + field=models.BooleanField( + default=False, + help_text="\nDefaults to False, changed to True if the User enables mobile push notifications\nfor PCA, rather than text notifications.\n", + ), + ), + ] diff --git a/backend/courses/migrations/0032_auto_20210418_0343.py b/backend/courses/migrations/0032_auto_20210418_0343.py index b80daf1e9..6f3e519d2 100644 --- a/backend/courses/migrations/0032_auto_20210418_0343.py +++ b/backend/courses/migrations/0032_auto_20210418_0343.py @@ -1,232 +1,232 @@ -# Generated by Django 3.2 on 2021-04-18 07:43 - -import django.core.validators -import django.db.models.expressions -import django.db.models.functions.comparison -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0031_userprofile_push_notifications"), - ] - - operations = [ - migrations.AddField( - model_name="section", - name="percent_open", - field=models.FloatField( - default=0, - help_text="\nIf this section is from the current semester, this is the percentage (expressed as a\ndecimal number between 0 and 1) of the period between the beginning of 
its\nadd/drop period and its last status update that this section was open\n(or 0 if it has had no status updates strictly within its add/drop period).\nIf this section is from a previous semester, this is the percentage of its\nwhole add/drop period that it was open.\n", - validators=[ - django.core.validators.MinValueValidator(0), - django.core.validators.MaxValueValidator(1), - ], - ), - ), - migrations.AddField( - model_name="section", - name="registration_volume", - field=models.PositiveIntegerField( - default=0, - help_text="The number of active PCA registrations watching this section.", - ), - ), - migrations.AddField( - model_name="statusupdate", - name="in_add_drop_period", - field=models.BooleanField( - default=False, - help_text="Was this status update created during the add/drop period?", - ), - ), - migrations.AddField( - model_name="statusupdate", - name="percent_through_add_drop_period", - field=models.FloatField( - blank=True, - help_text="The percentage through the add/drop period at which this status update occurred.This percentage is constrained within the range [0,1].", - null=True, - ), - ), - migrations.AlterField( - model_name="building", - name="latitude", - field=models.FloatField( - blank=True, - help_text="\nThe latitude of the building, in the signed decimal degrees format (global range of\n[-90.0, 90.0]), e.g. `39.961380` for the Towne Building.\n", - null=True, - ), - ), - migrations.AlterField( - model_name="building", - name="longitude", - field=models.FloatField( - blank=True, - help_text="\nThe longitude of the building, in the signed decimal degrees format (global range of\n[-180.0, 180.0]), e.g. `-75.176773` for the Towne Building.\n", - null=True, - ), - ), - migrations.AlterField( - model_name="course", - name="code", - field=models.CharField( - db_index=True, help_text="The course code, e.g. `120` for CIS-120.", max_length=8 - ), - ), - migrations.AlterField( - model_name="course", - name="full_code", - field=models.CharField( - blank=True, - db_index=True, - help_text="The dash-joined department and code of the course, e.g. `CIS-120` for CIS-120.", - max_length=16, - ), - ), - migrations.AlterField( - model_name="course", - name="semester", - field=models.CharField( - db_index=True, - help_text="\nThe semester of the course (of the form YYYYx where x is A [for spring],\nB [summer], or C [fall]), e.g. `2019C` for fall 2019.\n", - max_length=5, - ), - ), - migrations.AlterField( - model_name="department", - name="code", - field=models.CharField( - db_index=True, - help_text="The department code, e.g. `CIS` for the CIS department.", - max_length=8, - unique=True, - ), - ), - migrations.AlterField( - model_name="requirement", - name="code", - field=models.CharField( - db_index=True, - help_text="\nThe code identifying this requirement, e.g. `MFR` for 'Formal Reasoning Course',\nan SAS requirement satisfied by CIS-120.\n", - max_length=10, - ), - ), - migrations.AlterField( - model_name="requirement", - name="school", - field=models.CharField( - choices=[("SEAS", "Engineering"), ("WH", "Wharton"), ("SAS", "College")], - db_index=True, - help_text='\nWhat school this requirement belongs to, e.g. `SAS` for the SAS \'Formal Reasoning Course\'\nrequirement satisfied by CIS-120. Options and meanings:\n
    "SEAS""Engineering"
    "WH""Wharton"
    "SAS""College"
    ', - max_length=5, - ), - ), - migrations.AlterField( - model_name="requirement", - name="semester", - field=models.CharField( - db_index=True, - help_text="\nThe semester of the requirement (of the form YYYYx where x is A [for spring], B [summer],\nor C [fall]), e.g. `2019C` for fall 2019. We organize requirements by semester so that we\ndon't get huge related sets which don't give particularly good info.\n", - max_length=5, - ), - ), - migrations.AlterField( - model_name="room", - name="number", - field=models.CharField( - help_text="The room number, e.g. `101` for Wu and Chen Auditorium in Levine.", - max_length=5, - ), - ), - migrations.AlterField( - model_name="section", - name="activity", - field=models.CharField( - choices=[ - ("CLN", "Clinic"), - ("DIS", "Dissertation"), - ("IND", "Independent Study"), - ("LAB", "Lab"), - ("LEC", "Lecture"), - ("MST", "Masters Thesis"), - ("REC", "Recitation"), - ("SEM", "Seminar"), - ("SRT", "Senior Thesis"), - ("STU", "Studio"), - ("***", "Undefined"), - ], - db_index=True, - help_text='The section activity, e.g. `LEC` for CIS-120-001 (2020A). Options and meanings:
    "CLN""Clinic"
    "DIS""Dissertation"
    "IND""Independent Study"
    "LAB""Lab"
    "LEC""Lecture"
    "MST""Masters Thesis"
    "REC""Recitation"
    "SEM""Seminar"
    "SRT""Senior Thesis"
    "STU""Studio"
    "***""Undefined"
    ', - max_length=50, - ), - ), - migrations.AlterField( - model_name="section", - name="capacity", - field=models.IntegerField( - default=0, - help_text="The number of allowed registrations for this section, e.g. `220` for CIS-120-001 (2020A).", - ), - ), - migrations.AlterField( - model_name="section", - name="code", - field=models.CharField( - db_index=True, - help_text="The section code, e.g. `001` for the section CIS-120-001.", - max_length=16, - ), - ), - migrations.AlterField( - model_name="section", - name="full_code", - field=models.CharField( - blank=True, - db_index=True, - help_text="\nThe full code of the section, in the form '{dept code}-{course code}-{section code}',\ne.g. `CIS-120-001` for the 001 section of CIS-120.\n", - max_length=32, - ), - ), - migrations.AlterField( - model_name="section", - name="meeting_times", - field=models.TextField( - blank=True, - help_text='\nA JSON-stringified list of meeting times of the form\n`{days code} {start time} - {end time}`, e.g.\n`["MWF 09:00 AM - 10:00 AM","F 11:00 AM - 12:00 PM","T 05:00 PM - 06:00 PM"]` for\nPHYS-151-001 (2020A). Each letter of the days code is of the form M, T, W, R, F for each\nday of the work week, respectively (and multiple days are combined with concatenation).\nTo access the Meeting objects for this section, the related field `meetings` can be used.\n', - ), - ), - migrations.AlterField( - model_name="section", - name="restrictions", - field=models.ManyToManyField( - blank=True, - help_text="All registration Restriction objects to which this section is subject.", - related_name="sections", - to="courses.Restriction", - ), - ), - migrations.AddIndex( - model_name="section", - index=models.Index( - django.db.models.expressions.Case( - django.db.models.expressions.When( - models.Q(("capacity__isnull", False), ("capacity__gt", 0)), - then=django.db.models.expressions.CombinedExpression( - django.db.models.functions.comparison.Cast( - "registration_volume", models.FloatField() - ), - "/", - django.db.models.functions.comparison.Cast( - "capacity", models.FloatField() - ), - ), - ), - default=None, - output_field=models.FloatField(blank=True, null=True), - ), - name="raw_demand", - ), - ), - ] +# Generated by Django 3.2 on 2021-04-18 07:43 + +import django.core.validators +import django.db.models.expressions +import django.db.models.functions.comparison +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0031_userprofile_push_notifications"), + ] + + operations = [ + migrations.AddField( + model_name="section", + name="percent_open", + field=models.FloatField( + default=0, + help_text="\nIf this section is from the current semester, this is the percentage (expressed as a\ndecimal number between 0 and 1) of the period between the beginning of its\nadd/drop period and its last status update that this section was open\n(or 0 if it has had no status updates strictly within its add/drop period).\nIf this section is from a previous semester, this is the percentage of its\nwhole add/drop period that it was open.\n", + validators=[ + django.core.validators.MinValueValidator(0), + django.core.validators.MaxValueValidator(1), + ], + ), + ), + migrations.AddField( + model_name="section", + name="registration_volume", + field=models.PositiveIntegerField( + default=0, + help_text="The number of active PCA registrations watching this section.", + ), + ), + migrations.AddField( + model_name="statusupdate", + name="in_add_drop_period", + 
field=models.BooleanField( + default=False, + help_text="Was this status update created during the add/drop period?", + ), + ), + migrations.AddField( + model_name="statusupdate", + name="percent_through_add_drop_period", + field=models.FloatField( + blank=True, + help_text="The percentage through the add/drop period at which this status update occurred.This percentage is constrained within the range [0,1].", + null=True, + ), + ), + migrations.AlterField( + model_name="building", + name="latitude", + field=models.FloatField( + blank=True, + help_text="\nThe latitude of the building, in the signed decimal degrees format (global range of\n[-90.0, 90.0]), e.g. `39.961380` for the Towne Building.\n", + null=True, + ), + ), + migrations.AlterField( + model_name="building", + name="longitude", + field=models.FloatField( + blank=True, + help_text="\nThe longitude of the building, in the signed decimal degrees format (global range of\n[-180.0, 180.0]), e.g. `-75.176773` for the Towne Building.\n", + null=True, + ), + ), + migrations.AlterField( + model_name="course", + name="code", + field=models.CharField( + db_index=True, help_text="The course code, e.g. `120` for CIS-120.", max_length=8 + ), + ), + migrations.AlterField( + model_name="course", + name="full_code", + field=models.CharField( + blank=True, + db_index=True, + help_text="The dash-joined department and code of the course, e.g. `CIS-120` for CIS-120.", + max_length=16, + ), + ), + migrations.AlterField( + model_name="course", + name="semester", + field=models.CharField( + db_index=True, + help_text="\nThe semester of the course (of the form YYYYx where x is A [for spring],\nB [summer], or C [fall]), e.g. `2019C` for fall 2019.\n", + max_length=5, + ), + ), + migrations.AlterField( + model_name="department", + name="code", + field=models.CharField( + db_index=True, + help_text="The department code, e.g. `CIS` for the CIS department.", + max_length=8, + unique=True, + ), + ), + migrations.AlterField( + model_name="requirement", + name="code", + field=models.CharField( + db_index=True, + help_text="\nThe code identifying this requirement, e.g. `MFR` for 'Formal Reasoning Course',\nan SAS requirement satisfied by CIS-120.\n", + max_length=10, + ), + ), + migrations.AlterField( + model_name="requirement", + name="school", + field=models.CharField( + choices=[("SEAS", "Engineering"), ("WH", "Wharton"), ("SAS", "College")], + db_index=True, + help_text='\nWhat school this requirement belongs to, e.g. `SAS` for the SAS \'Formal Reasoning Course\'\nrequirement satisfied by CIS-120. Options and meanings:\n
    "SEAS""Engineering"
    "WH""Wharton"
    "SAS""College"
    ', + max_length=5, + ), + ), + migrations.AlterField( + model_name="requirement", + name="semester", + field=models.CharField( + db_index=True, + help_text="\nThe semester of the requirement (of the form YYYYx where x is A [for spring], B [summer],\nor C [fall]), e.g. `2019C` for fall 2019. We organize requirements by semester so that we\ndon't get huge related sets which don't give particularly good info.\n", + max_length=5, + ), + ), + migrations.AlterField( + model_name="room", + name="number", + field=models.CharField( + help_text="The room number, e.g. `101` for Wu and Chen Auditorium in Levine.", + max_length=5, + ), + ), + migrations.AlterField( + model_name="section", + name="activity", + field=models.CharField( + choices=[ + ("CLN", "Clinic"), + ("DIS", "Dissertation"), + ("IND", "Independent Study"), + ("LAB", "Lab"), + ("LEC", "Lecture"), + ("MST", "Masters Thesis"), + ("REC", "Recitation"), + ("SEM", "Seminar"), + ("SRT", "Senior Thesis"), + ("STU", "Studio"), + ("***", "Undefined"), + ], + db_index=True, + help_text='The section activity, e.g. `LEC` for CIS-120-001 (2020A). Options and meanings:
    "CLN""Clinic"
    "DIS""Dissertation"
    "IND""Independent Study"
    "LAB""Lab"
    "LEC""Lecture"
    "MST""Masters Thesis"
    "REC""Recitation"
    "SEM""Seminar"
    "SRT""Senior Thesis"
    "STU""Studio"
    "***""Undefined"
    ', + max_length=50, + ), + ), + migrations.AlterField( + model_name="section", + name="capacity", + field=models.IntegerField( + default=0, + help_text="The number of allowed registrations for this section, e.g. `220` for CIS-120-001 (2020A).", + ), + ), + migrations.AlterField( + model_name="section", + name="code", + field=models.CharField( + db_index=True, + help_text="The section code, e.g. `001` for the section CIS-120-001.", + max_length=16, + ), + ), + migrations.AlterField( + model_name="section", + name="full_code", + field=models.CharField( + blank=True, + db_index=True, + help_text="\nThe full code of the section, in the form '{dept code}-{course code}-{section code}',\ne.g. `CIS-120-001` for the 001 section of CIS-120.\n", + max_length=32, + ), + ), + migrations.AlterField( + model_name="section", + name="meeting_times", + field=models.TextField( + blank=True, + help_text='\nA JSON-stringified list of meeting times of the form\n`{days code} {start time} - {end time}`, e.g.\n`["MWF 09:00 AM - 10:00 AM","F 11:00 AM - 12:00 PM","T 05:00 PM - 06:00 PM"]` for\nPHYS-151-001 (2020A). Each letter of the days code is of the form M, T, W, R, F for each\nday of the work week, respectively (and multiple days are combined with concatenation).\nTo access the Meeting objects for this section, the related field `meetings` can be used.\n', + ), + ), + migrations.AlterField( + model_name="section", + name="restrictions", + field=models.ManyToManyField( + blank=True, + help_text="All registration Restriction objects to which this section is subject.", + related_name="sections", + to="courses.Restriction", + ), + ), + migrations.AddIndex( + model_name="section", + index=models.Index( + django.db.models.expressions.Case( + django.db.models.expressions.When( + models.Q(("capacity__isnull", False), ("capacity__gt", 0)), + then=django.db.models.expressions.CombinedExpression( + django.db.models.functions.comparison.Cast( + "registration_volume", models.FloatField() + ), + "/", + django.db.models.functions.comparison.Cast( + "capacity", models.FloatField() + ), + ), + ), + default=None, + output_field=models.FloatField(blank=True, null=True), + ), + name="raw_demand", + ), + ), + ] diff --git a/backend/courses/migrations/0033_alter_statusupdate_section.py b/backend/courses/migrations/0033_alter_statusupdate_section.py index 4bb41ae6e..78c8a2bf1 100644 --- a/backend/courses/migrations/0033_alter_statusupdate_section.py +++ b/backend/courses/migrations/0033_alter_statusupdate_section.py @@ -1,24 +1,24 @@ -# Generated by Django 3.2 on 2021-04-18 18:26 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0032_auto_20210418_0343"), - ] - - operations = [ - migrations.AlterField( - model_name="statusupdate", - name="section", - field=models.ForeignKey( - help_text="The section which this status update applies to.", - on_delete=django.db.models.deletion.CASCADE, - related_name="status_updates", - to="courses.section", - ), - ), - ] +# Generated by Django 3.2 on 2021-04-18 18:26 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0032_auto_20210418_0343"), + ] + + operations = [ + migrations.AlterField( + model_name="statusupdate", + name="section", + field=models.ForeignKey( + help_text="The section which this status update applies to.", + on_delete=django.db.models.deletion.CASCADE, + 
related_name="status_updates", + to="courses.section", + ), + ), + ] diff --git a/backend/courses/migrations/0034_auto_20211114_0032.py b/backend/courses/migrations/0034_auto_20211114_0032.py index dd98648de..1634bcfe2 100644 --- a/backend/courses/migrations/0034_auto_20211114_0032.py +++ b/backend/courses/migrations/0034_auto_20211114_0032.py @@ -1,29 +1,29 @@ -# Generated by Django 3.2.9 on 2021-11-14 05:32 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0033_alter_statusupdate_section"), - ] - - operations = [ - migrations.AddField( - model_name="course", - name="num_activities", - field=models.IntegerField( - default=0, - help_text="\nThe number of distinct activities belonging to this course (precomputed for efficiency).\nMaintained by the registrar import / recomputestats script.\n", - ), - ), - migrations.AddField( - model_name="section", - name="num_meetings", - field=models.IntegerField( - default=0, - help_text="\nThe number of meetings belonging to this section (precomputed for efficiency).\nMaintained by the registrar import / recomputestats script.\n", - ), - ), - ] +# Generated by Django 3.2.9 on 2021-11-14 05:32 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0033_alter_statusupdate_section"), + ] + + operations = [ + migrations.AddField( + model_name="course", + name="num_activities", + field=models.IntegerField( + default=0, + help_text="\nThe number of distinct activities belonging to this course (precomputed for efficiency).\nMaintained by the registrar import / recomputestats script.\n", + ), + ), + migrations.AddField( + model_name="section", + name="num_meetings", + field=models.IntegerField( + default=0, + help_text="\nThe number of meetings belonging to this section (precomputed for efficiency).\nMaintained by the registrar import / recomputestats script.\n", + ), + ), + ] diff --git a/backend/courses/migrations/0035_topic_course_topic.py b/backend/courses/migrations/0035_topic_course_topic.py index ebd484413..36f0078a2 100644 --- a/backend/courses/migrations/0035_topic_course_topic.py +++ b/backend/courses/migrations/0035_topic_course_topic.py @@ -1,57 +1,57 @@ -# Generated by Django 4.0.1 on 2022-03-30 02:33 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0034_auto_20211114_0032"), - ] - - operations = [ - migrations.CreateModel( - name="Topic", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ( - "branched_from", - models.ForeignKey( - blank=True, - help_text="\nWhen relevant, the Topic from which this Topic was branched (this will likely only be\nuseful for the spring 2022 NGSS course code changes, where some courses were split into\nmultiple new courses of different topics).\n", - null=True, - on_delete=django.db.models.deletion.SET_NULL, - related_name="branched_to", - to="courses.topic", - ), - ), - ( - "most_recent", - models.ForeignKey( - help_text="\nThe most recent course (by semester) of this topic. The `most_recent` course should\nbe the `primary_listing` if it has crosslistings. These invariants are maintained\nby the `Topic.merge_with`, `Topic.add_course`, `Topic.from_course`, and `Course.save`\nmethods. 
Defer to using these methods rather than setting this field manually.\nYou must change the corresponding `Topic` object's `most_recent` field before\ndeleting a Course if it is the `most_recent` course (`on_delete=models.PROTECT`).\n", - on_delete=django.db.models.deletion.PROTECT, - related_name="+", - to="courses.course", - ), - ), - ], - ), - migrations.AddField( - model_name="course", - name="topic", - field=models.ForeignKey( - blank=True, - help_text="The Topic of this course", - null=True, - on_delete=django.db.models.deletion.SET_NULL, - related_name="courses", - to="courses.topic", - ), - ), - ] +# Generated by Django 4.0.1 on 2022-03-30 02:33 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0034_auto_20211114_0032"), + ] + + operations = [ + migrations.CreateModel( + name="Topic", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ( + "branched_from", + models.ForeignKey( + blank=True, + help_text="\nWhen relevant, the Topic from which this Topic was branched (this will likely only be\nuseful for the spring 2022 NGSS course code changes, where some courses were split into\nmultiple new courses of different topics).\n", + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="branched_to", + to="courses.topic", + ), + ), + ( + "most_recent", + models.ForeignKey( + help_text="\nThe most recent course (by semester) of this topic. The `most_recent` course should\nbe the `primary_listing` if it has crosslistings. These invariants are maintained\nby the `Topic.merge_with`, `Topic.add_course`, `Topic.from_course`, and `Course.save`\nmethods. Defer to using these methods rather than setting this field manually.\nYou must change the corresponding `Topic` object's `most_recent` field before\ndeleting a Course if it is the `most_recent` course (`on_delete=models.PROTECT`).\n", + on_delete=django.db.models.deletion.PROTECT, + related_name="+", + to="courses.course", + ), + ), + ], + ), + migrations.AddField( + model_name="course", + name="topic", + field=models.ForeignKey( + blank=True, + help_text="The Topic of this course", + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="courses", + to="courses.topic", + ), + ), + ] diff --git a/backend/courses/migrations/0036_course_syllabus_url_meeting_end_date_and_more.py b/backend/courses/migrations/0036_course_syllabus_url_meeting_end_date_and_more.py index a5d89f97c..7e4fc3b80 100644 --- a/backend/courses/migrations/0036_course_syllabus_url_meeting_end_date_and_more.py +++ b/backend/courses/migrations/0036_course_syllabus_url_meeting_end_date_and_more.py @@ -1,42 +1,42 @@ -# Generated by Django 4.0.3 on 2022-04-04 00:05 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0035_topic_course_topic"), - ] - - operations = [ - migrations.AddField( - model_name="course", - name="syllabus_url", - field=models.TextField( - blank=True, - help_text="\nA URL for the syllabus of the course, if available.\nNot available for courses offered in or before spring 2022.\n", - null=True, - ), - ), - migrations.AddField( - model_name="meeting", - name="end_date", - field=models.TextField( - blank=True, - help_text="\nThe last day this meeting takes place, in the form 'YYYY-MM-DD', e.g. 
'2022-12-12'.\nNot available for sections offered in or before spring 2022.\n", - max_length=10, - null=True, - ), - ), - migrations.AddField( - model_name="meeting", - name="start_date", - field=models.TextField( - blank=True, - help_text="\nThe first day this meeting takes place, in the form 'YYYY-MM-DD', e.g. '2022-08-30'.\nNot available for sections offered in or before spring 2022.\n", - max_length=10, - null=True, - ), - ), - ] +# Generated by Django 4.0.3 on 2022-04-04 00:05 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0035_topic_course_topic"), + ] + + operations = [ + migrations.AddField( + model_name="course", + name="syllabus_url", + field=models.TextField( + blank=True, + help_text="\nA URL for the syllabus of the course, if available.\nNot available for courses offered in or before spring 2022.\n", + null=True, + ), + ), + migrations.AddField( + model_name="meeting", + name="end_date", + field=models.TextField( + blank=True, + help_text="\nThe last day this meeting takes place, in the form 'YYYY-MM-DD', e.g. '2022-12-12'.\nNot available for sections offered in or before spring 2022.\n", + max_length=10, + null=True, + ), + ), + migrations.AddField( + model_name="meeting", + name="start_date", + field=models.TextField( + blank=True, + help_text="\nThe first day this meeting takes place, in the form 'YYYY-MM-DD', e.g. '2022-08-30'.\nNot available for sections offered in or before spring 2022.\n", + max_length=10, + null=True, + ), + ), + ] diff --git a/backend/courses/migrations/0037_alter_meeting_room_alter_section_credits.py b/backend/courses/migrations/0037_alter_meeting_room_alter_section_credits.py index a8ea15476..d9c05bffd 100644 --- a/backend/courses/migrations/0037_alter_meeting_room_alter_section_credits.py +++ b/backend/courses/migrations/0037_alter_meeting_room_alter_section_credits.py @@ -1,37 +1,37 @@ -# Generated by Django 4.0.3 on 2022-04-04 01:26 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0036_course_syllabus_url_meeting_end_date_and_more"), - ] - - operations = [ - migrations.AlterField( - model_name="meeting", - name="room", - field=models.ForeignKey( - blank=True, - help_text="The Room object in which the meeting is taking place (null if this is an online meeting).", - null=True, - on_delete=django.db.models.deletion.CASCADE, - to="courses.room", - ), - ), - migrations.AlterField( - model_name="section", - name="credits", - field=models.DecimalField( - blank=True, - db_index=True, - decimal_places=2, - help_text="The number of credits this section is worth.", - max_digits=4, - null=True, - ), - ), - ] +# Generated by Django 4.0.3 on 2022-04-04 01:26 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0036_course_syllabus_url_meeting_end_date_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="meeting", + name="room", + field=models.ForeignKey( + blank=True, + help_text="The Room object in which the meeting is taking place (null if this is an online meeting).", + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="courses.room", + ), + ), + migrations.AlterField( + model_name="section", + name="credits", + field=models.DecimalField( + blank=True, + db_index=True, + decimal_places=2, + help_text="The number of credits this section is 
worth.", + max_digits=4, + null=True, + ), + ), + ] diff --git a/backend/courses/migrations/0038_alter_meeting_room.py b/backend/courses/migrations/0038_alter_meeting_room.py index 8dfd5c147..673b5fb0f 100644 --- a/backend/courses/migrations/0038_alter_meeting_room.py +++ b/backend/courses/migrations/0038_alter_meeting_room.py @@ -1,25 +1,25 @@ -# Generated by Django 4.0.3 on 2022-04-04 18:54 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0037_alter_meeting_room_alter_section_credits"), - ] - - operations = [ - migrations.AlterField( - model_name="meeting", - name="room", - field=models.ForeignKey( - blank=True, - help_text="\nThe Room object in which the meeting is taking place\n(null if this is an online meeting).\n", - null=True, - on_delete=django.db.models.deletion.CASCADE, - to="courses.room", - ), - ), - ] +# Generated by Django 4.0.3 on 2022-04-04 18:54 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0037_alter_meeting_room_alter_section_credits"), + ] + + operations = [ + migrations.AlterField( + model_name="meeting", + name="room", + field=models.ForeignKey( + blank=True, + help_text="\nThe Room object in which the meeting is taking place\n(null if this is an online meeting).\n", + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="courses.room", + ), + ), + ] diff --git a/backend/courses/migrations/0039_alter_course_primary_listing.py b/backend/courses/migrations/0039_alter_course_primary_listing.py index 9f6f6ac4a..dd9c5da2d 100644 --- a/backend/courses/migrations/0039_alter_course_primary_listing.py +++ b/backend/courses/migrations/0039_alter_course_primary_listing.py @@ -1,35 +1,35 @@ -# Generated by Django 4.0.3 on 2022-04-06 16:51 - -import django.db.models.deletion -from django.db import migrations, models - - -def forwards_func(apps, schema_editor): - Course = apps.get_model("courses", "Course") - Course.objects.filter(primary_listing__isnull=True).update(primary_listing_id=models.F("id")) - - -def reverse_func(apps, schema_editor): - Course = apps.get_model("courses", "Course") - Course.objects.filter(primary_listing_id=models.F("id")).update(primary_listing=None) - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0038_alter_meeting_room"), - ] - - operations = [ - migrations.RunPython(forwards_func, reverse_func), - migrations.AlterField( - model_name="course", - name="primary_listing", - field=models.ForeignKey( - help_text="\nThe primary Course object with which this course is crosslisted. The set of crosslisted courses\nto which this course belongs can thus be accessed with the related field listing_set on the\nprimary_listing course. 
If you are creating a course without any crosslistings, you must set this\nfield to self.\n", - on_delete=django.db.models.deletion.CASCADE, - related_name="listing_set", - to="courses.course", - ), - ), - ] +# Generated by Django 4.0.3 on 2022-04-06 16:51 + +import django.db.models.deletion +from django.db import migrations, models + + +def forwards_func(apps, schema_editor): + Course = apps.get_model("courses", "Course") + Course.objects.filter(primary_listing__isnull=True).update(primary_listing_id=models.F("id")) + + +def reverse_func(apps, schema_editor): + Course = apps.get_model("courses", "Course") + Course.objects.filter(primary_listing_id=models.F("id")).update(primary_listing=None) + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0038_alter_meeting_room"), + ] + + operations = [ + migrations.RunPython(forwards_func, reverse_func), + migrations.AlterField( + model_name="course", + name="primary_listing", + field=models.ForeignKey( + help_text="\nThe primary Course object with which this course is crosslisted. The set of crosslisted courses\nto which this course belongs can thus be accessed with the related field listing_set on the\nprimary_listing course. If you are creating a course without any crosslistings, you must set this\nfield to self.\n", + on_delete=django.db.models.deletion.CASCADE, + related_name="listing_set", + to="courses.course", + ), + ), + ] diff --git a/backend/courses/migrations/0040_alter_course_primary_listing.py b/backend/courses/migrations/0040_alter_course_primary_listing.py index 0cddd7ee8..5386ad349 100644 --- a/backend/courses/migrations/0040_alter_course_primary_listing.py +++ b/backend/courses/migrations/0040_alter_course_primary_listing.py @@ -1,24 +1,24 @@ -# Generated by Django 4.0.3 on 2022-04-07 02:45 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0039_alter_course_primary_listing"), - ] - - operations = [ - migrations.AlterField( - model_name="course", - name="primary_listing", - field=models.ForeignKey( - help_text="\nThe primary Course object with which this course is crosslisted. The set of crosslisted\ncourses to which this course belongs can thus be accessed with the related field\n`listing_set` on the `primary_listing` course. If a course doesn't have any crosslistings,\nits `primary_listing` foreign key will point to itself. If you call `.save()` on a course\nwithout setting its `primary_listing` field, the overridden `Course.save()` method will\nautomatically set its `primary_listing` to a self-reference.\n", - on_delete=django.db.models.deletion.CASCADE, - related_name="listing_set", - to="courses.course", - ), - ), - ] +# Generated by Django 4.0.3 on 2022-04-07 02:45 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0039_alter_course_primary_listing"), + ] + + operations = [ + migrations.AlterField( + model_name="course", + name="primary_listing", + field=models.ForeignKey( + help_text="\nThe primary Course object with which this course is crosslisted. The set of crosslisted\ncourses to which this course belongs can thus be accessed with the related field\n`listing_set` on the `primary_listing` course. If a course doesn't have any crosslistings,\nits `primary_listing` foreign key will point to itself. 
If you call `.save()` on a course\nwithout setting its `primary_listing` field, the overridden `Course.save()` method will\nautomatically set its `primary_listing` to a self-reference.\n", + on_delete=django.db.models.deletion.CASCADE, + related_name="listing_set", + to="courses.course", + ), + ), + ] diff --git a/backend/courses/migrations/0041_remove_section_raw_demand.py b/backend/courses/migrations/0041_remove_section_raw_demand.py index 33170aa46..1b31936f0 100644 --- a/backend/courses/migrations/0041_remove_section_raw_demand.py +++ b/backend/courses/migrations/0041_remove_section_raw_demand.py @@ -1,17 +1,17 @@ -# Generated by Django 4.0.3 on 2022-04-07 10:22 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0040_alter_course_primary_listing"), - ] - - operations = [ - migrations.RemoveIndex( - model_name="section", - name="raw_demand", - ), - ] +# Generated by Django 4.0.3 on 2022-04-07 10:22 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0040_alter_course_primary_listing"), + ] + + operations = [ + migrations.RemoveIndex( + model_name="section", + name="raw_demand", + ), + ] diff --git a/backend/courses/migrations/0042_section_has_reviews.py b/backend/courses/migrations/0042_section_has_reviews.py index 1a73b9e90..4cf60eef8 100644 --- a/backend/courses/migrations/0042_section_has_reviews.py +++ b/backend/courses/migrations/0042_section_has_reviews.py @@ -1,29 +1,29 @@ -# Generated by Django 4.0.3 on 2022-04-08 00:57 - -from django.db import migrations, models - -from alert.management.commands.recomputestats import recompute_has_reviews - - -def compute_has_reviews(apps, schema_editor): - recompute_has_reviews() - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0041_remove_section_raw_demand"), - ("review", "0004_auto_20200512_1526"), - ] - - operations = [ - migrations.AddField( - model_name="section", - name="has_reviews", - field=models.BooleanField( - default=False, - help_text="\nA flag indicating whether this section has reviews (precomputed for efficiency).\n", - ), - ), - migrations.RunPython(compute_has_reviews), - ] +# Generated by Django 4.0.3 on 2022-04-08 00:57 + +from django.db import migrations, models + +from alert.management.commands.recomputestats import recompute_has_reviews + + +def compute_has_reviews(apps, schema_editor): + recompute_has_reviews() + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0041_remove_section_raw_demand"), + ("review", "0004_auto_20200512_1526"), + ] + + operations = [ + migrations.AddField( + model_name="section", + name="has_reviews", + field=models.BooleanField( + default=False, + help_text="\nA flag indicating whether this section has reviews (precomputed for efficiency).\n", + ), + ), + migrations.RunPython(compute_has_reviews), + ] diff --git a/backend/courses/migrations/0043_section_has_status_updates.py b/backend/courses/migrations/0043_section_has_status_updates.py index aac318f40..1ba0255c5 100644 --- a/backend/courses/migrations/0043_section_has_status_updates.py +++ b/backend/courses/migrations/0043_section_has_status_updates.py @@ -1,28 +1,28 @@ -# Generated by Django 4.0.3 on 2022-04-08 04:16 - -from django.db import migrations, models - -from alert.management.commands.recomputestats import recompute_has_status_updates - - -def compute_has_status_updates(apps, schema_editor): - recompute_has_status_updates() - - -class 
Migration(migrations.Migration): - - dependencies = [ - ("courses", "0042_section_has_reviews"), - ] - - operations = [ - migrations.AddField( - model_name="section", - name="has_status_updates", - field=models.BooleanField( - default=False, - help_text="\nA flag indicating whether this section has Status Updates (precomputed for efficiency).\n", - ), - ), - migrations.RunPython(compute_has_status_updates), - ] +# Generated by Django 4.0.3 on 2022-04-08 04:16 + +from django.db import migrations, models + +from alert.management.commands.recomputestats import recompute_has_status_updates + + +def compute_has_status_updates(apps, schema_editor): + recompute_has_status_updates() + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0042_section_has_reviews"), + ] + + operations = [ + migrations.AddField( + model_name="section", + name="has_status_updates", + field=models.BooleanField( + default=False, + help_text="\nA flag indicating whether this section has Status Updates (precomputed for efficiency).\n", + ), + ), + migrations.RunPython(compute_has_status_updates), + ] diff --git a/backend/courses/migrations/0044_prengssrequirement_and_more.py b/backend/courses/migrations/0044_prengssrequirement_and_more.py index 929a79443..9c873c395 100644 --- a/backend/courses/migrations/0044_prengssrequirement_and_more.py +++ b/backend/courses/migrations/0044_prengssrequirement_and_more.py @@ -1,77 +1,77 @@ -# Generated by Django 4.0.3 on 2022-04-12 23:27 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0043_section_has_status_updates"), - ] - - operations = [ - migrations.AlterField( - model_name="requirement", - name="departments", - field=models.ManyToManyField( - blank=True, - help_text="\n All the Department objects for which any course in that department\n (if not in overrides) would satisfy this requirement. Usually if a whole department\n satisfies a requirement, individual courses from that department will not be added to\n the courses set. Also, to specify specific courses which do not satisfy the requirement\n (even if their department is in the departments set), the overrides set is used.\n For example, CIS classes count as engineering (ENG) courses, but CIS-125 is NOT an\n engineering class, so for the ENG requirement, CIS-125 would be in the overrides\n set even though the CIS Department object would be in the departments set.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", - related_name="pre_ngss_requirements", - to="courses.Department", - ), - ), - migrations.AlterField( - model_name="requirement", - name="courses", - field=models.ManyToManyField( - blank=True, - help_text="\n Individual Course objects which satisfy this requirement (not necessarily\n comprehensive, as often entire departments will satisfy the requirement, but not\n every course in the department will necessarily be added to this set). 
For example,\n CIS 398 would be in the courses set for the NATSCI engineering requirement, since\n it is the only CIS class that satisfies that requirement.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", - related_name="pre_ngss_requirement_set", - to="courses.Course", - ), - ), - migrations.AlterField( - model_name="requirement", - name="overrides", - field=models.ManyToManyField( - blank=True, - help_text="\n Individual Course objects which do not satisfy this requirement. This set\n is usually used to add exceptions to departments which satisfy requirements.\n For example, CIS classes count as engineering (ENG) courses, but CIS-125 is NOT an\n engineering class, so for the ENG requirement, CIS-125 would be in the overrides\n set even though the CIS Department would be in the departments set.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", - related_name="pre_ngss_nonrequirement_set", - to="courses.Course", - ), - ), - migrations.RenameModel( - old_name="Requirement", - new_name="PreNGSSRequirement", - ), - migrations.RenameField( - model_name="section", - old_name="restrictions", - new_name="pre_ngss_restrictions", - ), - migrations.RenameModel( - old_name="Restriction", - new_name="PreNGSSRestriction", - ), - migrations.AlterField( - model_name="section", - name="pre_ngss_restrictions", - field=models.ManyToManyField( - blank=True, - help_text="All pre-NGSS (deprecated since 2022C) registration Restriction objects to which this section is subject. This field will be empty for sections in 2022C or later.", - related_name="sections", - to="courses.prengssrestriction", - ), - ), - migrations.AlterField( - model_name="topic", - name="most_recent", - field=models.ForeignKey( - help_text="\nThe most recent course (by semester) of this topic. The `most_recent` course should\nbe the `primary_listing` if it has crosslistings. These invariants are maintained\nby the `Course.save()` and `Topic.merge_with()` methods. Defer to using these methods\nrather than setting this field manually. You must change the corresponding\n`Topic` object's `most_recent` field before deleting a Course if it is the\n`most_recent` course (`on_delete=models.PROTECT`).\n", - on_delete=django.db.models.deletion.PROTECT, - related_name="+", - to="courses.course", - ), - ), - ] +# Generated by Django 4.0.3 on 2022-04-12 23:27 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0043_section_has_status_updates"), + ] + + operations = [ + migrations.AlterField( + model_name="requirement", + name="departments", + field=models.ManyToManyField( + blank=True, + help_text="\n All the Department objects for which any course in that department\n (if not in overrides) would satisfy this requirement. Usually if a whole department\n satisfies a requirement, individual courses from that department will not be added to\n the courses set. 
Also, to specify specific courses which do not satisfy the requirement\n (even if their department is in the departments set), the overrides set is used.\n For example, CIS classes count as engineering (ENG) courses, but CIS-125 is NOT an\n engineering class, so for the ENG requirement, CIS-125 would be in the overrides\n set even though the CIS Department object would be in the departments set.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", + related_name="pre_ngss_requirements", + to="courses.Department", + ), + ), + migrations.AlterField( + model_name="requirement", + name="courses", + field=models.ManyToManyField( + blank=True, + help_text="\n Individual Course objects which satisfy this requirement (not necessarily\n comprehensive, as often entire departments will satisfy the requirement, but not\n every course in the department will necessarily be added to this set). For example,\n CIS 398 would be in the courses set for the NATSCI engineering requirement, since\n it is the only CIS class that satisfies that requirement.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", + related_name="pre_ngss_requirement_set", + to="courses.Course", + ), + ), + migrations.AlterField( + model_name="requirement", + name="overrides", + field=models.ManyToManyField( + blank=True, + help_text="\n Individual Course objects which do not satisfy this requirement. This set\n is usually used to add exceptions to departments which satisfy requirements.\n For example, CIS classes count as engineering (ENG) courses, but CIS-125 is NOT an\n engineering class, so for the ENG requirement, CIS-125 would be in the overrides\n set even though the CIS Department would be in the departments set.\n\nNote that a course satisfies a requirement if and only if it is not in the\noverrides set, and it is either in the courses set or its department is in the departments\nset.\n", + related_name="pre_ngss_nonrequirement_set", + to="courses.Course", + ), + ), + migrations.RenameModel( + old_name="Requirement", + new_name="PreNGSSRequirement", + ), + migrations.RenameField( + model_name="section", + old_name="restrictions", + new_name="pre_ngss_restrictions", + ), + migrations.RenameModel( + old_name="Restriction", + new_name="PreNGSSRestriction", + ), + migrations.AlterField( + model_name="section", + name="pre_ngss_restrictions", + field=models.ManyToManyField( + blank=True, + help_text="All pre-NGSS (deprecated since 2022C) registration Restriction objects to which this section is subject. This field will be empty for sections in 2022C or later.", + related_name="sections", + to="courses.prengssrestriction", + ), + ), + migrations.AlterField( + model_name="topic", + name="most_recent", + field=models.ForeignKey( + help_text="\nThe most recent course (by semester) of this topic. The `most_recent` course should\nbe the `primary_listing` if it has crosslistings. These invariants are maintained\nby the `Course.save()` and `Topic.merge_with()` methods. Defer to using these methods\nrather than setting this field manually. 
You must change the corresponding\n`Topic` object's `most_recent` field before deleting a Course if it is the\n`most_recent` course (`on_delete=models.PROTECT`).\n", + on_delete=django.db.models.deletion.PROTECT, + related_name="+", + to="courses.course", + ), + ), + ] diff --git a/backend/courses/migrations/0045_course_crn_course_non_null_crn_semester_unique.py b/backend/courses/migrations/0045_course_crn_course_non_null_crn_semester_unique.py index 985a6108e..6619de2a1 100644 --- a/backend/courses/migrations/0045_course_crn_course_non_null_crn_semester_unique.py +++ b/backend/courses/migrations/0045_course_crn_course_non_null_crn_semester_unique.py @@ -1,32 +1,32 @@ -# Generated by Django 4.0.4 on 2022-04-22 06:57 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0044_prengssrequirement_and_more"), - ] - - operations = [ - migrations.AddField( - model_name="course", - name="crn", - field=models.CharField( - blank=True, - db_index=True, - help_text="\nThe CRN ID of the course (unique by course/semester if non-null).\nOnly available on courses after spring 2022 (i.e. after the NGSS transition).\n", - max_length=8, - null=True, - ), - ), - migrations.AddConstraint( - model_name="course", - constraint=models.UniqueConstraint( - condition=models.Q(("crn__isnull", False)), - fields=("crn", "semester"), - name="non_null_crn_semester_unique", - ), - ), - ] +# Generated by Django 4.0.4 on 2022-04-22 06:57 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0044_prengssrequirement_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="course", + name="crn", + field=models.CharField( + blank=True, + db_index=True, + help_text="\nThe CRN ID of the course (unique by course/semester if non-null).\nOnly available on courses after spring 2022 (i.e. after the NGSS transition).\n", + max_length=8, + null=True, + ), + ), + migrations.AddConstraint( + model_name="course", + constraint=models.UniqueConstraint( + condition=models.Q(("crn__isnull", False)), + fields=("crn", "semester"), + name="non_null_crn_semester_unique", + ), + ), + ] diff --git a/backend/courses/migrations/0046_remove_course_non_null_crn_semester_unique_and_more.py b/backend/courses/migrations/0046_remove_course_non_null_crn_semester_unique_and_more.py index 45084f6c8..0e1a35793 100644 --- a/backend/courses/migrations/0046_remove_course_non_null_crn_semester_unique_and_more.py +++ b/backend/courses/migrations/0046_remove_course_non_null_crn_semester_unique_and_more.py @@ -1,32 +1,32 @@ -# Generated by Django 4.0.4 on 2022-04-22 20:49 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0045_course_crn_course_non_null_crn_semester_unique"), - ] - - operations = [ - migrations.RemoveConstraint( - model_name="course", - name="non_null_crn_semester_unique", - ), - migrations.RemoveField( - model_name="course", - name="crn", - ), - migrations.AddField( - model_name="section", - name="crn", - field=models.CharField( - blank=True, - db_index=True, - help_text="\nThe CRN ID of the section.\nOnly available on sections after spring 2022 (i.e. 
after the NGSS transition).\n", - max_length=8, - null=True, - ), - ), - ] +# Generated by Django 4.0.4 on 2022-04-22 20:49 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0045_course_crn_course_non_null_crn_semester_unique"), + ] + + operations = [ + migrations.RemoveConstraint( + model_name="course", + name="non_null_crn_semester_unique", + ), + migrations.RemoveField( + model_name="course", + name="crn", + ), + migrations.AddField( + model_name="section", + name="crn", + field=models.CharField( + blank=True, + db_index=True, + help_text="\nThe CRN ID of the section.\nOnly available on sections after spring 2022 (i.e. after the NGSS transition).\n", + max_length=8, + null=True, + ), + ), + ] diff --git a/backend/courses/migrations/0047_alter_room_number.py b/backend/courses/migrations/0047_alter_room_number.py index 3bbfaeff6..4fc8a1525 100644 --- a/backend/courses/migrations/0047_alter_room_number.py +++ b/backend/courses/migrations/0047_alter_room_number.py @@ -1,21 +1,21 @@ -# Generated by Django 4.0.3 on 2022-05-26 05:24 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0046_remove_course_non_null_crn_semester_unique_and_more"), - ] - - operations = [ - migrations.AlterField( - model_name="room", - name="number", - field=models.CharField( - help_text="The room number, e.g. `101` for Wu and Chen Auditorium in Levine.", - max_length=8, - ), - ), - ] +# Generated by Django 4.0.3 on 2022-05-26 05:24 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0046_remove_course_non_null_crn_semester_unique_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="room", + name="number", + field=models.CharField( + help_text="The room number, e.g. `101` for Wu and Chen Auditorium in Levine.", + max_length=8, + ), + ), + ] diff --git a/backend/courses/migrations/0047_alter_section_pre_ngss_restrictions_ngssrestriction_and_more.py b/backend/courses/migrations/0047_alter_section_pre_ngss_restrictions_ngssrestriction_and_more.py index 7e43ea716..16ea8bef4 100644 --- a/backend/courses/migrations/0047_alter_section_pre_ngss_restrictions_ngssrestriction_and_more.py +++ b/backend/courses/migrations/0047_alter_section_pre_ngss_restrictions_ngssrestriction_and_more.py @@ -1,136 +1,136 @@ -# Generated by Django 4.0.4 on 2022-05-22 22:47 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0046_remove_course_non_null_crn_semester_unique_and_more"), - ] - - operations = [ - migrations.AlterField( - model_name="section", - name="pre_ngss_restrictions", - field=models.ManyToManyField( - blank=True, - help_text="All pre-NGSS (deprecated since 2022C) registration NGSSRestriction objects to which this section is subject. 
This field will be empty for sections in 2022C or later.", - related_name="sections", - to="courses.prengssrestriction", - ), - ), - migrations.CreateModel( - name="NGSSRestriction", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ( - "code", - models.CharField( - help_text="\nThe code of the restriction.\n", max_length=10, unique=True - ), - ), - ( - "restriction_type", - models.CharField( - choices=[ - ("ATTR", "Attribute"), - ("CAMP", "Campus"), - ("CLASI", "Classification"), - ("COHO", "Cohort"), - ("DEGR", "Degree"), - ("DIVI", "Division"), - ("LVL", "Level"), - ("MAJ", "Major"), - ("MIN", "Minor"), - ("PROG", "Program"), - ("SPEC", "Special Approval"), - ], - db_index=True, - help_text='\nWhat the restriction is based on (e.g., campus).\n
    "ATTR""Attribute"
    "CAMP""Campus"
    "CLASI""Classification"
    "COHO""Cohort"
    "DEGR""Degree"
    "DIVI""Division"
    "LVL""Level"
    "MAJ""Major"
    "MIN""Minor"
    "PROG""Program"
    "SPEC""Special Approval"
    ', - max_length=5, - ), - ), - ( - "include_or_exclude", - models.BooleanField( - help_text='\nWhether this is an include or exclude restriction. Corresponds to the incl_excl_ind\nresponse field. True if include (ie, incl_excl_ind is "I") and False if exclude ("E").\n' - ), - ), - ( - "description", - models.TextField(help_text="\nThe registration restriction description.\n"), - ), - ( - "courses", - models.ManyToManyField( - blank=True, - help_text="\nIndividual Course objects which have this restriction.\n", - related_name="ngss_restrictions", - to="courses.course", - ), - ), - ], - ), - migrations.CreateModel( - name="Attribute", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ( - "code", - models.CharField( - help_text="\nA registration attribute code, for instance 'WUOM' for Wharton OIDD Operations track.\nSee [https://bit.ly/3L8bQDA](https://bit.ly/3L8bQDA)\nfor all options\n", - max_length=10, - unique=True, - ), - ), - ( - "description", - models.TextField( - help_text="\nThe registration attribute description, e.g. 'Wharton OIDD Operation'\nfor the WUOM attribute.\nSee [https://bit.ly/3L8bQDA](https://bit.ly/3L8bQDA) for all options.\n" - ), - ), - ( - "school", - models.CharField( - choices=[ - ("SAS", "School of Arts and Sciences"), - ("LPS", "College of Liberal and Professional Studies"), - ("SEAS", "Engineering"), - ("DSGN", "Design"), - ("GSE", "Graduate School of Education"), - ("LAW", "Law School"), - ("MED", "School of Medicine"), - ("MODE", "Grade Mode"), - ("VET", "School of Veterinary Medicine"), - ("NUR", "Nursing"), - ("WH", "Wharton"), - ("OTHER", "Other"), - ], - db_index=True, - help_text='\nWhat school/program this attribute belongs to, e.g. `SAS` for `ASOC` restriction\nor `WH` for `WUOM` or `MODE` for `QP` \n
    "SAS""School of Arts and Sciences"
    "LPS""College of Liberal and Professional Studies"
    "SEAS""Engineering"
    "DSGN""Design"
    "GSE""Graduate School of Education"
    "LAW""Law School"
    "MED""School of Medicine"
    "MODE""Grade Mode"
    "VET""School of Veterinary Medicine"
    "NUR""Nursing"
    "WH""Wharton"
    "OTHER""Other"
    ', - max_length=5, - ), - ), - ( - "courses", - models.ManyToManyField( - blank=True, - help_text="\nCourse objects which have this attribute\n", - related_name="attributes", - to="courses.course", - ), - ), - ], - ), - ] +# Generated by Django 4.0.4 on 2022-05-22 22:47 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0046_remove_course_non_null_crn_semester_unique_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="section", + name="pre_ngss_restrictions", + field=models.ManyToManyField( + blank=True, + help_text="All pre-NGSS (deprecated since 2022C) registration NGSSRestriction objects to which this section is subject. This field will be empty for sections in 2022C or later.", + related_name="sections", + to="courses.prengssrestriction", + ), + ), + migrations.CreateModel( + name="NGSSRestriction", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ( + "code", + models.CharField( + help_text="\nThe code of the restriction.\n", max_length=10, unique=True + ), + ), + ( + "restriction_type", + models.CharField( + choices=[ + ("ATTR", "Attribute"), + ("CAMP", "Campus"), + ("CLASI", "Classification"), + ("COHO", "Cohort"), + ("DEGR", "Degree"), + ("DIVI", "Division"), + ("LVL", "Level"), + ("MAJ", "Major"), + ("MIN", "Minor"), + ("PROG", "Program"), + ("SPEC", "Special Approval"), + ], + db_index=True, + help_text='\nWhat the restriction is based on (e.g., campus).\n
    "ATTR""Attribute"
    "CAMP""Campus"
    "CLASI""Classification"
    "COHO""Cohort"
    "DEGR""Degree"
    "DIVI""Division"
    "LVL""Level"
    "MAJ""Major"
    "MIN""Minor"
    "PROG""Program"
    "SPEC""Special Approval"
    ', + max_length=5, + ), + ), + ( + "include_or_exclude", + models.BooleanField( + help_text='\nWhether this is an include or exclude restriction. Corresponds to the incl_excl_ind\nresponse field. True if include (ie, incl_excl_ind is "I") and False if exclude ("E").\n' + ), + ), + ( + "description", + models.TextField(help_text="\nThe registration restriction description.\n"), + ), + ( + "courses", + models.ManyToManyField( + blank=True, + help_text="\nIndividual Course objects which have this restriction.\n", + related_name="ngss_restrictions", + to="courses.course", + ), + ), + ], + ), + migrations.CreateModel( + name="Attribute", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ( + "code", + models.CharField( + help_text="\nA registration attribute code, for instance 'WUOM' for Wharton OIDD Operations track.\nSee [https://bit.ly/3L8bQDA](https://bit.ly/3L8bQDA)\nfor all options\n", + max_length=10, + unique=True, + ), + ), + ( + "description", + models.TextField( + help_text="\nThe registration attribute description, e.g. 'Wharton OIDD Operation'\nfor the WUOM attribute.\nSee [https://bit.ly/3L8bQDA](https://bit.ly/3L8bQDA) for all options.\n" + ), + ), + ( + "school", + models.CharField( + choices=[ + ("SAS", "School of Arts and Sciences"), + ("LPS", "College of Liberal and Professional Studies"), + ("SEAS", "Engineering"), + ("DSGN", "Design"), + ("GSE", "Graduate School of Education"), + ("LAW", "Law School"), + ("MED", "School of Medicine"), + ("MODE", "Grade Mode"), + ("VET", "School of Veterinary Medicine"), + ("NUR", "Nursing"), + ("WH", "Wharton"), + ("OTHER", "Other"), + ], + db_index=True, + help_text='\nWhat school/program this attribute belongs to, e.g. `SAS` for `ASOC` restriction\nor `WH` for `WUOM` or `MODE` for `QP` \n
    "SAS""School of Arts and Sciences"
    "LPS""College of Liberal and Professional Studies"
    "SEAS""Engineering"
    "DSGN""Design"
    "GSE""Graduate School of Education"
    "LAW""Law School"
    "MED""School of Medicine"
    "MODE""Grade Mode"
    "VET""School of Veterinary Medicine"
    "NUR""Nursing"
    "WH""Wharton"
    "OTHER""Other"
    ', + max_length=5, + ), + ), + ( + "courses", + models.ManyToManyField( + blank=True, + help_text="\nCourse objects which have this attribute\n", + related_name="attributes", + to="courses.course", + ), + ), + ], + ), + ] diff --git a/backend/courses/migrations/0048_alter_attribute_school.py b/backend/courses/migrations/0048_alter_attribute_school.py index bce93bc1c..ddf7524d6 100644 --- a/backend/courses/migrations/0048_alter_attribute_school.py +++ b/backend/courses/migrations/0048_alter_attribute_school.py @@ -1,36 +1,36 @@ -# Generated by Django 4.0.4 on 2022-05-25 00:44 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0047_alter_section_pre_ngss_restrictions_ngssrestriction_and_more"), - ] - - operations = [ - migrations.AlterField( - model_name="attribute", - name="school", - field=models.CharField( - choices=[ - ("SAS", "School of Arts and Sciences"), - ("LPS", "College of Liberal and Professional Studies"), - ("SEAS", "Engineering"), - ("DSGN", "Design"), - ("GSE", "Graduate School of Education"), - ("LAW", "Law School"), - ("MED", "School of Medicine"), - ("MODE", "Grade Mode"), - ("VET", "School of Veterinary Medicine"), - ("NUR", "Nursing"), - ("WH", "Wharton"), - ("OTHER", "Other"), - ], - db_index=True, - help_text='\nWhat school/program this attribute belongs to, e.g. `SAS` for `ASOC` restriction\nor `WH` for `WUOM` or `MODE` for `QP`\n
    "SAS""School of Arts and Sciences"
    "LPS""College of Liberal and Professional Studies"
    "SEAS""Engineering"
    "DSGN""Design"
    "GSE""Graduate School of Education"
    "LAW""Law School"
    "MED""School of Medicine"
    "MODE""Grade Mode"
    "VET""School of Veterinary Medicine"
    "NUR""Nursing"
    "WH""Wharton"
    "OTHER""Other"
    ', - max_length=5, - ), - ), - ] +# Generated by Django 4.0.4 on 2022-05-25 00:44 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0047_alter_section_pre_ngss_restrictions_ngssrestriction_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="attribute", + name="school", + field=models.CharField( + choices=[ + ("SAS", "School of Arts and Sciences"), + ("LPS", "College of Liberal and Professional Studies"), + ("SEAS", "Engineering"), + ("DSGN", "Design"), + ("GSE", "Graduate School of Education"), + ("LAW", "Law School"), + ("MED", "School of Medicine"), + ("MODE", "Grade Mode"), + ("VET", "School of Veterinary Medicine"), + ("NUR", "Nursing"), + ("WH", "Wharton"), + ("OTHER", "Other"), + ], + db_index=True, + help_text='\nWhat school/program this attribute belongs to, e.g. `SAS` for `ASOC` restriction\nor `WH` for `WUOM` or `MODE` for `QP`\n
    "SAS""School of Arts and Sciences"
    "LPS""College of Liberal and Professional Studies"
    "SEAS""Engineering"
    "DSGN""Design"
    "GSE""Graduate School of Education"
    "LAW""Law School"
    "MED""School of Medicine"
    "MODE""Grade Mode"
    "VET""School of Veterinary Medicine"
    "NUR""Nursing"
    "WH""Wharton"
    "OTHER""Other"
    ', + max_length=5, + ), + ), + ] diff --git a/backend/courses/migrations/0049_merge_20220529_2355.py b/backend/courses/migrations/0049_merge_20220529_2355.py index 9cc7267a3..3f739b2db 100644 --- a/backend/courses/migrations/0049_merge_20220529_2355.py +++ b/backend/courses/migrations/0049_merge_20220529_2355.py @@ -1,13 +1,13 @@ -# Generated by Django 4.0.4 on 2022-05-30 03:55 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0047_alter_room_number"), - ("courses", "0048_alter_attribute_school"), - ] - - operations = [] +# Generated by Django 4.0.4 on 2022-05-30 03:55 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0047_alter_room_number"), + ("courses", "0048_alter_attribute_school"), + ] + + operations = [] diff --git a/backend/courses/migrations/0050_rename_include_or_exclude_ngssrestriction_inclusive_and_more.py b/backend/courses/migrations/0050_rename_include_or_exclude_ngssrestriction_inclusive_and_more.py index 4195907f4..75d061eae 100644 --- a/backend/courses/migrations/0050_rename_include_or_exclude_ngssrestriction_inclusive_and_more.py +++ b/backend/courses/migrations/0050_rename_include_or_exclude_ngssrestriction_inclusive_and_more.py @@ -1,87 +1,87 @@ -# Generated by Django 4.0.4 on 2022-05-30 05:15 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0049_merge_20220529_2355"), - ] - - operations = [ - migrations.RenameField( - model_name="ngssrestriction", - old_name="include_or_exclude", - new_name="inclusive", - ), - migrations.AlterField( - model_name="attribute", - name="code", - field=models.CharField( - db_index=True, - help_text="\nA registration attribute code, for instance 'WUOM' for Wharton OIDD Operations track.\n", - max_length=10, - unique=True, - ), - ), - migrations.AlterField( - model_name="attribute", - name="description", - field=models.TextField( - help_text="\nThe registration attribute description, e.g. 'Wharton OIDD Operation'\nfor the WUOM attribute.\n" - ), - ), - migrations.AlterField( - model_name="attribute", - name="school", - field=models.CharField( - choices=[ - ("SAS", "School of Arts and Sciences"), - ("LPS", "College of Liberal and Professional Studies"), - ("SEAS", "Engineering"), - ("DSGN", "Design"), - ("GSE", "Graduate School of Education"), - ("LAW", "Law School"), - ("MED", "School of Medicine"), - ("MODE", "Grade Mode"), - ("VET", "School of Veterinary Medicine"), - ("NUR", "Nursing"), - ("WH", "Wharton"), - ("OTHER", "Other"), - ], - db_index=True, - help_text='\nWhat school/program this attribute belongs to, e.g. `SAS` for `ASOC` restriction\nor `WH` for `WUOM` or `MODE` for `QP`. Options and meanings:\n
    "SAS""School of Arts and Sciences"
    "LPS""College of Liberal and Professional Studies"
    "SEAS""Engineering"
    "DSGN""Design"
    "GSE""Graduate School of Education"
    "LAW""Law School"
    "MED""School of Medicine"
    "MODE""Grade Mode"
    "VET""School of Veterinary Medicine"
    "NUR""Nursing"
    "WH""Wharton"
    "OTHER""Other"
    ', - max_length=5, - ), - ), - migrations.AlterField( - model_name="ngssrestriction", - name="code", - field=models.CharField( - db_index=True, - help_text="\nThe code of the restriction.\n", - max_length=10, - unique=True, - ), - ), - migrations.AlterField( - model_name="ngssrestriction", - name="restriction_type", - field=models.CharField( - db_index=True, - help_text="\nWhat the restriction is based on (e.g., Campus).\n", - max_length=25, - ), - ), - migrations.AlterField( - model_name="section", - name="pre_ngss_restrictions", - field=models.ManyToManyField( - blank=True, - help_text="All pre-NGSS (deprecated since 2022C) registration Restriction objects to which this section is subject. This field will be empty for sections in 2022C or later.", - related_name="sections", - to="courses.prengssrestriction", - ), - ), - ] +# Generated by Django 4.0.4 on 2022-05-30 05:15 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0049_merge_20220529_2355"), + ] + + operations = [ + migrations.RenameField( + model_name="ngssrestriction", + old_name="include_or_exclude", + new_name="inclusive", + ), + migrations.AlterField( + model_name="attribute", + name="code", + field=models.CharField( + db_index=True, + help_text="\nA registration attribute code, for instance 'WUOM' for Wharton OIDD Operations track.\n", + max_length=10, + unique=True, + ), + ), + migrations.AlterField( + model_name="attribute", + name="description", + field=models.TextField( + help_text="\nThe registration attribute description, e.g. 'Wharton OIDD Operation'\nfor the WUOM attribute.\n" + ), + ), + migrations.AlterField( + model_name="attribute", + name="school", + field=models.CharField( + choices=[ + ("SAS", "School of Arts and Sciences"), + ("LPS", "College of Liberal and Professional Studies"), + ("SEAS", "Engineering"), + ("DSGN", "Design"), + ("GSE", "Graduate School of Education"), + ("LAW", "Law School"), + ("MED", "School of Medicine"), + ("MODE", "Grade Mode"), + ("VET", "School of Veterinary Medicine"), + ("NUR", "Nursing"), + ("WH", "Wharton"), + ("OTHER", "Other"), + ], + db_index=True, + help_text='\nWhat school/program this attribute belongs to, e.g. `SAS` for `ASOC` restriction\nor `WH` for `WUOM` or `MODE` for `QP`. Options and meanings:\n
    "SAS""School of Arts and Sciences"
    "LPS""College of Liberal and Professional Studies"
    "SEAS""Engineering"
    "DSGN""Design"
    "GSE""Graduate School of Education"
    "LAW""Law School"
    "MED""School of Medicine"
    "MODE""Grade Mode"
    "VET""School of Veterinary Medicine"
    "NUR""Nursing"
    "WH""Wharton"
    "OTHER""Other"
    ', + max_length=5, + ), + ), + migrations.AlterField( + model_name="ngssrestriction", + name="code", + field=models.CharField( + db_index=True, + help_text="\nThe code of the restriction.\n", + max_length=10, + unique=True, + ), + ), + migrations.AlterField( + model_name="ngssrestriction", + name="restriction_type", + field=models.CharField( + db_index=True, + help_text="\nWhat the restriction is based on (e.g., Campus).\n", + max_length=25, + ), + ), + migrations.AlterField( + model_name="section", + name="pre_ngss_restrictions", + field=models.ManyToManyField( + blank=True, + help_text="All pre-NGSS (deprecated since 2022C) registration Restriction objects to which this section is subject. This field will be empty for sections in 2022C or later.", + related_name="sections", + to="courses.prengssrestriction", + ), + ), + ] diff --git a/backend/courses/migrations/0051_alter_attribute_school.py b/backend/courses/migrations/0051_alter_attribute_school.py index 7f9e17613..6b9768fe0 100644 --- a/backend/courses/migrations/0051_alter_attribute_school.py +++ b/backend/courses/migrations/0051_alter_attribute_school.py @@ -1,36 +1,36 @@ -# Generated by Django 4.0.4 on 2022-06-14 04:21 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0050_rename_include_or_exclude_ngssrestriction_inclusive_and_more"), - ] - - operations = [ - migrations.AlterField( - model_name="attribute", - name="school", - field=models.CharField( - choices=[ - ("SAS", "School of Arts and Sciences"), - ("LPS", "College of Liberal and Professional Studies"), - ("SEAS", "Engineering"), - ("DSGN", "Design"), - ("GSE", "Graduate School of Education"), - ("LAW", "Law School"), - ("MED", "School of Medicine"), - ("MODE", "Grade Mode"), - ("VET", "School of Veterinary Medicine"), - ("NUR", "Nursing"), - ("WH", "Wharton"), - ], - db_index=True, - help_text='\nWhat school/program this attribute belongs to, e.g. `SAS` for `ASOC` restriction\nor `WH` for `WUOM` or `MODE` for `QP`. Options and meanings:\n
    "SAS""School of Arts and Sciences"
    "LPS""College of Liberal and Professional Studies"
    "SEAS""Engineering"
    "DSGN""Design"
    "GSE""Graduate School of Education"
    "LAW""Law School"
    "MED""School of Medicine"
    "MODE""Grade Mode"
    "VET""School of Veterinary Medicine"
    "NUR""Nursing"
    "WH""Wharton"
    ', - max_length=5, - null=True, - ), - ), - ] +# Generated by Django 4.0.4 on 2022-06-14 04:21 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0050_rename_include_or_exclude_ngssrestriction_inclusive_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="attribute", + name="school", + field=models.CharField( + choices=[ + ("SAS", "School of Arts and Sciences"), + ("LPS", "College of Liberal and Professional Studies"), + ("SEAS", "Engineering"), + ("DSGN", "Design"), + ("GSE", "Graduate School of Education"), + ("LAW", "Law School"), + ("MED", "School of Medicine"), + ("MODE", "Grade Mode"), + ("VET", "School of Veterinary Medicine"), + ("NUR", "Nursing"), + ("WH", "Wharton"), + ], + db_index=True, + help_text='\nWhat school/program this attribute belongs to, e.g. `SAS` for `ASOC` restriction\nor `WH` for `WUOM` or `MODE` for `QP`. Options and meanings:\n
    "SAS""School of Arts and Sciences"
    "LPS""College of Liberal and Professional Studies"
    "SEAS""Engineering"
    "DSGN""Design"
    "GSE""Graduate School of Education"
    "LAW""Law School"
    "MED""School of Medicine"
    "MODE""Grade Mode"
    "VET""School of Veterinary Medicine"
    "NUR""Nursing"
    "WH""Wharton"
    ', + max_length=5, + null=True, + ), + ), + ] diff --git a/backend/courses/migrations/0051_alter_ngssrestriction_inclusive.py b/backend/courses/migrations/0051_alter_ngssrestriction_inclusive.py index 6f30294f7..3620ff77b 100644 --- a/backend/courses/migrations/0051_alter_ngssrestriction_inclusive.py +++ b/backend/courses/migrations/0051_alter_ngssrestriction_inclusive.py @@ -1,20 +1,20 @@ -# Generated by Django 4.0.4 on 2022-05-30 23:41 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0050_rename_include_or_exclude_ngssrestriction_inclusive_and_more"), - ] - - operations = [ - migrations.AlterField( - model_name="ngssrestriction", - name="inclusive", - field=models.BooleanField( - help_text='\nWhether this is an include or exclude restriction. Corresponds to the `incl_excl_ind`\nresponse field. `True` if include (ie, `incl_excl_ind` is "I") and `False` if exclude ("E").\n' - ), - ), - ] +# Generated by Django 4.0.4 on 2022-05-30 23:41 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0050_rename_include_or_exclude_ngssrestriction_inclusive_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="ngssrestriction", + name="inclusive", + field=models.BooleanField( + help_text='\nWhether this is an include or exclude restriction. Corresponds to the `incl_excl_ind`\nresponse field. `True` if include (ie, `incl_excl_ind` is "I") and `False` if exclude ("E").\n' + ), + ), + ] diff --git a/backend/courses/migrations/0052_merge_20220614_0027.py b/backend/courses/migrations/0052_merge_20220614_0027.py index baade6f5c..bef662e43 100644 --- a/backend/courses/migrations/0052_merge_20220614_0027.py +++ b/backend/courses/migrations/0052_merge_20220614_0027.py @@ -1,13 +1,13 @@ -# Generated by Django 4.0.4 on 2022-06-14 04:27 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0051_alter_attribute_school"), - ("courses", "0051_alter_ngssrestriction_inclusive"), - ] - - operations = [] +# Generated by Django 4.0.4 on 2022-06-14 04:27 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0051_alter_attribute_school"), + ("courses", "0051_alter_ngssrestriction_inclusive"), + ] + + operations = [] diff --git a/backend/courses/migrations/0053_alter_ngssrestriction_code.py b/backend/courses/migrations/0053_alter_ngssrestriction_code.py index 881a64cb5..593a238fb 100644 --- a/backend/courses/migrations/0053_alter_ngssrestriction_code.py +++ b/backend/courses/migrations/0053_alter_ngssrestriction_code.py @@ -1,23 +1,23 @@ -# Generated by Django 4.0.5 on 2022-06-26 01:12 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0052_merge_20220614_0027"), - ] - - operations = [ - migrations.AlterField( - model_name="ngssrestriction", - name="code", - field=models.CharField( - db_index=True, - help_text="\nThe code of the restriction.\n", - max_length=16, - unique=True, - ), - ), - ] +# Generated by Django 4.0.5 on 2022-06-26 01:12 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0052_merge_20220614_0027"), + ] + + operations = [ + migrations.AlterField( + model_name="ngssrestriction", + name="code", + field=models.CharField( + db_index=True, + help_text="\nThe code of the 
restriction.\n", + max_length=16, + unique=True, + ), + ), + ] diff --git a/backend/courses/migrations/0054_userprofile_uuid_secret_and_more.py b/backend/courses/migrations/0054_userprofile_uuid_secret_and_more.py index 86ed89727..68255d34e 100644 --- a/backend/courses/migrations/0054_userprofile_uuid_secret_and_more.py +++ b/backend/courses/migrations/0054_userprofile_uuid_secret_and_more.py @@ -1,22 +1,22 @@ -# Generated by Django 4.1.5 on 2023-01-15 16:39 - -import uuid - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0053_alter_ngssrestriction_code"), - ] - - operations = [ - migrations.AlterField( - model_name="ngssrestriction", - name="inclusive", - field=models.BooleanField( - help_text='\nWhether this is an include or exclude restriction. Corresponds to the `incl_excl_ind`\nresponse field. `True` if include (ie, `incl_excl_ind` is "I") and `False`\nif exclude ("E").\n' - ), - ), - ] +# Generated by Django 4.1.5 on 2023-01-15 16:39 + +import uuid + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0053_alter_ngssrestriction_code"), + ] + + operations = [ + migrations.AlterField( + model_name="ngssrestriction", + name="inclusive", + field=models.BooleanField( + help_text='\nWhether this is an include or exclude restriction. Corresponds to the `incl_excl_ind`\nresponse field. `True` if include (ie, `incl_excl_ind` is "I") and `False`\nif exclude ("E").\n' + ), + ), + ] diff --git a/backend/courses/registrar.py b/backend/courses/registrar.py index 48908f504..e7842d6a6 100644 --- a/backend/courses/registrar.py +++ b/backend/courses/registrar.py @@ -1,109 +1,109 @@ -import json -import logging - -import requests -from django.conf import settings -from tqdm import tqdm - -from courses.util import translate_semester - - -logger = logging.getLogger(__name__) - - -def get_token(): - r = requests.post( - settings.OPEN_DATA_TOKEN_URL, - data={"grant_type": "client_credentials"}, - auth=(settings.OPEN_DATA_CLIENT_ID, settings.OPEN_DATA_OIDC_SECRET), - ) - if not r.ok: - raise ValueError(f"OpenData token URL responded with status code {r.status_code}: {r.text}") - return r.json()["access_token"] - - -def get_headers(): - return { - "Authorization": "Bearer " + get_token(), - } - - -def make_api_request(params): - headers = get_headers() - url = f"{settings.OPEN_DATA_API_BASE}/v1/course_section_search" - r = requests.get( - url, - params=params, - headers=headers, - ) - if not r.ok: - raise ValueError(f"OpenData API request failed with status code {r.status_code}: {r.text}") - return r.json() - - -def report_api_error(err): - try: - msg = json.loads(err) - logger.error(msg.get("service_meta", {}).get("error_text", "no error text")) - except json.JSONDecodeError: - logger.error("Penn API error", extra={"error_msg": err}) - - -def get_all_course_status(semester): - semester = translate_semester(semester) - headers = get_headers() - url = f"{settings.OPEN_DATA_API_BASE}/v1/course_section_status/{semester}/all" - r = requests.get(url, headers=headers) - if r.status_code == requests.codes.ok: - return r.json().get("result_data", []) - else: - report_api_error(r.text) - raise RuntimeError( - f"Registrar API request failed with code {r.status_code}. 
" - f'Message returned: "{r.text}"' - ) - - -def get_departments(): - headers = get_headers() - url = f"{settings.OPEN_DATA_API_BASE}/v1/course_section_search_parameters" - r = requests.get(url, headers=headers) - if r.status_code == requests.codes.ok: - result_data = r.json().get("result_data", []) - if len(result_data) > 0: - return result_data[0]["subjects_map"] - else: - raise ValueError("OpenData API returned data with no populated result_data field.") - else: - raise ValueError(f"OpenData API responded with status code {r.status_code}: {r.text}.") - - -def get_courses(query, semester): - semester = translate_semester(semester) - - params = { - "section_id": query, - "term": semester, - "page_number": 1, - "number_of_results_per_page": 1000, - } - - results = [] - pbar = None - while True: - logger.info("making request for page #%d" % params["page_number"]) - data = make_api_request(params) - if pbar is None: - pbar = tqdm(total=data["service_meta"]["number_of_pages"]) - pbar.update(1) - next_page = data["service_meta"]["next_page_number"] - results.extend(data["result_data"]) - if not next_page or int(next_page) <= params["page_number"]: - break - params["page_number"] = next_page - if pbar is not None: - pbar.close() - - distinct_results = {r["section_id"]: r for r in results if r["section_id"]}.values() - - return distinct_results +import json +import logging + +import requests +from django.conf import settings +from tqdm import tqdm + +from courses.util import translate_semester + + +logger = logging.getLogger(__name__) + + +def get_token(): + r = requests.post( + settings.OPEN_DATA_TOKEN_URL, + data={"grant_type": "client_credentials"}, + auth=(settings.OPEN_DATA_CLIENT_ID, settings.OPEN_DATA_OIDC_SECRET), + ) + if not r.ok: + raise ValueError(f"OpenData token URL responded with status code {r.status_code}: {r.text}") + return r.json()["access_token"] + + +def get_headers(): + return { + "Authorization": "Bearer " + get_token(), + } + + +def make_api_request(params): + headers = get_headers() + url = f"{settings.OPEN_DATA_API_BASE}/v1/course_section_search" + r = requests.get( + url, + params=params, + headers=headers, + ) + if not r.ok: + raise ValueError(f"OpenData API request failed with status code {r.status_code}: {r.text}") + return r.json() + + +def report_api_error(err): + try: + msg = json.loads(err) + logger.error(msg.get("service_meta", {}).get("error_text", "no error text")) + except json.JSONDecodeError: + logger.error("Penn API error", extra={"error_msg": err}) + + +def get_all_course_status(semester): + semester = translate_semester(semester) + headers = get_headers() + url = f"{settings.OPEN_DATA_API_BASE}/v1/course_section_status/{semester}/all" + r = requests.get(url, headers=headers) + if r.status_code == requests.codes.ok: + return r.json().get("result_data", []) + else: + report_api_error(r.text) + raise RuntimeError( + f"Registrar API request failed with code {r.status_code}. 
" + f'Message returned: "{r.text}"' + ) + + +def get_departments(): + headers = get_headers() + url = f"{settings.OPEN_DATA_API_BASE}/v1/course_section_search_parameters" + r = requests.get(url, headers=headers) + if r.status_code == requests.codes.ok: + result_data = r.json().get("result_data", []) + if len(result_data) > 0: + return result_data[0]["subjects_map"] + else: + raise ValueError("OpenData API returned data with no populated result_data field.") + else: + raise ValueError(f"OpenData API responded with status code {r.status_code}: {r.text}.") + + +def get_courses(query, semester): + semester = translate_semester(semester) + + params = { + "section_id": query, + "term": semester, + "page_number": 1, + "number_of_results_per_page": 1000, + } + + results = [] + pbar = None + while True: + logger.info("making request for page #%d" % params["page_number"]) + data = make_api_request(params) + if pbar is None: + pbar = tqdm(total=data["service_meta"]["number_of_pages"]) + pbar.update(1) + next_page = data["service_meta"]["next_page_number"] + results.extend(data["result_data"]) + if not next_page or int(next_page) <= params["page_number"]: + break + params["page_number"] = next_page + if pbar is not None: + pbar.close() + + distinct_results = {r["section_id"]: r for r in results if r["section_id"]}.values() + + return distinct_results diff --git a/backend/courses/search.py b/backend/courses/search.py index 2ed191fa3..38a3d0b52 100644 --- a/backend/courses/search.py +++ b/backend/courses/search.py @@ -1,133 +1,133 @@ -import operator -import re -from functools import reduce - -from django.db.models import Q -from rest_framework import filters - - -def filter_or_lookups_terms(queryset, orm_lookups, search_terms): - """ - Filters the queryset by any of the given orm lookups matching any of the given search terms. - """ - conditions = [] - for search_term in search_terms: - queries = [Q(**{orm_lookup: search_term}) for orm_lookup in orm_lookups] - conditions.append(reduce(operator.or_, queries)) - return queryset.filter(reduce(operator.or_, conditions)) - - -class TypedCourseSearchBackend(filters.SearchFilter): - code_res = [ - re.compile(r"^([A-Za-z]{" + str(dept_len) + r"})\s*-?\s*(\d{1,4}[A-Za-z]?|[A-Za-z]{1,3})?$") - for dept_len in reversed(range(1, 5)) - ] # To avoid ambiguity (e.g. INTL-BUL as INTLBUL), try each dept code length separately - - def get_schema_operation_parameters(self, view): - """For autodocs.""" - return [ - { - "name": "search", - "schema": {"type": "string"}, - "required": False, - "in": "query", - "description": "Search query. 
Can be either a fragment of a course code, or any " - "keyword/professor name.", - }, - ] - - def infer_search_type(self, query): - if not any(r.match(query) for r in self.code_res): - return "keyword" - elif re.search(r"[A-Za-z]{5,}", query): - return "both" - else: - return "course" - - def get_query(self, request): - return request.GET.get(self.search_param, "").strip() - - def get_search_type(self, request): - search_type = request.GET.get("type", "auto") - if search_type == "auto": - # Cache regex results for performance - inferred_search_type = getattr(self, "inferred_search_type", None) - search_type = inferred_search_type or self.infer_search_type(self.get_query(request)) - self.inferred_search_type = search_type - return search_type - - def get_search_terms(self, request): - search_type = self.get_search_type(request) - query = self.get_query(request) - - if search_type == "keyword": - return [query] - - def get_code_prefix(r): - match = r.match(query) - if match: - components = (match.group(1), match.group(2)) - return "-".join([c for c in components if c]) - - terms = [get_code_prefix(r) for r in self.code_res] - if search_type == "both": - terms.append(query) - - return [t for t in terms if t] - - def get_search_fields(self, view, request): - search_type = self.get_search_type(request) - if search_type == "course": - return ["^full_code"] - elif search_type == "keyword": - return ["title", "sections__instructors__name"] - else: - return ["^full_code", "title", "sections__instructors__name"] - - def filter_queryset(self, request, queryset, view): - if not self.get_query(request): - return queryset - - search_fields = self.get_search_fields(view, request) - orm_lookups = [self.construct_search(str(search_field)) for search_field in search_fields] - search_terms = self.get_search_terms(request) - if not search_terms: - return queryset.none() - - return filter_or_lookups_terms(queryset, orm_lookups, search_terms) - - -class TypedSectionSearchBackend(filters.SearchFilter): - code_res = [ - re.compile( - r"^([A-Za-z]{" + str(dept_len) + r"})\s*-?\s*" - r"(\d{1,4}[A-Za-z]?|[A-Za-z]{1,3})?\s*-?\s*" - r"(\d{1,3}|[A-Za-z]{1,3})?$" - ) # To avoid ambiguity (e.g. INTL-BUL as INTLBUL), try each dept code length separately - for dept_len in reversed(range(1, 5)) - ] - - def get_query(self, request): - return request.GET.get(self.search_param, "").strip() - - def get_search_terms(self, request): - query = self.get_query(request) - - def get_code_prefix(r): - match = r.match(query) - if match: - components = (match.group(1), match.group(2), match.group(3)) - return "-".join([c for c in components if c]) - - terms = [get_code_prefix(r) for r in self.code_res] - return [t for t in terms if t] - - def filter_queryset(self, request, queryset, view): - if not self.get_query(request): - return queryset - orm_lookups = [self.construct_search("^full_code")] - search_terms = self.get_search_terms(request) - if not search_terms: - return queryset.none() - - return filter_or_lookups_terms(queryset, orm_lookups, search_terms) +import operator +import re +from functools import reduce + +from django.db.models import Q +from rest_framework import filters + + +def filter_or_lookups_terms(queryset, orm_lookups, search_terms): + """ + Filters the queryset by any of the given orm lookups matching any of the given search terms. 
+ """ + conditions = [] + for search_term in search_terms: + queries = [Q(**{orm_lookup: search_term}) for orm_lookup in orm_lookups] + conditions.append(reduce(operator.or_, queries)) + return queryset.filter(reduce(operator.or_, conditions)) + + +class TypedCourseSearchBackend(filters.SearchFilter): + code_res = [ + re.compile(r"^([A-Za-z]{" + str(dept_len) + r"})\s*-?\s*(\d{1,4}[A-Za-z]?|[A-Za-z]{1,3})?$") + for dept_len in reversed(range(1, 5)) + ] # To avoid ambiguity (e.g. INTL-BUL as INTLBUL), try each dept code length separately + + def get_schema_operation_parameters(self, view): + """For autodocs.""" + return [ + { + "name": "search", + "schema": {"type": "string"}, + "required": False, + "in": "query", + "description": "Search query. Can be either a fragment of a course code, or any " + "keyword/professor name.", + }, + ] + + def infer_search_type(self, query): + if not any(r.match(query) for r in self.code_res): + return "keyword" + elif re.search(r"[A-Za-z]{5,}", query): + return "both" + else: + return "course" + + def get_query(self, request): + return request.GET.get(self.search_param, "").strip() + + def get_search_type(self, request): + search_type = request.GET.get("type", "auto") + if search_type == "auto": + # Cache regex results for performance + inferred_search_type = getattr(self, "inferred_search_type", None) + search_type = inferred_search_type or self.infer_search_type(self.get_query(request)) + self.inferred_search_type = search_type + return search_type + + def get_search_terms(self, request): + search_type = self.get_search_type(request) + query = self.get_query(request) + + if search_type == "keyword": + return [query] + + def get_code_prefix(r): + match = r.match(query) + if match: + components = (match.group(1), match.group(2)) + return "-".join([c for c in components if c]) + + terms = [get_code_prefix(r) for r in self.code_res] + if search_type == "both": + terms.append(query) + + return [t for t in terms if t] + + def get_search_fields(self, view, request): + search_type = self.get_search_type(request) + if search_type == "course": + return ["^full_code"] + elif search_type == "keyword": + return ["title", "sections__instructors__name"] + else: + return ["^full_code", "title", "sections__instructors__name"] + + def filter_queryset(self, request, queryset, view): + if not self.get_query(request): + return queryset + + search_fields = self.get_search_fields(view, request) + orm_lookups = [self.construct_search(str(search_field)) for search_field in search_fields] + search_terms = self.get_search_terms(request) + if not search_terms: + return queryset.none() + + return filter_or_lookups_terms(queryset, orm_lookups, search_terms) + + +class TypedSectionSearchBackend(filters.SearchFilter): + code_res = [ + re.compile( + r"^([A-Za-z]{" + str(dept_len) + r"})\s*-?\s*" + r"(\d{1,4}[A-Za-z]?|[A-Za-z]{1,3})?\s*-?\s*" + r"(\d{1,3}|[A-Za-z]{1,3})?$" + ) # To avoid ambiguity (e.g. 
INTL-BUL as INTLBUL), try each dept code length separately + for dept_len in reversed(range(1, 5)) + ] + + def get_query(self, request): + return request.GET.get(self.search_param, "").strip() + + def get_search_terms(self, request): + query = self.get_query(request) + + def get_code_prefix(r): + match = r.match(query) + if match: + components = (match.group(1), match.group(2), match.group(3)) + return "-".join([c for c in components if c]) + + terms = [get_code_prefix(r) for r in self.code_res] + return [t for t in terms if t] + + def filter_queryset(self, request, queryset, view): + if not self.get_query(request): + return queryset + orm_lookups = [self.construct_search("^full_code")] + search_terms = self.get_search_terms(request) + if not search_terms: + return queryset.none() + + return filter_or_lookups_terms(queryset, orm_lookups, search_terms) diff --git a/backend/courses/util.py b/backend/courses/util.py index 258bb4314..7547e675b 100644 --- a/backend/courses/util.py +++ b/backend/courses/util.py @@ -1,953 +1,1005 @@ -import json -import logging -import os -import re -import uuid -import random -import itertools - -from decimal import Decimal - -from django.core.cache import cache -from django.core.exceptions import ObjectDoesNotExist, ValidationError -from django.db import connection -from django.db.models.aggregates import Count -from django.db.models.expressions import Subquery, Value -from django.db.models.functions.comparison import Coalesce -from django.db.models.signals import post_save -from django.dispatch import receiver -from options.models import Option, get_value -from rest_framework.exceptions import APIException - -from courses.models import ( - Attribute, - Building, - Course, - Department, - Instructor, - Meeting, - NGSSRestriction, - Room, - Section, - StatusUpdate, - User, -) - - -logger = logging.getLogger(__name__) - - -def in_dev(): - return "PennCourses.settings.development" in os.environ["DJANGO_SETTINGS_MODULE"] - - -semester_suffix_map = { - "A": "10", - "B": "20", - "C": "30", -} -semester_suffix_map_inv = {v: k for k, v in semester_suffix_map.items()} - - -def translate_semester(semester): - """ - Translates a semester string (e.g. "2022C") to the format accepted by the new - OpenData API (e.g. "202230"). - """ - if not semester: - return None - old_suffix = semester[-1].upper() - if old_suffix not in semester_suffix_map: - raise ValueError( - f"Invalid semester suffix {old_suffix} (semester must have " - "suffix A, B, or C; e.g. '2022C')." - ) - return semester[:-1] + semester_suffix_map[old_suffix] - - -def translate_semester_inv(semester): - """ - Translates a semester string in the format of the new OpenData API (e.g. "202230") - to the format used by our backend (e.g. "2022C") - """ - if not semester: - return None - new_suffix = semester[-2:] - if new_suffix not in semester_suffix_map_inv: - raise ValueError( - f"Invalid semester suffix {new_suffix} (semester must have " - "suffix '10', '20', or '30'; e.g. '202230')." - ) - return semester[:-2] + semester_suffix_map_inv[new_suffix] - - -def get_current_semester(allow_not_found=False): - """ - This function retrieves the string value of the current semester, either from - memory (if the value has been cached), or from the db (after which it will cache - the value for future use). If the value retrieved from the db is None, an error is thrown - indicating that the SEMESTER Option must be set for this API to work properly. 
- You can prevent an error from being thrown (and cause the function to just return None - in this case) by setting allow_not_found=True. - The cache has a timeout of 25 hours, but is also invalidated whenever the SEMESTER Option - is saved (which will occur whenever it is updated), using a post_save hook. - See the invalidate_current_semester_cache function below to see how this works. - """ - cached_val = cache.get("SEMESTER", None) - if cached_val is not None: - return cached_val - - retrieved_val = get_value("SEMESTER", None) - if not allow_not_found and retrieved_val is None: - raise APIException( - "The SEMESTER runtime option is not set. If you are in dev, you can set this " - "option by running the command " - "'python manage.py setoption SEMESTER 2020C', " - "replacing 2020C with the current semester, in the backend directory (remember " - "to run 'pipenv shell' before running this command, though)." - ) - cache.set("SEMESTER", retrieved_val, timeout=90000) # cache expires every 25 hours - return retrieved_val - - -@receiver(post_save, sender=Option, dispatch_uid="invalidate_current_semester_cache") -def invalidate_current_semester_cache(sender, instance, **kwargs): - """ - This function invalidates the cached SEMESTER value when the SEMESTER option is updated. - """ - from courses.management.commands.load_add_drop_dates import load_add_drop_dates - - # ^ imported here to avoid circular imports - - if instance.key == "SEMESTER": - cache.delete("SEMESTER") - get_or_create_add_drop_period(instance.value) - load_add_drop_dates() - - -def get_semester(datetime): - """ - Given a datetime, estimate the semester of the period of course registration it occurred in. - """ - if 3 <= datetime.month and datetime.month <= 9: - return str(datetime.year) + "C" - if datetime.month < 3: - return str(datetime.year) + "A" - return str(datetime.year + 1) + "A" - - -def get_add_drop_period(semester): - """ - Returns the AddDropPeriod object corresponding to the given semester. Throws the same - errors and behaves the same way as AddDropPeriod.objects.get(semester=semester) but runs faster. - This function uses caching to speed up add/drop period object retrieval. Cached objects - expire every 25 hours, and are also invalidated in the AddDropPeriod.save method. - The add_drop_periods key in cache points to a dictionary mapping semester to add/drop period - object. - """ - from alert.models import AddDropPeriod # imported here to avoid circular imports - - cached_adps = cache.get("add_drop_periods", dict()) - if semester not in cached_adps: - cached_adps[semester] = AddDropPeriod.objects.get(semester=semester) - cache.set("add_drop_periods", cached_adps, timeout=90000) # cache expires every 25 hours - return cached_adps[semester] - - -def get_or_create_add_drop_period(semester): - """ - Behaves the same as get_add_drop_period if an AddDropPeriod object already exists for the given - semester, and otherwise creates a new AddDropPeriod object for the given semester, returning - the created object. - """ - from alert.models import AddDropPeriod - - try: - add_drop = get_add_drop_period(semester) - except AddDropPeriod.DoesNotExist: - add_drop = AddDropPeriod(semester=semester) - add_drop.save() - return add_drop - - -def get_set_id(obj): - """ - Returns the next ID for the given object (which hasn't yet been created). 
- """ - if obj.id: - return obj.id - # Source: https://djangosnippets.org/snippets/10474/ - with connection.cursor() as cursor: - # NOTE: this relies on PostgreSQL-specific details for autoincrement - # https://www.postgresql.org/docs/9.4/functions-sequence.html - cursor.execute( - "SELECT nextval('{0}_{1}_{2}_seq'::regclass)".format( - obj._meta.app_label.lower(), - obj._meta.object_name.lower(), - obj._meta.pk.name, - ) - ) - obj.id = obj.pk = cursor.fetchone()[0] - return obj.pk - - -def is_fk_set(obj, fk_field): - """ - Returns true if the specified foreign key field has been - set on the given object, false otherwise. - """ - return bool(getattr(obj, fk_field, None) or getattr(obj, fk_field + "_id", None)) - - -""" -Assumptions of our course code parsing regex: - - Department code is 1-4 letters - - Course code is (4 digits with an optional trailing letter) or (3 digits) or (3 letters) - - Section code is 3 digits or 3 letters -""" -section_code_re = re.compile( - r"^([A-Za-z]{1,4})\s*-?\s*(\d{4}[A-Za-z]?|\d{3}|[A-Za-z]{3})\s*-?\s*(\d{3}|[A-Za-z]{3})?$" -) - - -def separate_course_code(course_code, allow_partial=False): - """ - Parse and return a (dept, course, section) ID tuple - given a section full_code in any possible format. - If allow_partial is True, then missing components will be returned as None. - Otherwise, an incomplete match will raise a ValueError. - """ - course_code = course_code.strip() - match = section_code_re.match(course_code) - if match: - components = (match.group(1).upper(), match.group(2), match.group(3)) - if allow_partial or None not in components: - return components - raise ValueError(f"Course code could not be parsed: {course_code}") - - -def get_or_create_course(dept_code, course_id, semester, defaults=None): - dept, _ = Department.objects.get_or_create(code=dept_code) - return Course.objects.get_or_create( - department=dept, code=course_id, semester=semester, defaults=defaults - ) - - -def get_or_create_course_and_section( - course_code, semester, section_manager=None, course_defaults=None, section_defaults=None -): - if section_manager is None: - section_manager = Section.objects - dept_code, course_id, section_id = separate_course_code(course_code) - - course, course_c = get_or_create_course( - dept_code, course_id, semester, defaults=course_defaults - ) - section, section_c = section_manager.get_or_create( - course=course, code=section_id, defaults=section_defaults - ) - - return course, section, course_c, section_c - - -def get_course_and_section(course_code, semester, section_manager=None): - if section_manager is None: - section_manager = Section.objects - - dept_code, course_id, section_id = separate_course_code(course_code) - course = Course.objects.get(department__code=dept_code, code=course_id, semester=semester) - section = section_manager.get(course=course, code=section_id) - return course, section - - -def update_percent_open(section, new_status_update): - """ - This function updates a section's percent_open field when a new status update is processed. 
- """ - add_drop = get_or_create_add_drop_period(section.semester) - last_status_update = section.last_status_update - if new_status_update.created_at < add_drop.estimated_start: - return - if last_status_update is None: - section.percent_open = Decimal(int(new_status_update.old_status == "O")) - section.save() - else: - if last_status_update.created_at >= add_drop.estimated_end: - return - seconds_before_last = Decimal( - max((last_status_update.created_at - add_drop.estimated_start).total_seconds(), 0) - ) - seconds_since_last = Decimal( - max( - ( - min(new_status_update.created_at, add_drop.estimated_end) - - max(last_status_update.created_at, add_drop.estimated_start) - ).total_seconds(), - 0, - ) - ) - section.percent_open = ( - Decimal(section.percent_open) * seconds_before_last - + int(new_status_update.old_status == "O") * seconds_since_last - ) / (seconds_before_last + seconds_since_last) - section.save() - - -def record_update(section, semester, old_status, new_status, alerted, req, created_at=None): - from alert.models import validate_add_drop_semester # avoid circular imports - - u = StatusUpdate( - section=section, - old_status=old_status, - new_status=new_status, - alert_sent=alerted, - request_body=req, - ) - if created_at is not None: - u.created_at = created_at - u.save() - - valid_status_choices = dict(Section.STATUS_CHOICES).keys() - - def validate_status(name, status): - if status not in valid_status_choices: - raise ValidationError( - f"{name} is invalid; expected a value in {valid_status_choices}, but got {status}" - ) - - validate_status("Old status", old_status) - validate_status("New status", new_status) - - # Raises ValidationError if semester is not fall or spring (and correctly formatted) - validate_add_drop_semester(semester) - update_percent_open(section, u) - - return u - - -def merge_instructors(user, name): - """ - Merge the instructor corresponding to the given user into the - instructor with the given name, if both exist. - """ - from review.management.commands.mergeinstructors import resolve_duplicates - - def stat(key, amt=1, element=None): - return - - try: - user_instructor = Instructor.objects.get(user=user) - name_instructor = Instructor.objects.get(name=name) - duplicates = {user_instructor, name_instructor} - if len(duplicates) == 1: - return - resolve_duplicates([duplicates], dry_run=False, stat=stat) - except Instructor.DoesNotExist: - pass - - -def set_instructors(section, instructors): - instructor_obs = [] - for instructor in instructors: - middle_initial = instructor["middle_initial"] - if middle_initial: - middle_initial += "." 
- name_components = ( - instructor["first_name"], - middle_initial, - instructor["last_name"], - ) - name = " ".join([c for c in name_components if c]) - penn_id = int(instructor["penn_id"]) - try: - merge_instructors(User.objects.get(id=penn_id), name) - instructor_ob = Instructor.objects.get(user_id=penn_id) - instructor_ob.name = name - instructor_ob.save() - except (Instructor.DoesNotExist, User.DoesNotExist): - user, user_created = User.objects.get_or_create( - id=penn_id, defaults={"username": uuid.uuid4()} - ) - if user_created: - user.set_unusable_password() - user.save() - instructor_ob, _ = Instructor.objects.get_or_create(name=name) - instructor_ob.user = user - instructor_ob.save() - instructor_obs.append(instructor_ob) - section.instructors.set(instructor_obs) - - -def get_room(building_code, room_number): - building, _ = Building.objects.get_or_create(code=building_code) - room, _ = Room.objects.get_or_create(building=building, number=room_number) - return room - - -def extract_date(date_str): - if not date_str: - return None - date_str = date_str.split(" ")[0] - if len(date_str.split("-")) != 3: - return None - return date_str - - -def clean_meetings(meetings): - return { - ( - tuple(sorted(list(set(m["days"])))), - m["begin_time"], - m["end_time"], - m["building_code"], - m["room_code"], - ): m - for m in meetings - if m["days"] and m["begin_time"] and m["end_time"] - }.values() - - -def set_meetings(section, meetings): - meetings = clean_meetings(meetings) - - for meeting in meetings: - meeting["days"] = "".join(sorted(list(set(meeting["days"])))) - meeting_times = [ - f"{meeting['days']} {meeting['begin_time']} - {meeting['end_time']}" for meeting in meetings - ] - section.meeting_times = json.dumps(meeting_times) - - section.meetings.all().delete() - for meeting in meetings: - online = ( - not meeting["building_code"] - or not meeting["room_code"] - or meeting.get("building_desc") - and ( - meeting["building_desc"].lower() == "online" - or meeting["building_desc"].lower() == "no room needed" - ) - ) - room = None if online else get_room(meeting["building_code"], meeting["room_code"]) - start_time = Decimal(meeting["begin_time_24"]) / 100 - end_time = Decimal(meeting["end_time_24"]) / 100 - start_date = extract_date(meeting.get("start_date")) - end_date = extract_date(meeting.get("end_date")) - for day in list(meeting["days"]): - meeting = Meeting.objects.update_or_create( - section=section, - day=day, - start=start_time, - end=end_time, - room=room, - defaults={ - "start_date": start_date, - "end_date": end_date, - }, - ) - - -def add_associated_sections(section, linked_sections): - semester = section.course.semester - section.associated_sections.clear() - for s in linked_sections: - subject_code = s.get("subject_code") or s.get("subject_code ") - course_number = s.get("course_number") or s.get("course_number ") - section_number = s.get("section_number") or s.get("section_number ") - if not (subject_code and course_number and section_number): - continue - full_code = f"{subject_code}-{course_number}-{section_number}" - _, associated, _, _ = get_or_create_course_and_section(full_code, semester) - section.associated_sections.add(associated) - - -def set_crosslistings(course, crosslistings): - if not crosslistings: - course.primary_listing = course - return - for crosslisting in crosslistings: - if crosslisting["is_primary_section"]: - primary_course, _ = get_or_create_course( - crosslisting["subject_code"], crosslisting["course_number"], course.semester - ) - 
course.primary_listing = primary_course - return - - -def upsert_course_from_opendata(info, semester, missing_sections=None): - dept_code = info.get("subject") or info.get("course_department") - assert dept_code, json.dumps(info, indent=2) - course_code = f"{dept_code}-{info['course_number']}-{info['section_number']}" - course, section, _, _ = get_or_create_course_and_section(course_code, semester) - - course.title = info["course_title"] or "" - course.description = (info["course_description"] or "").strip() - if info.get("additional_section_narrative"): - course.description += (course.description and "\n") + info["additional_section_narrative"] - # course.prerequisites = "\n".join(info["prerequisite_notes"]) # TODO: get prerequisite info - course.syllabus_url = info.get("syllabus_url") or None - - # set course primary listing - set_crosslistings(course, info["crosslistings"]) - - section.crn = info["crn"] - section.credits = Decimal(info["credits"] or "0") if "credits" in info else None - section.capacity = int(info["max_enrollment"] or 0) - section.activity = info["activity"] or "***" - - set_meetings(section, info["meetings"]) - - set_instructors(section, info["instructors"]) - add_associated_sections(section, info["linked_courses"]) - - add_attributes(course, info["attributes"]) - add_restrictions(course, info["course_restrictions"]) - # add_grade_modes(section, info["grade_modes"]) # TODO: save grade modes - - section.save() - course.save() - - if missing_sections: - missing_sections.discard(section.full_code) - - -def add_attributes(course, attributes): - """ - Clear attributes of a course and add new ones. - Create attribute if it does not exist - """ - course.attributes.clear() - for attribute in attributes: - school = identify_school(attribute.get("attribute_code")) - desc = attribute.get("attribute_desc") - attr, _ = Attribute.objects.update_or_create( - code=attribute.get("attribute_code"), - defaults={"description": desc, "school": school}, - ) - attr.courses.add(course) - - -def identify_school(attribute_code): - """ - Determine the school short code (defined in the Attribute model's SCHOOL_CHOICES attribute) - based on the first one or two letters of attribute_code - :param attribute_code: the attribute's attribute_code - :return: the short code representing the school this attribute fits in or None - """ - prefix_to_school = { - "A": "SAS", - "B": "LPS", - "E": "SEAS", - "F": "DSGN", - "G": "GSE", - "L": "LAW", - "MM": "MED", - "Q": "MODE", - "V": "VET", - "N": "NUR", - "W": "WH", - } - for prefix, school in prefix_to_school.items(): - if attribute_code.startswith(prefix): - return school - return None - - -def add_restrictions(course, restrictions): - """ - Add restrictions to course of section. - Create restriction if it does not exist. 
- """ - course.ngss_restrictions.clear() - for restriction in restrictions: - code = restriction.get("restriction_code") - description = restriction.get("restriction_desc") - restriction_type = restriction.get("restriction_type") - inclusive = restriction.get("incl_excl_ind") == "I" - res, _ = NGSSRestriction.objects.update_or_create( - code=code, - defaults={ - "description": description, - "restriction_type": restriction_type, - "inclusive": inclusive, - }, - ) - res.courses.add(course) - - -def update_course_from_record(update): - section = update.section - section.status = update.new_status - section.save() - - -# averages review data for a given field, given a list of Review objects -def get_average_reviews(reviews, field): - count = 0 - total = 0 - for r in reviews: - try: - rb = r.reviewbit_set.get(field=field) - count += 1 - total += rb.average - except ObjectDoesNotExist: - pass - if count == 0: - raise ValueError("No reviews found for given field") - return total / count - - -def subquery_count_distinct(subquery, column): - """ - Returns a coalesced count of the number of distinct values in the specified column - of the specified subquery. Usage example: - Course.objects.annotate( - num_activities=subquery_count_distinct( - subquery=Section.objects.filter(course_id=OuterRef("id")), - column="activity" - ) - ) # counts the number of distinct activities each course has - """ - return Coalesce( - Subquery( - subquery.annotate(common=Value(1)) - .values("common") - .annotate(count=Count(column, distinct=True)) - .values("count") - ), - 0, - ) - - -def does_object_pass_filter(obj, filter): - """ - Returns True iff the given obj satisfies the given filter dictionary. - Note that this only supports simple equality constraints (although it - can traverse a relation to a single related object specified with double - underscore notation). It does not support more complex filter conditions - such as __gt. - Example: - does_object_pass_filter(obj, {"key": "value", "parent__key": "value2"}) - """ - for field, expected_value in filter.items(): - assert field != "" - components = field.split("__") - actual_value = getattr(obj, components[0]) - for component in components[1:]: - actual_value = getattr(actual_value, component) - if actual_value != expected_value: - return False - return True - - -def all_semesters(): - return set(Course.objects.values_list("semester", flat=True).distinct()) - - -def get_semesters(semesters=None, verbose=False): - """ - Validate a given string semesters argument, and return a list of the individual string semesters - specified by the argument. - """ - possible_semesters = all_semesters() - if semesters is None: - semesters = [get_current_semester()] - elif semesters == "all": - semesters = list(possible_semesters) - else: - semesters = semesters.strip().split(",") - for s in semesters: - if s not in possible_semesters: - raise ValueError(f"Provided semester {s} was not found in the db.") - if verbose: - if len(semesters) > 1: - print( - "This script's updates for each semester are atomic, i.e. either all the " - "updates for a certain semester are accepted by the database, or none of them are " - "(if an error is encountered). If an error is encountered during the " - "processing of a certain semester, any correctly completed updates for previously " - "processed semesters will have already been accepted by the database." - ) - else: - print( - "This script's updates for the given semester are atomic, i.e. 
either all the " - "updates will be accepted by the database, or none of them will be " - "(if an error is encountered)." - ) - return semesters - - - -def find_possible_schedules(courses, count=None,breaks={"M": [], "T": [], "W": [], "R": [], "F": []}): - day_to_num = {"M": 0, "T": 1, "W": 2, "R": 3, "F": 4} - class Scheduler: - def __init__(self): - self.intervals = [] - - def add_interval(self, start_time, end_time, class_name, section): - # Adds an interval to the list of intervals - self.intervals.append((start_time, end_time, class_name, section)) - return True - - def find_optimal_schedule(self, unwanted_intervals=[]): - # Sort intervals by end time - sorted_intervals = sorted(self.intervals, key=lambda x: x[1]) - # Remove unwanted intervals - not_ok = [] - for unwanted_interval in unwanted_intervals: - [not_ok.append(interval[3]) for interval in sorted_intervals if not (interval[0] > unwanted_interval[1] or interval[1] < unwanted_interval[0])] - sorted_intervals = [interval for interval in sorted_intervals if not (interval[3] in not_ok)] - # Initialize variables - n = len(sorted_intervals) - dp = [1] * n - prev = [-1] * n - # Iterate over sorted intervals - for i in range(1, n): - for j in range(i): - if sorted_intervals[j][1] <= sorted_intervals[i][0] and sorted_intervals[j][2] != sorted_intervals[i][2]: - if dp[j] + 1 > dp[i]: - dp[i] = dp[j] + 1 - prev[i] = j - # Find the maximum number of non-overlapping intervals - max_intervals = max(dp) - # Find the index of the last interval in the optimal schedule - last_interval_index = dp.index(max_intervals) - # Build the optimal schedule by backtracking through the prev array - optimal_schedule = [] - while last_interval_index != -1: - optimal_schedule.append(sorted_intervals[last_interval_index]) - last_interval_index = prev[last_interval_index] - optimal_schedule.reverse() - # Select only one section for each class in the optimal schedule - selected_classes = set() - final_schedule = [] - for interval in optimal_schedule: - class_name = interval[2] - sections = [i for i in optimal_schedule if i[2] == class_name] - section = random.choice(sections) - if class_name not in selected_classes: - selected_classes.add(class_name) - final_schedule.append(section) - return final_schedule - - def find_sections(courses): - """ - Separates the lectures and recitations of a certain course into a hash map - """ - course_to_sections = {} - for course in courses.keys(): - course_to_sections[course] = {"LEC": [section for section in courses[course] if section["activity"] == "LEC"], - "REC": [section for section in courses[course] if section["activity"] == "REC"],} - return course_to_sections - - def find_activities(c_to_s, activity): - """ - Given a dictionary of courses and their sections, returns a list of only the activities of a certain type (ex: LEC). - - Parameters: - c_to_s (dict): A dictionary of courses and their sections. - activity (str): The type of activity to filter by (ex: LEC). - - Returns: - list: A list of activities of the specified type. - """ - activities = [] - for key in c_to_s.keys(): - activities.append(c_to_s[key][activity]) - return activities - - def find_lectures_on_day(lectures, day): - """ - Finds all the lectures on a given day. - - Parameters: - lectures (list): A list of lectures. - day (str): The day to search for lectures on. - - Returns: - list: A list of tuples containing the start time, end time, course code, and section ID for each lecture on the given day. 
- """ - lectures_on_day = [] - for lecture in lectures: - for section in lecture: - for meeting in section["meetings"]: - if meeting["day"] == day: - # Adds a randomizer to randomize the order they are fed to the scheduler - lectures_on_day.append((float(meeting["start"]), float(meeting["end"])+0.001*random.random(), "-".join(section["id"].split("-")[0:2]), section["id"] )) - return lectures_on_day - - - def schedule_to_section(schedules): - """ - Given a list of schedules, returns a list of sections by stripping the time intervals from the schedule list. - - Parameters: - schedules (list): A list of schedules. - - Returns: - list: A list of sections. - """ - # Removes the time intervals from the schedule list - sections = [] - for s in schedules: - sections.append(s[3]) - return sections - - def remove_duplicates(l): - """ - Removes duplicates from a list. - Example: - remove_duplicates([1,1,1,3,5,7,7]) = [1,3,5,7] - """ - newl = [] - [newl.append(x) for x in l if x not in newl] - return newl - - def choose_class_hash(l): - """ - Given a list of section nodes, chooses one section per class from each schedule randomly. - - Parameters: - l (list): A list of section nodes. - - Returns: - list: A list of chosen sections, one per class. - """ - hash = {} - courses = [] - for node in l: - class_name = "-".join(node.split("-")[0:2]) - if class_name not in hash.keys(): - hash[class_name] = [node] - else: - hash[class_name].append(node) - for key in hash.keys(): - # Chooses a random lecture for each class - courses.append(random.choice(hash[key])) - return courses - - def check_if_schedule_possible(schedule, courses, unwanted_intervals): - """ - Given a schedule and a list of courses, checks if the schedule is possible by ensuring that there are no time conflicts - between the meetings of the courses in the schedule. - - Parameters: - schedule (list): A list of section IDs representing the courses in the schedule. - courses (list): A list of dictionaries representing the courses, where each dictionary contains a list of sections. - - Returns: - bool: True if the schedule is possible, False otherwise. - """ - - intervals = [] - for course in courses: - for section in course: - if section["id"] in schedule: - for meeting in section["meetings"]: - intervals.append((day_to_num[meeting["day"]]+0.01*float(meeting["start"]), day_to_num[meeting["day"]]+0.01*float(meeting["end"]), section["id"])) - intervals = sorted(intervals, key=lambda x: x[0]) - not_ok = [] - unwanted = [] - for day in unwanted_intervals.keys(): - for break_i in unwanted_intervals[day]: - unwanted.append((day_to_num[day]+0.01*float(break_i[0]), day_to_num[day]+0.01*float(break_i[1]))) - for unwanted_interval in unwanted: - [not_ok.append(interval[2]) for interval in intervals if not (interval[0] > unwanted_interval[1] or interval[1] < unwanted_interval[0])] - if (len(not_ok) > 0): - return False - for i in range(len(intervals)): - if i == 0: - continue - if intervals[i][0] < intervals[i-1][1]: - return False - return True - - def scheduler_for_day(lectures, day, breaks): - """ - Takes a list of lectures and a day of week, and returns a list of unique schedules for that day. - The schedules are generated by taking 10 samples of possible schedules based on the dynamic programming algorithm. 
- """ - day_classes = find_lectures_on_day(lectures, day) - scheduler = Scheduler() - for day_class in day_classes: - scheduler.add_interval(day_class[0], day_class[1], day_class[2], day_class[3]) - day_schedules = [] - for _ in range(5): - day_schedules.append(scheduler.find_optimal_schedule(breaks[day])) - day_schedules_unique = remove_duplicates(day_schedules) - return day_schedules_unique - - def add_recs_to_schedule(schedule, recs, lectures, breaks): - """ - Bruteforces recitations into the schedule based on the lectures. - - Parameters: - schedule (list): A list of strings representing the current schedule. - recs (list): A list of lists, where each inner list contains dictionaries representing recitation sections for a course. - lectures (list): A list of lists, where each inner list contains dictionaries representing lecture sections for a course. - - Returns: - list: A list of strings representing the updated schedule with recitation sections added. - - """ - newschedule = schedule - for course in recs: - if course != []: - course_name = "-".join(course[0]["id"].split("-")[0:2]) - schedule_names = list(map(lambda x: "-".join(x.split("-")[0:2]), schedule)) - if course_name in schedule_names: - for section in course: - if check_if_schedule_possible(schedule+[section["id"]], recs+lectures, breaks): - newschedule.append(section["id"]) - break - return newschedule - """ - Given a list of courses, returns a list of all possible schedules that can be made from those courses. - If a count is specified, returns only schedules with that many courses. - """ - c_to_s = find_sections(courses) - lectures = find_activities(c_to_s, "LEC") - recs = find_activities(c_to_s, "REC") - - monday_schedules = scheduler_for_day(lectures, "M", breaks) - tues_schedules = scheduler_for_day(lectures, "T", breaks) - wed_schedules = scheduler_for_day(lectures, "W", breaks) - thurs_schedules = scheduler_for_day(lectures, "R", breaks) - fri_schedules = scheduler_for_day(lectures, "F", breaks) - - possible_mwf = [] - for i in range(len(monday_schedules)): - for j in range(len(wed_schedules)): - for k in range(len(fri_schedules)): - possible_mwf.append(monday_schedules[i] + wed_schedules[j] + fri_schedules[k]) - - possible_tr = [] - for i in range(len(tues_schedules)): - for j in range(len(thurs_schedules)): - possible_tr.append(tues_schedules[i] + thurs_schedules[j]) - - total_schedules = [] - for i in range(len(possible_tr)): - for j in range(len(possible_mwf)): - total_schedules.append(possible_tr[i] + possible_mwf[j]) - - - courses = list(map(schedule_to_section, total_schedules)) - courses_unique = list(map(remove_duplicates, courses)) - - choose = list(map(choose_class_hash, courses_unique)) - choose = [schedule for schedule in choose if check_if_schedule_possible(schedule, lectures, breaks)] - if count != None: - combinations = [] - for i in range(len(choose)): - if len(choose[i]) <= count: - combinations.append(choose[i]) - else: - [combinations.append(list(c)) for c in itertools.combinations(choose[i], count) if c not in combinations] - else: - combinations = choose - choose = [add_recs_to_schedule(schedule, recs, lectures, breaks) for schedule in combinations] - choose = sorted(choose, key=lambda x: len(x)) - return choose - - +import itertools +import json +import logging +import os +import random +import re +import uuid +from decimal import Decimal + +from django.core.cache import cache +from django.core.exceptions import ObjectDoesNotExist, ValidationError +from django.db import connection +from 
django.db.models.aggregates import Count +from django.db.models.expressions import Subquery, Value +from django.db.models.functions.comparison import Coalesce +from django.db.models.signals import post_save +from django.dispatch import receiver +from options.models import Option, get_value +from rest_framework.exceptions import APIException + +from courses.models import ( + Attribute, + Building, + Course, + Department, + Instructor, + Meeting, + NGSSRestriction, + Room, + Section, + StatusUpdate, + User, +) + + +logger = logging.getLogger(__name__) + + +def in_dev(): + return "PennCourses.settings.development" in os.environ["DJANGO_SETTINGS_MODULE"] + + +semester_suffix_map = { + "A": "10", + "B": "20", + "C": "30", +} +semester_suffix_map_inv = {v: k for k, v in semester_suffix_map.items()} + + +def translate_semester(semester): + """ + Translates a semester string (e.g. "2022C") to the format accepted by the new + OpenData API (e.g. "202230"). + """ + if not semester: + return None + old_suffix = semester[-1].upper() + if old_suffix not in semester_suffix_map: + raise ValueError( + f"Invalid semester suffix {old_suffix} (semester must have " + "suffix A, B, or C; e.g. '2022C')." + ) + return semester[:-1] + semester_suffix_map[old_suffix] + + +def translate_semester_inv(semester): + """ + Translates a semester string in the format of the new OpenData API (e.g. "202230") + to the format used by our backend (e.g. "2022C") + """ + if not semester: + return None + new_suffix = semester[-2:] + if new_suffix not in semester_suffix_map_inv: + raise ValueError( + f"Invalid semester suffix {new_suffix} (semester must have " + "suffix '10', '20', or '30'; e.g. '202230')." + ) + return semester[:-2] + semester_suffix_map_inv[new_suffix] + + +def get_current_semester(allow_not_found=False): + """ + This function retrieves the string value of the current semester, either from + memory (if the value has been cached), or from the db (after which it will cache + the value for future use). If the value retrieved from the db is None, an error is thrown + indicating that the SEMESTER Option must be set for this API to work properly. + You can prevent an error from being thrown (and cause the function to just return None + in this case) by setting allow_not_found=True. + The cache has a timeout of 25 hours, but is also invalidated whenever the SEMESTER Option + is saved (which will occur whenever it is updated), using a post_save hook. + See the invalidate_current_semester_cache function below to see how this works. + """ + cached_val = cache.get("SEMESTER", None) + if cached_val is not None: + return cached_val + + retrieved_val = get_value("SEMESTER", None) + if not allow_not_found and retrieved_val is None: + raise APIException( + "The SEMESTER runtime option is not set. If you are in dev, you can set this " + "option by running the command " + "'python manage.py setoption SEMESTER 2020C', " + "replacing 2020C with the current semester, in the backend directory (remember " + "to run 'pipenv shell' before running this command, though)." + ) + cache.set("SEMESTER", retrieved_val, timeout=90000) # cache expires every 25 hours + return retrieved_val + + +@receiver(post_save, sender=Option, dispatch_uid="invalidate_current_semester_cache") +def invalidate_current_semester_cache(sender, instance, **kwargs): + """ + This function invalidates the cached SEMESTER value when the SEMESTER option is updated. 
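+
+    Illustrative flow (a rough sketch; the semester value shown is an example, not
+    taken from this patch):
+
+        option = Option.objects.get(key="SEMESTER")
+        option.value = "2022C"
+        option.save()           # post_save fires this receiver, clearing the cache
+        get_current_semester()  # re-reads "2022C" from the db and re-caches it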
+ """ + from courses.management.commands.load_add_drop_dates import load_add_drop_dates + + # ^ imported here to avoid circular imports + + if instance.key == "SEMESTER": + cache.delete("SEMESTER") + get_or_create_add_drop_period(instance.value) + load_add_drop_dates() + + +def get_semester(datetime): + """ + Given a datetime, estimate the semester of the period of course registration it occurred in. + """ + if 3 <= datetime.month and datetime.month <= 9: + return str(datetime.year) + "C" + if datetime.month < 3: + return str(datetime.year) + "A" + return str(datetime.year + 1) + "A" + + +def get_add_drop_period(semester): + """ + Returns the AddDropPeriod object corresponding to the given semester. Throws the same + errors and behaves the same way as AddDropPeriod.objects.get(semester=semester) but runs faster. + This function uses caching to speed up add/drop period object retrieval. Cached objects + expire every 25 hours, and are also invalidated in the AddDropPeriod.save method. + The add_drop_periods key in cache points to a dictionary mapping semester to add/drop period + object. + """ + from alert.models import AddDropPeriod # imported here to avoid circular imports + + cached_adps = cache.get("add_drop_periods", dict()) + if semester not in cached_adps: + cached_adps[semester] = AddDropPeriod.objects.get(semester=semester) + cache.set("add_drop_periods", cached_adps, timeout=90000) # cache expires every 25 hours + return cached_adps[semester] + + +def get_or_create_add_drop_period(semester): + """ + Behaves the same as get_add_drop_period if an AddDropPeriod object already exists for the given + semester, and otherwise creates a new AddDropPeriod object for the given semester, returning + the created object. + """ + from alert.models import AddDropPeriod + + try: + add_drop = get_add_drop_period(semester) + except AddDropPeriod.DoesNotExist: + add_drop = AddDropPeriod(semester=semester) + add_drop.save() + return add_drop + + +def get_set_id(obj): + """ + Returns the next ID for the given object (which hasn't yet been created). + """ + if obj.id: + return obj.id + # Source: https://djangosnippets.org/snippets/10474/ + with connection.cursor() as cursor: + # NOTE: this relies on PostgreSQL-specific details for autoincrement + # https://www.postgresql.org/docs/9.4/functions-sequence.html + cursor.execute( + "SELECT nextval('{0}_{1}_{2}_seq'::regclass)".format( + obj._meta.app_label.lower(), + obj._meta.object_name.lower(), + obj._meta.pk.name, + ) + ) + obj.id = obj.pk = cursor.fetchone()[0] + return obj.pk + + +def is_fk_set(obj, fk_field): + """ + Returns true if the specified foreign key field has been + set on the given object, false otherwise. + """ + return bool(getattr(obj, fk_field, None) or getattr(obj, fk_field + "_id", None)) + + +""" +Assumptions of our course code parsing regex: + - Department code is 1-4 letters + - Course code is (4 digits with an optional trailing letter) or (3 digits) or (3 letters) + - Section code is 3 digits or 3 letters +""" +section_code_re = re.compile( + r"^([A-Za-z]{1,4})\s*-?\s*(\d{4}[A-Za-z]?|\d{3}|[A-Za-z]{3})\s*-?\s*(\d{3}|[A-Za-z]{3})?$" +) + + +def separate_course_code(course_code, allow_partial=False): + """ + Parse and return a (dept, course, section) ID tuple + given a section full_code in any possible format. + If allow_partial is True, then missing components will be returned as None. + Otherwise, an incomplete match will raise a ValueError. 
+ """ + course_code = course_code.strip() + match = section_code_re.match(course_code) + if match: + components = (match.group(1).upper(), match.group(2), match.group(3)) + if allow_partial or None not in components: + return components + raise ValueError(f"Course code could not be parsed: {course_code}") + + +def get_or_create_course(dept_code, course_id, semester, defaults=None): + dept, _ = Department.objects.get_or_create(code=dept_code) + return Course.objects.get_or_create( + department=dept, code=course_id, semester=semester, defaults=defaults + ) + + +def get_or_create_course_and_section( + course_code, semester, section_manager=None, course_defaults=None, section_defaults=None +): + if section_manager is None: + section_manager = Section.objects + dept_code, course_id, section_id = separate_course_code(course_code) + + course, course_c = get_or_create_course( + dept_code, course_id, semester, defaults=course_defaults + ) + section, section_c = section_manager.get_or_create( + course=course, code=section_id, defaults=section_defaults + ) + + return course, section, course_c, section_c + + +def get_course_and_section(course_code, semester, section_manager=None): + if section_manager is None: + section_manager = Section.objects + + dept_code, course_id, section_id = separate_course_code(course_code) + course = Course.objects.get(department__code=dept_code, code=course_id, semester=semester) + section = section_manager.get(course=course, code=section_id) + return course, section + + +def update_percent_open(section, new_status_update): + """ + This function updates a section's percent_open field when a new status update is processed. + """ + add_drop = get_or_create_add_drop_period(section.semester) + last_status_update = section.last_status_update + if new_status_update.created_at < add_drop.estimated_start: + return + if last_status_update is None: + section.percent_open = Decimal(int(new_status_update.old_status == "O")) + section.save() + else: + if last_status_update.created_at >= add_drop.estimated_end: + return + seconds_before_last = Decimal( + max((last_status_update.created_at - add_drop.estimated_start).total_seconds(), 0) + ) + seconds_since_last = Decimal( + max( + ( + min(new_status_update.created_at, add_drop.estimated_end) + - max(last_status_update.created_at, add_drop.estimated_start) + ).total_seconds(), + 0, + ) + ) + section.percent_open = ( + Decimal(section.percent_open) * seconds_before_last + + int(new_status_update.old_status == "O") * seconds_since_last + ) / (seconds_before_last + seconds_since_last) + section.save() + + +def record_update(section, semester, old_status, new_status, alerted, req, created_at=None): + from alert.models import validate_add_drop_semester # avoid circular imports + + u = StatusUpdate( + section=section, + old_status=old_status, + new_status=new_status, + alert_sent=alerted, + request_body=req, + ) + if created_at is not None: + u.created_at = created_at + u.save() + + valid_status_choices = dict(Section.STATUS_CHOICES).keys() + + def validate_status(name, status): + if status not in valid_status_choices: + raise ValidationError( + f"{name} is invalid; expected a value in {valid_status_choices}, but got {status}" + ) + + validate_status("Old status", old_status) + validate_status("New status", new_status) + + # Raises ValidationError if semester is not fall or spring (and correctly formatted) + validate_add_drop_semester(semester) + update_percent_open(section, u) + + return u + + +def merge_instructors(user, name): + """ + Merge 
the instructor corresponding to the given user into the + instructor with the given name, if both exist. + """ + from review.management.commands.mergeinstructors import resolve_duplicates + + def stat(key, amt=1, element=None): + return + + try: + user_instructor = Instructor.objects.get(user=user) + name_instructor = Instructor.objects.get(name=name) + duplicates = {user_instructor, name_instructor} + if len(duplicates) == 1: + return + resolve_duplicates([duplicates], dry_run=False, stat=stat) + except Instructor.DoesNotExist: + pass + + +def set_instructors(section, instructors): + instructor_obs = [] + for instructor in instructors: + middle_initial = instructor["middle_initial"] + if middle_initial: + middle_initial += "." + name_components = ( + instructor["first_name"], + middle_initial, + instructor["last_name"], + ) + name = " ".join([c for c in name_components if c]) + penn_id = int(instructor["penn_id"]) + try: + merge_instructors(User.objects.get(id=penn_id), name) + instructor_ob = Instructor.objects.get(user_id=penn_id) + instructor_ob.name = name + instructor_ob.save() + except (Instructor.DoesNotExist, User.DoesNotExist): + user, user_created = User.objects.get_or_create( + id=penn_id, defaults={"username": uuid.uuid4()} + ) + if user_created: + user.set_unusable_password() + user.save() + instructor_ob, _ = Instructor.objects.get_or_create(name=name) + instructor_ob.user = user + instructor_ob.save() + instructor_obs.append(instructor_ob) + section.instructors.set(instructor_obs) + + +def get_room(building_code, room_number): + building, _ = Building.objects.get_or_create(code=building_code) + room, _ = Room.objects.get_or_create(building=building, number=room_number) + return room + + +def extract_date(date_str): + if not date_str: + return None + date_str = date_str.split(" ")[0] + if len(date_str.split("-")) != 3: + return None + return date_str + + +def clean_meetings(meetings): + return { + ( + tuple(sorted(list(set(m["days"])))), + m["begin_time"], + m["end_time"], + m["building_code"], + m["room_code"], + ): m + for m in meetings + if m["days"] and m["begin_time"] and m["end_time"] + }.values() + + +def set_meetings(section, meetings): + meetings = clean_meetings(meetings) + + for meeting in meetings: + meeting["days"] = "".join(sorted(list(set(meeting["days"])))) + meeting_times = [ + f"{meeting['days']} {meeting['begin_time']} - {meeting['end_time']}" for meeting in meetings + ] + section.meeting_times = json.dumps(meeting_times) + + section.meetings.all().delete() + for meeting in meetings: + online = ( + not meeting["building_code"] + or not meeting["room_code"] + or meeting.get("building_desc") + and ( + meeting["building_desc"].lower() == "online" + or meeting["building_desc"].lower() == "no room needed" + ) + ) + room = None if online else get_room(meeting["building_code"], meeting["room_code"]) + start_time = Decimal(meeting["begin_time_24"]) / 100 + end_time = Decimal(meeting["end_time_24"]) / 100 + start_date = extract_date(meeting.get("start_date")) + end_date = extract_date(meeting.get("end_date")) + for day in list(meeting["days"]): + meeting = Meeting.objects.update_or_create( + section=section, + day=day, + start=start_time, + end=end_time, + room=room, + defaults={ + "start_date": start_date, + "end_date": end_date, + }, + ) + + +def add_associated_sections(section, linked_sections): + semester = section.course.semester + section.associated_sections.clear() + for s in linked_sections: + subject_code = s.get("subject_code") or s.get("subject_code ") + 
course_number = s.get("course_number") or s.get("course_number ") + section_number = s.get("section_number") or s.get("section_number ") + if not (subject_code and course_number and section_number): + continue + full_code = f"{subject_code}-{course_number}-{section_number}" + _, associated, _, _ = get_or_create_course_and_section(full_code, semester) + section.associated_sections.add(associated) + + +def set_crosslistings(course, crosslistings): + if not crosslistings: + course.primary_listing = course + return + for crosslisting in crosslistings: + if crosslisting["is_primary_section"]: + primary_course, _ = get_or_create_course( + crosslisting["subject_code"], crosslisting["course_number"], course.semester + ) + course.primary_listing = primary_course + return + + +def upsert_course_from_opendata(info, semester, missing_sections=None): + dept_code = info.get("subject") or info.get("course_department") + assert dept_code, json.dumps(info, indent=2) + course_code = f"{dept_code}-{info['course_number']}-{info['section_number']}" + course, section, _, _ = get_or_create_course_and_section(course_code, semester) + + course.title = info["course_title"] or "" + course.description = (info["course_description"] or "").strip() + if info.get("additional_section_narrative"): + course.description += (course.description and "\n") + info["additional_section_narrative"] + # course.prerequisites = "\n".join(info["prerequisite_notes"]) # TODO: get prerequisite info + course.syllabus_url = info.get("syllabus_url") or None + + # set course primary listing + set_crosslistings(course, info["crosslistings"]) + + section.crn = info["crn"] + section.credits = Decimal(info["credits"] or "0") if "credits" in info else None + section.capacity = int(info["max_enrollment"] or 0) + section.activity = info["activity"] or "***" + + set_meetings(section, info["meetings"]) + + set_instructors(section, info["instructors"]) + add_associated_sections(section, info["linked_courses"]) + + add_attributes(course, info["attributes"]) + add_restrictions(course, info["course_restrictions"]) + # add_grade_modes(section, info["grade_modes"]) # TODO: save grade modes + + section.save() + course.save() + + if missing_sections: + missing_sections.discard(section.full_code) + + +def add_attributes(course, attributes): + """ + Clear attributes of a course and add new ones. + Create attribute if it does not exist + """ + course.attributes.clear() + for attribute in attributes: + school = identify_school(attribute.get("attribute_code")) + desc = attribute.get("attribute_desc") + attr, _ = Attribute.objects.update_or_create( + code=attribute.get("attribute_code"), + defaults={"description": desc, "school": school}, + ) + attr.courses.add(course) + + +def identify_school(attribute_code): + """ + Determine the school short code (defined in the Attribute model's SCHOOL_CHOICES attribute) + based on the first one or two letters of attribute_code + :param attribute_code: the attribute's attribute_code + :return: the short code representing the school this attribute fits in or None + """ + prefix_to_school = { + "A": "SAS", + "B": "LPS", + "E": "SEAS", + "F": "DSGN", + "G": "GSE", + "L": "LAW", + "MM": "MED", + "Q": "MODE", + "V": "VET", + "N": "NUR", + "W": "WH", + } + for prefix, school in prefix_to_school.items(): + if attribute_code.startswith(prefix): + return school + return None + + +def add_restrictions(course, restrictions): + """ + Add restrictions to course of section. + Create restriction if it does not exist. 
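+
+    For illustration, each entry of restrictions is expected to carry the fields read
+    below; the values in this sketch are made up, not taken from the registrar API:
+
+        {
+            "restriction_code": "XYZ",
+            "restriction_desc": "Example restriction description",
+            "restriction_type": "EXAMPLE-TYPE",
+            "incl_excl_ind": "I",  # "I" marks the restriction as inclusive
+        }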
+ """ + course.ngss_restrictions.clear() + for restriction in restrictions: + code = restriction.get("restriction_code") + description = restriction.get("restriction_desc") + restriction_type = restriction.get("restriction_type") + inclusive = restriction.get("incl_excl_ind") == "I" + res, _ = NGSSRestriction.objects.update_or_create( + code=code, + defaults={ + "description": description, + "restriction_type": restriction_type, + "inclusive": inclusive, + }, + ) + res.courses.add(course) + + +def update_course_from_record(update): + section = update.section + section.status = update.new_status + section.save() + + +# averages review data for a given field, given a list of Review objects +def get_average_reviews(reviews, field): + count = 0 + total = 0 + for r in reviews: + try: + rb = r.reviewbit_set.get(field=field) + count += 1 + total += rb.average + except ObjectDoesNotExist: + pass + if count == 0: + raise ValueError("No reviews found for given field") + return total / count + + +def subquery_count_distinct(subquery, column): + """ + Returns a coalesced count of the number of distinct values in the specified column + of the specified subquery. Usage example: + Course.objects.annotate( + num_activities=subquery_count_distinct( + subquery=Section.objects.filter(course_id=OuterRef("id")), + column="activity" + ) + ) # counts the number of distinct activities each course has + """ + return Coalesce( + Subquery( + subquery.annotate(common=Value(1)) + .values("common") + .annotate(count=Count(column, distinct=True)) + .values("count") + ), + 0, + ) + + +def does_object_pass_filter(obj, filter): + """ + Returns True iff the given obj satisfies the given filter dictionary. + Note that this only supports simple equality constraints (although it + can traverse a relation to a single related object specified with double + underscore notation). It does not support more complex filter conditions + such as __gt. + Example: + does_object_pass_filter(obj, {"key": "value", "parent__key": "value2"}) + """ + for field, expected_value in filter.items(): + assert field != "" + components = field.split("__") + actual_value = getattr(obj, components[0]) + for component in components[1:]: + actual_value = getattr(actual_value, component) + if actual_value != expected_value: + return False + return True + + +def all_semesters(): + return set(Course.objects.values_list("semester", flat=True).distinct()) + + +def get_semesters(semesters=None, verbose=False): + """ + Validate a given string semesters argument, and return a list of the individual string semesters + specified by the argument. + """ + possible_semesters = all_semesters() + if semesters is None: + semesters = [get_current_semester()] + elif semesters == "all": + semesters = list(possible_semesters) + else: + semesters = semesters.strip().split(",") + for s in semesters: + if s not in possible_semesters: + raise ValueError(f"Provided semester {s} was not found in the db.") + if verbose: + if len(semesters) > 1: + print( + "This script's updates for each semester are atomic, i.e. either all the " + "updates for a certain semester are accepted by the database, or none of them are " + "(if an error is encountered). If an error is encountered during the " + "processing of a certain semester, any correctly completed updates for previously " + "processed semesters will have already been accepted by the database." + ) + else: + print( + "This script's updates for the given semester are atomic, i.e. 
either all the " + "updates will be accepted by the database, or none of them will be " + "(if an error is encountered)." + ) + return semesters + + +def find_possible_schedules( + courses, count=None, breaks={"M": [], "T": [], "W": [], "R": [], "F": []} +): + + day_to_num = {"M": 0, "T": 1, "W": 2, "R": 3, "F": 4} + + class Scheduler: + def __init__(self): + self.intervals = [] + + def add_interval(self, start_time, end_time, class_name, section): + # Adds an interval to the list of intervals + self.intervals.append((start_time, end_time, class_name, section)) + return True + + def find_optimal_schedule(self, unwanted_intervals=[]): + # Sort intervals by end time + sorted_intervals = sorted(self.intervals, key=lambda x: x[1]) + # Remove unwanted intervals + not_ok = [] + for unwanted_interval in unwanted_intervals: + [ + not_ok.append(interval[3]) + for interval in sorted_intervals + if not ( + interval[0] > unwanted_interval[1] or interval[1] < unwanted_interval[0] + ) + ] + sorted_intervals = [ + interval for interval in sorted_intervals if not (interval[3] in not_ok) + ] + # Initialize variables + n = len(sorted_intervals) + dp = [1] * n + prev = [-1] * n + # Iterate over sorted intervals + for i in range(1, n): + for j in range(i): + if (sorted_intervals[j][1] <= sorted_intervals[i][0]) and ( + sorted_intervals[j][2] != sorted_intervals[i][2] + ): + if dp[j] + 1 > dp[i]: + dp[i] = dp[j] + 1 + prev[i] = j + # Find the maximum number of non-overlapping intervals + max_intervals = max(dp) + # Find the index of the last interval in the optimal schedule + last_interval_index = dp.index(max_intervals) + # Build the optimal schedule by backtracking through the prev array + optimal_schedule = [] + while last_interval_index != -1: + optimal_schedule.append(sorted_intervals[last_interval_index]) + last_interval_index = prev[last_interval_index] + optimal_schedule.reverse() + # Select only one section for each class in the optimal schedule + selected_classes = set() + final_schedule = [] + for interval in optimal_schedule: + class_name = interval[2] + sections = [i for i in optimal_schedule if i[2] == class_name] + section = random.choice(sections) + if class_name not in selected_classes: + selected_classes.add(class_name) + final_schedule.append(section) + return final_schedule + + def find_sections(courses): + """ + Separates the lectures and recitations of a certain course into a hash map + """ + course_to_sections = {} + for course in courses.keys(): + course_to_sections[course] = { + "LEC": [section for section in courses[course] if section["activity"] == "LEC"], + "REC": [section for section in courses[course] if section["activity"] == "REC"], + } + return course_to_sections + + def find_activities(c_to_s, activity): + """ + Given a dictionary of courses and their sections, + returns a list of only the activities of a certain type (ex: LEC). + + Parameters: + c_to_s (dict): A dictionary of courses and their sections. + activity (str): The type of activity to filter by (ex: LEC). + + Returns: + list: A list of activities of the specified type. + """ + activities = [] + for key in c_to_s.keys(): + activities.append(c_to_s[key][activity]) + return activities + + def find_lectures_on_day(lectures, day): + """ + Finds all the lectures on a given day. + + Parameters: + lectures (list): A list of lectures. + day (str): The day to search for lectures on. + + Returns: + list: A list of tuples containing the start time, end time, + course code, and section ID for each lecture on the given day. 
+ """ + lectures_on_day = [] + for lecture in lectures: + for section in lecture: + for meeting in section["meetings"]: + if meeting["day"] == day: + # Adds a randomizer to randomize the order they are fed to the scheduler + lectures_on_day.append( + ( + float(meeting["start"]), + float(meeting["end"]) + 0.001 * random.random(), + "-".join(section["id"].split("-")[0:2]), + section["id"], + ) + ) + return lectures_on_day + + def schedule_to_section(schedules): + """ + Given a list of schedules, + returns a list of sections by stripping the time intervals from the schedule list. + + Parameters: + schedules (list): A list of schedules. + + Returns: + list: A list of sections. + """ + # Removes the time intervals from the schedule list + sections = [] + for s in schedules: + sections.append(s[3]) + return sections + + def remove_duplicates(given_l): + """ + Removes duplicates from a list. + Example: + remove_duplicates([1,1,1,3,5,7,7]) = [1,3,5,7] + """ + newl = [] + [newl.append(x) for x in given_l if x not in newl] + return newl + + def choose_class_hash(given_l): + """ + Given a list of section nodes, chooses one section per class from each schedule randomly. + + Parameters: + l (list): A list of section nodes. + + Returns: + list: A list of chosen sections, one per class. + """ + hash = {} + courses = [] + for node in given_l: + class_name = "-".join(node.split("-")[0:2]) + if class_name not in hash.keys(): + hash[class_name] = [node] + else: + hash[class_name].append(node) + for key in hash.keys(): + # Chooses a random lecture for each class + courses.append(random.choice(hash[key])) + return courses + + def check_if_schedule_possible(schedule, courses, unwanted_intervals): + """ + Given a schedule and a list of courses, + checks if the schedule is possible by ensuring that there are no time conflicts + between the meetings of the courses in the schedule. + + Parameters: + schedule (list): A list of section IDs representing the courses in the schedule. + courses (list): A list of dictionaries representing the courses, + where each dictionary contains a list of sections. + + Returns: + bool: True if the schedule is possible, False otherwise. + """ + + intervals = [] + for course in courses: + for section in course: + if section["id"] in schedule: + for meeting in section["meetings"]: + intervals.append( + ( + day_to_num[meeting["day"]] + 0.01 * float(meeting["start"]), + day_to_num[meeting["day"]] + 0.01 * float(meeting["end"]), + section["id"], + ) + ) + intervals = sorted(intervals, key=lambda x: x[0]) + not_ok = [] + unwanted = [] + for day in unwanted_intervals.keys(): + for break_i in unwanted_intervals[day]: + unwanted.append( + ( + day_to_num[day] + 0.01 * float(break_i[0]), + day_to_num[day] + 0.01 * float(break_i[1]), + ) + ) + for unwanted_interval in unwanted: + [ + not_ok.append(interval[2]) + for interval in intervals + if not (interval[0] > unwanted_interval[1] or interval[1] < unwanted_interval[0]) + ] + if len(not_ok) > 0: + return False + for i in range(len(intervals)): + if i == 0: + continue + if intervals[i][0] < intervals[i - 1][1]: + return False + return True + + def scheduler_for_day(lectures, day, breaks): + """ + Takes a list of lectures and a day of week, + and returns a list of unique schedules for that day. + The schedules are generated by taking 10 samples of possible schedules + based on the dynamic programming algorithm. 
+ """ + day_classes = find_lectures_on_day(lectures, day) + scheduler = Scheduler() + for day_class in day_classes: + scheduler.add_interval(day_class[0], day_class[1], day_class[2], day_class[3]) + day_schedules = [] + for _ in range(5): + day_schedules.append(scheduler.find_optimal_schedule(breaks[day])) + day_schedules_unique = remove_duplicates(day_schedules) + return day_schedules_unique + + def add_recs_to_schedule(schedule, recs, lectures, breaks): + """ + Bruteforces recitations into the schedule based on the lectures. + + Parameters: + schedule (list): A list of strings representing the current schedule. + recs (list): A list of lists, where each inner list contains dictionaries + representing recitation sections for a course. + lectures (list): A list of lists, where each inner list contains dictionaries + representing lecture sections for a course. + + Returns: + list: A list of strings representing the updated schedule + with recitation sections added. + + """ + newschedule = schedule + for course in recs: + if course != []: + course_name = "-".join(course[0]["id"].split("-")[0:2]) + schedule_names = list(map(lambda x: "-".join(x.split("-")[0:2]), schedule)) + if course_name in schedule_names: + for section in course: + if check_if_schedule_possible( + schedule + [section["id"]], recs + lectures, breaks + ): + newschedule.append(section["id"]) + break + return newschedule + + """ + Given a list of courses, returns a list of all possible schedules + that can be made from those courses. + If a count is specified, returns only schedules with that many courses. + """ + c_to_s = find_sections(courses) + lectures = find_activities(c_to_s, "LEC") + recs = find_activities(c_to_s, "REC") + + monday_schedules = scheduler_for_day(lectures, "M", breaks) + tues_schedules = scheduler_for_day(lectures, "T", breaks) + wed_schedules = scheduler_for_day(lectures, "W", breaks) + thurs_schedules = scheduler_for_day(lectures, "R", breaks) + fri_schedules = scheduler_for_day(lectures, "F", breaks) + + possible_mwf = [] + for i in range(len(monday_schedules)): + for j in range(len(wed_schedules)): + for k in range(len(fri_schedules)): + possible_mwf.append(monday_schedules[i] + wed_schedules[j] + fri_schedules[k]) + + possible_tr = [] + for i in range(len(tues_schedules)): + for j in range(len(thurs_schedules)): + possible_tr.append(tues_schedules[i] + thurs_schedules[j]) + + total_schedules = [] + for i in range(len(possible_tr)): + for j in range(len(possible_mwf)): + total_schedules.append(possible_tr[i] + possible_mwf[j]) + + courses = list(map(schedule_to_section, total_schedules)) + courses_unique = list(map(remove_duplicates, courses)) + + choose = list(map(choose_class_hash, courses_unique)) + choose = [ + schedule for schedule in choose if check_if_schedule_possible(schedule, lectures, breaks) + ] + if count is not None: + combinations = [] + for i in range(len(choose)): + if len(choose[i]) <= count: + combinations.append(choose[i]) + else: + [ + combinations.append(list(c)) + for c in itertools.combinations(choose[i], count) + if c not in combinations + ] + else: + combinations = choose + choose = [add_recs_to_schedule(schedule, recs, lectures, breaks) for schedule in combinations] + choose = sorted(choose, key=lambda x: len(x)) + return choose diff --git a/backend/docker-compose.yaml b/backend/docker-compose.yaml index bff916da9..537185f0b 100644 --- a/backend/docker-compose.yaml +++ b/backend/docker-compose.yaml @@ -1,35 +1,35 @@ -version: "3" - -services: - db: - image: postgres - 
command: postgres - environment: - - POSTGRES_DB=postgres - - POSTGRES_USER=penn-courses - - POSTGRES_PASSWORD=postgres - ports: - - "5432:5432" - volumes: - - ./postgres:/var/lib/postgresql/pgdata - redis: - image: redis:4.0 - ports: - - "6379:6379" - development: - depends_on: - - db - - redis - profiles: - - dev - build: - context: . - dockerfile: Dockerfile.dev - ports: - - "8000:8000" - volumes: - - .:/backend - environment: - - REDIS_URL=redis://redis:6379/1 - - DATABASE_URL=postgres://penn-courses:postgres@db:5432/postgres - command: pipenv run python manage.py runserver 0.0.0.0:8000 +version: "3" + +services: + db: + image: postgres + command: postgres + environment: + - POSTGRES_DB=postgres + - POSTGRES_USER=penn-courses + - POSTGRES_PASSWORD=postgres + ports: + - "5432:5432" + volumes: + - ./postgres:/var/lib/postgresql/pgdata + redis: + image: redis:4.0 + ports: + - "6379:6379" + development: + depends_on: + - db + - redis + profiles: + - dev + build: + context: . + dockerfile: Dockerfile.dev + ports: + - "8000:8000" + volumes: + - .:/backend + environment: + - REDIS_URL=redis://redis:6379/1 + - DATABASE_URL=postgres://penn-courses:postgres@db:5432/postgres + command: pipenv run python manage.py runserver 0.0.0.0:8000 diff --git a/backend/manage.py b/backend/manage.py index 9b64f71ac..4a9bb1429 100755 --- a/backend/manage.py +++ b/backend/manage.py @@ -1,21 +1,21 @@ -#!/usr/bin/env python -"""Django's command-line utility for administrative tasks.""" -import os -import sys - - -def main(): - os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PennCourses.settings.development") - try: - from django.core.management import execute_from_command_line - except ImportError as exc: - raise ImportError( - "Couldn't import Django. Are you sure it's installed and " - "available on your PYTHONPATH environment variable? Did you " - "forget to activate a virtual environment?" - ) from exc - execute_from_command_line(sys.argv) - - -if __name__ == "__main__": - main() +#!/usr/bin/env python +"""Django's command-line utility for administrative tasks.""" +import os +import sys + + +def main(): + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PennCourses.settings.development") + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" 
+ ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == "__main__": + main() diff --git a/backend/plan/admin.py b/backend/plan/admin.py index 787b9b805..1df5f9aaf 100644 --- a/backend/plan/admin.py +++ b/backend/plan/admin.py @@ -1,15 +1,15 @@ -from django.contrib import admin - -from plan.models import Schedule - - -class ScheduleAdmin(admin.ModelAdmin): - search_fields = ("person__username",) - autocomplete_fields = ("person", "sections") - - list_filter = [ - "semester", - ] - - -admin.site.register(Schedule, ScheduleAdmin) +from django.contrib import admin + +from plan.models import Schedule + + +class ScheduleAdmin(admin.ModelAdmin): + search_fields = ("person__username",) + autocomplete_fields = ("person", "sections") + + list_filter = [ + "semester", + ] + + +admin.site.register(Schedule, ScheduleAdmin) diff --git a/backend/plan/apps.py b/backend/plan/apps.py index 76eb3f4b0..66019409f 100644 --- a/backend/plan/apps.py +++ b/backend/plan/apps.py @@ -1,5 +1,5 @@ -from django.apps import AppConfig - - -class PlanConfig(AppConfig): - name = "plan" +from django.apps import AppConfig + + +class PlanConfig(AppConfig): + name = "plan" diff --git a/backend/plan/management/commands/recommendcourses.py b/backend/plan/management/commands/recommendcourses.py index 87fe16161..e6600f956 100644 --- a/backend/plan/management/commands/recommendcourses.py +++ b/backend/plan/management/commands/recommendcourses.py @@ -1,259 +1,259 @@ -import heapq -import os -import pickle -from typing import Optional, Set - -import numpy as np -from django.conf import settings -from django.contrib.auth import get_user_model -from django.core.cache import cache -from django.core.management.base import BaseCommand - -from courses.models import Course -from courses.util import get_current_semester, in_dev -from PennCourses.settings.base import S3_client -from plan.management.commands.trainrecommender import train_recommender -from plan.models import Schedule - - -# The proportion by which to up-weight current courses -# relative to past courses when computing a user vector -CURR_COURSES_BIAS = 3 - - -def vectorize_user_by_courses( - curr_courses, past_courses, curr_course_vectors_dict, past_course_vectors_dict -): - n = len(next(iter(curr_course_vectors_dict.values()))) - - # Input validation - all_courses = set(curr_courses) | set(past_courses) - if len(all_courses) != len(curr_courses) + len(past_courses): - raise ValueError( - "Repeated courses given in curr_courses and/or past_courses. " - f"curr_courses: {str(curr_courses)}. 
past_courses: {str(past_courses)}" - ) - invalid_curr_courses = set(curr_courses) - { - c.full_code - for c in Course.objects.filter(semester=get_current_semester(), full_code__in=curr_courses) - } - if len(invalid_curr_courses) > 0: - raise ValueError( - "The following courses in curr_courses are invalid or not offered this semester: " - f"{str(invalid_curr_courses)}" - ) - invalid_past_courses = set(past_courses) - { - c.full_code for c in Course.objects.filter(full_code__in=past_courses) - } - if len(invalid_past_courses) > 0: - raise ValueError( - f"The following courses in past_courses are invalid: {str(invalid_past_courses)}" - ) - - # Eliminate courses not in the model - curr_courses = [c for c in curr_courses if c in curr_course_vectors_dict] - past_courses = [c for c in past_courses if c in past_course_vectors_dict] - - curr_courses_vector = ( - np.zeros(n) - if len(curr_courses) == 0 - else sum(curr_course_vectors_dict[course] for course in curr_courses) - ) - past_courses_vector = ( - np.zeros(n) - if len(past_courses) == 0 - else sum(past_course_vectors_dict[course] for course in past_courses) - ) - - vector = curr_courses_vector * CURR_COURSES_BIAS + past_courses_vector - norm = np.linalg.norm(vector) - vector = vector / norm if norm > 0 else vector - return vector, all_courses - - -def vectorize_user(user, curr_course_vectors_dict, past_course_vectors_dict): - """ - Aggregates a vector over all the courses in the user's schedule - """ - curr_semester = get_current_semester() - curr_courses = set( - [ - s - for s in Schedule.objects.filter(person=user, semester=curr_semester).values_list( - "sections__course__full_code", flat=True - ) - if s is not None - ] - ) - past_courses = set( - [ - s - for s in Schedule.objects.filter(person=user, semester__lt=curr_semester).values_list( - "sections__course__full_code", flat=True - ) - if s is not None - ] - ) - past_courses = past_courses - curr_courses - return vectorize_user_by_courses( - list(curr_courses), list(past_courses), curr_course_vectors_dict, past_course_vectors_dict - ) - - -def cosine_similarity(v1, v2): - norm_prod = np.linalg.norm(v1) * np.linalg.norm(v2) - return np.dot(v1, v2) / norm_prod if norm_prod > 0 else 0 - - -def best_recommendations( - cluster, - curr_course_vectors_dict, - user_vector, - exclude: Optional[Set[str]] = None, - n_recommendations=5, -): - recs = [] - for course in cluster: - if exclude is not None and course in exclude: - continue - course_vector = curr_course_vectors_dict[course] - similarity = cosine_similarity(course_vector, user_vector) - recs.append((course, similarity)) - rec_course_to_score = {course: score for course, score in recs} - recs = [ - (c.full_code, rec_course_to_score[c.full_code]) - for c in Course.objects.filter( - semester=get_current_semester(), full_code__in=list(rec_course_to_score.keys()) - ) - ] # only recommend currently offered courses - if n_recommendations > len(recs): - n_recommendations = len(recs) - - return [course for course, _ in heapq.nlargest(n_recommendations, recs, lambda x: x[1])] - - -def recommend_courses( - curr_course_vectors_dict, - cluster_centroids, - clusters, - user_vector, - user_courses, - n_recommendations=5, -): - min_distance = -1 - best_cluster_index = -1 - for cluster_index, centroid in enumerate(cluster_centroids): - distance = np.linalg.norm(centroid - user_vector) - if best_cluster_index == -1 or distance < min_distance: - min_distance = distance - best_cluster_index = cluster_index - - return best_recommendations( - 
clusters[best_cluster_index], - curr_course_vectors_dict, - user_vector, - exclude=user_courses, - n_recommendations=n_recommendations, - ) - - -dev_course_clusters = None # a global variable used to "cache" the course clusters in dev - - -def retrieve_course_clusters(): - global dev_course_clusters - if in_dev() and os.environ.get("USE_PROD_MODEL", "false") != "true": - if dev_course_clusters is None: - print("TRAINING DEVELOPMENT MODEL... PLEASE WAIT") - dev_course_clusters = train_recommender( - course_data_path=( - settings.BASE_DIR + "/tests/plan/course_recs_test_data/course_data_test.csv" - ), - preloaded_descriptions_path=( - settings.BASE_DIR - + "/tests/plan/course_recs_test_data/course_descriptions_test.csv" - ), - output_path=os.devnull, - ) - print("Done training development model.") - return dev_course_clusters - cached_data = cache.get("course-cluster-data", None) - if cached_data is not None: - return cached_data - # Need to redownload - course_cluster_data = pickle.loads( - S3_client.get_object(Bucket="penn.courses", Key="course-cluster-data.pkl")["Body"].read() - ) - cache.set("course-cluster-data", course_cluster_data, timeout=90000) - return course_cluster_data - - -def clean_course_input(course_input): - return [course for course in course_input if len(course) > 0] - - -class Command(BaseCommand): - help = ( - "Use this script to recommend courses. If a username is specified, the script will " - "predict based on that user's PCP schedules. Otherwise, the script will " - "predict based on the provided curr_courses and past_courses lists." - ) - - def add_arguments(self, parser): - parser.add_argument( - "--username", - default=None, - type=str, - help=( - "The username of a user you would like to predict on. If this argument is " - "omitted, you should provide the curr_courses and/or past_courses arguments." - ), - ) - parser.add_argument( - "--curr_courses", - default="", - type=str, - help=( - "A comma-separated list of courses the user is currently planning to take " - "(each course represented by its string full code, e.g. `CIS-120` for CIS-120)." - ), - ) - parser.add_argument( - "--past_courses", - default="", - type=str, - help=( - "A comma-separated list of courses the user has previously taken (each course " - "represented by its string full code, e.g. `CIS-120` for CIS-120)." 
- ), - ) - - def handle(self, *args, **kwargs): - curr_courses = kwargs["curr_courses"].split(",") - past_courses = kwargs["past_courses"].split(",") - username = kwargs["username"] - - ( - cluster_centroids, - clusters, - curr_course_vectors_dict, - past_course_vectors_dict, - ) = retrieve_course_clusters() - if username is not None: - user = get_user_model().objects.get(username=username) - user_vector, user_courses = vectorize_user( - user, curr_course_vectors_dict, past_course_vectors_dict - ) - else: - user_vector, user_courses = vectorize_user_by_courses( - clean_course_input(curr_courses), - clean_course_input(past_courses), - curr_course_vectors_dict, - past_course_vectors_dict, - ) - - print( - recommend_courses( - curr_course_vectors_dict, cluster_centroids, clusters, user_vector, user_courses - ) - ) +import heapq +import os +import pickle +from typing import Optional, Set + +import numpy as np +from django.conf import settings +from django.contrib.auth import get_user_model +from django.core.cache import cache +from django.core.management.base import BaseCommand + +from courses.models import Course +from courses.util import get_current_semester, in_dev +from PennCourses.settings.base import S3_client +from plan.management.commands.trainrecommender import train_recommender +from plan.models import Schedule + + +# The proportion by which to up-weight current courses +# relative to past courses when computing a user vector +CURR_COURSES_BIAS = 3 + + +def vectorize_user_by_courses( + curr_courses, past_courses, curr_course_vectors_dict, past_course_vectors_dict +): + n = len(next(iter(curr_course_vectors_dict.values()))) + + # Input validation + all_courses = set(curr_courses) | set(past_courses) + if len(all_courses) != len(curr_courses) + len(past_courses): + raise ValueError( + "Repeated courses given in curr_courses and/or past_courses. " + f"curr_courses: {str(curr_courses)}. 
past_courses: {str(past_courses)}" + ) + invalid_curr_courses = set(curr_courses) - { + c.full_code + for c in Course.objects.filter(semester=get_current_semester(), full_code__in=curr_courses) + } + if len(invalid_curr_courses) > 0: + raise ValueError( + "The following courses in curr_courses are invalid or not offered this semester: " + f"{str(invalid_curr_courses)}" + ) + invalid_past_courses = set(past_courses) - { + c.full_code for c in Course.objects.filter(full_code__in=past_courses) + } + if len(invalid_past_courses) > 0: + raise ValueError( + f"The following courses in past_courses are invalid: {str(invalid_past_courses)}" + ) + + # Eliminate courses not in the model + curr_courses = [c for c in curr_courses if c in curr_course_vectors_dict] + past_courses = [c for c in past_courses if c in past_course_vectors_dict] + + curr_courses_vector = ( + np.zeros(n) + if len(curr_courses) == 0 + else sum(curr_course_vectors_dict[course] for course in curr_courses) + ) + past_courses_vector = ( + np.zeros(n) + if len(past_courses) == 0 + else sum(past_course_vectors_dict[course] for course in past_courses) + ) + + vector = curr_courses_vector * CURR_COURSES_BIAS + past_courses_vector + norm = np.linalg.norm(vector) + vector = vector / norm if norm > 0 else vector + return vector, all_courses + + +def vectorize_user(user, curr_course_vectors_dict, past_course_vectors_dict): + """ + Aggregates a vector over all the courses in the user's schedule + """ + curr_semester = get_current_semester() + curr_courses = set( + [ + s + for s in Schedule.objects.filter(person=user, semester=curr_semester).values_list( + "sections__course__full_code", flat=True + ) + if s is not None + ] + ) + past_courses = set( + [ + s + for s in Schedule.objects.filter(person=user, semester__lt=curr_semester).values_list( + "sections__course__full_code", flat=True + ) + if s is not None + ] + ) + past_courses = past_courses - curr_courses + return vectorize_user_by_courses( + list(curr_courses), list(past_courses), curr_course_vectors_dict, past_course_vectors_dict + ) + + +def cosine_similarity(v1, v2): + norm_prod = np.linalg.norm(v1) * np.linalg.norm(v2) + return np.dot(v1, v2) / norm_prod if norm_prod > 0 else 0 + + +def best_recommendations( + cluster, + curr_course_vectors_dict, + user_vector, + exclude: Optional[Set[str]] = None, + n_recommendations=5, +): + recs = [] + for course in cluster: + if exclude is not None and course in exclude: + continue + course_vector = curr_course_vectors_dict[course] + similarity = cosine_similarity(course_vector, user_vector) + recs.append((course, similarity)) + rec_course_to_score = {course: score for course, score in recs} + recs = [ + (c.full_code, rec_course_to_score[c.full_code]) + for c in Course.objects.filter( + semester=get_current_semester(), full_code__in=list(rec_course_to_score.keys()) + ) + ] # only recommend currently offered courses + if n_recommendations > len(recs): + n_recommendations = len(recs) + + return [course for course, _ in heapq.nlargest(n_recommendations, recs, lambda x: x[1])] + + +def recommend_courses( + curr_course_vectors_dict, + cluster_centroids, + clusters, + user_vector, + user_courses, + n_recommendations=5, +): + min_distance = -1 + best_cluster_index = -1 + for cluster_index, centroid in enumerate(cluster_centroids): + distance = np.linalg.norm(centroid - user_vector) + if best_cluster_index == -1 or distance < min_distance: + min_distance = distance + best_cluster_index = cluster_index + + return best_recommendations( + 
clusters[best_cluster_index], + curr_course_vectors_dict, + user_vector, + exclude=user_courses, + n_recommendations=n_recommendations, + ) + + +dev_course_clusters = None # a global variable used to "cache" the course clusters in dev + + +def retrieve_course_clusters(): + global dev_course_clusters + if in_dev() and os.environ.get("USE_PROD_MODEL", "false") != "true": + if dev_course_clusters is None: + print("TRAINING DEVELOPMENT MODEL... PLEASE WAIT") + dev_course_clusters = train_recommender( + course_data_path=( + settings.BASE_DIR + "/tests/plan/course_recs_test_data/course_data_test.csv" + ), + preloaded_descriptions_path=( + settings.BASE_DIR + + "/tests/plan/course_recs_test_data/course_descriptions_test.csv" + ), + output_path=os.devnull, + ) + print("Done training development model.") + return dev_course_clusters + cached_data = cache.get("course-cluster-data", None) + if cached_data is not None: + return cached_data + # Need to redownload + course_cluster_data = pickle.loads( + S3_client.get_object(Bucket="penn.courses", Key="course-cluster-data.pkl")["Body"].read() + ) + cache.set("course-cluster-data", course_cluster_data, timeout=90000) + return course_cluster_data + + +def clean_course_input(course_input): + return [course for course in course_input if len(course) > 0] + + +class Command(BaseCommand): + help = ( + "Use this script to recommend courses. If a username is specified, the script will " + "predict based on that user's PCP schedules. Otherwise, the script will " + "predict based on the provided curr_courses and past_courses lists." + ) + + def add_arguments(self, parser): + parser.add_argument( + "--username", + default=None, + type=str, + help=( + "The username of a user you would like to predict on. If this argument is " + "omitted, you should provide the curr_courses and/or past_courses arguments." + ), + ) + parser.add_argument( + "--curr_courses", + default="", + type=str, + help=( + "A comma-separated list of courses the user is currently planning to take " + "(each course represented by its string full code, e.g. `CIS-120` for CIS-120)." + ), + ) + parser.add_argument( + "--past_courses", + default="", + type=str, + help=( + "A comma-separated list of courses the user has previously taken (each course " + "represented by its string full code, e.g. `CIS-120` for CIS-120)." 
+ ), + ) + + def handle(self, *args, **kwargs): + curr_courses = kwargs["curr_courses"].split(",") + past_courses = kwargs["past_courses"].split(",") + username = kwargs["username"] + + ( + cluster_centroids, + clusters, + curr_course_vectors_dict, + past_course_vectors_dict, + ) = retrieve_course_clusters() + if username is not None: + user = get_user_model().objects.get(username=username) + user_vector, user_courses = vectorize_user( + user, curr_course_vectors_dict, past_course_vectors_dict + ) + else: + user_vector, user_courses = vectorize_user_by_courses( + clean_course_input(curr_courses), + clean_course_input(past_courses), + curr_course_vectors_dict, + past_course_vectors_dict, + ) + + print( + recommend_courses( + curr_course_vectors_dict, cluster_centroids, clusters, user_vector, user_courses + ) + ) diff --git a/backend/plan/management/commands/redownloadmodel.py b/backend/plan/management/commands/redownloadmodel.py index 9d1395259..7da008f8f 100644 --- a/backend/plan/management/commands/redownloadmodel.py +++ b/backend/plan/management/commands/redownloadmodel.py @@ -1,21 +1,21 @@ -from django.core.cache import cache -from django.core.management.base import BaseCommand - -from plan.management.commands.recommendcourses import retrieve_course_clusters - - -def redownload_course_rec_model(): - cache.delete("course-cluster-data") - retrieve_course_clusters() - - -class Command(BaseCommand): - help = ( - "Run this command to invalidate the course-cluster-data key in cache, causing the " - "latest course-cluster-data.pkl course recommendation model to be immediately " - "redownloaded from S3." - ) - - def handle(self, *args, **kwargs): - redownload_course_rec_model() - print("Done!") +from django.core.cache import cache +from django.core.management.base import BaseCommand + +from plan.management.commands.recommendcourses import retrieve_course_clusters + + +def redownload_course_rec_model(): + cache.delete("course-cluster-data") + retrieve_course_clusters() + + +class Command(BaseCommand): + help = ( + "Run this command to invalidate the course-cluster-data key in cache, causing the " + "latest course-cluster-data.pkl course recommendation model to be immediately " + "redownloaded from S3." 
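        # Illustrative usage only (assumed invocation, not part of this module):
        #     python manage.py redownloadmodel
        # or, from a Django shell / another module:
        #     from django.core.management import call_command
        #     call_command("redownloadmodel")
        # Either form deletes the "course-cluster-data" cache key and immediately
        # re-fetches the model from S3 via retrieve_course_clusters().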
+ ) + + def handle(self, *args, **kwargs): + redownload_course_rec_model() + print("Done!") diff --git a/backend/plan/management/commands/trainrecommender.py b/backend/plan/management/commands/trainrecommender.py index 42b93803c..bfe4f0792 100644 --- a/backend/plan/management/commands/trainrecommender.py +++ b/backend/plan/management/commands/trainrecommender.py @@ -1,522 +1,522 @@ -import codecs -import csv -import math -import os -import pickle -from typing import Dict, Iterable, List, Tuple - -import numpy as np -from django.core.cache import cache -from django.core.management.base import BaseCommand -from sklearn.cluster import KMeans -from sklearn.decomposition import PCA, TruncatedSVD -from sklearn.feature_extraction.text import TfidfVectorizer -from sklearn.preprocessing import normalize - -from courses.models import Course -from PennCourses.settings.base import S3_client, S3_resource -from plan.models import Schedule - - -def lookup_course(course): - try: - return Course.objects.filter(full_code=course).latest("semester") - except Course.DoesNotExist: - return None - - -def courses_data_from_db(): - """ - Fetches data from the courses db and yields tuples of the form person_id, course, semester - """ - user_to_semester_to_courses = dict() - for schedule in Schedule.objects.prefetch_related("sections").all(): - if schedule.person_id not in user_to_semester_to_courses: - user_to_semester_to_courses[schedule.person_id] = dict() - if schedule.semester not in user_to_semester_to_courses[schedule.person_id]: - user_to_semester_to_courses[schedule.person_id][schedule.semester] = set() - for section in schedule.sections.all(): - user_to_semester_to_courses[schedule.person_id][schedule.semester].add( - section.course.full_code - ) - for person_id in user_to_semester_to_courses: - for semester in user_to_semester_to_courses[person_id]: - for course_code in user_to_semester_to_courses[person_id][semester]: - yield person_id, course_code, semester - - -def courses_data_from_csv(course_data_path): - with open(course_data_path) as course_data_file: - data_reader = csv.reader(course_data_file) - for row in data_reader: - yield tuple(row) - - -def courses_data_from_s3(): - for row in csv.reader( - codecs.getreader("utf-8")( - S3_client.get_object(Bucket="penn.courses", Key="course_data.csv")["Body"] - ) - ): - yield tuple(row) - - -def get_description(course): - course_obj = lookup_course(course) - if course_obj is None or not course_obj.description: - return "" - return course_obj.description - - -def vectorize_courses_by_description(descriptions): - vectorizer = TfidfVectorizer() - has_nonempty_descriptions = ( - sum(1 for description in descriptions if description and len(description) > 0) > 0 - ) - if has_nonempty_descriptions: - vectors = vectorizer.fit_transform(descriptions) - else: - vectors = np.array([[0] for _ in descriptions]) - _, dim = vectors.shape - if dim >= 500: - dim_reducer = TruncatedSVD(n_components=500) - vectors = dim_reducer.fit_transform(vectors) - # divide the vectors by their norms - return normalize(vectors) - - -def group_courses(courses_data: Iterable[Tuple[int, str, str]]): - """ - courses_data should be an iterable of person id, course string, semester string - """ - # The dict below stores a person_id in association with a dict that associates - # a semester with a multiset of the courses taken during that semester. The reason this is a - # multiset is to take into account users with multiple mock schedules. 
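    # For illustration only (assumed toy data), the resulting structure looks like:
    #     {7: {"2021C": {"CIS-120": 2, "MATH-104": 1}, "2022A": {"CIS-160": 1}}}
    # i.e. user 7 has CIS-120 in two of their fall-2021 mock schedules.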
- # This is an intermediate data form that is used to construct the two dicts returned. - courses_by_semester_by_user: Dict[int, Dict[str, Dict[str, int]]] = dict() - for person_id, course, semester in courses_data: - course = normalize_class_name(course) - # maps a course to a list of semesters - if person_id not in courses_by_semester_by_user: - user_dict = dict() - courses_by_semester_by_user[person_id] = user_dict - else: - user_dict = courses_by_semester_by_user[person_id] - - if semester not in user_dict: - semester_courses_multiset = dict() - user_dict[semester] = semester_courses_multiset - else: - semester_courses_multiset = user_dict[semester] - - if course in semester_courses_multiset: - semester_courses_multiset[course] += 1 - else: - semester_courses_multiset[course] = 1 - - return courses_by_semester_by_user - - -def vectorize_by_copresence( - courses_by_semester_by_user, as_past_class=False -) -> Dict[str, np.ndarray]: - """ - Vectorizes courses by whether they're in the user's schedule at the same time, - as well as the number of times they come after other courses. - :param courses_by_semester_by_user: Grouped courses data returned by group_courses - :return: - """ - courses_set = set() - for _, courses_by_semester in courses_by_semester_by_user.items(): - for _, course_multiset in courses_by_semester.items(): - for course, _ in course_multiset.items(): - courses_set.add(course) - courses_list = list(courses_set) - course_to_index = {course: i for i, course in enumerate(courses_list)} - - copresence_vectors_by_course = {course: np.zeros(len(courses_list)) for course in courses_list} - order_vectors_by_course = {course: np.zeros(len(courses_list)) for course in courses_list} - - for user, courses_by_semester in courses_by_semester_by_user.items(): - for sem, course_multiset in courses_by_semester.items(): - for course_a, frequency_a in course_multiset.items(): - index_a = course_to_index[course_a] - relevant_vector_a = copresence_vectors_by_course[course_a] - # A past class does not occur with classes in the same semester - if not as_past_class: - for course_b, frequency_b in course_multiset.items(): - co_frequency = min(frequency_a, frequency_b) - index_b = course_to_index[course_b] - relevant_vector_a[index_b] += co_frequency - # make sure that every course appears with itself - relevant_vector_a[index_a] += frequency_a - ordered_sems = sorted(courses_by_semester.keys()) - for i, sem in enumerate(ordered_sems): - courses_first_sem = courses_by_semester[sem] - # if this class is being encoded as a past class, it happens after itself - start_sem_index = i if as_past_class else i + 1 - for later_sem in ordered_sems[start_sem_index:]: - courses_later_sem = courses_by_semester[later_sem] - for course_later, freq1 in courses_later_sem.items(): - add_to_copres = as_past_class and later_sem != ordered_sems[start_sem_index] - for course_earlier, freq2 in courses_first_sem.items(): - earlier_index = course_to_index[course_earlier] - cofreq = min(freq1, freq2) - order_vectors_by_course[course_later][earlier_index] += cofreq - if add_to_copres: - later_index = course_to_index[course_later] - copresence_vectors_by_course[course_earlier][later_index] += cofreq - - concatenated = { - key: order_vectors_by_course[key] + copresence_vectors_by_course[key] - for key in order_vectors_by_course - } - - return concatenated - - -def vectorize_courses_by_schedule_presence(courses_by_user: List[Dict[str, int]]): - """ - @:param courses_by_user: A list of dicts in which each dict maps a course id 
to - the number of times a user has it in their schedules. - :return: A dict mapping course ids to a vector wherein each component - contains how many times the corresponding user - has that course in their schedules. - """ - num_users = len(courses_by_user) - course_vectors_dict = {} - for user_index, user_courses in enumerate(courses_by_user): - for course, frequency in user_courses.items(): - relevant_vector: np.ndarray - if course not in course_vectors_dict: - relevant_vector = np.zeros(num_users) - course_vectors_dict[course] = relevant_vector - else: - relevant_vector = course_vectors_dict[course] - relevant_vector[user_index] = frequency - - courses, vectors = zip(*course_vectors_dict.items()) - # reduce dimensionality to the log of the number of users - vectors = np.array(vectors) - _, dims = vectors.shape - dim_reduced_components = round(math.log2(num_users + 2)) - if min(dims, dim_reduced_components) > 5: - dim_reducer = PCA(n_components=dim_reduced_components) - dim_reduced = dim_reducer.fit_transform(vectors) - else: - dim_reduced = np.array(vectors) - # divide the vectors by the average norm - scaled = normalize(dim_reduced) - return {course: scaled for course, scaled in zip(courses, scaled)} - - -def get_unsequenced_courses_by_user(courses_by_semester_by_user): - """ - Takes in grouped courses data and returns a list of multisets, - wherein each multiset is the multiset of courses - for a particular user - :param courses_by_semester_by_user: Grouped courses data returned by group_courses - :return: - """ - unsequenced_courses_by_user = {} - for user, courses_by_semester in courses_by_semester_by_user.items(): - combined_multiset = {} - for semester, course_multiset in courses_by_semester.items(): - for course, frequency in course_multiset.items(): - combined_multiset[course] = frequency - unsequenced_courses_by_user[user] = combined_multiset - - return list(unsequenced_courses_by_user.values()) - - -def get_descriptions(courses, preloaded_descriptions): - descriptions = [] - for course in courses: - if course in preloaded_descriptions: - descriptions.append(preloaded_descriptions[course]) - else: - descriptions.append(get_description(course)) - return descriptions - - -def generate_course_vectors_dict(courses_data, use_descriptions=True, preloaded_descriptions={}): - """ - Generates a dict associating courses to vectors for those courses, - as well as courses to vector representations - of having taken that class in the past. 
- """ - courses_to_vectors_curr = {} - courses_to_vectors_past = {} - grouped_courses = group_courses(courses_data) - copresence_vectors_by_course = vectorize_by_copresence(grouped_courses) - copresence_vectors_by_course_past = vectorize_by_copresence(grouped_courses, as_past_class=True) - courses_by_user = get_unsequenced_courses_by_user(grouped_courses) - courses, courses_vectorized_by_schedule_presence = zip( - *vectorize_courses_by_schedule_presence(courses_by_user).items() - ) - courses_vectorized_by_description = vectorize_courses_by_description( - get_descriptions(courses, preloaded_descriptions) - ) - copresence_vectors = [copresence_vectors_by_course[course] for course in courses] - copresence_vectors_past = [copresence_vectors_by_course_past[course] for course in courses] - copresence_vectors = normalize(copresence_vectors) - copresence_vectors_past = normalize(copresence_vectors_past) - _, dims = copresence_vectors_past.shape - dim_reduced_components = round(30 * math.log2(len(courses))) - if min(dims, dim_reduced_components) > 5: - dim_reduce = TruncatedSVD(n_components=dim_reduced_components) - copresence_vectors = dim_reduce.fit_transform(copresence_vectors) - dim_reduce = TruncatedSVD(n_components=dim_reduced_components) - copresence_vectors_past = dim_reduce.fit_transform(copresence_vectors_past) - for ( - course, - schedule_vector, - description_vector, - copresence_vector, - copresence_vector_past, - ) in zip( - courses, - courses_vectorized_by_schedule_presence, - courses_vectorized_by_description, - copresence_vectors, - copresence_vectors_past, - ): - if use_descriptions: - if np.linalg.norm(description_vector) == 0: - continue - total_vector_curr = np.concatenate( - [schedule_vector, description_vector, copresence_vector * 2] - ) - total_vector_past = np.concatenate( - [schedule_vector, description_vector, copresence_vector_past * 2] - ) - else: - total_vector_curr = np.concatenate([schedule_vector, copresence_vector * 2]) - total_vector_past = np.concatenate([schedule_vector, copresence_vector_past * 2]) - courses_to_vectors_curr[course] = total_vector_curr / np.linalg.norm(total_vector_curr) - courses_to_vectors_past[course] = total_vector_past / np.linalg.norm(total_vector_past) - return courses_to_vectors_curr, courses_to_vectors_past - - -def normalize_class_name(class_name): - """ - Take in a class name and return the standard name for that class - """ - course_obj: Course = lookup_course(class_name) - if course_obj is None: - return class_name - return course_obj.primary_listing.full_code - - -def generate_course_clusters(courses_data, n_per_cluster=100, preloaded_descriptions={}): - """ - Clusters courses and also returns a vector representation of each class - (one for having taken that class now, and another for having taken it in the past) - """ - course_vectors_dict_curr, course_vectors_dict_past = generate_course_vectors_dict( - courses_data, preloaded_descriptions=preloaded_descriptions - ) - _courses, _course_vectors = zip(*course_vectors_dict_curr.items()) - courses, course_vectors = list(_courses), np.array(list(_course_vectors)) - num_clusters = round(len(courses) / n_per_cluster) - model = KMeans(n_clusters=num_clusters) - raw_cluster_result = model.fit_predict(course_vectors) - clusters = [[] for _ in range(num_clusters)] - for course_index, cluster_index in enumerate(raw_cluster_result): - clusters[cluster_index].append(courses[course_index]) - - cluster_centroids = [ - sum(course_vectors_dict_curr[course] for course in cluster) / len(cluster) - 
for cluster in clusters - ] - return cluster_centroids, clusters, course_vectors_dict_curr, course_vectors_dict_past - - -def train_recommender( - course_data_path=None, - preloaded_descriptions_path=None, - train_from_s3=False, - output_path=None, - upload_to_s3=False, - n_per_cluster=100, - verbose=False, -): - # input validation - if train_from_s3: - assert ( - course_data_path is None - ), "If you are training on data from S3, there's no need to supply a local data path" - if course_data_path is not None: - assert course_data_path.endswith(".csv"), "Local data path must be .csv" - if preloaded_descriptions_path is not None: - assert preloaded_descriptions_path.endswith( - ".csv" - ), "Local course descriptions path must be .csv" - - if output_path is None: - assert upload_to_s3, "You must either specify an output path, or upload to S3" - if upload_to_s3: - assert output_path is None, ( - "If you are uploading the trained model to S3, there's no need to specify an " - "output path." - ) - else: - assert output_path is not None, "You must either specify an output path, or upload to S3" - assert ( - output_path.endswith(".pkl") or output_path == os.devnull - ), "Output file must have a .pkl extension" - - if verbose and not upload_to_s3 and not output_path.endswith("course-cluster-data.pkl"): - print( - "Warning: The name of the course recommendation model used in prod (stored in S3) " - "must be course-cluster-data.pkl." - ) - if verbose and "production" not in os.environ.get("DJANGO_SETTINGS_MODULE", ""): - print( - "Warning: Make sure you have all the courses in your data source " - "(especially their descriptions) loaded into to your local/dev database, otherwise " - "this training may fail (causing an error like ValueError: empty vocabulary) " - "or produce a low quality model." - ) - if verbose: - print("Training...") - - if train_from_s3: - courses_data = courses_data_from_s3() - else: - courses_data = ( - courses_data_from_csv(course_data_path) - if course_data_path is not None - else courses_data_from_db() - ) - - preloaded_descriptions = dict() - if preloaded_descriptions_path is not None: - preloaded_descriptions = dict(courses_data_from_csv(preloaded_descriptions_path)) - - if preloaded_descriptions_path is None and verbose: - print( - "A preloaded_descriptions_path has not been supplied." - "the database will be queried to get descriptions downstream" - ) - - course_clusters = generate_course_clusters( - courses_data, n_per_cluster, preloaded_descriptions=preloaded_descriptions - ) - - if upload_to_s3: - S3_resource.Object("penn.courses", "course-cluster-data.pkl").put( - Body=pickle.dumps(course_clusters) - ) - cache.set("course-cluster-data", course_clusters, timeout=90000) - else: - pickle.dump( - course_clusters, - open(output_path, "wb"), - ) - - if verbose: - print("Done!") - - return course_clusters - - -class Command(BaseCommand): - help = ( - "Use this script to train a PCP course recommendation model on given training data " - "(specified via a local path, or from S3), and output the trained model (as a .pkl file) " - "to a specified local filepath (or to S3).\n" - "If you overwrite the course-cluster-data.pkl object in the penn.courses S3 bucket, " - "the course recommendation model actually used in prod will be updated within 25 hours, " - "or after the registrarimport management command is next run (done daily by a " - "cron job), or when the redownloadmodel management command is run to manually trigger a " - "redownload, whichever comes first." 
- ) - - def add_arguments(self, parser): - parser.add_argument( - "--course-data-path", - type=str, - default=None, - help=( - "The local path to the training data csv. If this argument is omitted, the model " - "will be trained on Schedule data from the db (this only makes sense in prod).\n" - "The csv pointed to by this path should have 3 columns:\n" - "person_id, course, semester" - "\nThe person_id column should contain a user hash, the course column should " - "contain the course code (in the format DEPT-XXX, e.g. CIS-120), and " - "the semester column should contain the semester in which the course was taken " - "by that user." - ), - ) - parser.add_argument( - "--preloaded-descriptions-path", - type=str, - default=None, - help=( - "The local path to a course description data csv.\n" - "If this argument is included, the course_data_path argument should be included. " - "If this argument is omitted, the model will only trained on description " - "data from the db.\n" - "When this argument is included, descriptions will preferentially be pulled " - "from the file that this argument points to. If a course's description " - "is not in the file, then the course's description is pulled from " - "the db (if it is not present there, an empty string is used as the " - "description).\n" - "The csv pointed to by this path should have 2 columns:\n" - "course, description" - "\nthe course column should " - "contain the course code (in the format DEPT-XXX, e.g. CIS-120) " - "as provided in the course_data_path csv, and " - "the description column should contain the full text of the description " - "corresponding to the course." - ), - ) - parser.add_argument( - "--train-from-s3", - default=False, - action="store_true", - help=( - "Enable this argument to train this model using data stored in S3. If this " - "argument is flagged, the course_data_path argument must be omitted." - ), - ) - parser.add_argument( - "--output-path", - default=None, - type=str, - help="The local path where the model pkl should be saved.", - ) - parser.add_argument( - "--upload-to-s3", - default=False, - action="store_true", - help=( - "Enable this argument to upload this model to S3, replacing the " - "course-cluster-data.pkl key in the penn.courses bucket. " - "If this argument is flagged, the output_path argument must be omitted." - ), - ) - parser.add_argument( - "--n-per-cluster", - type=int, - default=100, - help="The number of courses to include in each cluster (a hyperparameter). 
" - "Defaults to 100.", - ) - - def handle(self, *args, **kwargs): - course_data_path = kwargs["course_data_path"] - train_from_s3 = kwargs["train_from_s3"] - output_path = kwargs["output_path"] - upload_to_s3 = kwargs["upload_to_s3"] - n_per_cluster = kwargs["n_per_cluster"] - - train_recommender( - course_data_path=course_data_path, - train_from_s3=train_from_s3, - output_path=output_path, - upload_to_s3=upload_to_s3, - n_per_cluster=n_per_cluster, - verbose=True, - ) +import codecs +import csv +import math +import os +import pickle +from typing import Dict, Iterable, List, Tuple + +import numpy as np +from django.core.cache import cache +from django.core.management.base import BaseCommand +from sklearn.cluster import KMeans +from sklearn.decomposition import PCA, TruncatedSVD +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.preprocessing import normalize + +from courses.models import Course +from PennCourses.settings.base import S3_client, S3_resource +from plan.models import Schedule + + +def lookup_course(course): + try: + return Course.objects.filter(full_code=course).latest("semester") + except Course.DoesNotExist: + return None + + +def courses_data_from_db(): + """ + Fetches data from the courses db and yields tuples of the form person_id, course, semester + """ + user_to_semester_to_courses = dict() + for schedule in Schedule.objects.prefetch_related("sections").all(): + if schedule.person_id not in user_to_semester_to_courses: + user_to_semester_to_courses[schedule.person_id] = dict() + if schedule.semester not in user_to_semester_to_courses[schedule.person_id]: + user_to_semester_to_courses[schedule.person_id][schedule.semester] = set() + for section in schedule.sections.all(): + user_to_semester_to_courses[schedule.person_id][schedule.semester].add( + section.course.full_code + ) + for person_id in user_to_semester_to_courses: + for semester in user_to_semester_to_courses[person_id]: + for course_code in user_to_semester_to_courses[person_id][semester]: + yield person_id, course_code, semester + + +def courses_data_from_csv(course_data_path): + with open(course_data_path) as course_data_file: + data_reader = csv.reader(course_data_file) + for row in data_reader: + yield tuple(row) + + +def courses_data_from_s3(): + for row in csv.reader( + codecs.getreader("utf-8")( + S3_client.get_object(Bucket="penn.courses", Key="course_data.csv")["Body"] + ) + ): + yield tuple(row) + + +def get_description(course): + course_obj = lookup_course(course) + if course_obj is None or not course_obj.description: + return "" + return course_obj.description + + +def vectorize_courses_by_description(descriptions): + vectorizer = TfidfVectorizer() + has_nonempty_descriptions = ( + sum(1 for description in descriptions if description and len(description) > 0) > 0 + ) + if has_nonempty_descriptions: + vectors = vectorizer.fit_transform(descriptions) + else: + vectors = np.array([[0] for _ in descriptions]) + _, dim = vectors.shape + if dim >= 500: + dim_reducer = TruncatedSVD(n_components=500) + vectors = dim_reducer.fit_transform(vectors) + # divide the vectors by their norms + return normalize(vectors) + + +def group_courses(courses_data: Iterable[Tuple[int, str, str]]): + """ + courses_data should be an iterable of person id, course string, semester string + """ + # The dict below stores a person_id in association with a dict that associates + # a semester with a multiset of the courses taken during that semester. 
The reason this is a + # multiset is to take into account users with multiple mock schedules. + # This is an intermediate data form that is used to construct the two dicts returned. + courses_by_semester_by_user: Dict[int, Dict[str, Dict[str, int]]] = dict() + for person_id, course, semester in courses_data: + course = normalize_class_name(course) + # maps a course to a list of semesters + if person_id not in courses_by_semester_by_user: + user_dict = dict() + courses_by_semester_by_user[person_id] = user_dict + else: + user_dict = courses_by_semester_by_user[person_id] + + if semester not in user_dict: + semester_courses_multiset = dict() + user_dict[semester] = semester_courses_multiset + else: + semester_courses_multiset = user_dict[semester] + + if course in semester_courses_multiset: + semester_courses_multiset[course] += 1 + else: + semester_courses_multiset[course] = 1 + + return courses_by_semester_by_user + + +def vectorize_by_copresence( + courses_by_semester_by_user, as_past_class=False +) -> Dict[str, np.ndarray]: + """ + Vectorizes courses by whether they're in the user's schedule at the same time, + as well as the number of times they come after other courses. + :param courses_by_semester_by_user: Grouped courses data returned by group_courses + :return: + """ + courses_set = set() + for _, courses_by_semester in courses_by_semester_by_user.items(): + for _, course_multiset in courses_by_semester.items(): + for course, _ in course_multiset.items(): + courses_set.add(course) + courses_list = list(courses_set) + course_to_index = {course: i for i, course in enumerate(courses_list)} + + copresence_vectors_by_course = {course: np.zeros(len(courses_list)) for course in courses_list} + order_vectors_by_course = {course: np.zeros(len(courses_list)) for course in courses_list} + + for user, courses_by_semester in courses_by_semester_by_user.items(): + for sem, course_multiset in courses_by_semester.items(): + for course_a, frequency_a in course_multiset.items(): + index_a = course_to_index[course_a] + relevant_vector_a = copresence_vectors_by_course[course_a] + # A past class does not occur with classes in the same semester + if not as_past_class: + for course_b, frequency_b in course_multiset.items(): + co_frequency = min(frequency_a, frequency_b) + index_b = course_to_index[course_b] + relevant_vector_a[index_b] += co_frequency + # make sure that every course appears with itself + relevant_vector_a[index_a] += frequency_a + ordered_sems = sorted(courses_by_semester.keys()) + for i, sem in enumerate(ordered_sems): + courses_first_sem = courses_by_semester[sem] + # if this class is being encoded as a past class, it happens after itself + start_sem_index = i if as_past_class else i + 1 + for later_sem in ordered_sems[start_sem_index:]: + courses_later_sem = courses_by_semester[later_sem] + for course_later, freq1 in courses_later_sem.items(): + add_to_copres = as_past_class and later_sem != ordered_sems[start_sem_index] + for course_earlier, freq2 in courses_first_sem.items(): + earlier_index = course_to_index[course_earlier] + cofreq = min(freq1, freq2) + order_vectors_by_course[course_later][earlier_index] += cofreq + if add_to_copres: + later_index = course_to_index[course_later] + copresence_vectors_by_course[course_earlier][later_index] += cofreq + + concatenated = { + key: order_vectors_by_course[key] + copresence_vectors_by_course[key] + for key in order_vectors_by_course + } + + return concatenated + + +def vectorize_courses_by_schedule_presence(courses_by_user: List[Dict[str, 
int]]): + """ + @:param courses_by_user: A list of dicts in which each dict maps a course id to + the number of times a user has it in their schedules. + :return: A dict mapping course ids to a vector wherein each component + contains how many times the corresponding user + has that course in their schedules. + """ + num_users = len(courses_by_user) + course_vectors_dict = {} + for user_index, user_courses in enumerate(courses_by_user): + for course, frequency in user_courses.items(): + relevant_vector: np.ndarray + if course not in course_vectors_dict: + relevant_vector = np.zeros(num_users) + course_vectors_dict[course] = relevant_vector + else: + relevant_vector = course_vectors_dict[course] + relevant_vector[user_index] = frequency + + courses, vectors = zip(*course_vectors_dict.items()) + # reduce dimensionality to the log of the number of users + vectors = np.array(vectors) + _, dims = vectors.shape + dim_reduced_components = round(math.log2(num_users + 2)) + if min(dims, dim_reduced_components) > 5: + dim_reducer = PCA(n_components=dim_reduced_components) + dim_reduced = dim_reducer.fit_transform(vectors) + else: + dim_reduced = np.array(vectors) + # divide the vectors by the average norm + scaled = normalize(dim_reduced) + return {course: scaled for course, scaled in zip(courses, scaled)} + + +def get_unsequenced_courses_by_user(courses_by_semester_by_user): + """ + Takes in grouped courses data and returns a list of multisets, + wherein each multiset is the multiset of courses + for a particular user + :param courses_by_semester_by_user: Grouped courses data returned by group_courses + :return: + """ + unsequenced_courses_by_user = {} + for user, courses_by_semester in courses_by_semester_by_user.items(): + combined_multiset = {} + for semester, course_multiset in courses_by_semester.items(): + for course, frequency in course_multiset.items(): + combined_multiset[course] = frequency + unsequenced_courses_by_user[user] = combined_multiset + + return list(unsequenced_courses_by_user.values()) + + +def get_descriptions(courses, preloaded_descriptions): + descriptions = [] + for course in courses: + if course in preloaded_descriptions: + descriptions.append(preloaded_descriptions[course]) + else: + descriptions.append(get_description(course)) + return descriptions + + +def generate_course_vectors_dict(courses_data, use_descriptions=True, preloaded_descriptions={}): + """ + Generates a dict associating courses to vectors for those courses, + as well as courses to vector representations + of having taken that class in the past. 
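    Returns a tuple (courses_to_vectors_curr, courses_to_vectors_past), each mapping a
    course's full code to its unit-normalized vector.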
+ """ + courses_to_vectors_curr = {} + courses_to_vectors_past = {} + grouped_courses = group_courses(courses_data) + copresence_vectors_by_course = vectorize_by_copresence(grouped_courses) + copresence_vectors_by_course_past = vectorize_by_copresence(grouped_courses, as_past_class=True) + courses_by_user = get_unsequenced_courses_by_user(grouped_courses) + courses, courses_vectorized_by_schedule_presence = zip( + *vectorize_courses_by_schedule_presence(courses_by_user).items() + ) + courses_vectorized_by_description = vectorize_courses_by_description( + get_descriptions(courses, preloaded_descriptions) + ) + copresence_vectors = [copresence_vectors_by_course[course] for course in courses] + copresence_vectors_past = [copresence_vectors_by_course_past[course] for course in courses] + copresence_vectors = normalize(copresence_vectors) + copresence_vectors_past = normalize(copresence_vectors_past) + _, dims = copresence_vectors_past.shape + dim_reduced_components = round(30 * math.log2(len(courses))) + if min(dims, dim_reduced_components) > 5: + dim_reduce = TruncatedSVD(n_components=dim_reduced_components) + copresence_vectors = dim_reduce.fit_transform(copresence_vectors) + dim_reduce = TruncatedSVD(n_components=dim_reduced_components) + copresence_vectors_past = dim_reduce.fit_transform(copresence_vectors_past) + for ( + course, + schedule_vector, + description_vector, + copresence_vector, + copresence_vector_past, + ) in zip( + courses, + courses_vectorized_by_schedule_presence, + courses_vectorized_by_description, + copresence_vectors, + copresence_vectors_past, + ): + if use_descriptions: + if np.linalg.norm(description_vector) == 0: + continue + total_vector_curr = np.concatenate( + [schedule_vector, description_vector, copresence_vector * 2] + ) + total_vector_past = np.concatenate( + [schedule_vector, description_vector, copresence_vector_past * 2] + ) + else: + total_vector_curr = np.concatenate([schedule_vector, copresence_vector * 2]) + total_vector_past = np.concatenate([schedule_vector, copresence_vector_past * 2]) + courses_to_vectors_curr[course] = total_vector_curr / np.linalg.norm(total_vector_curr) + courses_to_vectors_past[course] = total_vector_past / np.linalg.norm(total_vector_past) + return courses_to_vectors_curr, courses_to_vectors_past + + +def normalize_class_name(class_name): + """ + Take in a class name and return the standard name for that class + """ + course_obj: Course = lookup_course(class_name) + if course_obj is None: + return class_name + return course_obj.primary_listing.full_code + + +def generate_course_clusters(courses_data, n_per_cluster=100, preloaded_descriptions={}): + """ + Clusters courses and also returns a vector representation of each class + (one for having taken that class now, and another for having taken it in the past) + """ + course_vectors_dict_curr, course_vectors_dict_past = generate_course_vectors_dict( + courses_data, preloaded_descriptions=preloaded_descriptions + ) + _courses, _course_vectors = zip(*course_vectors_dict_curr.items()) + courses, course_vectors = list(_courses), np.array(list(_course_vectors)) + num_clusters = round(len(courses) / n_per_cluster) + model = KMeans(n_clusters=num_clusters) + raw_cluster_result = model.fit_predict(course_vectors) + clusters = [[] for _ in range(num_clusters)] + for course_index, cluster_index in enumerate(raw_cluster_result): + clusters[cluster_index].append(courses[course_index]) + + cluster_centroids = [ + sum(course_vectors_dict_curr[course] for course in cluster) / len(cluster) + 
for cluster in clusters + ] + return cluster_centroids, clusters, course_vectors_dict_curr, course_vectors_dict_past + + +def train_recommender( + course_data_path=None, + preloaded_descriptions_path=None, + train_from_s3=False, + output_path=None, + upload_to_s3=False, + n_per_cluster=100, + verbose=False, +): + # input validation + if train_from_s3: + assert ( + course_data_path is None + ), "If you are training on data from S3, there's no need to supply a local data path" + if course_data_path is not None: + assert course_data_path.endswith(".csv"), "Local data path must be .csv" + if preloaded_descriptions_path is not None: + assert preloaded_descriptions_path.endswith( + ".csv" + ), "Local course descriptions path must be .csv" + + if output_path is None: + assert upload_to_s3, "You must either specify an output path, or upload to S3" + if upload_to_s3: + assert output_path is None, ( + "If you are uploading the trained model to S3, there's no need to specify an " + "output path." + ) + else: + assert output_path is not None, "You must either specify an output path, or upload to S3" + assert ( + output_path.endswith(".pkl") or output_path == os.devnull + ), "Output file must have a .pkl extension" + + if verbose and not upload_to_s3 and not output_path.endswith("course-cluster-data.pkl"): + print( + "Warning: The name of the course recommendation model used in prod (stored in S3) " + "must be course-cluster-data.pkl." + ) + if verbose and "production" not in os.environ.get("DJANGO_SETTINGS_MODULE", ""): + print( + "Warning: Make sure you have all the courses in your data source " + "(especially their descriptions) loaded into to your local/dev database, otherwise " + "this training may fail (causing an error like ValueError: empty vocabulary) " + "or produce a low quality model." + ) + if verbose: + print("Training...") + + if train_from_s3: + courses_data = courses_data_from_s3() + else: + courses_data = ( + courses_data_from_csv(course_data_path) + if course_data_path is not None + else courses_data_from_db() + ) + + preloaded_descriptions = dict() + if preloaded_descriptions_path is not None: + preloaded_descriptions = dict(courses_data_from_csv(preloaded_descriptions_path)) + + if preloaded_descriptions_path is None and verbose: + print( + "A preloaded_descriptions_path has not been supplied." + "the database will be queried to get descriptions downstream" + ) + + course_clusters = generate_course_clusters( + courses_data, n_per_cluster, preloaded_descriptions=preloaded_descriptions + ) + + if upload_to_s3: + S3_resource.Object("penn.courses", "course-cluster-data.pkl").put( + Body=pickle.dumps(course_clusters) + ) + cache.set("course-cluster-data", course_clusters, timeout=90000) + else: + pickle.dump( + course_clusters, + open(output_path, "wb"), + ) + + if verbose: + print("Done!") + + return course_clusters + + +class Command(BaseCommand): + help = ( + "Use this script to train a PCP course recommendation model on given training data " + "(specified via a local path, or from S3), and output the trained model (as a .pkl file) " + "to a specified local filepath (or to S3).\n" + "If you overwrite the course-cluster-data.pkl object in the penn.courses S3 bucket, " + "the course recommendation model actually used in prod will be updated within 25 hours, " + "or after the registrarimport management command is next run (done daily by a " + "cron job), or when the redownloadmodel management command is run to manually trigger a " + "redownload, whichever comes first." 
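        # Illustrative usage only (assumed paths/invocations, not part of this module):
        #     python manage.py trainrecommender --course-data-path course_data.csv \
        #         --output-path course-cluster-data.pkl
        # or, training on the S3 data dump and publishing the model back to S3:
        #     python manage.py trainrecommender --train-from-s3 --upload-to-s3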
+ ) + + def add_arguments(self, parser): + parser.add_argument( + "--course-data-path", + type=str, + default=None, + help=( + "The local path to the training data csv. If this argument is omitted, the model " + "will be trained on Schedule data from the db (this only makes sense in prod).\n" + "The csv pointed to by this path should have 3 columns:\n" + "person_id, course, semester" + "\nThe person_id column should contain a user hash, the course column should " + "contain the course code (in the format DEPT-XXX, e.g. CIS-120), and " + "the semester column should contain the semester in which the course was taken " + "by that user." + ), + ) + parser.add_argument( + "--preloaded-descriptions-path", + type=str, + default=None, + help=( + "The local path to a course description data csv.\n" + "If this argument is included, the course_data_path argument should be included. " + "If this argument is omitted, the model will only trained on description " + "data from the db.\n" + "When this argument is included, descriptions will preferentially be pulled " + "from the file that this argument points to. If a course's description " + "is not in the file, then the course's description is pulled from " + "the db (if it is not present there, an empty string is used as the " + "description).\n" + "The csv pointed to by this path should have 2 columns:\n" + "course, description" + "\nthe course column should " + "contain the course code (in the format DEPT-XXX, e.g. CIS-120) " + "as provided in the course_data_path csv, and " + "the description column should contain the full text of the description " + "corresponding to the course." + ), + ) + parser.add_argument( + "--train-from-s3", + default=False, + action="store_true", + help=( + "Enable this argument to train this model using data stored in S3. If this " + "argument is flagged, the course_data_path argument must be omitted." + ), + ) + parser.add_argument( + "--output-path", + default=None, + type=str, + help="The local path where the model pkl should be saved.", + ) + parser.add_argument( + "--upload-to-s3", + default=False, + action="store_true", + help=( + "Enable this argument to upload this model to S3, replacing the " + "course-cluster-data.pkl key in the penn.courses bucket. " + "If this argument is flagged, the output_path argument must be omitted." + ), + ) + parser.add_argument( + "--n-per-cluster", + type=int, + default=100, + help="The number of courses to include in each cluster (a hyperparameter). 
" + "Defaults to 100.", + ) + + def handle(self, *args, **kwargs): + course_data_path = kwargs["course_data_path"] + train_from_s3 = kwargs["train_from_s3"] + output_path = kwargs["output_path"] + upload_to_s3 = kwargs["upload_to_s3"] + n_per_cluster = kwargs["n_per_cluster"] + + train_recommender( + course_data_path=course_data_path, + train_from_s3=train_from_s3, + output_path=output_path, + upload_to_s3=upload_to_s3, + n_per_cluster=n_per_cluster, + verbose=True, + ) diff --git a/backend/plan/migrations/0001_initial.py b/backend/plan/migrations/0001_initial.py index 6aa26d57d..9d820802a 100644 --- a/backend/plan/migrations/0001_initial.py +++ b/backend/plan/migrations/0001_initial.py @@ -1,40 +1,40 @@ -# Generated by Django 2.2.5 on 2019-10-19 20:29 - -import django.db.models.deletion -from django.conf import settings -from django.db import migrations, models - - -class Migration(migrations.Migration): - - initial = True - - dependencies = [ - ("courses", "0020_auto_20190928_0046"), - migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ] - - operations = [ - migrations.CreateModel( - name="Schedule", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("semester", models.CharField(max_length=5)), - ("name", models.CharField(max_length=255)), - ("created_at", models.DateTimeField(auto_now_add=True)), - ("updated_at", models.DateTimeField(auto_now=True)), - ( - "person", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL - ), - ), - ("sections", models.ManyToManyField(to="courses.Section")), - ], - ), - ] +# Generated by Django 2.2.5 on 2019-10-19 20:29 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ("courses", "0020_auto_20190928_0046"), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ] + + operations = [ + migrations.CreateModel( + name="Schedule", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("semester", models.CharField(max_length=5)), + ("name", models.CharField(max_length=255)), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ( + "person", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL + ), + ), + ("sections", models.ManyToManyField(to="courses.Section")), + ], + ), + ] diff --git a/backend/plan/migrations/0002_auto_20191027_1510.py b/backend/plan/migrations/0002_auto_20191027_1510.py index 8e5974395..2daca69ce 100644 --- a/backend/plan/migrations/0002_auto_20191027_1510.py +++ b/backend/plan/migrations/0002_auto_20191027_1510.py @@ -1,19 +1,19 @@ -# Generated by Django 2.2.5 on 2019-10-27 19:10 - -from django.conf import settings -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ("plan", "0001_initial"), - ] - - operations = [ - migrations.AlterUniqueTogether( - name="schedule", - unique_together={("name", "semester", "person")}, - ), - ] +# Generated by Django 2.2.5 on 2019-10-27 19:10 + +from django.conf import settings +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + 
("plan", "0001_initial"), + ] + + operations = [ + migrations.AlterUniqueTogether( + name="schedule", + unique_together={("name", "semester", "person")}, + ), + ] diff --git a/backend/plan/migrations/0003_auto_20201002_0714.py b/backend/plan/migrations/0003_auto_20201002_0714.py index cb2bd024c..8c2003ed6 100644 --- a/backend/plan/migrations/0003_auto_20201002_0714.py +++ b/backend/plan/migrations/0003_auto_20201002_0714.py @@ -1,50 +1,50 @@ -# Generated by Django 3.1.1 on 2020-10-02 11:14 - -import django.db.models.deletion -from django.conf import settings -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("courses", "0030_auto_20201002_0714"), - migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ("plan", "0002_auto_20191027_1510"), - ] - - operations = [ - migrations.AlterField( - model_name="schedule", - name="name", - field=models.CharField( - help_text="\nThe user's nick-name for the schedule. No two schedules can match in all of the fields\n`[name, semester, person]`\n", - max_length=255, - ), - ), - migrations.AlterField( - model_name="schedule", - name="person", - field=models.ForeignKey( - help_text="The person (user) to which the schedule belongs.", - on_delete=django.db.models.deletion.CASCADE, - to=settings.AUTH_USER_MODEL, - ), - ), - migrations.AlterField( - model_name="schedule", - name="sections", - field=models.ManyToManyField( - help_text="\nThe class sections which comprise the schedule. The semester of each of these sections is\nassumed to match the semester defined by the semester field below.\n", - to="courses.Section", - ), - ), - migrations.AlterField( - model_name="schedule", - name="semester", - field=models.CharField( - help_text="\nThe academic semester planned out by the schedule (of the form YYYYx where x is A\n[for spring], B [summer], or C [fall]), e.g. 2019C for fall 2019.\n", - max_length=5, - ), - ), - ] +# Generated by Django 3.1.1 on 2020-10-02 11:14 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("courses", "0030_auto_20201002_0714"), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("plan", "0002_auto_20191027_1510"), + ] + + operations = [ + migrations.AlterField( + model_name="schedule", + name="name", + field=models.CharField( + help_text="\nThe user's nick-name for the schedule. No two schedules can match in all of the fields\n`[name, semester, person]`\n", + max_length=255, + ), + ), + migrations.AlterField( + model_name="schedule", + name="person", + field=models.ForeignKey( + help_text="The person (user) to which the schedule belongs.", + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), + migrations.AlterField( + model_name="schedule", + name="sections", + field=models.ManyToManyField( + help_text="\nThe class sections which comprise the schedule. The semester of each of these sections is\nassumed to match the semester defined by the semester field below.\n", + to="courses.Section", + ), + ), + migrations.AlterField( + model_name="schedule", + name="semester", + field=models.CharField( + help_text="\nThe academic semester planned out by the schedule (of the form YYYYx where x is A\n[for spring], B [summer], or C [fall]), e.g. 
2019C for fall 2019.\n", + max_length=5, + ), + ), + ] diff --git a/backend/plan/migrations/0004_alter_schedule_semester.py b/backend/plan/migrations/0004_alter_schedule_semester.py index 91f69a130..300086d57 100644 --- a/backend/plan/migrations/0004_alter_schedule_semester.py +++ b/backend/plan/migrations/0004_alter_schedule_semester.py @@ -1,21 +1,21 @@ -# Generated by Django 3.2b1 on 2021-04-05 08:15 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("plan", "0003_auto_20201002_0714"), - ] - - operations = [ - migrations.AlterField( - model_name="schedule", - name="semester", - field=models.CharField( - help_text="\nThe academic semester planned out by the schedule (of the form YYYYx where x is A\n[for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.\n", - max_length=5, - ), - ), - ] +# Generated by Django 3.2b1 on 2021-04-05 08:15 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("plan", "0003_auto_20201002_0714"), + ] + + operations = [ + migrations.AlterField( + model_name="schedule", + name="semester", + field=models.CharField( + help_text="\nThe academic semester planned out by the schedule (of the form YYYYx where x is A\n[for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.\n", + max_length=5, + ), + ), + ] diff --git a/backend/plan/serializers.py b/backend/plan/serializers.py index b306f1ac9..d6d8a6bd7 100644 --- a/backend/plan/serializers.py +++ b/backend/plan/serializers.py @@ -1,3 +1,4 @@ +<<<<<<< HEAD from rest_framework import serializers from courses.serializers import PublicUserSerializer, SectionDetailSerializer @@ -34,3 +35,23 @@ class PrimaryScheduleSerializer(serializers.ModelSerializer): class Meta: model = PrimarySchedule fields = ["user", "user_id", "schedule_id", "schedule"] +======= +from rest_framework import serializers + +from courses.serializers import SectionDetailSerializer +from plan.models import Schedule + + +class ScheduleSerializer(serializers.ModelSerializer): + sections = SectionDetailSerializer( + many=True, read_only=False, help_text="The sections in the schedule.", required=True + ) + id = serializers.IntegerField( + read_only=False, required=False, help_text="The id of the schedule." + ) + + class Meta: + model = Schedule + exclude = ["person"] + extra_kwargs = {"semester": {"required": False}} +>>>>>>> 32c7da33 (Fixed linting) diff --git a/backend/plan/templates/plan_construction/index.html b/backend/plan/templates/plan_construction/index.html index 2b9c64bb5..f94cac31d 100644 --- a/backend/plan/templates/plan_construction/index.html +++ b/backend/plan/templates/plan_construction/index.html @@ -1,40 +1,40 @@ -{% load staticfiles %} - - - - - - - - Penn Course Search - - - - Penn Course Search -

    Check back soon!

    -

    Penn Course Search is undergoing a major renovation!

    -

    - In the meantime, check out Penn Course Review to browse courses.

    - -

    Made with by Penn Labs

    - - +{% load staticfiles %} + + + + + + + + Penn Course Search + + + + Penn Course Search +

    Check back soon!

    +

    Penn Course Search is undergoing a major renovation!

    +

    + In the meantime, check out Penn Course Review to browse courses.

    + +

    Made with by Penn Labs

    + + diff --git a/backend/plan/urls.py b/backend/plan/urls.py index a6e644b0f..ddfbe1cdc 100644 --- a/backend/plan/urls.py +++ b/backend/plan/urls.py @@ -21,4 +21,4 @@ path("", TemplateView.as_view(template_name="plan/build/index.html")), path("recommendations/", recommend_courses_view, name="recommend-courses"), path("", include(router.urls)), -] \ No newline at end of file +] diff --git a/backend/plan/views.py b/backend/plan/views.py index b5d48fd3f..263c9eb82 100644 --- a/backend/plan/views.py +++ b/backend/plan/views.py @@ -506,7 +506,7 @@ def post(self, request, *args, **kwargs): a = [] for section in schedule: a.append(section_hash[section]) - output.append(MiniSectionSerializer(a, many=True).data) + output.append(SectionDetailSerializer(a, many=True).data) if i >= 5: break diff --git a/backend/pyproject.toml b/backend/pyproject.toml index aa4949aa1..bedefbdeb 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -1,2 +1,2 @@ -[tool.black] -line-length = 100 +[tool.black] +line-length = 100 diff --git a/backend/review/admin.py b/backend/review/admin.py index dc1902225..0dd1ed329 100644 --- a/backend/review/admin.py +++ b/backend/review/admin.py @@ -1,31 +1,31 @@ -from django.contrib import admin - -from review.models import Review, ReviewBit - - -class ReviewAdmin(admin.ModelAdmin): - search_fields = ["section__full_code"] - - autocomplete_fields = ["section", "instructor"] - - list_select_related = [ - "section", - "section__course", - "instructor", - ] - - -class ReviewBitAdmin(admin.ModelAdmin): - search_fields = ["review__section__full_code"] - - autocomplete_fields = ["review"] - - list_select_related = [ - "review", - ] - - -# Register your models here. - -admin.site.register(Review, ReviewAdmin) -admin.site.register(ReviewBit, ReviewBitAdmin) +from django.contrib import admin + +from review.models import Review, ReviewBit + + +class ReviewAdmin(admin.ModelAdmin): + search_fields = ["section__full_code"] + + autocomplete_fields = ["section", "instructor"] + + list_select_related = [ + "section", + "section__course", + "instructor", + ] + + +class ReviewBitAdmin(admin.ModelAdmin): + search_fields = ["review__section__full_code"] + + autocomplete_fields = ["review"] + + list_select_related = [ + "review", + ] + + +# Register your models here. + +admin.site.register(Review, ReviewAdmin) +admin.site.register(ReviewBit, ReviewBitAdmin) diff --git a/backend/review/annotations.py b/backend/review/annotations.py index 81d3b9525..c10dc770c 100644 --- a/backend/review/annotations.py +++ b/backend/review/annotations.py @@ -1,297 +1,297 @@ -from django.db.models import ( - Avg, - Case, - Count, - FloatField, - IntegerField, - Max, - OuterRef, - Q, - Subquery, - Value, - When, -) - -from review.models import ALL_FIELD_SLUGS, Review, ReviewBit - - -""" -Queryset annotations -==================== - -Actual review data is stored in an Entity-Attribute-Value (EAV) format in the ReviewBit -model. This means that getting associated review data for a queryset requires a few -JOINs under the hood. Doing aggregations on these ReviewBits also requires the explicit -use of subqueries. You can read about Subqueries here: -https://docs.djangoproject.com/en/2.2/ref/models/expressions/#subquery-expressions. - -In short, however, subqueries allow us to query for review data from the ReviewBit table -*inside* any other queryset to use in aggregations and annotations. 
We can filter down -the ReviewBits that we want to aggregate based on their field name, along with any other -Django filter query that can be different *per row* in the outer query. To match on fields -from the outer query, we use the OuterRef() expressions. - -This allows us to have the database do all of the work of averaging PCR data. Were we to do -this aggregation all in Python code, it would likely take many more queries (read: round-trips to -the DB), be *much* slower, and require cacheing. -""" - - -def review_averages( - queryset, - reviewbit_subfilters, - section_subfilters, - fields=None, - prefix="", - semester_aggregations=False, - extra_metrics=True, -): - """ - Annotate the queryset with the average of all ReviewBits matching the given subfilters. - :param queryset: Queryset to annotate with averages. - :param reviewbit_subfilters: `Q()` expression to filter down the ReviewBits used in each - individual aggregation. Use OuterRef() to refer to values in the outer queryset. - :param: section_subfilters: The same as reviewbit_subfilters, but for filtering Sections. - :param fields: the ReviewBit fields to aggregate. if None, defaults to the four fields - used in PCP. - :param prefix: prefix for fields in annotated queryset. Useful when applying review_averages - multiple times to the same queryset with different subfilters. - :param semester_aggregations: option to annotate additional semester aggregations for the - semester returned by the subfilters (only useful if subfilters filter down to one semester), - as well as the count of the number of semesters included in the queryset's annotations. - :param: extra_metrics: option to include extra metrics in PCR aggregations; final enrollment, - percent of add/drop period open, average number of openings during add/drop, - and percentage of sections filled in advance registration - """ - from courses.models import Section, StatusUpdate - from review.views import extra_metrics_section_filters_pcr - - # ^ imported here to avoid circular imports - - if fields is None: - fields = ["course_quality", "difficulty", "instructor_quality", "work_required"] - - class PercentOpenSubqueryAvg(Subquery): - template = "(SELECT AVG(percent_open) FROM (%(subquery)s) percent_open_avg_view)" - - class NumOpeningsSubqueryAvg(Subquery): - template = "(SELECT AVG(num_openings) FROM (%(subquery)s) num_openings_view)" - - class FilledInAdvRegAvg(Subquery): - template = "(SELECT AVG(filled_in_adv_reg) FROM (%(subquery)s) filled_in_adv_reg_view)" - - queryset = queryset.annotate( - **{ - **{ - (prefix + field): Subquery( - ReviewBit.objects.filter( - reviewbit_subfilters, - field=field, - review__responses__gt=0, - ) - .values("field") - .order_by() - .annotate(avg=Avg("average")) - .values("avg")[:1], - output_field=FloatField(), - ) - for field in fields - }, - **( - { - (prefix + "final_enrollment"): Subquery( - ReviewBit.objects.filter(reviewbit_subfilters, review__responses__gt=0) - .values("review_id", "review__enrollment", "review__section__capacity") - .order_by() - .distinct() - .annotate(common=Value(1)) - .values("common") - .annotate(avg_final_enrollment=Avg("review__enrollment")) - .values("avg_final_enrollment")[:1], - output_field=FloatField(), - ), - (prefix + "percent_open"): PercentOpenSubqueryAvg( - Section.objects.filter( - extra_metrics_section_filters_pcr() & section_subfilters - ) - .order_by() - .distinct(), - output_field=FloatField(), - ), - (prefix + "num_openings"): NumOpeningsSubqueryAvg( - Section.objects.filter( - 
extra_metrics_section_filters_pcr() & section_subfilters - ) - .order_by() - .distinct() - .annotate( - num_openings=Subquery( - StatusUpdate.objects.filter( - in_add_drop_period=True, - new_status="O", - section_id=OuterRef("id"), - ) - .annotate(common=Value(1)) - .values("common") - .order_by() - .annotate(count=Count("*")) - .values("count")[:1], - output_field=IntegerField(), - ) - ), - output_field=FloatField(), - ), - (prefix + "filled_in_adv_reg"): FilledInAdvRegAvg( - Section.objects.filter( - extra_metrics_section_filters_pcr() & section_subfilters - ) - .order_by() - .distinct() - .annotate( - filled_in_adv_reg=Subquery( - StatusUpdate.objects.filter( - in_add_drop_period=False, - percent_through_add_drop_period=0, - section_id=OuterRef("id"), - ) - .order_by("-created_at") - .annotate( - filled=Case( - When( - Q(new_status="C"), - then=Value(1.0), - ), - When( - Q(new_status="O"), - then=Value(0.0), - ), - output_field=FloatField(), - ) - ) - .values("filled")[:1], - output_field=FloatField(), - ) - ), - output_field=FloatField(), - ), - } - if extra_metrics - else dict() - ), - } - ) - if semester_aggregations: - queryset = queryset.annotate( - **{ - (prefix + "semester_calc"): Subquery( - Section.objects.filter(section_subfilters) - .values("course__semester") - .order_by("-course__semester")[:1] - ), - (prefix + "semester_count"): Subquery( - Section.objects.filter(section_subfilters) - .annotate(common=Value(1)) - .values("common") - .annotate(count=Count("course__semester", distinct=True)) - .values("count")[:1] - ), - } - ) - return queryset - - -def annotate_with_matching_reviews( - qs, - match_review_on, - match_section_on, - most_recent=False, - fields=None, - prefix="", - extra_metrics=True, -): - """ - Annotate each element the passed-in queryset with a subset of all review averages. - :param qs: queryset to annotate. - :param match_review_on: `Q()` expression representing a filtered subset of reviews to aggregate - for each row. Use `OuterRef(OuterRef(''))` to refer to on the row - in the queryset. - :param: match_section_on: `Q()` expression representing a filtered subset of sections to group - for each row. This should essentially be the same as match_review_on, but translated - to Section filters. Use `OuterRef(OuterRef(''))` to refer to on the row - in the queryset. - :param most_recent: If `True`, only aggregate results for the most recent semester. - :param fields: list of fields to aggregate. - :param prefix: prefix of annotated fields on the queryset. - :param: extra_metrics: option to include extra metrics in PCR aggregations; final enrollment, - percent of add/drop period open, average number of openings during add/drop, - and percentage of sections filled in advance registration - """ - - from courses.models import Section # avoid circular imports - - if fields is None: - fields = ALL_FIELD_SLUGS - - matching_reviews = Review.objects.filter(match_review_on, responses__gt=0) - reviewbit_subfilters = Q(review_id__in=Subquery(matching_reviews.values("id"))) - matching_sections = Section.objects.filter(match_section_on) - section_subfilters = Q(id__in=Subquery(matching_sections.values("id"))) - if most_recent: - # Filter the queryset to include only rows from the most recent semester. 
- recent_sem_subquery = Subquery( - matching_reviews.annotate(common=Value(1)) - .values("common") - .annotate(max_semester=Max("section__course__semester")) - .values("max_semester")[:1] - ) - reviewbit_subfilters &= Q(review__section__course__semester=recent_sem_subquery) - section_subfilters &= Q(course__semester=recent_sem_subquery) - - return review_averages( - qs, - reviewbit_subfilters, - section_subfilters, - fields, - prefix, - semester_aggregations=True, - extra_metrics=extra_metrics, - ) - - -def annotate_average_and_recent( - qs, match_review_on, match_section_on, extra_metrics=True, fields=None -): - """ - Annotate queryset with both all reviews and recent reviews. - :param qs: Queryset to annotate. - :param match_review_on: `Q()` expression representing a filtered subset of reviews to aggregate - for each row. Use `OuterRef(OuterRef(''))` to refer to on the row - in the queryset. - :param: match_section_on: `Q()` expression representing a filtered subset of sections to group - for each row. This should essentially be the same as match_review_on, but translated - to Section filters. Use `OuterRef(OuterRef(''))` to refer to on the row - in the queryset. - :param: extra_metrics: option to include extra metrics in PCR aggregations; final enrollment, - percent of add/drop period open, average number of openings during add/drop, - and percentage of sections filled in advance registration - :param: fields: option to specify the fields averaged by the query - """ - qs = annotate_with_matching_reviews( - qs, - match_review_on, - match_section_on, - most_recent=False, - prefix="average_", - extra_metrics=extra_metrics, - fields=fields, - ) - qs = annotate_with_matching_reviews( - qs, - match_review_on, - match_section_on, - most_recent=True, - prefix="recent_", - extra_metrics=extra_metrics, - fields=fields, - ) - return qs +from django.db.models import ( + Avg, + Case, + Count, + FloatField, + IntegerField, + Max, + OuterRef, + Q, + Subquery, + Value, + When, +) + +from review.models import ALL_FIELD_SLUGS, Review, ReviewBit + + +""" +Queryset annotations +==================== + +Actual review data is stored in an Entity-Attribute-Value (EAV) format in the ReviewBit +model. This means that getting associated review data for a queryset requires a few +JOINs under the hood. Doing aggregations on these ReviewBits also requires the explicit +use of subqueries. You can read about Subqueries here: +https://docs.djangoproject.com/en/2.2/ref/models/expressions/#subquery-expressions. + +In short, however, subqueries allow us to query for review data from the ReviewBit table +*inside* any other queryset to use in aggregations and annotations. We can filter down +the ReviewBits that we want to aggregate based on their field name, along with any other +Django filter query that can be different *per row* in the outer query. To match on fields +from the outer query, we use the OuterRef() expressions. + +This allows us to have the database do all of the work of averaging PCR data. Were we to do +this aggregation all in Python code, it would likely take many more queries (read: round-trips to +the DB), be *much* slower, and require cacheing. +""" + + +def review_averages( + queryset, + reviewbit_subfilters, + section_subfilters, + fields=None, + prefix="", + semester_aggregations=False, + extra_metrics=True, +): + """ + Annotate the queryset with the average of all ReviewBits matching the given subfilters. + :param queryset: Queryset to annotate with averages. 
+ :param reviewbit_subfilters: `Q()` expression to filter down the ReviewBits used in each + individual aggregation. Use OuterRef() to refer to values in the outer queryset. + :param: section_subfilters: The same as reviewbit_subfilters, but for filtering Sections. + :param fields: the ReviewBit fields to aggregate. if None, defaults to the four fields + used in PCP. + :param prefix: prefix for fields in annotated queryset. Useful when applying review_averages + multiple times to the same queryset with different subfilters. + :param semester_aggregations: option to annotate additional semester aggregations for the + semester returned by the subfilters (only useful if subfilters filter down to one semester), + as well as the count of the number of semesters included in the queryset's annotations. + :param: extra_metrics: option to include extra metrics in PCR aggregations; final enrollment, + percent of add/drop period open, average number of openings during add/drop, + and percentage of sections filled in advance registration + """ + from courses.models import Section, StatusUpdate + from review.views import extra_metrics_section_filters_pcr + + # ^ imported here to avoid circular imports + + if fields is None: + fields = ["course_quality", "difficulty", "instructor_quality", "work_required"] + + class PercentOpenSubqueryAvg(Subquery): + template = "(SELECT AVG(percent_open) FROM (%(subquery)s) percent_open_avg_view)" + + class NumOpeningsSubqueryAvg(Subquery): + template = "(SELECT AVG(num_openings) FROM (%(subquery)s) num_openings_view)" + + class FilledInAdvRegAvg(Subquery): + template = "(SELECT AVG(filled_in_adv_reg) FROM (%(subquery)s) filled_in_adv_reg_view)" + + queryset = queryset.annotate( + **{ + **{ + (prefix + field): Subquery( + ReviewBit.objects.filter( + reviewbit_subfilters, + field=field, + review__responses__gt=0, + ) + .values("field") + .order_by() + .annotate(avg=Avg("average")) + .values("avg")[:1], + output_field=FloatField(), + ) + for field in fields + }, + **( + { + (prefix + "final_enrollment"): Subquery( + ReviewBit.objects.filter(reviewbit_subfilters, review__responses__gt=0) + .values("review_id", "review__enrollment", "review__section__capacity") + .order_by() + .distinct() + .annotate(common=Value(1)) + .values("common") + .annotate(avg_final_enrollment=Avg("review__enrollment")) + .values("avg_final_enrollment")[:1], + output_field=FloatField(), + ), + (prefix + "percent_open"): PercentOpenSubqueryAvg( + Section.objects.filter( + extra_metrics_section_filters_pcr() & section_subfilters + ) + .order_by() + .distinct(), + output_field=FloatField(), + ), + (prefix + "num_openings"): NumOpeningsSubqueryAvg( + Section.objects.filter( + extra_metrics_section_filters_pcr() & section_subfilters + ) + .order_by() + .distinct() + .annotate( + num_openings=Subquery( + StatusUpdate.objects.filter( + in_add_drop_period=True, + new_status="O", + section_id=OuterRef("id"), + ) + .annotate(common=Value(1)) + .values("common") + .order_by() + .annotate(count=Count("*")) + .values("count")[:1], + output_field=IntegerField(), + ) + ), + output_field=FloatField(), + ), + (prefix + "filled_in_adv_reg"): FilledInAdvRegAvg( + Section.objects.filter( + extra_metrics_section_filters_pcr() & section_subfilters + ) + .order_by() + .distinct() + .annotate( + filled_in_adv_reg=Subquery( + StatusUpdate.objects.filter( + in_add_drop_period=False, + percent_through_add_drop_period=0, + section_id=OuterRef("id"), + ) + .order_by("-created_at") + .annotate( + filled=Case( + When( + 
Q(new_status="C"), + then=Value(1.0), + ), + When( + Q(new_status="O"), + then=Value(0.0), + ), + output_field=FloatField(), + ) + ) + .values("filled")[:1], + output_field=FloatField(), + ) + ), + output_field=FloatField(), + ), + } + if extra_metrics + else dict() + ), + } + ) + if semester_aggregations: + queryset = queryset.annotate( + **{ + (prefix + "semester_calc"): Subquery( + Section.objects.filter(section_subfilters) + .values("course__semester") + .order_by("-course__semester")[:1] + ), + (prefix + "semester_count"): Subquery( + Section.objects.filter(section_subfilters) + .annotate(common=Value(1)) + .values("common") + .annotate(count=Count("course__semester", distinct=True)) + .values("count")[:1] + ), + } + ) + return queryset + + +def annotate_with_matching_reviews( + qs, + match_review_on, + match_section_on, + most_recent=False, + fields=None, + prefix="", + extra_metrics=True, +): + """ + Annotate each element the passed-in queryset with a subset of all review averages. + :param qs: queryset to annotate. + :param match_review_on: `Q()` expression representing a filtered subset of reviews to aggregate + for each row. Use `OuterRef(OuterRef(''))` to refer to on the row + in the queryset. + :param: match_section_on: `Q()` expression representing a filtered subset of sections to group + for each row. This should essentially be the same as match_review_on, but translated + to Section filters. Use `OuterRef(OuterRef(''))` to refer to on the row + in the queryset. + :param most_recent: If `True`, only aggregate results for the most recent semester. + :param fields: list of fields to aggregate. + :param prefix: prefix of annotated fields on the queryset. + :param: extra_metrics: option to include extra metrics in PCR aggregations; final enrollment, + percent of add/drop period open, average number of openings during add/drop, + and percentage of sections filled in advance registration + """ + + from courses.models import Section # avoid circular imports + + if fields is None: + fields = ALL_FIELD_SLUGS + + matching_reviews = Review.objects.filter(match_review_on, responses__gt=0) + reviewbit_subfilters = Q(review_id__in=Subquery(matching_reviews.values("id"))) + matching_sections = Section.objects.filter(match_section_on) + section_subfilters = Q(id__in=Subquery(matching_sections.values("id"))) + if most_recent: + # Filter the queryset to include only rows from the most recent semester. + recent_sem_subquery = Subquery( + matching_reviews.annotate(common=Value(1)) + .values("common") + .annotate(max_semester=Max("section__course__semester")) + .values("max_semester")[:1] + ) + reviewbit_subfilters &= Q(review__section__course__semester=recent_sem_subquery) + section_subfilters &= Q(course__semester=recent_sem_subquery) + + return review_averages( + qs, + reviewbit_subfilters, + section_subfilters, + fields, + prefix, + semester_aggregations=True, + extra_metrics=extra_metrics, + ) + + +def annotate_average_and_recent( + qs, match_review_on, match_section_on, extra_metrics=True, fields=None +): + """ + Annotate queryset with both all reviews and recent reviews. + :param qs: Queryset to annotate. + :param match_review_on: `Q()` expression representing a filtered subset of reviews to aggregate + for each row. Use `OuterRef(OuterRef(''))` to refer to on the row + in the queryset. + :param: match_section_on: `Q()` expression representing a filtered subset of sections to group + for each row. This should essentially be the same as match_review_on, but translated + to Section filters. 
Use `OuterRef(OuterRef(''))` to refer to on the row + in the queryset. + :param: extra_metrics: option to include extra metrics in PCR aggregations; final enrollment, + percent of add/drop period open, average number of openings during add/drop, + and percentage of sections filled in advance registration + :param: fields: option to specify the fields averaged by the query + """ + qs = annotate_with_matching_reviews( + qs, + match_review_on, + match_section_on, + most_recent=False, + prefix="average_", + extra_metrics=extra_metrics, + fields=fields, + ) + qs = annotate_with_matching_reviews( + qs, + match_review_on, + match_section_on, + most_recent=True, + prefix="recent_", + extra_metrics=extra_metrics, + fields=fields, + ) + return qs diff --git a/backend/review/apps.py b/backend/review/apps.py index 88f7453cd..899bab60e 100644 --- a/backend/review/apps.py +++ b/backend/review/apps.py @@ -1,5 +1,5 @@ -from django.apps import AppConfig - - -class ReviewConfig(AppConfig): - name = "review" +from django.apps import AppConfig + + +class ReviewConfig(AppConfig): + name = "review" diff --git a/backend/review/documentation.py b/backend/review/documentation.py index f56c06b20..d06935802 100644 --- a/backend/review/documentation.py +++ b/backend/review/documentation.py @@ -1,560 +1,560 @@ -from courses.models import Section -from review.models import REVIEW_BIT_LABEL -from review.util import to_r_camel - - -ACTIVITY_CHOICES = dict(Section.ACTIVITY_CHOICES) -ACTIVITY_CHOICES["***"] = None - -# Unless you are looking to modify documentation, it is probably easier to view this -# documentation at localhost:8000/api/documentation/ in the PCR section, rather than -# reading through this file - -EXPANDED_REVIEW_BIT_LABEL = tuple( - list(REVIEW_BIT_LABEL) - + [ - ( - "RFINALENROLLMENT", - "Final Enrollment at the End of the Semester", - "final_enrollment", - ), - ("RPERCENTOPEN", "Percent of Add/Drop Period Open", "percent_open"), - ("RNUMOPENINGS", "Number of Openings During Add/Drop", "num_openings"), - ( - "RFILLEDINADVREG", - "Percent of Sections Completely Filled During Advance Registration", - "filled_in_adv_reg", - ), - ] -) - -course_review_aggregation_schema = { - # This dict contains the schema of the "_reviews" fields returned in course review views - **{ - "rSemesterCalc": { - "type": "string", - "description": "The oldest semester included in these review aggregations (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019. This field will not be missing.", # noqa E501 - }, - "rSemesterCount": { - "type": "integer", - "description": "The number of semesters included in these review aggregations. This field will not be missing.", # noqa E501 - }, - }, - **{ - to_r_camel(bit_label[2]): {"type": "number", "description": f"Average {bit_label[1]}"} - for bit_label in EXPANDED_REVIEW_BIT_LABEL - }, -} - -plots_schema = { - "pca_demand_plot": { - "type": "array", - "description": ( - "The plot of average relative pca demand for sections (excluding non-primary " - "crosslisted sections) of this topic over time during historical add/drop periods. " - "It is an array of pairs (2-length arrays), " - "with each pair of the form `[percent_through, relative_pca_demand]`. The " - "`percent_through` value is a float in the range [0,1], and represents percentage " - "through the add/drop period. 
The `relative_pca_demand` value is a float in the " - "range [0,1], and represents the average of the relative pca demands of all primary " - "sections of this topic, at that point in time. The first item of each pair " - "should be plotted on the x-axis and the second item should be plotted on the " - "y-axis. Note that floating point imprecision may cause " - "some of the percent_through values to be slightly off (like 0.35000000000000003), " - "so you should round if you display these data as strings anywhere. " - "This field will not be missing. However, this field will be null if there are no " - "underlying sections with [no permit required for registration] and [valid data]." - ), - }, - "pca_demand_plot_since_semester": { - "type": "string", - "description": ( - "The earliest semester from which data is taken for the PCA demand plot given in the " - "`pca_demand_plot` field." - ), - }, - "pca_demand_plot_num_semesters": { - "type": "integer", - "description": ( - "The number of semesters from which data is taken for the PCA demand plot given in the " - "`pca_demand_plot` field." - ), - }, - "percent_open_plot": { - "type": "array", - "description": ( - "The plot of percentage of sections (excluding non-primary crosslisted sections) of " - "this topic that were open at each point in time during historical add/drop periods. " - "It is an array of pairs (2-length arrays), with each pair of the form " - "`[percent_through, percent_open]`. The `percent_through` value is a float in the " - "range [0,1], and represents percentage through the add/drop period. " - "The `percent_open` value is a float in the range [0,1], and represents " - "the percent of sections of this course that were open (excluding non-primary " - "crosslisted sections), at that point in time. The first item of each pair " - "should be plotted on the 'x-axis' and the second item should be plotted on the " - "'y-axis'. Note that floating point imprecision may cause " - "some values to be slightly off (like 0.35000000000000003), " - "so you should round if you display these data as strings anywhere. " - "This field will not be missing. However, this field will be null if there are no " - "underlying sections with [no permit required for registration] and [valid data]." - ), - }, - "percent_open_plot_since_semester": { - "type": "string", - "description": ( - "The earliest semester from which data is taken for the percent open plot given in the " - "`percent_open_plot` field." - ), - }, - "percent_open_plot_num_semesters": { - "type": "integer", - "description": ( - "The number of semesters from which data is taken for the percent open plot given in " - "the `percent_open_plot` field." - ), - }, -} - -instructor_review_aggregation_schema = { - # This dict contains the schema of the "_reviews" fields returned in the - # course-specific instructor review aggregation object within the response returned by - # course review views - to_r_camel(bit_label[2]): {"type": "number", "description": f"Average {bit_label[1]}"} - for bit_label in EXPANDED_REVIEW_BIT_LABEL -} - -course_reviews_response_schema = { - "course-reviews": { - "GET": { - 200: { - "properties": { - "code": { - "type": "string", - "description": "The dash-joined department and most-recent code of this course, e.g. 
`CIS-1200`.", # noqa E501 - }, - "aliases": { - "type": "array", - "description": "A list of courses that are crosslisted with this course (each represented by its dash-joined department and code).", # noqa E501 - "items": { - "type": "string", - }, - }, - "historical_codes": { - "description": "The historical lineage of primary course codes that have represented this course (from most recent to oldest).", # noqa E501 - "items": { - "type": "object", - "properties": { - "full_code": { - "type": "string", - "description": "The dash-joined department and course code.", - }, - "branched_from": { - "type": "boolean", - "description": "A flag indicating whether this code was branched into multiple new codes (e.g. in fall 2022). In these cases we should link to the old course on PCR because its reviews will not be included on the same page (unlike linear links).", # noqa E501 - }, - "semester": { - "type": "string", - "description": "The most recent semester this code was used (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2022C` for fall 2022.", # noqa E501 - }, - }, - }, - }, - "name": { - "type": "string", - "description": "The title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.", # noqa E501 - }, - "description": { - "type": "string", - "description": "The description of the course, e.g. 'A fast-paced introduction to the fundamental concepts of programming... [etc.]' for CIS-120.", # noqa E501 - }, - "latest_semester": { - "type": "string", - "description": "The most recent semester this course was offered (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2022C` for fall 2022.", # noqa E501 - }, - "registration_metrics": { - "type": "boolean", - "description": "True if this course has registration metrics that you can access via the Retrieve Plots route.", # noqa E501 - }, - "num_sections": { - "type": "integer", - "description": "The number of sections belonging to this course (excluding non-primary crosslisted sections) across all semesters (that we have data for).", # noqa E501 - }, - "num_sections_recent": { - "type": "integer", - "description": "The number of sections belonging to this course (excluding non-primary crosslisted sections) in its most recent semester.", # noqa E501 - }, - "average_reviews": { - "type": "object", - "description": "This course's average reviews across all of its sections (excluding non-primary crosslisted sections) from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": course_review_aggregation_schema, - }, - "recent_reviews": { - "type": "object", - "description": "This course's average reviews across all of its sections (excluding non-primary crosslisted sections) from the most recent semester. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": course_review_aggregation_schema, - }, - "num_semesters": { - "type": "integer", - "description": "The number of semesters for which this course has been taught (that we have data for).", # noqa E501 - }, - "instructors": { - "type": "object", - "description": "Reviews for this course broken down by instructor. 
Note that each key in this subdictionary is a stringified instructor id (indicated by `STRINGIFIED_INSTRUCTOR_ID`; this is not an actual key but a placeholder for potentially many keys).", # noqa E501 - "properties": { - "STRINGIFIED_INSTRUCTOR_ID": { - "type": "object", - "description": "This key `STRINGIFIED_INSTRUCTOR_ID` is a placeholder for potentially many stringified instructor id keys.", # noqa E501 - "properties": { - "id": { - "type": "integer", - "description": "The integer id of this instructor; note that this is just the int version of this subdictionary's key in the parent dictionary.", # noqa E501 - }, - "average_reviews": { - "type": "object", - "description": "This instructor's average reviews across all of the sections of this course that he/she has taught. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": instructor_review_aggregation_schema, - }, - "recent_reviews": { - "type": "object", - "description": "This instructor's average reviews across all of the sections of this course that he/she has taught in his/her most recent semester teaching this course that has review data. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": instructor_review_aggregation_schema, - }, - "latest_semester": { - "type": "string", - "description": "The most recent semester this instructor taught this course (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.", # noqa E501 - }, - "num_semesters": { - "type": "integer", - "description": "The number of semesters that this instructor has taught this course (that we have data for).", # noqa E501 - }, - "name": { - "type": "string", - "description": "The instructor's name", - }, - }, - } - }, - }, - } - }, - } - }, -} - -course_plots_response_schema = { - "course-plots": { - "GET": { - 200: { - "properties": { - "code": { - "type": "string", - "description": "The dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 - }, - "current_add_drop_period": { - "type": "object", - "description": "The start and end dates of the upcoming/current semester's add/drop period", # noqa E501 - "properties": { - "start": { - "type": "string", - "description": "A string datetime representation of the start of the current/upcoming add/drop period.", # noqa E501 - }, - "end": { - "type": "string", - "description": "A string datetime representation of the end of the current/upcoming add/drop period.", # noqa E501 - }, - }, - }, - "average_plots": { - "type": "object", - "description": "This course's plots (PCA demand, percent sections open), averaged across all of its sections from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": course_review_aggregation_schema, - }, - "recent_plots": { - "type": "object", - "description": "This course's plots (PCA demand, percent sections open), averaged across all of its sections from the most recent semester before the current semester. 
Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": course_review_aggregation_schema, - }, - } - }, - } - }, -} - -instructor_reviews_response_schema = { - "instructor-reviews": { - "GET": { - 200: { - "properties": { - "name": {"type": "string", "description": "The full name of the instructor."}, - "num_sections_recent": { - "type": "integer", - "description": "The number of sections this instructor taught in his/her most recent semester teaching.", # noqa E501 - }, - "num_sections": { - "type": "integer", - "description": "The number of sections this instructor has taught (that we have data for).", # noqa E501 - }, - "average_reviews": { - "type": "object", - "description": "This instructor's average reviews across all of his/her taught sections from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": instructor_review_aggregation_schema, - }, - "recent_reviews": { - "type": "object", - "description": "This instructor's average reviews across all of his/her taught sections from only his/her most recent semester teaching that has review data. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": instructor_review_aggregation_schema, - }, - "num_semesters": { - "type": "integer", - "description": "The number of semesters this instructor has taught (that we have data for).", # noqa E501 - }, - "courses": { - "type": "object", - "description": "Reviews for this instructor broken down by the courses he/she has taught. Note that each key in this subdictionary is the course full code (indicated by `COURSE_FULL_CODE`; this is not an actual key but a placeholder for potentially many keys).", # noqa E501 - "properties": { - "COURSE_FULL_CODE": { - "type": "object", - "description": "This key `COURSE_FULL_CODE` is a placeholder for potentially many course full code keys. Each full code is the dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 - "properties": { - "full_code": { - "type": "string", - "description": "The dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 - }, - "average_reviews": { - "type": "object", - "description": "This course's average reviews across all of its sections taught by this instructor from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": course_review_aggregation_schema, - }, - "recent_reviews": { - "type": "object", - "description": "This course's average reviews across all of its sections taught by this instructor from the most recent semester. 
Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": course_review_aggregation_schema, - }, - "latest_semester": { - "type": "string", - "description": "The most recent semester this course was taught by this instructor (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.", # noqa E501 - }, - "num_semesters": { - "type": "integer", - "description": "The number of semesters in which this course was taught by this instructor (that we have data for).", # noqa E501 - }, - "code": { - "type": "string", - "description": "Same as `full_code`.", - }, - "name": { - "type": "string", - "description": "The title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.", # noqa E501 - }, - }, - } - }, - }, - } - } - } - } -} - -autocomplete_response_schema = { - "review-autocomplete": { - "GET": { - 200: { - "properties": { - "courses": { - "type": "array", - "description": "Data on courses for autocomplete.", - "items": { - "type": "object", - "properties": { - "title": { - "type": "string", - "description": "The dash-joined department and code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 - }, - "desc": { - "type": "string", - "description": "The title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.", # noqa E501 - }, - "url": { - "type": "url", - "description": "The relative route through which this course's reviews can be accessed (a prefix of `/api/review/` is assumed).", # noqa E501 - }, - }, - }, - }, - "departments": { - "type": "array", - "description": "Data on departments for autocomplete.", - "items": { - "type": "object", - "properties": { - "title": { - "type": "string", - "description": "The string department code, e.g. `CIS` for the CIS department.", # noqa E501 - }, - "desc": { - "type": "string", - "description": "The name of the department, e.g. 'Computer and Information Sci' for the CIS department.", # noqa E501 - }, - "url": { - "type": "url", - "description": "The relative route through which this department's reviews can be accessed (a prefix of `/api/review/` is assumed).", # noqa E501 - }, - }, - }, - }, - "instructors": { - "type": "array", - "description": "Data on instructors for autocomplete.", - "items": { - "type": "object", - "properties": { - "title": { - "type": "string", - "description": "The full name of the instructor.", - }, - "desc": { - "type": "string", - "description": "A comma-separated string list of department codes to which this instructor belongs.", # noqa E501 - }, - "url": { - "type": "url", - "description": "The relative route through which this instructor's reviews can be accessed (a prefix of `/api/review/` is assumed).", # noqa E501 - }, - }, - }, - }, - } - } - } - } -} - -department_reviews_response_schema = { - "department-reviews": { - "GET": { - 200: { - "properties": { - "code": { - "type": "string", - "description": "The department code, e.g. `CIS` for the CIS department.", - }, - "name": { - "type": "string", - "description": "The name of the department, e.g. 'Computer and Information Sci' for the CIS department.", # noqa E501 - }, - "courses": { - "type": "object", - "description": "Reviews for this department broken down by its courses. 
Note that each key in this subdictionary is the course full code (indicated by `COURSE_FULL_CODE`; this is not an actual key but a placeholder for potentially many keys).", # noqa E501 - "properties": { - "COURSE_FULL_CODE": { - "type": "object", - "description": "This key `COURSE_FULL_CODE` is a placeholder for potentially many course full code keys. Each full code is the dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 - "properties": { - "id": { - "type": "string", - "description": "The dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 - }, - "average_reviews": { - "type": "object", - "description": "This course's average reviews across all of its sections from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": course_review_aggregation_schema, - }, - "recent_reviews": { - "type": "object", - "description": "This course's average reviews across all of its sections from the most recent semester. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 - "properties": course_review_aggregation_schema, - }, - "latest_semester": { - "type": "string", - "description": "The most recent semester this course was taught (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.", # noqa E501 - }, - "num_semesters": { - "type": "integer", - "description": "The number of semesters this class has been taught (that we have data for).", # noqa E501 - }, - "code": {"type": "string", "description": "Same as `id`."}, - "name": { - "type": "string", - "description": "The title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.", # noqa E501 - }, - }, - } - }, - }, - } - } - } - } -} - -instructor_for_course_reviews_response_schema = { - "course-history": { - "GET": { - 200: { - "properties": { - "instructor": { - "type": "object", - "description": "Information about the instructor.", - "properties": { - "id": { - "type": "integer", - "description": "The integer id of the instructor.", - }, - "name": { - "type": "string", - "description": "The full name of the instructor.", - }, - }, - }, - "course_code": { - "type": "string", - "description": "The dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 - }, - "sections": { - "type": "array", - "description": "The sections of this course taught by this instructor.", - "items": { - "type": "object", - "properties": { - "course_code": { - "type": "string", - "description": "The dash-joined department and course code of the section, `CIS-120` for CIS-120-001.", # noqa E501 - }, - "course_name": { - "type": "string", - "description": "The title of the section's course, e.g. 'Programming Languages and Techniques I' for CIS-120-001.", # noqa E501 - }, - "activity": { - "type": "string", - "description": f"The activity of the section. 
Options: `{str(list(dict(ACTIVITY_CHOICES).values()))}`", # noqa E501 - }, - "semester": { - "type": "string", - "description": "The semester this section was taught (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.", # noqa E501 - }, - "forms_returned": { - "type": "integer", - "description": "The number of review responses collected for this section (or null if this section does not have review data).", # noqa E501 - }, - "forms_produced": { - "type": "integer", - "description": "The final enrollment of this section (or null if this section does not have review data).", # noqa E501 - }, - "ratings": { - "type": "object", - "description": "The reviews for this section.", - "properties": course_review_aggregation_schema, - }, - "comments": { - "type": "string", - "description": "A textual description of the section, as well as common sentiment about it from reviews.", # noqa E501 - }, - }, - }, - }, - } - } - } - } -} +from courses.models import Section +from review.models import REVIEW_BIT_LABEL +from review.util import to_r_camel + + +ACTIVITY_CHOICES = dict(Section.ACTIVITY_CHOICES) +ACTIVITY_CHOICES["***"] = None + +# Unless you are looking to modify documentation, it is probably easier to view this +# documentation at localhost:8000/api/documentation/ in the PCR section, rather than +# reading through this file + +EXPANDED_REVIEW_BIT_LABEL = tuple( + list(REVIEW_BIT_LABEL) + + [ + ( + "RFINALENROLLMENT", + "Final Enrollment at the End of the Semester", + "final_enrollment", + ), + ("RPERCENTOPEN", "Percent of Add/Drop Period Open", "percent_open"), + ("RNUMOPENINGS", "Number of Openings During Add/Drop", "num_openings"), + ( + "RFILLEDINADVREG", + "Percent of Sections Completely Filled During Advance Registration", + "filled_in_adv_reg", + ), + ] +) + +course_review_aggregation_schema = { + # This dict contains the schema of the "_reviews" fields returned in course review views + **{ + "rSemesterCalc": { + "type": "string", + "description": "The oldest semester included in these review aggregations (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019. This field will not be missing.", # noqa E501 + }, + "rSemesterCount": { + "type": "integer", + "description": "The number of semesters included in these review aggregations. This field will not be missing.", # noqa E501 + }, + }, + **{ + to_r_camel(bit_label[2]): {"type": "number", "description": f"Average {bit_label[1]}"} + for bit_label in EXPANDED_REVIEW_BIT_LABEL + }, +} + +plots_schema = { + "pca_demand_plot": { + "type": "array", + "description": ( + "The plot of average relative pca demand for sections (excluding non-primary " + "crosslisted sections) of this topic over time during historical add/drop periods. " + "It is an array of pairs (2-length arrays), " + "with each pair of the form `[percent_through, relative_pca_demand]`. The " + "`percent_through` value is a float in the range [0,1], and represents percentage " + "through the add/drop period. The `relative_pca_demand` value is a float in the " + "range [0,1], and represents the average of the relative pca demands of all primary " + "sections of this topic, at that point in time. The first item of each pair " + "should be plotted on the x-axis and the second item should be plotted on the " + "y-axis. 
Note that floating point imprecision may cause " + "some of the percent_through values to be slightly off (like 0.35000000000000003), " + "so you should round if you display these data as strings anywhere. " + "This field will not be missing. However, this field will be null if there are no " + "underlying sections with [no permit required for registration] and [valid data]." + ), + }, + "pca_demand_plot_since_semester": { + "type": "string", + "description": ( + "The earliest semester from which data is taken for the PCA demand plot given in the " + "`pca_demand_plot` field." + ), + }, + "pca_demand_plot_num_semesters": { + "type": "integer", + "description": ( + "The number of semesters from which data is taken for the PCA demand plot given in the " + "`pca_demand_plot` field." + ), + }, + "percent_open_plot": { + "type": "array", + "description": ( + "The plot of percentage of sections (excluding non-primary crosslisted sections) of " + "this topic that were open at each point in time during historical add/drop periods. " + "It is an array of pairs (2-length arrays), with each pair of the form " + "`[percent_through, percent_open]`. The `percent_through` value is a float in the " + "range [0,1], and represents percentage through the add/drop period. " + "The `percent_open` value is a float in the range [0,1], and represents " + "the percent of sections of this course that were open (excluding non-primary " + "crosslisted sections), at that point in time. The first item of each pair " + "should be plotted on the 'x-axis' and the second item should be plotted on the " + "'y-axis'. Note that floating point imprecision may cause " + "some values to be slightly off (like 0.35000000000000003), " + "so you should round if you display these data as strings anywhere. " + "This field will not be missing. However, this field will be null if there are no " + "underlying sections with [no permit required for registration] and [valid data]." + ), + }, + "percent_open_plot_since_semester": { + "type": "string", + "description": ( + "The earliest semester from which data is taken for the percent open plot given in the " + "`percent_open_plot` field." + ), + }, + "percent_open_plot_num_semesters": { + "type": "integer", + "description": ( + "The number of semesters from which data is taken for the percent open plot given in " + "the `percent_open_plot` field." + ), + }, +} + +instructor_review_aggregation_schema = { + # This dict contains the schema of the "_reviews" fields returned in the + # course-specific instructor review aggregation object within the response returned by + # course review views + to_r_camel(bit_label[2]): {"type": "number", "description": f"Average {bit_label[1]}"} + for bit_label in EXPANDED_REVIEW_BIT_LABEL +} + +course_reviews_response_schema = { + "course-reviews": { + "GET": { + 200: { + "properties": { + "code": { + "type": "string", + "description": "The dash-joined department and most-recent code of this course, e.g. 
`CIS-1200`.", # noqa E501 + }, + "aliases": { + "type": "array", + "description": "A list of courses that are crosslisted with this course (each represented by its dash-joined department and code).", # noqa E501 + "items": { + "type": "string", + }, + }, + "historical_codes": { + "description": "The historical lineage of primary course codes that have represented this course (from most recent to oldest).", # noqa E501 + "items": { + "type": "object", + "properties": { + "full_code": { + "type": "string", + "description": "The dash-joined department and course code.", + }, + "branched_from": { + "type": "boolean", + "description": "A flag indicating whether this code was branched into multiple new codes (e.g. in fall 2022). In these cases we should link to the old course on PCR because its reviews will not be included on the same page (unlike linear links).", # noqa E501 + }, + "semester": { + "type": "string", + "description": "The most recent semester this code was used (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2022C` for fall 2022.", # noqa E501 + }, + }, + }, + }, + "name": { + "type": "string", + "description": "The title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.", # noqa E501 + }, + "description": { + "type": "string", + "description": "The description of the course, e.g. 'A fast-paced introduction to the fundamental concepts of programming... [etc.]' for CIS-120.", # noqa E501 + }, + "latest_semester": { + "type": "string", + "description": "The most recent semester this course was offered (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2022C` for fall 2022.", # noqa E501 + }, + "registration_metrics": { + "type": "boolean", + "description": "True if this course has registration metrics that you can access via the Retrieve Plots route.", # noqa E501 + }, + "num_sections": { + "type": "integer", + "description": "The number of sections belonging to this course (excluding non-primary crosslisted sections) across all semesters (that we have data for).", # noqa E501 + }, + "num_sections_recent": { + "type": "integer", + "description": "The number of sections belonging to this course (excluding non-primary crosslisted sections) in its most recent semester.", # noqa E501 + }, + "average_reviews": { + "type": "object", + "description": "This course's average reviews across all of its sections (excluding non-primary crosslisted sections) from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": course_review_aggregation_schema, + }, + "recent_reviews": { + "type": "object", + "description": "This course's average reviews across all of its sections (excluding non-primary crosslisted sections) from the most recent semester. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": course_review_aggregation_schema, + }, + "num_semesters": { + "type": "integer", + "description": "The number of semesters for which this course has been taught (that we have data for).", # noqa E501 + }, + "instructors": { + "type": "object", + "description": "Reviews for this course broken down by instructor. 
Note that each key in this subdictionary is a stringified instructor id (indicated by `STRINGIFIED_INSTRUCTOR_ID`; this is not an actual key but a placeholder for potentially many keys).", # noqa E501 + "properties": { + "STRINGIFIED_INSTRUCTOR_ID": { + "type": "object", + "description": "This key `STRINGIFIED_INSTRUCTOR_ID` is a placeholder for potentially many stringified instructor id keys.", # noqa E501 + "properties": { + "id": { + "type": "integer", + "description": "The integer id of this instructor; note that this is just the int version of this subdictionary's key in the parent dictionary.", # noqa E501 + }, + "average_reviews": { + "type": "object", + "description": "This instructor's average reviews across all of the sections of this course that he/she has taught. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": instructor_review_aggregation_schema, + }, + "recent_reviews": { + "type": "object", + "description": "This instructor's average reviews across all of the sections of this course that he/she has taught in his/her most recent semester teaching this course that has review data. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": instructor_review_aggregation_schema, + }, + "latest_semester": { + "type": "string", + "description": "The most recent semester this instructor taught this course (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.", # noqa E501 + }, + "num_semesters": { + "type": "integer", + "description": "The number of semesters that this instructor has taught this course (that we have data for).", # noqa E501 + }, + "name": { + "type": "string", + "description": "The instructor's name", + }, + }, + } + }, + }, + } + }, + } + }, +} + +course_plots_response_schema = { + "course-plots": { + "GET": { + 200: { + "properties": { + "code": { + "type": "string", + "description": "The dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 + }, + "current_add_drop_period": { + "type": "object", + "description": "The start and end dates of the upcoming/current semester's add/drop period", # noqa E501 + "properties": { + "start": { + "type": "string", + "description": "A string datetime representation of the start of the current/upcoming add/drop period.", # noqa E501 + }, + "end": { + "type": "string", + "description": "A string datetime representation of the end of the current/upcoming add/drop period.", # noqa E501 + }, + }, + }, + "average_plots": { + "type": "object", + "description": "This course's plots (PCA demand, percent sections open), averaged across all of its sections from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": course_review_aggregation_schema, + }, + "recent_plots": { + "type": "object", + "description": "This course's plots (PCA demand, percent sections open), averaged across all of its sections from the most recent semester before the current semester. 
Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": course_review_aggregation_schema, + }, + } + }, + } + }, +} + +instructor_reviews_response_schema = { + "instructor-reviews": { + "GET": { + 200: { + "properties": { + "name": {"type": "string", "description": "The full name of the instructor."}, + "num_sections_recent": { + "type": "integer", + "description": "The number of sections this instructor taught in his/her most recent semester teaching.", # noqa E501 + }, + "num_sections": { + "type": "integer", + "description": "The number of sections this instructor has taught (that we have data for).", # noqa E501 + }, + "average_reviews": { + "type": "object", + "description": "This instructor's average reviews across all of his/her taught sections from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": instructor_review_aggregation_schema, + }, + "recent_reviews": { + "type": "object", + "description": "This instructor's average reviews across all of his/her taught sections from only his/her most recent semester teaching that has review data. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": instructor_review_aggregation_schema, + }, + "num_semesters": { + "type": "integer", + "description": "The number of semesters this instructor has taught (that we have data for).", # noqa E501 + }, + "courses": { + "type": "object", + "description": "Reviews for this instructor broken down by the courses he/she has taught. Note that each key in this subdictionary is the course full code (indicated by `COURSE_FULL_CODE`; this is not an actual key but a placeholder for potentially many keys).", # noqa E501 + "properties": { + "COURSE_FULL_CODE": { + "type": "object", + "description": "This key `COURSE_FULL_CODE` is a placeholder for potentially many course full code keys. Each full code is the dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 + "properties": { + "full_code": { + "type": "string", + "description": "The dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 + }, + "average_reviews": { + "type": "object", + "description": "This course's average reviews across all of its sections taught by this instructor from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": course_review_aggregation_schema, + }, + "recent_reviews": { + "type": "object", + "description": "This course's average reviews across all of its sections taught by this instructor from the most recent semester. 
Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": course_review_aggregation_schema, + }, + "latest_semester": { + "type": "string", + "description": "The most recent semester this course was taught by this instructor (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.", # noqa E501 + }, + "num_semesters": { + "type": "integer", + "description": "The number of semesters in which this course was taught by this instructor (that we have data for).", # noqa E501 + }, + "code": { + "type": "string", + "description": "Same as `full_code`.", + }, + "name": { + "type": "string", + "description": "The title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.", # noqa E501 + }, + }, + } + }, + }, + } + } + } + } +} + +autocomplete_response_schema = { + "review-autocomplete": { + "GET": { + 200: { + "properties": { + "courses": { + "type": "array", + "description": "Data on courses for autocomplete.", + "items": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The dash-joined department and code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 + }, + "desc": { + "type": "string", + "description": "The title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.", # noqa E501 + }, + "url": { + "type": "url", + "description": "The relative route through which this course's reviews can be accessed (a prefix of `/api/review/` is assumed).", # noqa E501 + }, + }, + }, + }, + "departments": { + "type": "array", + "description": "Data on departments for autocomplete.", + "items": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The string department code, e.g. `CIS` for the CIS department.", # noqa E501 + }, + "desc": { + "type": "string", + "description": "The name of the department, e.g. 'Computer and Information Sci' for the CIS department.", # noqa E501 + }, + "url": { + "type": "url", + "description": "The relative route through which this department's reviews can be accessed (a prefix of `/api/review/` is assumed).", # noqa E501 + }, + }, + }, + }, + "instructors": { + "type": "array", + "description": "Data on instructors for autocomplete.", + "items": { + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The full name of the instructor.", + }, + "desc": { + "type": "string", + "description": "A comma-separated string list of department codes to which this instructor belongs.", # noqa E501 + }, + "url": { + "type": "url", + "description": "The relative route through which this instructor's reviews can be accessed (a prefix of `/api/review/` is assumed).", # noqa E501 + }, + }, + }, + }, + } + } + } + } +} + +department_reviews_response_schema = { + "department-reviews": { + "GET": { + 200: { + "properties": { + "code": { + "type": "string", + "description": "The department code, e.g. `CIS` for the CIS department.", + }, + "name": { + "type": "string", + "description": "The name of the department, e.g. 'Computer and Information Sci' for the CIS department.", # noqa E501 + }, + "courses": { + "type": "object", + "description": "Reviews for this department broken down by its courses. 
Note that each key in this subdictionary is the course full code (indicated by `COURSE_FULL_CODE`; this is not an actual key but a placeholder for potentially many keys).", # noqa E501 + "properties": { + "COURSE_FULL_CODE": { + "type": "object", + "description": "This key `COURSE_FULL_CODE` is a placeholder for potentially many course full code keys. Each full code is the dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 + "properties": { + "id": { + "type": "string", + "description": "The dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 + }, + "average_reviews": { + "type": "object", + "description": "This course's average reviews across all of its sections from all semesters. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": course_review_aggregation_schema, + }, + "recent_reviews": { + "type": "object", + "description": "This course's average reviews across all of its sections from the most recent semester. Note that if any of these subfields are missing or null, that means the subfield is not applicable or missing from our data (you should check for null values).", # noqa E501 + "properties": course_review_aggregation_schema, + }, + "latest_semester": { + "type": "string", + "description": "The most recent semester this course was taught (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.", # noqa E501 + }, + "num_semesters": { + "type": "integer", + "description": "The number of semesters this class has been taught (that we have data for).", # noqa E501 + }, + "code": {"type": "string", "description": "Same as `id`."}, + "name": { + "type": "string", + "description": "The title of the course, e.g. 'Programming Languages and Techniques I' for CIS-120.", # noqa E501 + }, + }, + } + }, + }, + } + } + } + } +} + +instructor_for_course_reviews_response_schema = { + "course-history": { + "GET": { + 200: { + "properties": { + "instructor": { + "type": "object", + "description": "Information about the instructor.", + "properties": { + "id": { + "type": "integer", + "description": "The integer id of the instructor.", + }, + "name": { + "type": "string", + "description": "The full name of the instructor.", + }, + }, + }, + "course_code": { + "type": "string", + "description": "The dash-joined department and most-recent (across all codes representing the topic) code of the course, e.g. `CIS-120` for CIS-120.", # noqa E501 + }, + "sections": { + "type": "array", + "description": "The sections of this course taught by this instructor.", + "items": { + "type": "object", + "properties": { + "course_code": { + "type": "string", + "description": "The dash-joined department and course code of the section, `CIS-120` for CIS-120-001.", # noqa E501 + }, + "course_name": { + "type": "string", + "description": "The title of the section's course, e.g. 'Programming Languages and Techniques I' for CIS-120-001.", # noqa E501 + }, + "activity": { + "type": "string", + "description": f"The activity of the section. 
Options: `{str(list(dict(ACTIVITY_CHOICES).values()))}`", # noqa E501 + }, + "semester": { + "type": "string", + "description": "The semester this section was taught (of the form YYYYx where x is A [for spring], B [summer], or C [fall]), e.g. `2019C` for fall 2019.", # noqa E501 + }, + "forms_returned": { + "type": "integer", + "description": "The number of review responses collected for this section (or null if this section does not have review data).", # noqa E501 + }, + "forms_produced": { + "type": "integer", + "description": "The final enrollment of this section (or null if this section does not have review data).", # noqa E501 + }, + "ratings": { + "type": "object", + "description": "The reviews for this section.", + "properties": course_review_aggregation_schema, + }, + "comments": { + "type": "string", + "description": "A textual description of the section, as well as common sentiment about it from reviews.", # noqa E501 + }, + }, + }, + }, + } + } + } + } +} diff --git a/backend/review/management/commands/clearcache.py b/backend/review/management/commands/clearcache.py index 63a44ca85..5b8b3b14a 100644 --- a/backend/review/management/commands/clearcache.py +++ b/backend/review/management/commands/clearcache.py @@ -1,34 +1,34 @@ -import logging - -import redis -from django.conf import settings -from django.core.cache import cache -from django.core.management import BaseCommand - - -def clear_cache(): - # If we are not using redis as the cache backend, then we can delete everything from the cache. - if ( - settings.CACHES is None - or settings.CACHES.get("default").get("BACKEND") != "django_redis.cache.RedisCache" - ): - cache.clear() - return -1 - - # If redis is the cache backend, then we need to be careful to only delete django cache entries, - # since celery also uses redis as a message broker backend. - r = redis.Redis.from_url(settings.REDIS_URL) - del_count = 0 - for key in r.scan_iter("*views.decorators.cache*"): - r.delete(key) - del_count += 1 - return del_count - - -class Command(BaseCommand): - def handle(self, *args, **options): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - - del_count = clear_cache() - print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") +import logging + +import redis +from django.conf import settings +from django.core.cache import cache +from django.core.management import BaseCommand + + +def clear_cache(): + # If we are not using redis as the cache backend, then we can delete everything from the cache. + if ( + settings.CACHES is None + or settings.CACHES.get("default").get("BACKEND") != "django_redis.cache.RedisCache" + ): + cache.clear() + return -1 + + # If redis is the cache backend, then we need to be careful to only delete django cache entries, + # since celery also uses redis as a message broker backend. 
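+    # (Illustrative note, not from the original source: the per-view cache entries
+    # written by Django's cache_page decorator use keys containing
+    # "views.decorators.cache", which is why the scan below matches that substring
+    # rather than flushing the whole Redis database.)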
+ r = redis.Redis.from_url(settings.REDIS_URL) + del_count = 0 + for key in r.scan_iter("*views.decorators.cache*"): + r.delete(key) + del_count += 1 + return del_count + + +class Command(BaseCommand): + def handle(self, *args, **options): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + + del_count = clear_cache() + print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") diff --git a/backend/review/management/commands/export_department_reviews_by_semester.py b/backend/review/management/commands/export_department_reviews_by_semester.py index 738aff274..c8f1d150b 100644 --- a/backend/review/management/commands/export_department_reviews_by_semester.py +++ b/backend/review/management/commands/export_department_reviews_by_semester.py @@ -1,167 +1,167 @@ -import json -import os -from textwrap import dedent - -from django.core.management.base import BaseCommand -from django.db.models import F, OuterRef, Q, Sum - -from courses.models import Department -from courses.util import get_semesters -from PennCourses.settings.base import S3_resource -from review.annotations import review_averages -from review.models import ALL_FIELD_SLUGS, Review -from review.views import reviewbit_filters_pcr, section_filters_pcr - - -def average_by_dept(fields, semesters, departments=None, verbose=False): - """ - For each department and year, compute the average of given fields - (see `alert.models.ReviewBit` for an enumeration of fields) across all (valid) sections. - Note that fields should be a list of strings representing the review fields to be aggregated. - """ - dept_avgs = {} - - for i, semester in enumerate(semesters): - if verbose: - print(f"Processing semester {semester} ({i+1}/{len(semesters)})") - if departments is None: - depts_qs = Department.objects.all() - else: - depts_qs = Department.objects.filter(code__in=departments) - semester_dept_avgs = review_averages( - depts_qs, - fields=fields, - reviewbit_subfilters=( - reviewbit_filters_pcr - & Q(review__section__course__semester=semester) - & Q(review__section__course__department_id=OuterRef("id")) - ), - section_subfilters=( - section_filters_pcr - & Q(course__semester=semester) - & Q(course__department_id=OuterRef("id")) - ), - extra_metrics=False, - ).values("code", *fields) - - dept_avgs[semester] = {dept_dict.pop("code"): dept_dict for dept_dict in semester_dept_avgs} - - for code, enrollments_sum in ( - Review.objects.filter( - Q(section__course__primary_listing_id=F("section__course_id")) - & ~Q(section__activity__in=["LAB", "REC"]) - & Q(section__course__semester=semester) - ) - .annotate(code=F("section__course__department__code")) - .values("code") - .annotate(enrollments_sum=Sum("enrollment")) - .values_list("code", "enrollments_sum") - ): - dept_avgs[semester][code]["enrollments_sum"] = enrollments_sum - - return dept_avgs - - -class Command(BaseCommand): - help = dedent( - """ - Compute the average of given `fields` - (see `alert.models.ReviewBit` for an enumeration of fields) - by semester by department, and print or save to a file. - """ - ) - - def add_arguments(self, parser): - parser.add_argument( - "--fields", - nargs="?", - default=None, - help=dedent( - """ - fields as strings seperated by commas. If not provided, defaults to all fields. - """ - ), - ) - parser.add_argument( - "--path", - nargs="?", - default=None, - type=str, - help=dedent( - """ - path to the output file. If not provided then will simply be printed to console. 
- """ - ), - ) - parser.add_argument( - "--upload-to-s3", - default=False, - action="store_true", - help=( - "Enable this argument to upload the output of this script to the penn.courses " - "S3 bucket, at the path specified by the path argument." - ), - ) - parser.add_argument( - "--semesters", - nargs="?", - default="all", - type=str, - help=dedent( - """ - semesters to aggregate data for (in XXXXx form) as strings seperated - by commas. If semesters not provided then all semesters used. - """ - ), - ) - parser.add_argument( - "--departments", - nargs="?", - default=None, - type=str, - help=dedent( - """ - department codes to aggregate data for as strings seperated by - commas. If departments not provided then all departments used. - """ - ), - ) - - def handle(self, *args, **kwargs): - upload_to_s3 = kwargs["upload_to_s3"] - path = kwargs["path"] - assert path is None or (path.endswith(".json") and "/" not in path) - semesters = get_semesters(semesters=kwargs["semesters"]) - - if kwargs["fields"] is None: - fields = ALL_FIELD_SLUGS - else: - fields = kwargs["fields"].strip().split(",") - if kwargs["departments"] is None: - departments = None - else: - departments = kwargs["departments"].strip().split(",") - - print( - f"Averaging department review data ({', '.join(fields)}) by semester " - f"for semester(s): {', '.join(semesters)}" - ) - - dept_avgs = average_by_dept( - fields, semesters=semesters, departments=departments, verbose=True - ) - - if path is None: - print(json.dumps(dept_avgs, indent=4)) - else: - output_file_path = ( - "/tmp/review_semester_department_export.json" if upload_to_s3 else path - ) - os.makedirs(os.path.dirname(output_file_path), exist_ok=True) - - with open(output_file_path, "w") as f: - json.dump(dept_avgs, f, indent=4) - - if upload_to_s3: - S3_resource.meta.client.upload_file(output_file_path, "penn.courses", path) - os.remove(output_file_path) +import json +import os +from textwrap import dedent + +from django.core.management.base import BaseCommand +from django.db.models import F, OuterRef, Q, Sum + +from courses.models import Department +from courses.util import get_semesters +from PennCourses.settings.base import S3_resource +from review.annotations import review_averages +from review.models import ALL_FIELD_SLUGS, Review +from review.views import reviewbit_filters_pcr, section_filters_pcr + + +def average_by_dept(fields, semesters, departments=None, verbose=False): + """ + For each department and year, compute the average of given fields + (see `alert.models.ReviewBit` for an enumeration of fields) across all (valid) sections. + Note that fields should be a list of strings representing the review fields to be aggregated. 
+ """ + dept_avgs = {} + + for i, semester in enumerate(semesters): + if verbose: + print(f"Processing semester {semester} ({i+1}/{len(semesters)})") + if departments is None: + depts_qs = Department.objects.all() + else: + depts_qs = Department.objects.filter(code__in=departments) + semester_dept_avgs = review_averages( + depts_qs, + fields=fields, + reviewbit_subfilters=( + reviewbit_filters_pcr + & Q(review__section__course__semester=semester) + & Q(review__section__course__department_id=OuterRef("id")) + ), + section_subfilters=( + section_filters_pcr + & Q(course__semester=semester) + & Q(course__department_id=OuterRef("id")) + ), + extra_metrics=False, + ).values("code", *fields) + + dept_avgs[semester] = {dept_dict.pop("code"): dept_dict for dept_dict in semester_dept_avgs} + + for code, enrollments_sum in ( + Review.objects.filter( + Q(section__course__primary_listing_id=F("section__course_id")) + & ~Q(section__activity__in=["LAB", "REC"]) + & Q(section__course__semester=semester) + ) + .annotate(code=F("section__course__department__code")) + .values("code") + .annotate(enrollments_sum=Sum("enrollment")) + .values_list("code", "enrollments_sum") + ): + dept_avgs[semester][code]["enrollments_sum"] = enrollments_sum + + return dept_avgs + + +class Command(BaseCommand): + help = dedent( + """ + Compute the average of given `fields` + (see `alert.models.ReviewBit` for an enumeration of fields) + by semester by department, and print or save to a file. + """ + ) + + def add_arguments(self, parser): + parser.add_argument( + "--fields", + nargs="?", + default=None, + help=dedent( + """ + fields as strings seperated by commas. If not provided, defaults to all fields. + """ + ), + ) + parser.add_argument( + "--path", + nargs="?", + default=None, + type=str, + help=dedent( + """ + path to the output file. If not provided then will simply be printed to console. + """ + ), + ) + parser.add_argument( + "--upload-to-s3", + default=False, + action="store_true", + help=( + "Enable this argument to upload the output of this script to the penn.courses " + "S3 bucket, at the path specified by the path argument." + ), + ) + parser.add_argument( + "--semesters", + nargs="?", + default="all", + type=str, + help=dedent( + """ + semesters to aggregate data for (in XXXXx form) as strings seperated + by commas. If semesters not provided then all semesters used. + """ + ), + ) + parser.add_argument( + "--departments", + nargs="?", + default=None, + type=str, + help=dedent( + """ + department codes to aggregate data for as strings seperated by + commas. If departments not provided then all departments used. 
+ """ + ), + ) + + def handle(self, *args, **kwargs): + upload_to_s3 = kwargs["upload_to_s3"] + path = kwargs["path"] + assert path is None or (path.endswith(".json") and "/" not in path) + semesters = get_semesters(semesters=kwargs["semesters"]) + + if kwargs["fields"] is None: + fields = ALL_FIELD_SLUGS + else: + fields = kwargs["fields"].strip().split(",") + if kwargs["departments"] is None: + departments = None + else: + departments = kwargs["departments"].strip().split(",") + + print( + f"Averaging department review data ({', '.join(fields)}) by semester " + f"for semester(s): {', '.join(semesters)}" + ) + + dept_avgs = average_by_dept( + fields, semesters=semesters, departments=departments, verbose=True + ) + + if path is None: + print(json.dumps(dept_avgs, indent=4)) + else: + output_file_path = ( + "/tmp/review_semester_department_export.json" if upload_to_s3 else path + ) + os.makedirs(os.path.dirname(output_file_path), exist_ok=True) + + with open(output_file_path, "w") as f: + json.dump(dept_avgs, f, indent=4) + + if upload_to_s3: + S3_resource.meta.client.upload_file(output_file_path, "penn.courses", path) + os.remove(output_file_path) diff --git a/backend/review/management/commands/loadcomments.py b/backend/review/management/commands/loadcomments.py index 5c3a09fcf..2ed0a3f4f 100644 --- a/backend/review/management/commands/loadcomments.py +++ b/backend/review/management/commands/loadcomments.py @@ -1,68 +1,68 @@ -import csv -import logging - -import boto3 -from django.core.management import BaseCommand -from tqdm import tqdm - -from review.models import Review - - -def leftpad(num: int): - if num < 10: - return f"00{num}" - elif num < 100: - return f"0{num}" - else: - return f"{num}" - - -def get_semester(code): - code = int(code) - if code == 0: - return "A" - elif code == 1: - return "B" - elif code == 2: - return "C" - else: - return "*" - - -class Command(BaseCommand): - def add_arguments(self, parser): - parser.add_argument("path_to_csv", help="path to CSV file to use.") - parser.add_argument( - "-s3", "--s3_bucket", help="download zip file from specified s3 bucket." - ) - - def handle(self, *args, **kwargs): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - - src = kwargs["path_to_csv"] - if kwargs["s3_bucket"] is not None: - fp = "/tmp/comments.csv" - # Make sure AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY - # are loaded in as environment variables. 
- print(f"downloading zip from s3 bucket: {src}") - boto3.client("s3").download_file(kwargs["s3_bucket"], src, fp) - src = fp - - with open(src) as csvfile: - reader = csv.reader(csvfile) - num_updated = 0 - lines = list(reader) - print("Starting upload...") - for row in tqdm(lines): - dept, course_id, section_id, year, semester_num, instructor_name, comment = row - section_code = f"{dept}-{course_id}-{leftpad(int(section_id))}" - semester = f"{year}{get_semester(semester_num)}" - instructor_name = instructor_name.lower() - num_updated += Review.objects.filter( - section__full_code=section_code, - section__course__semester=semester, - instructor__name__iexact=instructor_name, - ).update(comments=comment) - - print(f"{num_updated} reviews updated.") +import csv +import logging + +import boto3 +from django.core.management import BaseCommand +from tqdm import tqdm + +from review.models import Review + + +def leftpad(num: int): + if num < 10: + return f"00{num}" + elif num < 100: + return f"0{num}" + else: + return f"{num}" + + +def get_semester(code): + code = int(code) + if code == 0: + return "A" + elif code == 1: + return "B" + elif code == 2: + return "C" + else: + return "*" + + +class Command(BaseCommand): + def add_arguments(self, parser): + parser.add_argument("path_to_csv", help="path to CSV file to use.") + parser.add_argument( + "-s3", "--s3_bucket", help="download zip file from specified s3 bucket." + ) + + def handle(self, *args, **kwargs): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + + src = kwargs["path_to_csv"] + if kwargs["s3_bucket"] is not None: + fp = "/tmp/comments.csv" + # Make sure AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY + # are loaded in as environment variables. + print(f"downloading zip from s3 bucket: {src}") + boto3.client("s3").download_file(kwargs["s3_bucket"], src, fp) + src = fp + + with open(src) as csvfile: + reader = csv.reader(csvfile) + num_updated = 0 + lines = list(reader) + print("Starting upload...") + for row in tqdm(lines): + dept, course_id, section_id, year, semester_num, instructor_name, comment = row + section_code = f"{dept}-{course_id}-{leftpad(int(section_id))}" + semester = f"{year}{get_semester(semester_num)}" + instructor_name = instructor_name.lower() + num_updated += Review.objects.filter( + section__full_code=section_code, + section__course__semester=semester, + instructor__name__iexact=instructor_name, + ).update(comments=comment) + + print(f"{num_updated} reviews updated.") diff --git a/backend/review/management/commands/mergeinstructors.py b/backend/review/management/commands/mergeinstructors.py index 53d2adcbb..073e0ed02 100644 --- a/backend/review/management/commands/mergeinstructors.py +++ b/backend/review/management/commands/mergeinstructors.py @@ -1,233 +1,233 @@ -import logging -from collections import defaultdict -from typing import Callable, Dict, List, Optional, Set - -from django.core.management import BaseCommand -from tqdm import tqdm - -from courses.models import Instructor -from review.management.commands.clearcache import clear_cache - - -# Statistic keys -INSTRUCTORS_KEPT = "instructors kept" -INSTRUCTORS_REMOVED = "instructors removed" -SECTIONS_MODIFIED = "sections modified" -REVIEWS_MODIFIED = "reviews modified" -INSTRUCTORS_UNMERGED = "instructors unmerged" - - -def batch_duplicates(qs, get_prop=None, union_find=None) -> List[Set[Instructor]]: - """ - Group queryset rows by a property defined in `get_prop()` (or alternatively - specify groups with a union find dictionary). 
Return a list of groups of size > 1. - :param qs: Queryset of instructors to use - :param get_prop: Function mapping a row to a value to group on. - If `get_prop` returns `None`, don't include the row in the groupings. - :param union_find: Alternatively to specifying `get_prop`, you can specify - groups with a union find dictionary (mapping instructor id to - a root instructor id representing the group). - This kwarg accepts a function mapping `qs` to a union find dictionary. - :return: List of instructor groups of size > 1. - """ - rows_by_prop = defaultdict(set) - if union_find: - union_find = union_find(qs) - for row in qs: - rows_by_prop[union_find[row.id]].add(row) - else: - assert get_prop - for row in qs: - rows_by_prop[get_prop(row)].add(row) - return [rows for prop, rows in rows_by_prop.items() if prop and len(rows) > 1] - - -def resolve_duplicates( - duplicate_instructor_groups: List[Set[Instructor]], dry_run: bool, stat, force=False -): - """ - Given a list of list of duplicate instructor groups, resolve the foreign key and many-to-many - relationships among duplicates to all point to the same instance. - - :param duplicate_instructor_groups: List of sets of duplicate instructors - e.g. [{a1, a2, a3}, {b1, b2}] - :param dry_run: If true, just calculate stats without actually modifying the database. - :param stat: Function to collect statistics. - :param force: Manually override conflicting user information. - """ - for instructor_set in tqdm(duplicate_instructor_groups): - # Find a primary instance in the duplicate set. This should be the instance that is most - # "complete" -- in the case of instructors, this means that there is a linked user object. - potential_primary = [inst for inst in instructor_set if inst.user is not None] - # If no instance has a linked user, just pick one instructor. - if len(potential_primary) == 0: - stat(INSTRUCTORS_KEPT, 1) - primary_instructor = max(instructor_set, key=lambda i: i.updated_at) - # If there's only one user with a linked user object, select that as the primary. - # This should be the case that is hit most often. - elif len(potential_primary) == 1: - stat(INSTRUCTORS_KEPT, 1) - primary_instructor = potential_primary[0] - else: - # If all potential primary rows relate to the same user (with the same pk), - # go ahead and choose one arbitrarily. - if len(set([inst.user.pk for inst in potential_primary])) == 1 or force: - stat(INSTRUCTORS_KEPT, 1) - primary_instructor = max(potential_primary, key=lambda i: i.updated_at) - # Otherwise, we don't have enough information to merge automatically. There are multiple - # instructors marked as duplicates, but they link to different users and so could be - # different people. Report the PKs of these instructors in the statistics, but don't - # merge. - else: - stat(INSTRUCTORS_KEPT, len(instructor_set)) - stat(INSTRUCTORS_UNMERGED, element=[i.pk for i in instructor_set]) - # We don't have enough information to do this merge. - continue - - # Filter for all instructors that aren't the primary. - duplicate_instructors = instructor_set - {primary_instructor} - # Transfer the sections and reviews of all non-primary instances to the primary instance. 
- for duplicate_instructor in duplicate_instructors: - for section in duplicate_instructor.section_set.all(): - stat(SECTIONS_MODIFIED, 1) - if not dry_run: - section.instructors.remove(duplicate_instructor) - section.instructors.add(primary_instructor) - - for review in duplicate_instructor.review_set.all(): - stat(REVIEWS_MODIFIED, 1) - if not dry_run: - review.instructor = primary_instructor - review.save() - - stat(INSTRUCTORS_REMOVED, 1) - if not dry_run: - duplicate_instructor.delete() - - -""" -Strategy definitions. Keys are the strategy name, values are lambdas -which resolve a list of duplicate lists when called. The lambdas are to ensure -lazy evaluation, since we won't necessarily be running all (or any) of the -given strategies. -""" - - -def first_last_name_sections_uf(instructors): - """ - Groups instructors if they share a first name / last name, and have - taught the same section. - """ - - def get_first_last(name): - components = name.split(" ") - return (components[0], components[-1]) - - union_find = {inst.id: inst.id for inst in instructors} - - for inst in tqdm(instructors): - inst_first_last = get_first_last(inst.name) - for section in inst.section_set.all(): - for other_inst in section.instructors.all(): - if inst_first_last == get_first_last(other_inst.name): - union_find[inst.id] = union_find[other_inst.id] - - def get_root_id(inst_id): - while union_find[inst_id] != inst_id: - inst_id = union_find[inst_id] - return inst_id - - for inst_id in union_find: - union_find[inst_id] = get_root_id(inst_id) - - return union_find - - -strategies: Dict[str, Callable[[], List[Set[Instructor]]]] = { - "case-insensitive": lambda: batch_duplicates( - Instructor.objects.all().prefetch_related("section_set", "review_set"), - lambda row: row.name.lower(), - ), - "pennid": lambda: batch_duplicates( - Instructor.objects.all().prefetch_related("section_set", "review_set"), - lambda row: row.user_id, - ), - "first-last-name-sections": lambda: batch_duplicates( - Instructor.objects.all().prefetch_related( - "section_set", "review_set", "section_set__instructors" - ), - union_find=lambda rows: first_last_name_sections_uf(rows), - ), -} - - -class Command(BaseCommand): - help = """ - Merge duplicate instructor entries through different strategies. - - case-insensitive: Merge instructors with the same name but different cases [O'leary, O'Leary] - pennid: Merge instructors with the same pennid - first-last-name-sections: Merge instructors based on firstname, lastname and shared sections. 
- """ - - def add_arguments(self, parser): - parser.add_argument("--dryrun", action="store_true", help="perform a dry run of merge.") - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument( - "--instructor", - "-i", - action="append", - dest="manual", - help="manually merge instructors with the provided IDs.", - default=list(), - ) - group.add_argument("--strategy", "-s", dest="strategies", action="append") - group.add_argument("--all", "-a", action="store_const", const=None, dest="strategies") - - def handle(self, *args, **kwargs): - root_logger = logging.getLogger("") - root_logger.setLevel(logging.DEBUG) - - dry_run = kwargs["dryrun"] - manual_merge: List[str] = kwargs["manual"] - selected_strategies: Optional[List[str]] = kwargs["strategies"] - - stats = dict() - - def stat(key, amt=1, element=None): - """ - Helper function to keep track of how many rows we are changing - """ - value = stats.get(key, 0) - if element is None: - stats[key] = value + amt - else: - stats.setdefault(key, []).append(element) - - def run_merge(strat: Callable[[], List[Set[Instructor]]], force=False): - """ - Run a merge pass, printing out helpful messages along the way. - """ - print("Finding duplicates...") - duplicates = strat() - print(f"Found {len(duplicates)} instructors with multiple rows. Merging records...") - resolve_duplicates(duplicates, dry_run, stat, force) - - if len(manual_merge) > 0: - print("***Merging records manually***") - run_merge(lambda: [set(Instructor.objects.filter(pk__in=manual_merge))], force=True) - else: - if selected_strategies is None: - selected_strategies = list(strategies.keys()) - for strategy in selected_strategies: - if strategy in strategies: - print(f"***Merging according to <{strategy}>***") - run_merge(strategies[strategy]) - else: - print(f"***Could not find strategy <{strategy}>***") - - print("Clearing cache") - del_count = clear_cache() - print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") - - print(stats) +import logging +from collections import defaultdict +from typing import Callable, Dict, List, Optional, Set + +from django.core.management import BaseCommand +from tqdm import tqdm + +from courses.models import Instructor +from review.management.commands.clearcache import clear_cache + + +# Statistic keys +INSTRUCTORS_KEPT = "instructors kept" +INSTRUCTORS_REMOVED = "instructors removed" +SECTIONS_MODIFIED = "sections modified" +REVIEWS_MODIFIED = "reviews modified" +INSTRUCTORS_UNMERGED = "instructors unmerged" + + +def batch_duplicates(qs, get_prop=None, union_find=None) -> List[Set[Instructor]]: + """ + Group queryset rows by a property defined in `get_prop()` (or alternatively + specify groups with a union find dictionary). Return a list of groups of size > 1. + :param qs: Queryset of instructors to use + :param get_prop: Function mapping a row to a value to group on. + If `get_prop` returns `None`, don't include the row in the groupings. + :param union_find: Alternatively to specifying `get_prop`, you can specify + groups with a union find dictionary (mapping instructor id to + a root instructor id representing the group). + This kwarg accepts a function mapping `qs` to a union find dictionary. + :return: List of instructor groups of size > 1. 
+ """ + rows_by_prop = defaultdict(set) + if union_find: + union_find = union_find(qs) + for row in qs: + rows_by_prop[union_find[row.id]].add(row) + else: + assert get_prop + for row in qs: + rows_by_prop[get_prop(row)].add(row) + return [rows for prop, rows in rows_by_prop.items() if prop and len(rows) > 1] + + +def resolve_duplicates( + duplicate_instructor_groups: List[Set[Instructor]], dry_run: bool, stat, force=False +): + """ + Given a list of list of duplicate instructor groups, resolve the foreign key and many-to-many + relationships among duplicates to all point to the same instance. + + :param duplicate_instructor_groups: List of sets of duplicate instructors + e.g. [{a1, a2, a3}, {b1, b2}] + :param dry_run: If true, just calculate stats without actually modifying the database. + :param stat: Function to collect statistics. + :param force: Manually override conflicting user information. + """ + for instructor_set in tqdm(duplicate_instructor_groups): + # Find a primary instance in the duplicate set. This should be the instance that is most + # "complete" -- in the case of instructors, this means that there is a linked user object. + potential_primary = [inst for inst in instructor_set if inst.user is not None] + # If no instance has a linked user, just pick one instructor. + if len(potential_primary) == 0: + stat(INSTRUCTORS_KEPT, 1) + primary_instructor = max(instructor_set, key=lambda i: i.updated_at) + # If there's only one user with a linked user object, select that as the primary. + # This should be the case that is hit most often. + elif len(potential_primary) == 1: + stat(INSTRUCTORS_KEPT, 1) + primary_instructor = potential_primary[0] + else: + # If all potential primary rows relate to the same user (with the same pk), + # go ahead and choose one arbitrarily. + if len(set([inst.user.pk for inst in potential_primary])) == 1 or force: + stat(INSTRUCTORS_KEPT, 1) + primary_instructor = max(potential_primary, key=lambda i: i.updated_at) + # Otherwise, we don't have enough information to merge automatically. There are multiple + # instructors marked as duplicates, but they link to different users and so could be + # different people. Report the PKs of these instructors in the statistics, but don't + # merge. + else: + stat(INSTRUCTORS_KEPT, len(instructor_set)) + stat(INSTRUCTORS_UNMERGED, element=[i.pk for i in instructor_set]) + # We don't have enough information to do this merge. + continue + + # Filter for all instructors that aren't the primary. + duplicate_instructors = instructor_set - {primary_instructor} + # Transfer the sections and reviews of all non-primary instances to the primary instance. + for duplicate_instructor in duplicate_instructors: + for section in duplicate_instructor.section_set.all(): + stat(SECTIONS_MODIFIED, 1) + if not dry_run: + section.instructors.remove(duplicate_instructor) + section.instructors.add(primary_instructor) + + for review in duplicate_instructor.review_set.all(): + stat(REVIEWS_MODIFIED, 1) + if not dry_run: + review.instructor = primary_instructor + review.save() + + stat(INSTRUCTORS_REMOVED, 1) + if not dry_run: + duplicate_instructor.delete() + + +""" +Strategy definitions. Keys are the strategy name, values are lambdas +which resolve a list of duplicate lists when called. The lambdas are to ensure +lazy evaluation, since we won't necessarily be running all (or any) of the +given strategies. 
+""" + + +def first_last_name_sections_uf(instructors): + """ + Groups instructors if they share a first name / last name, and have + taught the same section. + """ + + def get_first_last(name): + components = name.split(" ") + return (components[0], components[-1]) + + union_find = {inst.id: inst.id for inst in instructors} + + for inst in tqdm(instructors): + inst_first_last = get_first_last(inst.name) + for section in inst.section_set.all(): + for other_inst in section.instructors.all(): + if inst_first_last == get_first_last(other_inst.name): + union_find[inst.id] = union_find[other_inst.id] + + def get_root_id(inst_id): + while union_find[inst_id] != inst_id: + inst_id = union_find[inst_id] + return inst_id + + for inst_id in union_find: + union_find[inst_id] = get_root_id(inst_id) + + return union_find + + +strategies: Dict[str, Callable[[], List[Set[Instructor]]]] = { + "case-insensitive": lambda: batch_duplicates( + Instructor.objects.all().prefetch_related("section_set", "review_set"), + lambda row: row.name.lower(), + ), + "pennid": lambda: batch_duplicates( + Instructor.objects.all().prefetch_related("section_set", "review_set"), + lambda row: row.user_id, + ), + "first-last-name-sections": lambda: batch_duplicates( + Instructor.objects.all().prefetch_related( + "section_set", "review_set", "section_set__instructors" + ), + union_find=lambda rows: first_last_name_sections_uf(rows), + ), +} + + +class Command(BaseCommand): + help = """ + Merge duplicate instructor entries through different strategies. + + case-insensitive: Merge instructors with the same name but different cases [O'leary, O'Leary] + pennid: Merge instructors with the same pennid + first-last-name-sections: Merge instructors based on firstname, lastname and shared sections. + """ + + def add_arguments(self, parser): + parser.add_argument("--dryrun", action="store_true", help="perform a dry run of merge.") + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument( + "--instructor", + "-i", + action="append", + dest="manual", + help="manually merge instructors with the provided IDs.", + default=list(), + ) + group.add_argument("--strategy", "-s", dest="strategies", action="append") + group.add_argument("--all", "-a", action="store_const", const=None, dest="strategies") + + def handle(self, *args, **kwargs): + root_logger = logging.getLogger("") + root_logger.setLevel(logging.DEBUG) + + dry_run = kwargs["dryrun"] + manual_merge: List[str] = kwargs["manual"] + selected_strategies: Optional[List[str]] = kwargs["strategies"] + + stats = dict() + + def stat(key, amt=1, element=None): + """ + Helper function to keep track of how many rows we are changing + """ + value = stats.get(key, 0) + if element is None: + stats[key] = value + amt + else: + stats.setdefault(key, []).append(element) + + def run_merge(strat: Callable[[], List[Set[Instructor]]], force=False): + """ + Run a merge pass, printing out helpful messages along the way. + """ + print("Finding duplicates...") + duplicates = strat() + print(f"Found {len(duplicates)} instructors with multiple rows. 
Merging records...") + resolve_duplicates(duplicates, dry_run, stat, force) + + if len(manual_merge) > 0: + print("***Merging records manually***") + run_merge(lambda: [set(Instructor.objects.filter(pk__in=manual_merge))], force=True) + else: + if selected_strategies is None: + selected_strategies = list(strategies.keys()) + for strategy in selected_strategies: + if strategy in strategies: + print(f"***Merging according to <{strategy}>***") + run_merge(strategies[strategy]) + else: + print(f"***Could not find strategy <{strategy}>***") + + print("Clearing cache") + del_count = clear_cache() + print(f"{del_count if del_count >=0 else 'all'} cache entries removed.") + + print(stats) diff --git a/backend/review/migrations/0001_initial.py b/backend/review/migrations/0001_initial.py index 00f5b8a15..68f0127f0 100644 --- a/backend/review/migrations/0001_initial.py +++ b/backend/review/migrations/0001_initial.py @@ -1,46 +1,46 @@ -# Generated by Django 2.2.1 on 2019-05-25 20:04 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - initial = True - - dependencies = [ - ("courses", "0016_auto_20190523_1554"), - ] - - operations = [ - migrations.CreateModel( - name="Review", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("course_quality", models.DecimalField(decimal_places=2, max_digits=3)), - ("difficulty", models.DecimalField(decimal_places=2, max_digits=3)), - ("instructor_quality", models.DecimalField(decimal_places=2, max_digits=3)), - ("work_required", models.DecimalField(decimal_places=2, max_digits=3)), - ( - "instructor", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="courses.Instructor" - ), - ), - ( - "section", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="courses.Section" - ), - ), - ], - options={ - "unique_together": {("section", "instructor")}, - }, - ), - ] +# Generated by Django 2.2.1 on 2019-05-25 20:04 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ("courses", "0016_auto_20190523_1554"), + ] + + operations = [ + migrations.CreateModel( + name="Review", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("course_quality", models.DecimalField(decimal_places=2, max_digits=3)), + ("difficulty", models.DecimalField(decimal_places=2, max_digits=3)), + ("instructor_quality", models.DecimalField(decimal_places=2, max_digits=3)), + ("work_required", models.DecimalField(decimal_places=2, max_digits=3)), + ( + "instructor", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="courses.Instructor" + ), + ), + ( + "section", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="courses.Section" + ), + ), + ], + options={ + "unique_together": {("section", "instructor")}, + }, + ), + ] diff --git a/backend/review/migrations/0002_auto_20190525_2010.py b/backend/review/migrations/0002_auto_20190525_2010.py index 033037e72..376210482 100644 --- a/backend/review/migrations/0002_auto_20190525_2010.py +++ b/backend/review/migrations/0002_auto_20190525_2010.py @@ -1,33 +1,33 @@ -# Generated by Django 2.2.1 on 2019-05-25 20:10 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("review", "0001_initial"), - ] - - 
operations = [ - migrations.AlterField( - model_name="review", - name="course_quality", - field=models.DecimalField(decimal_places=2, max_digits=3, null=True), - ), - migrations.AlterField( - model_name="review", - name="difficulty", - field=models.DecimalField(decimal_places=2, max_digits=3, null=True), - ), - migrations.AlterField( - model_name="review", - name="instructor_quality", - field=models.DecimalField(decimal_places=2, max_digits=3, null=True), - ), - migrations.AlterField( - model_name="review", - name="work_required", - field=models.DecimalField(decimal_places=2, max_digits=3, null=True), - ), - ] +# Generated by Django 2.2.1 on 2019-05-25 20:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("review", "0001_initial"), + ] + + operations = [ + migrations.AlterField( + model_name="review", + name="course_quality", + field=models.DecimalField(decimal_places=2, max_digits=3, null=True), + ), + migrations.AlterField( + model_name="review", + name="difficulty", + field=models.DecimalField(decimal_places=2, max_digits=3, null=True), + ), + migrations.AlterField( + model_name="review", + name="instructor_quality", + field=models.DecimalField(decimal_places=2, max_digits=3, null=True), + ), + migrations.AlterField( + model_name="review", + name="work_required", + field=models.DecimalField(decimal_places=2, max_digits=3, null=True), + ), + ] diff --git a/backend/review/migrations/0003_auto_20190525_2040.py b/backend/review/migrations/0003_auto_20190525_2040.py index 2bd57d0fc..6071f86fd 100644 --- a/backend/review/migrations/0003_auto_20190525_2040.py +++ b/backend/review/migrations/0003_auto_20190525_2040.py @@ -1,52 +1,52 @@ -# Generated by Django 2.2.1 on 2019-05-25 20:40 - -import django.db.models.deletion -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("review", "0002_auto_20190525_2010"), - ] - - operations = [ - migrations.RemoveField( - model_name="review", - name="course_quality", - ), - migrations.RemoveField( - model_name="review", - name="difficulty", - ), - migrations.RemoveField( - model_name="review", - name="instructor_quality", - ), - migrations.RemoveField( - model_name="review", - name="work_required", - ), - migrations.CreateModel( - name="ReviewBit", - fields=[ - ( - "id", - models.AutoField( - auto_created=True, primary_key=True, serialize=False, verbose_name="ID" - ), - ), - ("field", models.CharField(max_length=32)), - ("score", models.DecimalField(decimal_places=5, max_digits=6)), - ( - "review", - models.ForeignKey( - on_delete=django.db.models.deletion.CASCADE, to="review.Review" - ), - ), - ], - options={ - "unique_together": {("review", "field")}, - }, - ), - ] +# Generated by Django 2.2.1 on 2019-05-25 20:40 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("review", "0002_auto_20190525_2010"), + ] + + operations = [ + migrations.RemoveField( + model_name="review", + name="course_quality", + ), + migrations.RemoveField( + model_name="review", + name="difficulty", + ), + migrations.RemoveField( + model_name="review", + name="instructor_quality", + ), + migrations.RemoveField( + model_name="review", + name="work_required", + ), + migrations.CreateModel( + name="ReviewBit", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, primary_key=True, serialize=False, verbose_name="ID" + ), + ), + ("field", 
models.CharField(max_length=32)), + ("score", models.DecimalField(decimal_places=5, max_digits=6)), + ( + "review", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="review.Review" + ), + ), + ], + options={ + "unique_together": {("review", "field")}, + }, + ), + ] diff --git a/backend/review/migrations/0004_auto_20200512_1526.py b/backend/review/migrations/0004_auto_20200512_1526.py index ecd8e735d..df7e433cd 100644 --- a/backend/review/migrations/0004_auto_20200512_1526.py +++ b/backend/review/migrations/0004_auto_20200512_1526.py @@ -1,73 +1,73 @@ -# Generated by Django 3.0.6 on 2020-05-12 19:26 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("review", "0003_auto_20190525_2040"), - ] - - operations = [ - migrations.RenameField( - model_name="reviewbit", - old_name="score", - new_name="average", - ), - migrations.AddField( - model_name="review", - name="enrollment", - field=models.IntegerField(blank=True, null=True), - ), - migrations.AddField( - model_name="review", - name="form_type", - field=models.IntegerField(blank=True, null=True), - ), - migrations.AddField( - model_name="review", - name="responses", - field=models.IntegerField(blank=True, null=True), - ), - migrations.AddField( - model_name="reviewbit", - name="median", - field=models.DecimalField(blank=True, decimal_places=5, max_digits=6, null=True), - ), - migrations.AddField( - model_name="reviewbit", - name="rating0", - field=models.IntegerField(blank=True, null=True), - ), - migrations.AddField( - model_name="reviewbit", - name="rating1", - field=models.IntegerField(blank=True, null=True), - ), - migrations.AddField( - model_name="reviewbit", - name="rating2", - field=models.IntegerField(blank=True, null=True), - ), - migrations.AddField( - model_name="reviewbit", - name="rating3", - field=models.IntegerField(blank=True, null=True), - ), - migrations.AddField( - model_name="reviewbit", - name="rating4", - field=models.IntegerField(blank=True, null=True), - ), - migrations.AddField( - model_name="reviewbit", - name="stddev", - field=models.DecimalField(blank=True, decimal_places=5, max_digits=6, null=True), - ), - migrations.AlterField( - model_name="reviewbit", - name="field", - field=models.CharField(db_index=True, max_length=32), - ), - ] +# Generated by Django 3.0.6 on 2020-05-12 19:26 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("review", "0003_auto_20190525_2040"), + ] + + operations = [ + migrations.RenameField( + model_name="reviewbit", + old_name="score", + new_name="average", + ), + migrations.AddField( + model_name="review", + name="enrollment", + field=models.IntegerField(blank=True, null=True), + ), + migrations.AddField( + model_name="review", + name="form_type", + field=models.IntegerField(blank=True, null=True), + ), + migrations.AddField( + model_name="review", + name="responses", + field=models.IntegerField(blank=True, null=True), + ), + migrations.AddField( + model_name="reviewbit", + name="median", + field=models.DecimalField(blank=True, decimal_places=5, max_digits=6, null=True), + ), + migrations.AddField( + model_name="reviewbit", + name="rating0", + field=models.IntegerField(blank=True, null=True), + ), + migrations.AddField( + model_name="reviewbit", + name="rating1", + field=models.IntegerField(blank=True, null=True), + ), + migrations.AddField( + model_name="reviewbit", + name="rating2", + field=models.IntegerField(blank=True, null=True), + ), + 
migrations.AddField( + model_name="reviewbit", + name="rating3", + field=models.IntegerField(blank=True, null=True), + ), + migrations.AddField( + model_name="reviewbit", + name="rating4", + field=models.IntegerField(blank=True, null=True), + ), + migrations.AddField( + model_name="reviewbit", + name="stddev", + field=models.DecimalField(blank=True, decimal_places=5, max_digits=6, null=True), + ), + migrations.AlterField( + model_name="reviewbit", + name="field", + field=models.CharField(db_index=True, max_length=32), + ), + ] diff --git a/backend/review/migrations/0005_review_comments.py b/backend/review/migrations/0005_review_comments.py index 42119288e..3a31a1dd2 100644 --- a/backend/review/migrations/0005_review_comments.py +++ b/backend/review/migrations/0005_review_comments.py @@ -1,18 +1,18 @@ -# Generated by Django 3.0.6 on 2020-05-29 02:56 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ("review", "0004_auto_20200512_1526"), - ] - - operations = [ - migrations.AddField( - model_name="review", - name="comments", - field=models.TextField(blank=True), - ), - ] +# Generated by Django 3.0.6 on 2020-05-29 02:56 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("review", "0004_auto_20200512_1526"), + ] + + operations = [ + migrations.AddField( + model_name="review", + name="comments", + field=models.TextField(blank=True), + ), + ] diff --git a/backend/review/models.py b/backend/review/models.py index dc4f63a52..7d9d40a72 100644 --- a/backend/review/models.py +++ b/backend/review/models.py @@ -1,121 +1,121 @@ -from django.db import models -from django.db.models import Avg, Q - - -class Review(models.Model): - """ - Represents the aggregate review for an instructor for a single section of a course. - By virtue of being associated to a course, every semester of a course will have a new Review - object. - - Actual scores for the review is stored in the ReviewBit related object, can be accessed via the - `reviewbit_set` of the object. - """ - - # sections have at most one review per instructor attached to the section. 
- section = models.ForeignKey("courses.Section", on_delete=models.CASCADE) - instructor = models.ForeignKey("courses.Instructor", on_delete=models.CASCADE) - - enrollment = models.IntegerField(blank=True, null=True) - responses = models.IntegerField(blank=True, null=True) - form_type = models.IntegerField(blank=True, null=True) - - comments = models.TextField(blank=True) - - class Meta: - unique_together = (("section", "instructor"),) - - def __str__(self): - return f"{self.section} - {self.instructor}" - - def set_averages(self, bits): - for key, value in bits.items(): - ReviewBit.objects.update_or_create(review=self, field=key, defaults={"average": value}) - - @staticmethod - def get_averages(topic_id, instructor_name=None, fields=None): - if fields is None: - # Default fields (topline numbers on PCR) - fields = [ - "course_quality", - "difficulty", - "instructor_quality", - ] - - # We're using some of the aggregation tricks documented on Django's Aggregation Cheat Sheet: - # https://docs.djangoproject.com/en/2.2/topics/db/aggregation/#cheat-sheet - - # Filter down a queryset to just include this course - qs = Review.objects.filter(section__course__topic_id=topic_id, responses__gt=0) - if ( - instructor_name is not None - ): # if an instructor is specified, filter down to just that instructor - qs = qs.filter(instructor_name__contains=instructor_name) - - # pass each aggregation as its own argument to `aggregate` (using dictionary comprehensions) - return qs.aggregate( - **{ - # average the average of all the reviewbits of a certain field - # (that's where the filter comes in) - field: Avg("reviewbit__average", filter=Q(reviewbit__field=field)) - for field in fields - } - ) - - -""" -Review Bits have different labels in the summary table and the rating table. -This tuple keeps track of the association between the two, along with an -intermediate, general label that we use internally. -""" -REVIEW_BIT_LABEL = ( - ("RINSTRUCTORQUALITY", "Instructor Quality", "instructor_quality"), - ("RCOURSEQUALITY", "Course Quality", "course_quality"), - ("RCOMMABILITY", "Comm. Ability", "communication_ability"), - ("RSTIMULATEINTEREST", "Stimulate Ability", "stimulate_interest"), - ("RINSTRUCTORACCESS", "Instructor Access", "instructor_access"), - ("RDIFFICULTY", "Difficulty", "difficulty"), - ("RWORKREQUIRED", "Work Required", "work_required"), - ("RTAQUALITY", "TA Quality", "ta_quality"), - ("RREADINGSVALUE", "Readings Value", "readings_value"), - ("RAMOUNTLEARNED", "Amount Learned", "amount_learned"), - ("RRECOMMENDMAJOR", "Recommend Major", "recommend_major"), - ("RRECOMMENDNONMAJOR", "Recommend Non-Major", "recommend_nonmajor"), - ("RABILITIESCHALLENGED", "Abilities Challenged", "abilities_challenged"), - ("RCLASSPACE", "Class Pace", "class_pace"), - ("RINSTRUCTOREFFECTIVE", "Instructor Effectiveness", "instructor_effective"), - ("RNATIVEABILITY", "Native Ability", "native_ability"), -) - -# Maps column name from SUMMARY sql tables to common slug. -COLUMN_TO_SLUG = {x[0]: x[2] for x in REVIEW_BIT_LABEL} -# Maps "context" value from RATING table to common slug. -CONTEXT_TO_SLUG = {x[1]: x[2] for x in REVIEW_BIT_LABEL} -ALL_FIELD_SLUGS = [x[2] for x in REVIEW_BIT_LABEL] - - -class ReviewBit(models.Model): - """ - A single key/value pair associated with a review. Fields are things like "course_quality", - and averages which range from 0 to 4. 
- """ - - review = models.ForeignKey(Review, on_delete=models.CASCADE) - field = models.CharField(max_length=32, db_index=True) - - average = models.DecimalField(max_digits=6, decimal_places=5) - median = models.DecimalField(max_digits=6, decimal_places=5, null=True, blank=True) - stddev = models.DecimalField(max_digits=6, decimal_places=5, null=True, blank=True) - - # The integer counts for how many students rated 0-4 on a given property. - rating0 = models.IntegerField(null=True, blank=True) - rating1 = models.IntegerField(null=True, blank=True) - rating2 = models.IntegerField(null=True, blank=True) - rating3 = models.IntegerField(null=True, blank=True) - rating4 = models.IntegerField(null=True, blank=True) - - class Meta: - unique_together = (("review", "field"),) - - def __str__(self): - return f"#{self.review.pk} - {self.field}: {self.average}" +from django.db import models +from django.db.models import Avg, Q + + +class Review(models.Model): + """ + Represents the aggregate review for an instructor for a single section of a course. + By virtue of being associated to a course, every semester of a course will have a new Review + object. + + Actual scores for the review is stored in the ReviewBit related object, can be accessed via the + `reviewbit_set` of the object. + """ + + # sections have at most one review per instructor attached to the section. + section = models.ForeignKey("courses.Section", on_delete=models.CASCADE) + instructor = models.ForeignKey("courses.Instructor", on_delete=models.CASCADE) + + enrollment = models.IntegerField(blank=True, null=True) + responses = models.IntegerField(blank=True, null=True) + form_type = models.IntegerField(blank=True, null=True) + + comments = models.TextField(blank=True) + + class Meta: + unique_together = (("section", "instructor"),) + + def __str__(self): + return f"{self.section} - {self.instructor}" + + def set_averages(self, bits): + for key, value in bits.items(): + ReviewBit.objects.update_or_create(review=self, field=key, defaults={"average": value}) + + @staticmethod + def get_averages(topic_id, instructor_name=None, fields=None): + if fields is None: + # Default fields (topline numbers on PCR) + fields = [ + "course_quality", + "difficulty", + "instructor_quality", + ] + + # We're using some of the aggregation tricks documented on Django's Aggregation Cheat Sheet: + # https://docs.djangoproject.com/en/2.2/topics/db/aggregation/#cheat-sheet + + # Filter down a queryset to just include this course + qs = Review.objects.filter(section__course__topic_id=topic_id, responses__gt=0) + if ( + instructor_name is not None + ): # if an instructor is specified, filter down to just that instructor + qs = qs.filter(instructor_name__contains=instructor_name) + + # pass each aggregation as its own argument to `aggregate` (using dictionary comprehensions) + return qs.aggregate( + **{ + # average the average of all the reviewbits of a certain field + # (that's where the filter comes in) + field: Avg("reviewbit__average", filter=Q(reviewbit__field=field)) + for field in fields + } + ) + + +""" +Review Bits have different labels in the summary table and the rating table. +This tuple keeps track of the association between the two, along with an +intermediate, general label that we use internally. +""" +REVIEW_BIT_LABEL = ( + ("RINSTRUCTORQUALITY", "Instructor Quality", "instructor_quality"), + ("RCOURSEQUALITY", "Course Quality", "course_quality"), + ("RCOMMABILITY", "Comm. 
Ability", "communication_ability"), + ("RSTIMULATEINTEREST", "Stimulate Ability", "stimulate_interest"), + ("RINSTRUCTORACCESS", "Instructor Access", "instructor_access"), + ("RDIFFICULTY", "Difficulty", "difficulty"), + ("RWORKREQUIRED", "Work Required", "work_required"), + ("RTAQUALITY", "TA Quality", "ta_quality"), + ("RREADINGSVALUE", "Readings Value", "readings_value"), + ("RAMOUNTLEARNED", "Amount Learned", "amount_learned"), + ("RRECOMMENDMAJOR", "Recommend Major", "recommend_major"), + ("RRECOMMENDNONMAJOR", "Recommend Non-Major", "recommend_nonmajor"), + ("RABILITIESCHALLENGED", "Abilities Challenged", "abilities_challenged"), + ("RCLASSPACE", "Class Pace", "class_pace"), + ("RINSTRUCTOREFFECTIVE", "Instructor Effectiveness", "instructor_effective"), + ("RNATIVEABILITY", "Native Ability", "native_ability"), +) + +# Maps column name from SUMMARY sql tables to common slug. +COLUMN_TO_SLUG = {x[0]: x[2] for x in REVIEW_BIT_LABEL} +# Maps "context" value from RATING table to common slug. +CONTEXT_TO_SLUG = {x[1]: x[2] for x in REVIEW_BIT_LABEL} +ALL_FIELD_SLUGS = [x[2] for x in REVIEW_BIT_LABEL] + + +class ReviewBit(models.Model): + """ + A single key/value pair associated with a review. Fields are things like "course_quality", + and averages which range from 0 to 4. + """ + + review = models.ForeignKey(Review, on_delete=models.CASCADE) + field = models.CharField(max_length=32, db_index=True) + + average = models.DecimalField(max_digits=6, decimal_places=5) + median = models.DecimalField(max_digits=6, decimal_places=5, null=True, blank=True) + stddev = models.DecimalField(max_digits=6, decimal_places=5, null=True, blank=True) + + # The integer counts for how many students rated 0-4 on a given property. + rating0 = models.IntegerField(null=True, blank=True) + rating1 = models.IntegerField(null=True, blank=True) + rating2 = models.IntegerField(null=True, blank=True) + rating3 = models.IntegerField(null=True, blank=True) + rating4 = models.IntegerField(null=True, blank=True) + + class Meta: + unique_together = (("review", "field"),) + + def __str__(self): + return f"#{self.review.pk} - {self.field}: {self.average}" diff --git a/backend/review/serializers.py b/backend/review/serializers.py index 211269ed9..995737ee2 100644 --- a/backend/review/serializers.py +++ b/backend/review/serializers.py @@ -1,20 +1,20 @@ -from rest_framework import serializers - -from courses.serializers import SectionIdSerializer -from review.models import Review, ReviewBit - - -class ReviewBitSerializer(serializers.ModelSerializer): - class Meta: - model = ReviewBit - fields = ("field", "average") - - -class ReviewSerializer(serializers.ModelSerializer): - section = SectionIdSerializer(read_only=True) - instructor = serializers.StringRelatedField() - fields = ReviewBitSerializer(source="reviewbit_set", many=True) - - class Meta: - model = Review - fields = ("section", "instructor") +from rest_framework import serializers + +from courses.serializers import SectionIdSerializer +from review.models import Review, ReviewBit + + +class ReviewBitSerializer(serializers.ModelSerializer): + class Meta: + model = ReviewBit + fields = ("field", "average") + + +class ReviewSerializer(serializers.ModelSerializer): + section = SectionIdSerializer(read_only=True) + instructor = serializers.StringRelatedField() + fields = ReviewBitSerializer(source="reviewbit_set", many=True) + + class Meta: + model = Review + fields = ("section", "instructor") diff --git a/backend/review/urls.py b/backend/review/urls.py index 
5930156f0..43a66313a 100644 --- a/backend/review/urls.py +++ b/backend/review/urls.py @@ -1,45 +1,45 @@ -from django.urls import path -from django.views.decorators.cache import cache_page - -from review.views import ( - autocomplete, - course_plots, - course_reviews, - department_reviews, - instructor_for_course_reviews, - instructor_reviews, -) - - -HOUR_IN_SECONDS = 60 * 60 -DAY_IN_SECONDS = HOUR_IN_SECONDS * 24 -MONTH_IN_SECONDS = DAY_IN_SECONDS * 30 - -urlpatterns = [ - path( - "course/", - cache_page(MONTH_IN_SECONDS)(course_reviews), - name="course-reviews", - ), - path( - "course_plots/", - cache_page(DAY_IN_SECONDS)(course_plots), - name="course-plots", - ), - path( - "instructor/", - cache_page(MONTH_IN_SECONDS)(instructor_reviews), - name="instructor-reviews", - ), - path( - "department/", - cache_page(MONTH_IN_SECONDS)(department_reviews), - name="department-reviews", - ), - path( - "course//", - cache_page(MONTH_IN_SECONDS)(instructor_for_course_reviews), - name="course-history", - ), - path("autocomplete", cache_page(MONTH_IN_SECONDS)(autocomplete), name="review-autocomplete"), -] +from django.urls import path +from django.views.decorators.cache import cache_page + +from review.views import ( + autocomplete, + course_plots, + course_reviews, + department_reviews, + instructor_for_course_reviews, + instructor_reviews, +) + + +HOUR_IN_SECONDS = 60 * 60 +DAY_IN_SECONDS = HOUR_IN_SECONDS * 24 +MONTH_IN_SECONDS = DAY_IN_SECONDS * 30 + +urlpatterns = [ + path( + "course/", + cache_page(MONTH_IN_SECONDS)(course_reviews), + name="course-reviews", + ), + path( + "course_plots/", + cache_page(DAY_IN_SECONDS)(course_plots), + name="course-plots", + ), + path( + "instructor/", + cache_page(MONTH_IN_SECONDS)(instructor_reviews), + name="instructor-reviews", + ), + path( + "department/", + cache_page(MONTH_IN_SECONDS)(department_reviews), + name="department-reviews", + ), + path( + "course//", + cache_page(MONTH_IN_SECONDS)(instructor_for_course_reviews), + name="course-history", + ), + path("autocomplete", cache_page(MONTH_IN_SECONDS)(autocomplete), name="review-autocomplete"), +] diff --git a/backend/review/util.py b/backend/review/util.py index 9b9274a77..18d2dbfe5 100644 --- a/backend/review/util.py +++ b/backend/review/util.py @@ -1,589 +1,589 @@ -import re -from collections import defaultdict -from math import isclose -from typing import Dict, List - -import scipy.stats as stats -from django.db.models import Count, F -from django.http import Http404 - -from courses.models import Section -from PennCourses.settings.base import ( - PCA_REGISTRATIONS_RECORDED_SINCE, - STATUS_UPDATES_RECORDED_SINCE, -) - - -def titleize(name): - """ - Titleize a course name or instructor, taking into account exceptions such as II. - """ - # string.title() will capitalize the first letter of every word, - # where a word is a substring delimited by a non-letter character. So, "o'leary" is two words - # and will be capitalized (properly) as "O'Leary". - name = name.strip().title() - # Roman-numeral suffixes - name = re.sub(r"([XVI])(x|v|i+)", lambda m: m.group(1) + m.group(2).upper(), name) - # "1st".title() -> "1St", but it should still be "1st". - name = re.sub(r"(\d)(St|Nd|Rd|Th)", lambda m: m.group(1) + m.group(2).lower(), name) - # Like McDonald. - name = re.sub(r"Mc([a-z])", lambda m: "Mc" + m.group(1).upper(), name) - # Possessives shouldn't get capitalized. 
- name = name.replace("'S", "'s") - return name - - -def to_r_camel(s): - """ - Turns fields from python snake_case to the PCR frontend's rCamelCase. - """ - return "r" + "".join([x.title() for x in s.split("_")]) - - -def make_subdict(field_prefix, d): - """ - Rows in a queryset that don't represent related database models are flat. But we want - our JSON to have a nested structure that makes more sense to the client. This function - takes fields from a flat dictionary with a certain prefix and returns a dictionary - of those entries, with the prefix removed from the keys. - """ - start_at = len(field_prefix) - return { - to_r_camel(k[start_at:]): v - for k, v in d.items() - if k.startswith(field_prefix) and v is not None - } - - -def get_single_dict_from_qs(qs): - """ - Returns the first object in a qs as a dict (as returned by `.values()`). - """ - vals = qs[:1].values() - if not vals: - raise Http404() - return dict(vals[0]) - - -def get_average_and_recent_dict_single(values_dict, extra_fields=None, **extra_fields_conv): - """ - Accepts a dict taken from the `.values()` list of a queryset - previously annotated by `annotate_average_and_recent`. - Returns a dict with keys `["average_reviews", "recent_reviews", - "num_semesters", "latest_semester"] + extra_fields` (these keys are documented in - the PCR API docs). You can specify any extra keys to include using the `extra_fields` list, - as long as those keys show up in `values_dict`. You can also specify extra keys to include - using kwargs of the form `new_key=old_key`. The resulting dict will have entries of the form - `new_key: values_dict[old_key]`. - """ - values_dict = dict(values_dict) - extra_fields = extra_fields or [] - return { - "average_reviews": make_subdict("average_", values_dict), - "recent_reviews": make_subdict("recent_", values_dict), - "num_semesters": values_dict["average_semester_count"], - "latest_semester": values_dict["average_semester_calc"], - **{k: values_dict[k] for k in extra_fields}, - **{new_key: values_dict[old_key] for new_key, old_key in extra_fields_conv.items()}, - } - - -def get_historical_codes(topic, exclude_codes): - historical_codes = dict() - - for course in topic.courses.all(): - full_code = course.full_code - semester = course.semester - if full_code in exclude_codes: - continue - if full_code not in historical_codes or historical_codes[full_code]["semester"] < semester: - historical_codes[full_code] = { - "full_code": full_code, - "branched_from": False, - "semester": semester, - } - - if topic.branched_from: - c = topic.branched_from.most_recent - historical_codes[c.full_code] = { - "full_code": c.full_code, - "branched_from": True, - "semester": c.semester, - } - - return sorted(list(historical_codes.values()), key=lambda c: c["semester"], reverse=True) - - -def get_num_sections(*args, **kwargs): - """ - Returns num_sections, num_sections_recent - Sections are filtered by the given args and kwargs. 
- """ - num_sections_by_semester = ( - Section.objects.filter( - *args, - **kwargs, - ) - .values("course__semester") - .annotate(num_sections=Count("id", distinct=True)) - .values_list("course__semester", "num_sections") - ) - num_sections = 0 - max_sem = None - num_sections_recent = 0 - for semester, num in num_sections_by_semester: - num_sections += num - if not max_sem or max_sem < semester: - max_sem = semester - num_sections_recent = num - return num_sections, num_sections_recent - - -def dict_average(entries: List[Dict[str, float]]) -> Dict[str, float]: - """ - Average a list of dicts into one dict with averages. - :param entries: - :return: - """ - keys = set() - for entry in entries: - keys.update(entry.keys()) - - averages = {k: (0, 0) for k in keys} - for entry in entries: - for k, v in entry.items(): - sum_, count_ = averages[k] - if averages.get(k) is not None: - averages[k] = (sum_ + v, count_ + 1) - - return {k: v[0] / v[1] if v[1] > 0 else None for k, v in averages.items()} - - -def aggregate_reviews(reviews, group_by, **extra_fields): - """ - Aggregate a list of reviews (as dictionaries), grouping by some field. - :param reviews: A list of dictionaries representing Review objects, with reviewbits inlined - using review.annotations.review_averages(). And dict-ified by calling .values() on a queryset. - :param group_by: Field to group by in the review. - :param extra_fields: Any extra fields from the dictionaries to carry through to the response. - :return: Average reviews, recent reviews, number of semesters taught, and other data needed - for the response to the frontend. - """ - grouped_reviews = dict() - # First pass: Group individual reviews by the provided key. - for review in reviews: - key = review[group_by] - grouped_reviews.setdefault(key, []).append( - { - "semester": review["semester"], - "exclude_from_recent": review.get("exclude_from_recent", False), - "scores": make_subdict("bit_", review), - **{ - response_prop: review[instance_prop] - for response_prop, instance_prop in extra_fields.items() - }, - } - ) - aggregated = dict() - # Second pass: Aggregate grouped reviews by taking the average of all scores and recent scores. - for k, reviews in grouped_reviews.items(): - latest_sem_with_reviews = max( - [r["semester"] for r in reviews if not r.get("exclude_from_recent")], default=None - ) - latest_sem = max([r["semester"] for r in reviews], default=None) - all_scores = [r["scores"] for r in reviews] - recent_scores = [r["scores"] for r in reviews if r["semester"] == latest_sem_with_reviews] - aggregated[k] = { - "id": k, - "average_reviews": dict_average(all_scores), - "recent_reviews": dict_average(recent_scores), - "latest_semester": latest_sem, - "num_semesters": len(set([r["semester"] for r in reviews])), - **{extra_field: reviews[0][extra_field] for extra_field, _ in extra_fields.items()}, - } - - return aggregated - - -def average_given_plots(plots_dict, bin_size=0.000001): - """ - Given plots (i.e. demands plots or section status plots), which should be a dict with - plot lists as leaves at some depth, aggregate all these plots and return a single average plot. - For instance, if a dict mapping semesters to section ids to demand plot lists is given, - this function will return the average demand plot across all given sections and semesters. - If a dict mapping section ids to section status plot lists is given, - this function will return a plot of the average percentage of the given sections that - were open at each point in time. 
- The bin_size argument allows you to specify how far after a certain data point to squash - following data points and average into the same point. By default, only data points - that are within 0.000001 will be squashed (i.e. almost equal, ignoring floating point - precision issues). - Returns None if no valid plots are found in the given plots_dict dict. - Note that demand plots are lists of tuples of the form (percent_through, value). - """ - # Extract plots from dict - plots = [] # A list of all plots in the dict - - def explore(to_explore): - if isinstance(to_explore, dict): - for value in to_explore.values(): - explore(value) - elif isinstance(to_explore, list): - plots.append(to_explore) - - explore(plots_dict) - - if len(plots) == 0: - return None - - assert all(len(plot) > 0 for plot in plots), f"Empty plot given: \n{plots}" - frontier_candidate_indices = [0 for _ in range(len(plots))] - # frontier_candidate_indices: A list of the indices of the next candidate elements to add to - # the frontier - - averaged_plot = [] - latest_values = [None for _ in range(len(plots))] - # averaged_plot: This will be our final averaged plot (which we will return) - while any([plot_idx < len(plots[i]) for i, plot_idx in enumerate(frontier_candidate_indices)]): - min_percent_through = min( - plots[i][frontier_candidate_indices[i]][0] - for i in range(len(plots)) - if frontier_candidate_indices[i] < len(plots[i]) - ) - plots_bins = [[] for _ in range(len(plots))] - # plots_bins is a list of lists of y values (one list for each given plot) - for plot_num in range(len(plots)): - new_frontier_candidate_index = frontier_candidate_indices[plot_num] - take_latest_value = True - while ( - new_frontier_candidate_index < len(plots[plot_num]) - and plots[plot_num][new_frontier_candidate_index][0] - <= min_percent_through + bin_size - ): - take_latest_value = False - plots_bins[plot_num].append(plots[plot_num][new_frontier_candidate_index][1]) - new_frontier_candidate_index += 1 - if take_latest_value and latest_values[plot_num] is not None: - plots_bins[plot_num].append(latest_values[plot_num]) - frontier_candidate_indices[plot_num] = new_frontier_candidate_index - latest_values = [sum(lst) / len(lst) if len(lst) > 0 else None for lst in plots_bins] - non_null_latest_values = [val for val in latest_values if val is not None] - latest_val_avg = sum(non_null_latest_values) / len(non_null_latest_values) - if ( - len(averaged_plot) == 0 - or not isclose(averaged_plot[-1][1], latest_val_avg) - or min_percent_through == 1 - ): - averaged_plot.append((min_percent_through, latest_val_avg)) - return averaged_plot - - -def get_status_updates_map(section_map): - """ - Returns status_updates_map, mapping semester to section id to a list of status updates - for that section. Every section from the given section_map dict is represented in the - returned status_updates_map dict. Note that section_map should map semester to section id - to section object. 
- """ - from courses.models import StatusUpdate # imported here to avoid circular imports - - status_updates = StatusUpdate.objects.filter( - section_id__in=[ - section_id for semester in section_map.keys() for section_id in section_map[semester] - ], - in_add_drop_period=True, - ).annotate(semester=F("section__course__semester")) - status_updates_map = dict() - # status_updates_map: maps semester to section id to the status updates for that section - for semester in section_map.keys(): - status_updates_map[semester] = dict() - for section_id in section_map[semester].keys(): - status_updates_map[semester][section_id] = [] - for status_update in status_updates: - status_updates_map[status_update.semester][status_update.section_id].append(status_update) - return status_updates_map - - -def avg_and_recent_demand_plots(section_map, status_updates_map, bin_size=0.01): - """ - Aggregate demand plots over time (during historical add/drop periods) for the given - sections (specified by section_map). - Demand plots are lists of tuples of the form (percent_through, relative_demand). - The average plot will average across all sections, and the recent plot will average across - sections from only the most recent semester. - Note that section_map should map semester to section id to section object. - The status_updates_map should map semester to section id to a list of status updates - for that section (this can be retrieved with the call get_status_updates_map(section_map)). - Points are grouped together with all all remaining points within bin_size to the right, - so the minimum separation between data points will be bin_size. - Returns (avg_demand_plot, avg_demand_plot_min_semester, avg_percent_open_plot_num_semesters, - recent_demand_plot, recent_demand_plot_semester) - """ - from alert.models import AddDropPeriod, PcaDemandDistributionEstimate, Registration - - # ^ imported here to avoid circular imports - add_drop_periods = AddDropPeriod.objects.filter(semester__in=section_map.keys()) - add_drop_periods_map = dict() - # add_drop_periods_map: maps semester to that semester's add drop period object - for adp in add_drop_periods: - add_drop_periods_map[adp.semester] = adp - - demand_distribution_estimates = PcaDemandDistributionEstimate.objects.filter( - semester__in=section_map.keys(), in_add_drop_period=True - ).select_related("highest_demand_section", "lowest_demand_section") - demand_distribution_estimates_map = defaultdict(list) - # demand_distribution_estimates_map: maps semester - # to a list of the demand distribution_estimates from that semester - for ext in demand_distribution_estimates: - demand_distribution_estimates_map[ext.semester].append(ext) - - registrations_map = defaultdict(lambda: defaultdict(list)) - # registrations_map: maps semester to section id to a list of registrations from that section - section_id_to_semester = { - section_id: semester for semester in section_map for section_id in section_map[semester] - } - registrations = Registration.objects.filter(section_id__in=section_id_to_semester.keys()) - for registration in registrations: - semester = section_id_to_semester[registration.section_id] - registrations_map[semester][registration.section_id].append(registration) - - demand_plots_map = defaultdict(dict) - # demand_plots_map: maps semester to section id to the demand plot of that section - - # Now that all database work has been completed, let's iterate through - # our semesters and compute demand plots for each section - for semester in section_map.keys(): - if 
semester < PCA_REGISTRATIONS_RECORDED_SINCE: - continue - add_drop_period = add_drop_periods_map[semester] - if semester not in demand_distribution_estimates_map: - continue - demand_distribution_estimates_changes = [ - { - "percent_through": ext.percent_through_add_drop_period, - "type": "distribution_estimate_change", - "csrdv_frac_zero": ext.csrdv_frac_zero, - "csprdv_lognorm_param_shape": ext.csprdv_lognorm_param_shape, - "csprdv_lognorm_param_loc": ext.csprdv_lognorm_param_loc, - "csprdv_lognorm_param_scale": ext.csprdv_lognorm_param_scale, - "min": ext.lowest_raw_demand, - "max": ext.highest_raw_demand, - } - for ext in demand_distribution_estimates_map[semester] - ] - if not demand_distribution_estimates_changes: - continue - for section in section_map[semester].values(): - section_id = section.id - volume_changes = [] # a list containing registration volume changes over time - for registration in registrations_map[semester][section_id]: - volume_changes.append( - { - "percent_through": add_drop_period.get_percent_through_add_drop( - registration.created_at - ), - "volume_change": 1, - "type": "volume_change", - } - ) - deactivated_at = registration.deactivated_at - if deactivated_at is not None: - volume_changes.append( - { - "percent_through": add_drop_period.get_percent_through_add_drop( - deactivated_at - ), - "volume_change": -1, - "type": "volume_change", - } - ) - status_updates_list = [ - { - "percent_through": update.percent_through_add_drop_period, - "type": "status_update", - "old_status": update.old_status, - "new_status": update.new_status, - } - for update in status_updates_map[semester][section_id] - ] - demand_plot = [(0, 0)] - # demand_plot: the demand plot for this section, containing elements of the form - # (percent_through, relative_demand) - changes = sorted( - volume_changes + demand_distribution_estimates_changes + status_updates_list, - key=lambda x: ( - x["percent_through"], - 1 - if x["type"] == "status_update" - else 2 - if x["type"] == "distribution_estimate_change" - else 3, - ), - ) - - # Initialize variables to be maintained in our main changes loop - registration_volume = 0 - latest_raw_demand_distribution_estimate = None - # Initialize section statuses - section_status = None - for change in changes: - if change["type"] == "status_update": - if section_status is None: - section_status = change["old_status"] - if section_status is None: - section_status = "O" if section.percent_open > 0.5 else "C" - - total_value_in_bin = 0 - num_in_bin = 0 - bin_start_pct = 0 - for change in changes: - if change["type"] == "status_update": - if change["old_status"] != section_status: # Skip erroneous status updates - continue - section_status = change["new_status"] - elif change["type"] == "distribution_estimate_change": - latest_raw_demand_distribution_estimate = change - else: - registration_volume += change["volume_change"] - if latest_raw_demand_distribution_estimate is None: - continue - if section_status == "O": - rel_demand = 0 - elif section_status != "C": - rel_demand = 1 - elif ( - latest_raw_demand_distribution_estimate["min"] - == latest_raw_demand_distribution_estimate["max"] - ): - rel_demand = 1 / 2 - else: - csrdv_frac_zero = latest_raw_demand_distribution_estimate["csrdv_frac_zero"] - raw_demand = registration_volume / section.capacity - if csrdv_frac_zero is None: - csrdv_frac_zero = int(raw_demand <= 0) - if raw_demand <= 0: - rel_demand = csrdv_frac_zero / 2 - else: - param_shape = latest_raw_demand_distribution_estimate[ - 
"csprdv_lognorm_param_shape" - ] - param_loc = latest_raw_demand_distribution_estimate[ - "csprdv_lognorm_param_loc" - ] - param_scale = latest_raw_demand_distribution_estimate[ - "csprdv_lognorm_param_scale" - ] - if param_shape is None or param_loc is None or param_scale is None: - rel_demand = csrdv_frac_zero - else: - rel_demand = csrdv_frac_zero + stats.lognorm.cdf( - raw_demand, - param_shape, - param_loc, - param_scale, - ) * (1 - csrdv_frac_zero) - if change["percent_through"] > bin_start_pct + bin_size: - if num_in_bin > 0: - bin_avg = total_value_in_bin / num_in_bin - if len(demand_plot) == 0 or not isclose(demand_plot[-1][1], bin_avg): - demand_plot.append((bin_start_pct, bin_avg)) - bin_start_pct = change["percent_through"] - total_value_in_bin = 0 - num_in_bin = 0 - total_value_in_bin += rel_demand - num_in_bin += 1 - if num_in_bin > 0: - demand_plot.append((bin_start_pct, total_value_in_bin / num_in_bin)) - if bin_start_pct < 1: - demand_plot.append((1, demand_plot[-1][1])) - demand_plots_map[semester][section_id] = demand_plot - - recent_demand_plot_semester = ( - max(demand_plots_map.keys()) if len(demand_plots_map) > 0 else None - ) - recent_demand_plot = ( - average_given_plots(demand_plots_map[recent_demand_plot_semester], bin_size=bin_size) - if len(demand_plots_map) > 0 - else None - ) - - avg_demand_plot = average_given_plots(demand_plots_map, bin_size=bin_size) - avg_demand_plot_min_semester = min(demand_plots_map.keys()) if demand_plots_map else None - avg_percent_open_plot_num_semesters = len(demand_plots_map) - - return ( - avg_demand_plot, - avg_demand_plot_min_semester, - avg_percent_open_plot_num_semesters, - recent_demand_plot, - recent_demand_plot_semester, - ) - - -def avg_and_recent_percent_open_plots(section_map, status_updates_map): - """ - Aggregate plots of the percentage of sections that were open at each point in time (during - historical add/drop periods) for the given sections (specified by section_map). - Percentage-open plots are lists of tuples of the form (percent_through, percentage_open). - The average plot will average across all sections, and the recent plot will average across - sections from only the most recent semester. - Note that section_map should map semester to section id to section object. - The status_updates_map should map semester to section id to a list of status updates - for that section (this can be retrieved with the call get_status_updates_map(section_map)). - The generated plots will have points at increments of step_size in the range [0,1]. 
- Returns (avg_percent_open_plot, avg_demand_plot_min_semester, - recent_percent_open_plot, recent_percent_open_plot_semester) - """ - - open_plots = dict() - # open_plots: maps semester to section id to the plot of when that section was open during - # the add/drop period (1 if open, 0 if not) - - # Now that all database work has been completed, let's iterate through - # our semesters and compute open plots for each section - for semester in section_map.keys(): - if semester < STATUS_UPDATES_RECORDED_SINCE: - continue - open_plots[semester] = dict() - for section in section_map[semester].values(): - section_id = section.id - updates = sorted( - status_updates_map[semester][section_id], - key=lambda x: x.percent_through_add_drop_period, - ) - if len(updates) == 0: - estimate_open = int(section.percent_open > 0.5) - open_plots[semester][section_id] = [(0, estimate_open), (1, estimate_open)] - continue - open_plot = [(0, int(updates[0].old_status == "O"))] - # open_plot: the demand plot for this section, containing elements of the form - # (percent_through, relative_demand). - - latest_status = int(updates[0].old_status == "O") - for update in updates: - if int(update.old_status == "O") != latest_status: - # Ignore invalid status updates - continue - latest_status = int(update.new_status == "O") - open_plot.append((update.percent_through_add_drop_period, latest_status)) - if open_plot[-1][0] < 1: - open_plot.append((1, latest_status)) - - open_plots[semester][section_id] = open_plot - - recent_percent_open_plot_semester = max(open_plots.keys()) if len(open_plots) > 0 else None - recent_percent_open_plot = ( - average_given_plots(open_plots[max(section_map.keys())]) if len(section_map) > 0 else None - ) - - avg_percent_open_plot = average_given_plots(open_plots) - avg_percent_open_plot_min_semester = min(open_plots.keys()) if len(open_plots) > 0 else None - avg_percent_open_plot_num_semesters = len(open_plots) - - return ( - avg_percent_open_plot, - avg_percent_open_plot_min_semester, - avg_percent_open_plot_num_semesters, - recent_percent_open_plot, - recent_percent_open_plot_semester, - ) +import re +from collections import defaultdict +from math import isclose +from typing import Dict, List + +import scipy.stats as stats +from django.db.models import Count, F +from django.http import Http404 + +from courses.models import Section +from PennCourses.settings.base import ( + PCA_REGISTRATIONS_RECORDED_SINCE, + STATUS_UPDATES_RECORDED_SINCE, +) + + +def titleize(name): + """ + Titleize a course name or instructor, taking into account exceptions such as II. + """ + # string.title() will capitalize the first letter of every word, + # where a word is a substring delimited by a non-letter character. So, "o'leary" is two words + # and will be capitalized (properly) as "O'Leary". + name = name.strip().title() + # Roman-numeral suffixes + name = re.sub(r"([XVI])(x|v|i+)", lambda m: m.group(1) + m.group(2).upper(), name) + # "1st".title() -> "1St", but it should still be "1st". + name = re.sub(r"(\d)(St|Nd|Rd|Th)", lambda m: m.group(1) + m.group(2).lower(), name) + # Like McDonald. + name = re.sub(r"Mc([a-z])", lambda m: "Mc" + m.group(1).upper(), name) + # Possessives shouldn't get capitalized. + name = name.replace("'S", "'s") + return name + + +def to_r_camel(s): + """ + Turns fields from python snake_case to the PCR frontend's rCamelCase. 
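For example, the conversion behaves as sketched below (illustrative inputs only):

    assert to_r_camel("instructor_quality") == "rInstructorQuality"
    assert to_r_camel("final_enrollment") == "rFinalEnrollment"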
+ """ + return "r" + "".join([x.title() for x in s.split("_")]) + + +def make_subdict(field_prefix, d): + """ + Rows in a queryset that don't represent related database models are flat. But we want + our JSON to have a nested structure that makes more sense to the client. This function + takes fields from a flat dictionary with a certain prefix and returns a dictionary + of those entries, with the prefix removed from the keys. + """ + start_at = len(field_prefix) + return { + to_r_camel(k[start_at:]): v + for k, v in d.items() + if k.startswith(field_prefix) and v is not None + } + + +def get_single_dict_from_qs(qs): + """ + Returns the first object in a qs as a dict (as returned by `.values()`). + """ + vals = qs[:1].values() + if not vals: + raise Http404() + return dict(vals[0]) + + +def get_average_and_recent_dict_single(values_dict, extra_fields=None, **extra_fields_conv): + """ + Accepts a dict taken from the `.values()` list of a queryset + previously annotated by `annotate_average_and_recent`. + Returns a dict with keys `["average_reviews", "recent_reviews", + "num_semesters", "latest_semester"] + extra_fields` (these keys are documented in + the PCR API docs). You can specify any extra keys to include using the `extra_fields` list, + as long as those keys show up in `values_dict`. You can also specify extra keys to include + using kwargs of the form `new_key=old_key`. The resulting dict will have entries of the form + `new_key: values_dict[old_key]`. + """ + values_dict = dict(values_dict) + extra_fields = extra_fields or [] + return { + "average_reviews": make_subdict("average_", values_dict), + "recent_reviews": make_subdict("recent_", values_dict), + "num_semesters": values_dict["average_semester_count"], + "latest_semester": values_dict["average_semester_calc"], + **{k: values_dict[k] for k in extra_fields}, + **{new_key: values_dict[old_key] for new_key, old_key in extra_fields_conv.items()}, + } + + +def get_historical_codes(topic, exclude_codes): + historical_codes = dict() + + for course in topic.courses.all(): + full_code = course.full_code + semester = course.semester + if full_code in exclude_codes: + continue + if full_code not in historical_codes or historical_codes[full_code]["semester"] < semester: + historical_codes[full_code] = { + "full_code": full_code, + "branched_from": False, + "semester": semester, + } + + if topic.branched_from: + c = topic.branched_from.most_recent + historical_codes[c.full_code] = { + "full_code": c.full_code, + "branched_from": True, + "semester": c.semester, + } + + return sorted(list(historical_codes.values()), key=lambda c: c["semester"], reverse=True) + + +def get_num_sections(*args, **kwargs): + """ + Returns num_sections, num_sections_recent + Sections are filtered by the given args and kwargs. + """ + num_sections_by_semester = ( + Section.objects.filter( + *args, + **kwargs, + ) + .values("course__semester") + .annotate(num_sections=Count("id", distinct=True)) + .values_list("course__semester", "num_sections") + ) + num_sections = 0 + max_sem = None + num_sections_recent = 0 + for semester, num in num_sections_by_semester: + num_sections += num + if not max_sem or max_sem < semester: + max_sem = semester + num_sections_recent = num + return num_sections, num_sections_recent + + +def dict_average(entries: List[Dict[str, float]]) -> Dict[str, float]: + """ + Average a list of dicts into one dict with averages. 
+ :param entries: + :return: + """ + keys = set() + for entry in entries: + keys.update(entry.keys()) + + averages = {k: (0, 0) for k in keys} + for entry in entries: + for k, v in entry.items(): + sum_, count_ = averages[k] + if averages.get(k) is not None: + averages[k] = (sum_ + v, count_ + 1) + + return {k: v[0] / v[1] if v[1] > 0 else None for k, v in averages.items()} + + +def aggregate_reviews(reviews, group_by, **extra_fields): + """ + Aggregate a list of reviews (as dictionaries), grouping by some field. + :param reviews: A list of dictionaries representing Review objects, with reviewbits inlined + using review.annotations.review_averages(). And dict-ified by calling .values() on a queryset. + :param group_by: Field to group by in the review. + :param extra_fields: Any extra fields from the dictionaries to carry through to the response. + :return: Average reviews, recent reviews, number of semesters taught, and other data needed + for the response to the frontend. + """ + grouped_reviews = dict() + # First pass: Group individual reviews by the provided key. + for review in reviews: + key = review[group_by] + grouped_reviews.setdefault(key, []).append( + { + "semester": review["semester"], + "exclude_from_recent": review.get("exclude_from_recent", False), + "scores": make_subdict("bit_", review), + **{ + response_prop: review[instance_prop] + for response_prop, instance_prop in extra_fields.items() + }, + } + ) + aggregated = dict() + # Second pass: Aggregate grouped reviews by taking the average of all scores and recent scores. + for k, reviews in grouped_reviews.items(): + latest_sem_with_reviews = max( + [r["semester"] for r in reviews if not r.get("exclude_from_recent")], default=None + ) + latest_sem = max([r["semester"] for r in reviews], default=None) + all_scores = [r["scores"] for r in reviews] + recent_scores = [r["scores"] for r in reviews if r["semester"] == latest_sem_with_reviews] + aggregated[k] = { + "id": k, + "average_reviews": dict_average(all_scores), + "recent_reviews": dict_average(recent_scores), + "latest_semester": latest_sem, + "num_semesters": len(set([r["semester"] for r in reviews])), + **{extra_field: reviews[0][extra_field] for extra_field, _ in extra_fields.items()}, + } + + return aggregated + + +def average_given_plots(plots_dict, bin_size=0.000001): + """ + Given plots (i.e. demands plots or section status plots), which should be a dict with + plot lists as leaves at some depth, aggregate all these plots and return a single average plot. + For instance, if a dict mapping semesters to section ids to demand plot lists is given, + this function will return the average demand plot across all given sections and semesters. + If a dict mapping section ids to section status plot lists is given, + this function will return a plot of the average percentage of the given sections that + were open at each point in time. + The bin_size argument allows you to specify how far after a certain data point to squash + following data points and average into the same point. By default, only data points + that are within 0.000001 will be squashed (i.e. almost equal, ignoring floating point + precision issues). + Returns None if no valid plots are found in the given plots_dict dict. + Note that demand plots are lists of tuples of the form (percent_through, value). 
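A small hand-traced example of the averaging (illustrative plots; treat the exact output as approximate):

    plots = {
        "sec_a": [(0, 1), (0.5, 0)],   # open at the start, closes halfway through
        "sec_b": [(0, 0), (0.75, 1)],  # closed at the start, opens at 75%
    }
    average_given_plots(plots)
    # roughly [(0, 0.5), (0.5, 0.0), (0.75, 0.5)]: the average starts at 0.5,
    # drops to 0 when sec_a closes, and recovers to 0.5 when sec_b opens.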
+ """ + # Extract plots from dict + plots = [] # A list of all plots in the dict + + def explore(to_explore): + if isinstance(to_explore, dict): + for value in to_explore.values(): + explore(value) + elif isinstance(to_explore, list): + plots.append(to_explore) + + explore(plots_dict) + + if len(plots) == 0: + return None + + assert all(len(plot) > 0 for plot in plots), f"Empty plot given: \n{plots}" + frontier_candidate_indices = [0 for _ in range(len(plots))] + # frontier_candidate_indices: A list of the indices of the next candidate elements to add to + # the frontier + + averaged_plot = [] + latest_values = [None for _ in range(len(plots))] + # averaged_plot: This will be our final averaged plot (which we will return) + while any([plot_idx < len(plots[i]) for i, plot_idx in enumerate(frontier_candidate_indices)]): + min_percent_through = min( + plots[i][frontier_candidate_indices[i]][0] + for i in range(len(plots)) + if frontier_candidate_indices[i] < len(plots[i]) + ) + plots_bins = [[] for _ in range(len(plots))] + # plots_bins is a list of lists of y values (one list for each given plot) + for plot_num in range(len(plots)): + new_frontier_candidate_index = frontier_candidate_indices[plot_num] + take_latest_value = True + while ( + new_frontier_candidate_index < len(plots[plot_num]) + and plots[plot_num][new_frontier_candidate_index][0] + <= min_percent_through + bin_size + ): + take_latest_value = False + plots_bins[plot_num].append(plots[plot_num][new_frontier_candidate_index][1]) + new_frontier_candidate_index += 1 + if take_latest_value and latest_values[plot_num] is not None: + plots_bins[plot_num].append(latest_values[plot_num]) + frontier_candidate_indices[plot_num] = new_frontier_candidate_index + latest_values = [sum(lst) / len(lst) if len(lst) > 0 else None for lst in plots_bins] + non_null_latest_values = [val for val in latest_values if val is not None] + latest_val_avg = sum(non_null_latest_values) / len(non_null_latest_values) + if ( + len(averaged_plot) == 0 + or not isclose(averaged_plot[-1][1], latest_val_avg) + or min_percent_through == 1 + ): + averaged_plot.append((min_percent_through, latest_val_avg)) + return averaged_plot + + +def get_status_updates_map(section_map): + """ + Returns status_updates_map, mapping semester to section id to a list of status updates + for that section. Every section from the given section_map dict is represented in the + returned status_updates_map dict. Note that section_map should map semester to section id + to section object. + """ + from courses.models import StatusUpdate # imported here to avoid circular imports + + status_updates = StatusUpdate.objects.filter( + section_id__in=[ + section_id for semester in section_map.keys() for section_id in section_map[semester] + ], + in_add_drop_period=True, + ).annotate(semester=F("section__course__semester")) + status_updates_map = dict() + # status_updates_map: maps semester to section id to the status updates for that section + for semester in section_map.keys(): + status_updates_map[semester] = dict() + for section_id in section_map[semester].keys(): + status_updates_map[semester][section_id] = [] + for status_update in status_updates: + status_updates_map[status_update.semester][status_update.section_id].append(status_update) + return status_updates_map + + +def avg_and_recent_demand_plots(section_map, status_updates_map, bin_size=0.01): + """ + Aggregate demand plots over time (during historical add/drop periods) for the given + sections (specified by section_map). 
+ Demand plots are lists of tuples of the form (percent_through, relative_demand). + The average plot will average across all sections, and the recent plot will average across + sections from only the most recent semester. + Note that section_map should map semester to section id to section object. + The status_updates_map should map semester to section id to a list of status updates + for that section (this can be retrieved with the call get_status_updates_map(section_map)). + Points are grouped together with all all remaining points within bin_size to the right, + so the minimum separation between data points will be bin_size. + Returns (avg_demand_plot, avg_demand_plot_min_semester, avg_percent_open_plot_num_semesters, + recent_demand_plot, recent_demand_plot_semester) + """ + from alert.models import AddDropPeriod, PcaDemandDistributionEstimate, Registration + + # ^ imported here to avoid circular imports + add_drop_periods = AddDropPeriod.objects.filter(semester__in=section_map.keys()) + add_drop_periods_map = dict() + # add_drop_periods_map: maps semester to that semester's add drop period object + for adp in add_drop_periods: + add_drop_periods_map[adp.semester] = adp + + demand_distribution_estimates = PcaDemandDistributionEstimate.objects.filter( + semester__in=section_map.keys(), in_add_drop_period=True + ).select_related("highest_demand_section", "lowest_demand_section") + demand_distribution_estimates_map = defaultdict(list) + # demand_distribution_estimates_map: maps semester + # to a list of the demand distribution_estimates from that semester + for ext in demand_distribution_estimates: + demand_distribution_estimates_map[ext.semester].append(ext) + + registrations_map = defaultdict(lambda: defaultdict(list)) + # registrations_map: maps semester to section id to a list of registrations from that section + section_id_to_semester = { + section_id: semester for semester in section_map for section_id in section_map[semester] + } + registrations = Registration.objects.filter(section_id__in=section_id_to_semester.keys()) + for registration in registrations: + semester = section_id_to_semester[registration.section_id] + registrations_map[semester][registration.section_id].append(registration) + + demand_plots_map = defaultdict(dict) + # demand_plots_map: maps semester to section id to the demand plot of that section + + # Now that all database work has been completed, let's iterate through + # our semesters and compute demand plots for each section + for semester in section_map.keys(): + if semester < PCA_REGISTRATIONS_RECORDED_SINCE: + continue + add_drop_period = add_drop_periods_map[semester] + if semester not in demand_distribution_estimates_map: + continue + demand_distribution_estimates_changes = [ + { + "percent_through": ext.percent_through_add_drop_period, + "type": "distribution_estimate_change", + "csrdv_frac_zero": ext.csrdv_frac_zero, + "csprdv_lognorm_param_shape": ext.csprdv_lognorm_param_shape, + "csprdv_lognorm_param_loc": ext.csprdv_lognorm_param_loc, + "csprdv_lognorm_param_scale": ext.csprdv_lognorm_param_scale, + "min": ext.lowest_raw_demand, + "max": ext.highest_raw_demand, + } + for ext in demand_distribution_estimates_map[semester] + ] + if not demand_distribution_estimates_changes: + continue + for section in section_map[semester].values(): + section_id = section.id + volume_changes = [] # a list containing registration volume changes over time + for registration in registrations_map[semester][section_id]: + volume_changes.append( + { + "percent_through": 
add_drop_period.get_percent_through_add_drop( + registration.created_at + ), + "volume_change": 1, + "type": "volume_change", + } + ) + deactivated_at = registration.deactivated_at + if deactivated_at is not None: + volume_changes.append( + { + "percent_through": add_drop_period.get_percent_through_add_drop( + deactivated_at + ), + "volume_change": -1, + "type": "volume_change", + } + ) + status_updates_list = [ + { + "percent_through": update.percent_through_add_drop_period, + "type": "status_update", + "old_status": update.old_status, + "new_status": update.new_status, + } + for update in status_updates_map[semester][section_id] + ] + demand_plot = [(0, 0)] + # demand_plot: the demand plot for this section, containing elements of the form + # (percent_through, relative_demand) + changes = sorted( + volume_changes + demand_distribution_estimates_changes + status_updates_list, + key=lambda x: ( + x["percent_through"], + 1 + if x["type"] == "status_update" + else 2 + if x["type"] == "distribution_estimate_change" + else 3, + ), + ) + + # Initialize variables to be maintained in our main changes loop + registration_volume = 0 + latest_raw_demand_distribution_estimate = None + # Initialize section statuses + section_status = None + for change in changes: + if change["type"] == "status_update": + if section_status is None: + section_status = change["old_status"] + if section_status is None: + section_status = "O" if section.percent_open > 0.5 else "C" + + total_value_in_bin = 0 + num_in_bin = 0 + bin_start_pct = 0 + for change in changes: + if change["type"] == "status_update": + if change["old_status"] != section_status: # Skip erroneous status updates + continue + section_status = change["new_status"] + elif change["type"] == "distribution_estimate_change": + latest_raw_demand_distribution_estimate = change + else: + registration_volume += change["volume_change"] + if latest_raw_demand_distribution_estimate is None: + continue + if section_status == "O": + rel_demand = 0 + elif section_status != "C": + rel_demand = 1 + elif ( + latest_raw_demand_distribution_estimate["min"] + == latest_raw_demand_distribution_estimate["max"] + ): + rel_demand = 1 / 2 + else: + csrdv_frac_zero = latest_raw_demand_distribution_estimate["csrdv_frac_zero"] + raw_demand = registration_volume / section.capacity + if csrdv_frac_zero is None: + csrdv_frac_zero = int(raw_demand <= 0) + if raw_demand <= 0: + rel_demand = csrdv_frac_zero / 2 + else: + param_shape = latest_raw_demand_distribution_estimate[ + "csprdv_lognorm_param_shape" + ] + param_loc = latest_raw_demand_distribution_estimate[ + "csprdv_lognorm_param_loc" + ] + param_scale = latest_raw_demand_distribution_estimate[ + "csprdv_lognorm_param_scale" + ] + if param_shape is None or param_loc is None or param_scale is None: + rel_demand = csrdv_frac_zero + else: + rel_demand = csrdv_frac_zero + stats.lognorm.cdf( + raw_demand, + param_shape, + param_loc, + param_scale, + ) * (1 - csrdv_frac_zero) + if change["percent_through"] > bin_start_pct + bin_size: + if num_in_bin > 0: + bin_avg = total_value_in_bin / num_in_bin + if len(demand_plot) == 0 or not isclose(demand_plot[-1][1], bin_avg): + demand_plot.append((bin_start_pct, bin_avg)) + bin_start_pct = change["percent_through"] + total_value_in_bin = 0 + num_in_bin = 0 + total_value_in_bin += rel_demand + num_in_bin += 1 + if num_in_bin > 0: + demand_plot.append((bin_start_pct, total_value_in_bin / num_in_bin)) + if bin_start_pct < 1: + demand_plot.append((1, demand_plot[-1][1])) + 
demand_plots_map[semester][section_id] = demand_plot + + recent_demand_plot_semester = ( + max(demand_plots_map.keys()) if len(demand_plots_map) > 0 else None + ) + recent_demand_plot = ( + average_given_plots(demand_plots_map[recent_demand_plot_semester], bin_size=bin_size) + if len(demand_plots_map) > 0 + else None + ) + + avg_demand_plot = average_given_plots(demand_plots_map, bin_size=bin_size) + avg_demand_plot_min_semester = min(demand_plots_map.keys()) if demand_plots_map else None + avg_percent_open_plot_num_semesters = len(demand_plots_map) + + return ( + avg_demand_plot, + avg_demand_plot_min_semester, + avg_percent_open_plot_num_semesters, + recent_demand_plot, + recent_demand_plot_semester, + ) + + +def avg_and_recent_percent_open_plots(section_map, status_updates_map): + """ + Aggregate plots of the percentage of sections that were open at each point in time (during + historical add/drop periods) for the given sections (specified by section_map). + Percentage-open plots are lists of tuples of the form (percent_through, percentage_open). + The average plot will average across all sections, and the recent plot will average across + sections from only the most recent semester. + Note that section_map should map semester to section id to section object. + The status_updates_map should map semester to section id to a list of status updates + for that section (this can be retrieved with the call get_status_updates_map(section_map)). + The generated plots will have points at increments of step_size in the range [0,1]. + Returns (avg_percent_open_plot, avg_demand_plot_min_semester, + recent_percent_open_plot, recent_percent_open_plot_semester) + """ + + open_plots = dict() + # open_plots: maps semester to section id to the plot of when that section was open during + # the add/drop period (1 if open, 0 if not) + + # Now that all database work has been completed, let's iterate through + # our semesters and compute open plots for each section + for semester in section_map.keys(): + if semester < STATUS_UPDATES_RECORDED_SINCE: + continue + open_plots[semester] = dict() + for section in section_map[semester].values(): + section_id = section.id + updates = sorted( + status_updates_map[semester][section_id], + key=lambda x: x.percent_through_add_drop_period, + ) + if len(updates) == 0: + estimate_open = int(section.percent_open > 0.5) + open_plots[semester][section_id] = [(0, estimate_open), (1, estimate_open)] + continue + open_plot = [(0, int(updates[0].old_status == "O"))] + # open_plot: the demand plot for this section, containing elements of the form + # (percent_through, relative_demand). 
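    # More precisely, open_plot holds (percent_through, open) points where open is 1 or 0.
    # For example, a section that starts open and closes 30% of the way through the
    # add/drop period (with no later updates) ends up as [(0, 1), (0.3, 0), (1, 0)].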
+ + latest_status = int(updates[0].old_status == "O") + for update in updates: + if int(update.old_status == "O") != latest_status: + # Ignore invalid status updates + continue + latest_status = int(update.new_status == "O") + open_plot.append((update.percent_through_add_drop_period, latest_status)) + if open_plot[-1][0] < 1: + open_plot.append((1, latest_status)) + + open_plots[semester][section_id] = open_plot + + recent_percent_open_plot_semester = max(open_plots.keys()) if len(open_plots) > 0 else None + recent_percent_open_plot = ( + average_given_plots(open_plots[max(section_map.keys())]) if len(section_map) > 0 else None + ) + + avg_percent_open_plot = average_given_plots(open_plots) + avg_percent_open_plot_min_semester = min(open_plots.keys()) if len(open_plots) > 0 else None + avg_percent_open_plot_num_semesters = len(open_plots) + + return ( + avg_percent_open_plot, + avg_percent_open_plot_min_semester, + avg_percent_open_plot_num_semesters, + recent_percent_open_plot, + recent_percent_open_plot_semester, + ) diff --git a/backend/scripts/asgi-run b/backend/scripts/asgi-run index 9882f6fb9..e221bc7d5 100755 --- a/backend/scripts/asgi-run +++ b/backend/scripts/asgi-run @@ -1,10 +1,10 @@ -#!/bin/bash - -# Django Migrate -/usr/local/bin/python3 /app/manage.py migrate --noinput - -# Switch to project folder -cd /app/ - -# Run using Gunicorn + Uvicorn -exec /usr/local/bin/gunicorn -b 0.0.0.0:80 -w 4 -k uvicorn.workers.UvicornWorker PennCourses.asgi:application +#!/bin/bash + +# Django Migrate +/usr/local/bin/python3 /app/manage.py migrate --noinput + +# Switch to project folder +cd /app/ + +# Run using Gunicorn + Uvicorn +exec /usr/local/bin/gunicorn -b 0.0.0.0:80 -w 4 -k uvicorn.workers.UvicornWorker PennCourses.asgi:application diff --git a/backend/setup.cfg b/backend/setup.cfg index dfa460263..bba3157d7 100644 --- a/backend/setup.cfg +++ b/backend/setup.cfg @@ -1,31 +1,31 @@ -[flake8] -max-line-length = 100 -exclude = .venv, migrations, settings, manage.py, frontend -inline-quotes = double -ignore = E231 W503 - -[isort] -default_section = THIRDPARTY -known_first_party = PennCourses, alert, courses, plan, review -line_length = 100 -lines_after_imports = 2 -multi_line_output = 3 -include_trailing_comma = True -use_parentheses = True - -[coverage:run] -omit = */tests/*, */tests.py, */migrations/*, */settings/*, */wsgi.py, */.venv/*, manage.py, */apps.py, frontend/*, */admin.py, PennCourses/docs_settings.py -source = . - -[uwsgi] -http-socket = :80 -chdir = /app/ -module = PennCourses.wsgi:application -master = true -static-map = /assets=/app/static -processes = 5 - -[tool:pytest] -DJANGO_SETTINGS_MODULE = PennCourses.settings.development -python_files = *test*.py -norecursedirs = .git __pycache__ frontend pcr-backup* .idea +[flake8] +max-line-length = 100 +exclude = .venv, migrations, settings, manage.py, frontend +inline-quotes = double +ignore = E231 W503 + +[isort] +default_section = THIRDPARTY +known_first_party = PennCourses, alert, courses, plan, review +line_length = 100 +lines_after_imports = 2 +multi_line_output = 3 +include_trailing_comma = True +use_parentheses = True + +[coverage:run] +omit = */tests/*, */tests.py, */migrations/*, */settings/*, */wsgi.py, */.venv/*, manage.py, */apps.py, frontend/*, */admin.py, PennCourses/docs_settings.py +source = . 
+ +[uwsgi] +http-socket = :80 +chdir = /app/ +module = PennCourses.wsgi:application +master = true +static-map = /assets=/app/static +processes = 5 + +[tool:pytest] +DJANGO_SETTINGS_MODULE = PennCourses.settings.development +python_files = *test*.py +norecursedirs = .git __pycache__ frontend pcr-backup* .idea diff --git a/backend/tests/__init__.py b/backend/tests/__init__.py index d02265233..66b86dfb7 100644 --- a/backend/tests/__init__.py +++ b/backend/tests/__init__.py @@ -1,41 +1,41 @@ -from courses.views import CourseList, CourseListSearch - - -def set_mock_get_serializer_context(): - """ - Sets `CourseListSearch.get_serializer_context` to `CourseList.get_serializer_context` - and returns the original value of `CourseListSearch.get_serializer_context` - This function is called in `tests.__init__.py` to ensure it takes effect for - all tests by default. The goal is to reduce testing runtime, though this also means - that the `recommendation_score` field of the `CourseListSearch` endpoint always returns - null (`None`) by default in all the tests. However if you would - like revert to the original (production) version of `CourseListSearch.get_serializer_context` - it is returned from this function and stored as a package variable as - production_CourseListSearch_get_serializer_context. To revert to the original version - for a single test or test case, decorate your test case or test like this: - ``` - @unittest.mock.patch('courses.views.CourseListSearch.get_serializer_context', - new=production_CourseListSearch_get_serializer_context) - ``` - (Note that you should put - `from tests import production_CourseListSearch_get_serializer_context` - at the top of the file to make production_CourseListSearch_get_serializer_context - is defined. Note also that reverting may lead to a `botocore` - error in the CI tests. To resolve this `botocore` error it is suggested that you patch - `courses.views.retrieve_course_clusters`. See - `tests.courses.test_api.CourseSearchRecommendationScoreTestCase. - test_recommendation_is_number_when_user_is_logged_in` for an example.). - """ - print( - "SETTING `CourseListSearch.get_serializer_context` " - "TO `CourseList.get_serializer_context`...\n" - ) - production_CourseListSearch_get_serializer_context = CourseListSearch.get_serializer_context - CourseListSearch.get_serializer_context = CourseList.get_serializer_context - return production_CourseListSearch_get_serializer_context - - -# `production_CourseListSearch_get_serializer_context` -# is accessible to all files in the `tests` package (ie, descendants of `backend/tests`) -# and holds the original (production) version of the `CourseListSearch.get_serializer_context` -production_CourseListSearch_get_serializer_context = set_mock_get_serializer_context() +from courses.views import CourseList, CourseListSearch + + +def set_mock_get_serializer_context(): + """ + Sets `CourseListSearch.get_serializer_context` to `CourseList.get_serializer_context` + and returns the original value of `CourseListSearch.get_serializer_context` + This function is called in `tests.__init__.py` to ensure it takes effect for + all tests by default. The goal is to reduce testing runtime, though this also means + that the `recommendation_score` field of the `CourseListSearch` endpoint always returns + null (`None`) by default in all the tests. 
However if you would + like revert to the original (production) version of `CourseListSearch.get_serializer_context` + it is returned from this function and stored as a package variable as + production_CourseListSearch_get_serializer_context. To revert to the original version + for a single test or test case, decorate your test case or test like this: + ``` + @unittest.mock.patch('courses.views.CourseListSearch.get_serializer_context', + new=production_CourseListSearch_get_serializer_context) + ``` + (Note that you should put + `from tests import production_CourseListSearch_get_serializer_context` + at the top of the file to make production_CourseListSearch_get_serializer_context + is defined. Note also that reverting may lead to a `botocore` + error in the CI tests. To resolve this `botocore` error it is suggested that you patch + `courses.views.retrieve_course_clusters`. See + `tests.courses.test_api.CourseSearchRecommendationScoreTestCase. + test_recommendation_is_number_when_user_is_logged_in` for an example.). + """ + print( + "SETTING `CourseListSearch.get_serializer_context` " + "TO `CourseList.get_serializer_context`...\n" + ) + production_CourseListSearch_get_serializer_context = CourseListSearch.get_serializer_context + CourseListSearch.get_serializer_context = CourseList.get_serializer_context + return production_CourseListSearch_get_serializer_context + + +# `production_CourseListSearch_get_serializer_context` +# is accessible to all files in the `tests` package (ie, descendants of `backend/tests`) +# and holds the original (production) version of the `CourseListSearch.get_serializer_context` +production_CourseListSearch_get_serializer_context = set_mock_get_serializer_context() diff --git a/backend/tests/courses/test-opendata.json b/backend/tests/courses/test-opendata.json index 961ec02a6..0507b72ac 100644 --- a/backend/tests/courses/test-opendata.json +++ b/backend/tests/courses/test-opendata.json @@ -1,338 +1,338 @@ -{ - "result_data":[ - { - "activity":"LEC", - "activity_description":"Lecture", - "additional_section_narrative":null, - "course_level":null, - "subject":"CIS", - "course_level_desc":null, - "corequisite_activity":null, - "corequisite_activity_description":null, - "course_department":"CIS", - "course_description":"A fast-paced introduction to the fundamental concepts of programming and software design. This course assumes some previous programming experience, at the level of a high school computer science class or CIS110. (If you got at least 4 in the AP Computer Science A or AB exam, you will do great.) No specific programming language background is assumed: basic experience with any language (for instance Java, C, C++, VB, Python, Perl, or Scheme) is fine. If you have never programmed before, you should take CIS 110 first. 
", - "course_number":"1200", - "crn":"61696", - "xlist_group":null, - "course_terms_offered":"1 term: Either", - "course_title":"Prog Lang & Tech I", - "credit_connector":"OR", - "credit_type":"CU", - "credits":"1", - "crosslist_primary":null, - "end_date":"2022-12-12 00:00:00.0", - "first_meeting_days":"MWF10:15 AMSTITB6", - "is_cancelled":false, - "is_closed":false, - "is_crosslist_primary":null, - "is_not_scheduled":false, - "max_enrollment":"240", - "max_enrollment_crosslist":null, - "maximum_credit":"1", - "minimum_credit":"0", - "primary_instructor":"Stephan A Zdancewic", - "section_id":"CIS1200001", - "section_number":"001", - "section_title":"Prog Lang & Tech I", - "start_date":"2022-08-30 00:00:00.0", - "syllabus_url":null, - "term":"202230", - "term_session":"14W", - "crosslistings":[ - - ], - "attributes":[ - { - "attribute_code":"ACGC", - "attribute_desc":"COGS M Computation Cogni" - }, - { - "attribute_code":"ACGL", - "attribute_desc":"COGS M Language Min" - }, - { - "attribute_code":"ACGN", - "attribute_desc":"COGS M Cog Neuroscienc" - }, - { - "attribute_code":"ALCN", - "attribute_desc":"LGIC N Minor Electiv" - }, - { - "attribute_code":"ALNR", - "attribute_desc":"ASLD Related Course" - }, - { - "attribute_code":"AU16", - "attribute_desc":"College 16 CU Requiremen" - }, - { - "attribute_code":"AUFR", - "attribute_desc":"COL-FND-Formal Reason&Analysis" - }, - { - "attribute_code":"EUMS", - "attribute_desc":"EAS-COURSE-Math/Science/Engrng" - }, - { - "attribute_code":"NURS", - "attribute_desc":"NUR-SECTOR-ReaSys&Relationship" - }, - { - "attribute_code":"QP", - "attribute_desc":"Grade Mode: Pass/Fail" - }, - { - "attribute_code":"QS", - "attribute_desc":"Grade Mode: Standard Lettr Grd" - }, - { - "attribute_code":"QU", - "attribute_desc":"Grade Mode: Stsfctry/Unstsftry" - } - ], - "instructors":[ - { - "primary_ind":"Y", - "penn_id":"10218938", - "last_name":"Zdancewic", - "first_name":"Stephan", - "middle_initial":"A" - }, - { - "primary_ind":null, - "penn_id":"69952305", - "last_name":"Sheth", - "first_name":"Swapneel", - "middle_initial":null - } - ], - "linked_courses":[ - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"218", - "section_id ":"CIS1200218", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"217", - "section_id ":"CIS1200217", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"216", - "section_id ":"CIS1200216", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"215", - "section_id ":"CIS1200215", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"214", - "section_id ":"CIS1200214", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"213", - "section_id ":"CIS1200213", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"212", - "section_id ":"CIS1200212", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"211", - "section_id ":"CIS1200211", - "schedule_code ":"REC", - 
"schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"210", - "section_id ":"CIS1200210", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"209", - "section_id ":"CIS1200209", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"208", - "section_id ":"CIS1200208", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"207", - "section_id ":"CIS1200207", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"206", - "section_id ":"CIS1200206", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"205", - "section_id ":"CIS1200205", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"204", - "section_id ":"CIS1200204", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"203", - "section_id ":"CIS1200203", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"202", - "section_id ":"CIS1200202", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"201", - "section_id ":"CIS1200201", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"219", - "section_id ":"CIS1200219", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"220", - "section_id ":"CIS1200220", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"221", - "section_id ":"CIS1200221", - "schedule_code ":"REC", - "schedule_description":"Recitation" - }, - { - "subject_code ":"CIS", - "course_number ":"1200", - "section_number ":"222", - "section_id ":"CIS1200222", - "schedule_code ":"REC", - "schedule_description":"Recitation" - } - ], - "meetings":[ - { - "building_code":"STIT", - "building_desc":"Stiteler Hall", - "room_code":"B6", - "begin_time_24":"1015", - "end_time_24":"1114", - "begin_time":"10:15 AM", - "end_time":"11:14 AM", - "sunday":null, - "monday":"M", - "tuesday":null, - "wednesday":"W", - "thursday":null, - "friday":"F", - "saturday":null, - "days":"MWF", - "start_date":"2022-08-30 00:00:00.0", - "end_date":"2022-12-12 00:00:00.0" - } - ], - "grade_modes":[ - { - "code":"QP", - "description":"Grade Mode: Pass/Fail" - }, - { - "code":"QS", - "description":"Grade Mode: Standard Lettr Grd" - }, - { - "code":"QU", - "description":"Grade Mode: Stsfctry/Unstsftry" - } - ], - "course_restrictions":[ - - ] - } - ], - "service_meta":{ - "number_of_pages":1, - "results_per_page":1000, - "current_page_number":1, - "next_page_number":null, - "previous_page_number":null, - "error_text":null, - "rest_code":null, - "error":false - } -} +{ + "result_data":[ + { + 
"activity":"LEC", + "activity_description":"Lecture", + "additional_section_narrative":null, + "course_level":null, + "subject":"CIS", + "course_level_desc":null, + "corequisite_activity":null, + "corequisite_activity_description":null, + "course_department":"CIS", + "course_description":"A fast-paced introduction to the fundamental concepts of programming and software design. This course assumes some previous programming experience, at the level of a high school computer science class or CIS110. (If you got at least 4 in the AP Computer Science A or AB exam, you will do great.) No specific programming language background is assumed: basic experience with any language (for instance Java, C, C++, VB, Python, Perl, or Scheme) is fine. If you have never programmed before, you should take CIS 110 first. ", + "course_number":"1200", + "crn":"61696", + "xlist_group":null, + "course_terms_offered":"1 term: Either", + "course_title":"Prog Lang & Tech I", + "credit_connector":"OR", + "credit_type":"CU", + "credits":"1", + "crosslist_primary":null, + "end_date":"2022-12-12 00:00:00.0", + "first_meeting_days":"MWF10:15 AMSTITB6", + "is_cancelled":false, + "is_closed":false, + "is_crosslist_primary":null, + "is_not_scheduled":false, + "max_enrollment":"240", + "max_enrollment_crosslist":null, + "maximum_credit":"1", + "minimum_credit":"0", + "primary_instructor":"Stephan A Zdancewic", + "section_id":"CIS1200001", + "section_number":"001", + "section_title":"Prog Lang & Tech I", + "start_date":"2022-08-30 00:00:00.0", + "syllabus_url":null, + "term":"202230", + "term_session":"14W", + "crosslistings":[ + + ], + "attributes":[ + { + "attribute_code":"ACGC", + "attribute_desc":"COGS M Computation Cogni" + }, + { + "attribute_code":"ACGL", + "attribute_desc":"COGS M Language Min" + }, + { + "attribute_code":"ACGN", + "attribute_desc":"COGS M Cog Neuroscienc" + }, + { + "attribute_code":"ALCN", + "attribute_desc":"LGIC N Minor Electiv" + }, + { + "attribute_code":"ALNR", + "attribute_desc":"ASLD Related Course" + }, + { + "attribute_code":"AU16", + "attribute_desc":"College 16 CU Requiremen" + }, + { + "attribute_code":"AUFR", + "attribute_desc":"COL-FND-Formal Reason&Analysis" + }, + { + "attribute_code":"EUMS", + "attribute_desc":"EAS-COURSE-Math/Science/Engrng" + }, + { + "attribute_code":"NURS", + "attribute_desc":"NUR-SECTOR-ReaSys&Relationship" + }, + { + "attribute_code":"QP", + "attribute_desc":"Grade Mode: Pass/Fail" + }, + { + "attribute_code":"QS", + "attribute_desc":"Grade Mode: Standard Lettr Grd" + }, + { + "attribute_code":"QU", + "attribute_desc":"Grade Mode: Stsfctry/Unstsftry" + } + ], + "instructors":[ + { + "primary_ind":"Y", + "penn_id":"10218938", + "last_name":"Zdancewic", + "first_name":"Stephan", + "middle_initial":"A" + }, + { + "primary_ind":null, + "penn_id":"69952305", + "last_name":"Sheth", + "first_name":"Swapneel", + "middle_initial":null + } + ], + "linked_courses":[ + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"218", + "section_id ":"CIS1200218", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"217", + "section_id ":"CIS1200217", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"216", + "section_id ":"CIS1200216", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + 
"section_number ":"215", + "section_id ":"CIS1200215", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"214", + "section_id ":"CIS1200214", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"213", + "section_id ":"CIS1200213", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"212", + "section_id ":"CIS1200212", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"211", + "section_id ":"CIS1200211", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"210", + "section_id ":"CIS1200210", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"209", + "section_id ":"CIS1200209", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"208", + "section_id ":"CIS1200208", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"207", + "section_id ":"CIS1200207", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"206", + "section_id ":"CIS1200206", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"205", + "section_id ":"CIS1200205", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"204", + "section_id ":"CIS1200204", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"203", + "section_id ":"CIS1200203", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"202", + "section_id ":"CIS1200202", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"201", + "section_id ":"CIS1200201", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"219", + "section_id ":"CIS1200219", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"220", + "section_id ":"CIS1200220", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"221", + "section_id ":"CIS1200221", + "schedule_code ":"REC", + "schedule_description":"Recitation" + }, + { + "subject_code ":"CIS", + "course_number ":"1200", + "section_number ":"222", + "section_id ":"CIS1200222", + "schedule_code ":"REC", + "schedule_description":"Recitation" + } + ], + "meetings":[ + { + "building_code":"STIT", + "building_desc":"Stiteler Hall", + "room_code":"B6", + "begin_time_24":"1015", + "end_time_24":"1114", 
+ "begin_time":"10:15 AM", + "end_time":"11:14 AM", + "sunday":null, + "monday":"M", + "tuesday":null, + "wednesday":"W", + "thursday":null, + "friday":"F", + "saturday":null, + "days":"MWF", + "start_date":"2022-08-30 00:00:00.0", + "end_date":"2022-12-12 00:00:00.0" + } + ], + "grade_modes":[ + { + "code":"QP", + "description":"Grade Mode: Pass/Fail" + }, + { + "code":"QS", + "description":"Grade Mode: Standard Lettr Grd" + }, + { + "code":"QU", + "description":"Grade Mode: Stsfctry/Unstsftry" + } + ], + "course_restrictions":[ + + ] + } + ], + "service_meta":{ + "number_of_pages":1, + "results_per_page":1000, + "current_page_number":1, + "next_page_number":null, + "previous_page_number":null, + "error_text":null, + "rest_code":null, + "error":false + } +} diff --git a/backend/tests/courses/test_opendata_import.py b/backend/tests/courses/test_opendata_import.py index 0235cb3c1..7f7e191a1 100644 --- a/backend/tests/courses/test_opendata_import.py +++ b/backend/tests/courses/test_opendata_import.py @@ -1,118 +1,118 @@ -import json -import os - -from django.db.models.signals import post_save -from django.test import TestCase -from options.models import Option - -from alert.models import AddDropPeriod -from courses.models import Attribute, Course, Instructor, Meeting, NGSSRestriction, Section -from courses.util import ( - add_attributes, - add_restrictions, - get_or_create_course_and_section, - invalidate_current_semester_cache, - upsert_course_from_opendata, -) - - -TEST_SEMESTER = "2019A" - - -def set_semester(): - post_save.disconnect( - receiver=invalidate_current_semester_cache, - sender=Option, - dispatch_uid="invalidate_current_semester_cache", - ) - Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() - AddDropPeriod(semester=TEST_SEMESTER).save() - - -class AddAttributesTestCase(TestCase): - def setUp(self): - self.ANTH_0020, self.ANTH_0020_001, _, _ = get_or_create_course_and_section( - "ANTH-0020-001", TEST_SEMESTER - ) - self.MUSC_0050, self.MUSC_0050_001, _, _ = get_or_create_course_and_section( - "MUSC-0050-001", TEST_SEMESTER - ) - self.AMTH = {"attribute_code": "AMTH", "attribute_desc": "MUSC M Tier Thre"} - self.NUFC = {"attribute_code": "NUFC", "attribute_desc": "NUR-ADMIN-FCH Department"} - - def test_add_attribute(self): - add_attributes(self.MUSC_0050, [self.AMTH]) - self.assertTrue(self.MUSC_0050.attributes.all().filter(code="AMTH").exists()) - AMTH_obj = Attribute.objects.get(code="AMTH") - self.assertEqual("SAS", AMTH_obj.school) - self.assertEqual(self.AMTH["attribute_desc"], AMTH_obj.description) - self.assertEqual("SAS", AMTH_obj.school) - - def test_add_multiple_attribute(self): - add_attributes(self.MUSC_0050, [self.AMTH, self.NUFC]) - AMTH_obj = Attribute.objects.get(code="AMTH") - NUFC_obj = Attribute.objects.get(code="NUFC") - self.assertEqual("NUR", NUFC_obj.school) - self.assertIn(AMTH_obj, self.MUSC_0050.attributes.all()) - self.assertIn(NUFC_obj, self.MUSC_0050.attributes.all()) - - def test_add_attribute_multiple_times(self): - add_attributes(self.MUSC_0050, [self.AMTH]) - add_attributes(self.ANTH_0020, [self.AMTH]) - AMTH_obj = Attribute.objects.get(code="AMTH") - self.assertIn(self.MUSC_0050, AMTH_obj.courses.all()) - self.assertIn(self.ANTH_0020, AMTH_obj.courses.all()) - - def test_add_attribute_with_no_school(self): - add_attributes( - self.MUSC_0050, [{"attribute_code": "ZPRS", "attribute_desc": "VIPER seminar"}] - ) - VPRS_obj = Attribute.objects.get(code="ZPRS") - self.assertIsNone(VPRS_obj.school) - self.assertIn(self.MUSC_0050, 
VPRS_obj.courses.all()) - - -class AddNGSSRestrictionTestCase(TestCase): - def setUp(self): - self.ANTH_0020, self.ANTH_0020_001, _, _ = get_or_create_course_and_section( - "ANTH-0020-001", TEST_SEMESTER - ) - self.MUSC_0050, self.MUSC_0050_001, _, _ = get_or_create_course_and_section( - "MUSC-0050-001", TEST_SEMESTER - ) - self.PHL = { - "restriction_code": "PHL", - "restriction_type": "Campus", - "restriction_desc": "Philly Campus", - } - self.DOLLAR = { - "restriction_code": "DOLLAR", - "restriction_type": "Cohort", - "restriction_desc": "Exclusive for cohort dollar", - } - - def test_add_multiple_restrictions_multiple_times(self): - add_restrictions(self.MUSC_0050, [self.PHL, self.DOLLAR]) - add_restrictions(self.ANTH_0020, [self.PHL]) - PHL_obj = NGSSRestriction.objects.get(code="PHL") - DOLLAR_obj = NGSSRestriction.objects.get(code="DOLLAR") - self.assertIn(self.MUSC_0050, PHL_obj.courses.all()) - self.assertIn(self.ANTH_0020, PHL_obj.courses.all()) - self.assertIn(self.MUSC_0050, DOLLAR_obj.courses.all()) - - -class ParseOpendataResponseTestCase(TestCase): - def test_parse_response(self): - BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - if not os.path.basename(BASE_DIR).startswith("backend"): - test_file_path = os.path.join(BASE_DIR, "backend/tests/courses/test-opendata.json") - else: - test_file_path = os.path.join(BASE_DIR, "tests/courses/test-opendata.json") - upsert_course_from_opendata( - json.load(open(test_file_path, "r"))["result_data"][0], - TEST_SEMESTER, - ) - self.assertEqual(1, Course.objects.count()) - self.assertEqual(23, Section.objects.count()) - self.assertEqual(3, Meeting.objects.count()) - self.assertEqual(2, Instructor.objects.count()) +import json +import os + +from django.db.models.signals import post_save +from django.test import TestCase +from options.models import Option + +from alert.models import AddDropPeriod +from courses.models import Attribute, Course, Instructor, Meeting, NGSSRestriction, Section +from courses.util import ( + add_attributes, + add_restrictions, + get_or_create_course_and_section, + invalidate_current_semester_cache, + upsert_course_from_opendata, +) + + +TEST_SEMESTER = "2019A" + + +def set_semester(): + post_save.disconnect( + receiver=invalidate_current_semester_cache, + sender=Option, + dispatch_uid="invalidate_current_semester_cache", + ) + Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() + AddDropPeriod(semester=TEST_SEMESTER).save() + + +class AddAttributesTestCase(TestCase): + def setUp(self): + self.ANTH_0020, self.ANTH_0020_001, _, _ = get_or_create_course_and_section( + "ANTH-0020-001", TEST_SEMESTER + ) + self.MUSC_0050, self.MUSC_0050_001, _, _ = get_or_create_course_and_section( + "MUSC-0050-001", TEST_SEMESTER + ) + self.AMTH = {"attribute_code": "AMTH", "attribute_desc": "MUSC M Tier Thre"} + self.NUFC = {"attribute_code": "NUFC", "attribute_desc": "NUR-ADMIN-FCH Department"} + + def test_add_attribute(self): + add_attributes(self.MUSC_0050, [self.AMTH]) + self.assertTrue(self.MUSC_0050.attributes.all().filter(code="AMTH").exists()) + AMTH_obj = Attribute.objects.get(code="AMTH") + self.assertEqual("SAS", AMTH_obj.school) + self.assertEqual(self.AMTH["attribute_desc"], AMTH_obj.description) + self.assertEqual("SAS", AMTH_obj.school) + + def test_add_multiple_attribute(self): + add_attributes(self.MUSC_0050, [self.AMTH, self.NUFC]) + AMTH_obj = Attribute.objects.get(code="AMTH") + NUFC_obj = Attribute.objects.get(code="NUFC") + self.assertEqual("NUR", 
NUFC_obj.school) + self.assertIn(AMTH_obj, self.MUSC_0050.attributes.all()) + self.assertIn(NUFC_obj, self.MUSC_0050.attributes.all()) + + def test_add_attribute_multiple_times(self): + add_attributes(self.MUSC_0050, [self.AMTH]) + add_attributes(self.ANTH_0020, [self.AMTH]) + AMTH_obj = Attribute.objects.get(code="AMTH") + self.assertIn(self.MUSC_0050, AMTH_obj.courses.all()) + self.assertIn(self.ANTH_0020, AMTH_obj.courses.all()) + + def test_add_attribute_with_no_school(self): + add_attributes( + self.MUSC_0050, [{"attribute_code": "ZPRS", "attribute_desc": "VIPER seminar"}] + ) + VPRS_obj = Attribute.objects.get(code="ZPRS") + self.assertIsNone(VPRS_obj.school) + self.assertIn(self.MUSC_0050, VPRS_obj.courses.all()) + + +class AddNGSSRestrictionTestCase(TestCase): + def setUp(self): + self.ANTH_0020, self.ANTH_0020_001, _, _ = get_or_create_course_and_section( + "ANTH-0020-001", TEST_SEMESTER + ) + self.MUSC_0050, self.MUSC_0050_001, _, _ = get_or_create_course_and_section( + "MUSC-0050-001", TEST_SEMESTER + ) + self.PHL = { + "restriction_code": "PHL", + "restriction_type": "Campus", + "restriction_desc": "Philly Campus", + } + self.DOLLAR = { + "restriction_code": "DOLLAR", + "restriction_type": "Cohort", + "restriction_desc": "Exclusive for cohort dollar", + } + + def test_add_multiple_restrictions_multiple_times(self): + add_restrictions(self.MUSC_0050, [self.PHL, self.DOLLAR]) + add_restrictions(self.ANTH_0020, [self.PHL]) + PHL_obj = NGSSRestriction.objects.get(code="PHL") + DOLLAR_obj = NGSSRestriction.objects.get(code="DOLLAR") + self.assertIn(self.MUSC_0050, PHL_obj.courses.all()) + self.assertIn(self.ANTH_0020, PHL_obj.courses.all()) + self.assertIn(self.MUSC_0050, DOLLAR_obj.courses.all()) + + +class ParseOpendataResponseTestCase(TestCase): + def test_parse_response(self): + BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + if not os.path.basename(BASE_DIR).startswith("backend"): + test_file_path = os.path.join(BASE_DIR, "backend/tests/courses/test-opendata.json") + else: + test_file_path = os.path.join(BASE_DIR, "tests/courses/test-opendata.json") + upsert_course_from_opendata( + json.load(open(test_file_path, "r"))["result_data"][0], + TEST_SEMESTER, + ) + self.assertEqual(1, Course.objects.count()) + self.assertEqual(23, Section.objects.count()) + self.assertEqual(3, Meeting.objects.count()) + self.assertEqual(2, Instructor.objects.count()) diff --git a/backend/tests/courses/test_recompute_stats.py b/backend/tests/courses/test_recompute_stats.py index 1d1b97a25..a51d5f994 100644 --- a/backend/tests/courses/test_recompute_stats.py +++ b/backend/tests/courses/test_recompute_stats.py @@ -1,149 +1,149 @@ -from django.db.models.signals import post_save -from django.test.testcases import TestCase -from options.models import Option - -from alert.management.commands.recomputestats import ( - deduplicate_status_updates, - recompute_precomputed_fields, -) -from alert.models import AddDropPeriod -from courses.models import Building, Course, Meeting, Room, Section, StatusUpdate -from courses.util import get_or_create_course_and_section, invalidate_current_semester_cache -from tests.courses.util import create_mock_data - - -TEST_SEMESTER = "2019A" - - -def set_semester(): - post_save.disconnect( - receiver=invalidate_current_semester_cache, - sender=Option, - dispatch_uid="invalidate_current_semester_cache", - ) - Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() - AddDropPeriod(semester=TEST_SEMESTER).save() - - -class 
DeduplicateStatusUpdatesTestCase(TestCase): - def setUp(self): - set_semester() - self.sections = [] - self.sections.append(get_or_create_course_and_section("CIS-160-001", TEST_SEMESTER)[1]) - self.sections.append(get_or_create_course_and_section("CIS-160-002", TEST_SEMESTER)[1]) - self.sections.append(get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER)[1]) - self.old_section = get_or_create_course_and_section("CIS-120-001", "2017C")[1] - - def test_no_duplicates(self): - StatusUpdate( - section=self.sections[0], old_status="", new_status="O", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[0], old_status="O", new_status="C", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[0], old_status="C", new_status="O", alert_sent=False - ).save() - - StatusUpdate( - section=self.sections[1], old_status="X", new_status="O", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[1], old_status="O", new_status="C", alert_sent=False - ).save() - - StatusUpdate( - section=self.old_section, old_status="C", new_status="O", alert_sent=False - ).save() - StatusUpdate( - section=self.old_section, old_status="O", new_status="C", alert_sent=False - ).save() - - self.assertEqual( - 5, StatusUpdate.objects.filter(section__course__semester=TEST_SEMESTER).count() - ) - self.assertEqual(2, StatusUpdate.objects.filter(section__course__semester="2017C").count()) - deduplicate_status_updates(semesters="all") - self.assertEqual( - 5, StatusUpdate.objects.filter(section__course__semester=TEST_SEMESTER).count() - ) - self.assertEqual(2, StatusUpdate.objects.filter(section__course__semester="2017C").count()) - - def test_some_duplicates(self): - StatusUpdate( - section=self.sections[0], old_status="", new_status="O", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[0], old_status="", new_status="O", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[0], old_status="", new_status="O", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[0], old_status="O", new_status="C", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[0], old_status="O", new_status="C", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[0], old_status="C", new_status="O", alert_sent=False - ).save() - - StatusUpdate( - section=self.sections[1], old_status="X", new_status="O", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[1], old_status="X", new_status="O", alert_sent=False - ).save() - StatusUpdate( - section=self.sections[1], old_status="O", new_status="C", alert_sent=False - ).save() - - StatusUpdate( - section=self.old_section, old_status="C", new_status="O", alert_sent=False - ).save() - StatusUpdate( - section=self.old_section, old_status="O", new_status="C", alert_sent=False - ).save() - StatusUpdate( - section=self.old_section, old_status="O", new_status="C", alert_sent=False - ).save() - - self.assertEqual( - 9, StatusUpdate.objects.filter(section__course__semester=TEST_SEMESTER).count() - ) - self.assertEqual(3, StatusUpdate.objects.filter(section__course__semester="2017C").count()) - deduplicate_status_updates(semesters="all") - self.assertEqual( - 5, StatusUpdate.objects.filter(section__course__semester=TEST_SEMESTER).count() - ) - self.assertEqual(2, StatusUpdate.objects.filter(section__course__semester="2017C").count()) - - -class RecomputePrecomputedFieldsTestCase(TestCase): - def setUp(self): - set_semester() - self.cis_160, self.cis_160_001 = 
create_mock_data("CIS-160-001", TEST_SEMESTER) - building, _ = Building.objects.get_or_create(code=1) - room, _ = Room.objects.get_or_create(building=building, number=1) - new_meeting = Meeting(section=self.cis_160_001, day="R", start=11, end=12, room=room) - new_meeting.save() - self.cis_160_201 = create_mock_data("CIS-160-201", TEST_SEMESTER)[1] - self.cis_160_201.activity = "REC" - self.cis_160_201.save() - self.cis_160_002 = create_mock_data("CIS-160-002", TEST_SEMESTER)[1] - self.cis_120, self.cis_120_001 = create_mock_data("CIS-120-001", TEST_SEMESTER) - self.cis_120_old, self.cis_120_001_old = create_mock_data("CIS-120-001", "2017C") - - def test_all_semesters(self): - recompute_precomputed_fields() - self.assertEquals(Course.objects.get(id=self.cis_160.id).num_activities, 2) - self.assertEquals(Section.objects.get(id=self.cis_160_001.id).num_meetings, 4) - self.assertEquals(Section.objects.get(id=self.cis_160_201.id).num_meetings, 3) - self.assertEquals(Section.objects.get(id=self.cis_160_002.id).num_meetings, 3) - - self.assertEquals(Course.objects.get(id=self.cis_120.id).num_activities, 1) - self.assertEquals(Section.objects.get(id=self.cis_120_001.id).num_meetings, 3) - - self.assertEquals(Course.objects.get(id=self.cis_120_old.id).num_activities, 1) - self.assertEquals(Section.objects.get(id=self.cis_120_001_old.id).num_meetings, 3) +from django.db.models.signals import post_save +from django.test.testcases import TestCase +from options.models import Option + +from alert.management.commands.recomputestats import ( + deduplicate_status_updates, + recompute_precomputed_fields, +) +from alert.models import AddDropPeriod +from courses.models import Building, Course, Meeting, Room, Section, StatusUpdate +from courses.util import get_or_create_course_and_section, invalidate_current_semester_cache +from tests.courses.util import create_mock_data + + +TEST_SEMESTER = "2019A" + + +def set_semester(): + post_save.disconnect( + receiver=invalidate_current_semester_cache, + sender=Option, + dispatch_uid="invalidate_current_semester_cache", + ) + Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() + AddDropPeriod(semester=TEST_SEMESTER).save() + + +class DeduplicateStatusUpdatesTestCase(TestCase): + def setUp(self): + set_semester() + self.sections = [] + self.sections.append(get_or_create_course_and_section("CIS-160-001", TEST_SEMESTER)[1]) + self.sections.append(get_or_create_course_and_section("CIS-160-002", TEST_SEMESTER)[1]) + self.sections.append(get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER)[1]) + self.old_section = get_or_create_course_and_section("CIS-120-001", "2017C")[1] + + def test_no_duplicates(self): + StatusUpdate( + section=self.sections[0], old_status="", new_status="O", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[0], old_status="O", new_status="C", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[0], old_status="C", new_status="O", alert_sent=False + ).save() + + StatusUpdate( + section=self.sections[1], old_status="X", new_status="O", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[1], old_status="O", new_status="C", alert_sent=False + ).save() + + StatusUpdate( + section=self.old_section, old_status="C", new_status="O", alert_sent=False + ).save() + StatusUpdate( + section=self.old_section, old_status="O", new_status="C", alert_sent=False + ).save() + + self.assertEqual( + 5, StatusUpdate.objects.filter(section__course__semester=TEST_SEMESTER).count() + ) + 
self.assertEqual(2, StatusUpdate.objects.filter(section__course__semester="2017C").count()) + deduplicate_status_updates(semesters="all") + self.assertEqual( + 5, StatusUpdate.objects.filter(section__course__semester=TEST_SEMESTER).count() + ) + self.assertEqual(2, StatusUpdate.objects.filter(section__course__semester="2017C").count()) + + def test_some_duplicates(self): + StatusUpdate( + section=self.sections[0], old_status="", new_status="O", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[0], old_status="", new_status="O", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[0], old_status="", new_status="O", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[0], old_status="O", new_status="C", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[0], old_status="O", new_status="C", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[0], old_status="C", new_status="O", alert_sent=False + ).save() + + StatusUpdate( + section=self.sections[1], old_status="X", new_status="O", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[1], old_status="X", new_status="O", alert_sent=False + ).save() + StatusUpdate( + section=self.sections[1], old_status="O", new_status="C", alert_sent=False + ).save() + + StatusUpdate( + section=self.old_section, old_status="C", new_status="O", alert_sent=False + ).save() + StatusUpdate( + section=self.old_section, old_status="O", new_status="C", alert_sent=False + ).save() + StatusUpdate( + section=self.old_section, old_status="O", new_status="C", alert_sent=False + ).save() + + self.assertEqual( + 9, StatusUpdate.objects.filter(section__course__semester=TEST_SEMESTER).count() + ) + self.assertEqual(3, StatusUpdate.objects.filter(section__course__semester="2017C").count()) + deduplicate_status_updates(semesters="all") + self.assertEqual( + 5, StatusUpdate.objects.filter(section__course__semester=TEST_SEMESTER).count() + ) + self.assertEqual(2, StatusUpdate.objects.filter(section__course__semester="2017C").count()) + + +class RecomputePrecomputedFieldsTestCase(TestCase): + def setUp(self): + set_semester() + self.cis_160, self.cis_160_001 = create_mock_data("CIS-160-001", TEST_SEMESTER) + building, _ = Building.objects.get_or_create(code=1) + room, _ = Room.objects.get_or_create(building=building, number=1) + new_meeting = Meeting(section=self.cis_160_001, day="R", start=11, end=12, room=room) + new_meeting.save() + self.cis_160_201 = create_mock_data("CIS-160-201", TEST_SEMESTER)[1] + self.cis_160_201.activity = "REC" + self.cis_160_201.save() + self.cis_160_002 = create_mock_data("CIS-160-002", TEST_SEMESTER)[1] + self.cis_120, self.cis_120_001 = create_mock_data("CIS-120-001", TEST_SEMESTER) + self.cis_120_old, self.cis_120_001_old = create_mock_data("CIS-120-001", "2017C") + + def test_all_semesters(self): + recompute_precomputed_fields() + self.assertEquals(Course.objects.get(id=self.cis_160.id).num_activities, 2) + self.assertEquals(Section.objects.get(id=self.cis_160_001.id).num_meetings, 4) + self.assertEquals(Section.objects.get(id=self.cis_160_201.id).num_meetings, 3) + self.assertEquals(Section.objects.get(id=self.cis_160_002.id).num_meetings, 3) + + self.assertEquals(Course.objects.get(id=self.cis_120.id).num_activities, 1) + self.assertEquals(Section.objects.get(id=self.cis_120_001.id).num_meetings, 3) + + self.assertEquals(Course.objects.get(id=self.cis_120_old.id).num_activities, 1) + 
self.assertEquals(Section.objects.get(id=self.cis_120_001_old.id).num_meetings, 3) diff --git a/backend/tests/courses/util.py b/backend/tests/courses/util.py index 632a32783..541a41073 100644 --- a/backend/tests/courses/util.py +++ b/backend/tests/courses/util.py @@ -1,56 +1,56 @@ -from courses.models import Instructor -from courses.util import get_or_create_course_and_section, set_meetings -from review.models import Review - - -def time_str(time): - return f"{time // 100:2d}:{int(time % 100):02d} {'AM' if time < 1200 else 'PM'}" - - -def create_mock_data(code, semester, meeting_days="MWF", start=1100, end=1200): - course, section, _, _ = get_or_create_course_and_section(code, semester) - course.description = "This is a fake class." - course.save() - section.credits = 1 - section.status = "O" - section.activity = "LEC" - section.save() - m = [ - { - "building_code": "LLAB", - "room_code": "10", - "days": meeting_days, - "begin_time_24": start, - "begin_time": time_str(start), - "end_time_24": end, - "end_time": time_str(end), - } - ] - set_meetings(section, m) - return course, section - - -def create_mock_data_with_reviews(code, semester, number_of_instructors): - course, section = create_mock_data(code, semester) - reviews = [] - for i in range(1, number_of_instructors + 1): - instr, _ = Instructor.objects.get_or_create(name="Instructor" + str(i)) - section.instructors.add(instr) - review = Review(section=section, instructor=instr, responses=100) - review.save() - review.set_averages( - { - "course_quality": 4 / i, - "instructor_quality": 4 / (i + 1), - "difficulty": 4 / (i + 2), - "work_required": 4 / (i + 3), - } - ) - reviews.append(review) - return course, section, reviews - - -def create_mock_async_class(code, semester): - course, section = create_mock_data(code, semester) - set_meetings(section, []) - return course, section +from courses.models import Instructor +from courses.util import get_or_create_course_and_section, set_meetings +from review.models import Review + + +def time_str(time): + return f"{time // 100:2d}:{int(time % 100):02d} {'AM' if time < 1200 else 'PM'}" + + +def create_mock_data(code, semester, meeting_days="MWF", start=1100, end=1200): + course, section, _, _ = get_or_create_course_and_section(code, semester) + course.description = "This is a fake class." 
+ course.save() + section.credits = 1 + section.status = "O" + section.activity = "LEC" + section.save() + m = [ + { + "building_code": "LLAB", + "room_code": "10", + "days": meeting_days, + "begin_time_24": start, + "begin_time": time_str(start), + "end_time_24": end, + "end_time": time_str(end), + } + ] + set_meetings(section, m) + return course, section + + +def create_mock_data_with_reviews(code, semester, number_of_instructors): + course, section = create_mock_data(code, semester) + reviews = [] + for i in range(1, number_of_instructors + 1): + instr, _ = Instructor.objects.get_or_create(name="Instructor" + str(i)) + section.instructors.add(instr) + review = Review(section=section, instructor=instr, responses=100) + review.save() + review.set_averages( + { + "course_quality": 4 / i, + "instructor_quality": 4 / (i + 1), + "difficulty": 4 / (i + 2), + "work_required": 4 / (i + 3), + } + ) + reviews.append(review) + return course, section, reviews + + +def create_mock_async_class(code, semester): + course, section = create_mock_data(code, semester) + set_meetings(section, []) + return course, section diff --git a/backend/tests/plan/course_recs_test_data/course_data_test.csv b/backend/tests/plan/course_recs_test_data/course_data_test.csv index 718da538f..dd4d9eea5 100644 --- a/backend/tests/plan/course_recs_test_data/course_data_test.csv +++ b/backend/tests/plan/course_recs_test_data/course_data_test.csv @@ -1,362 +1,362 @@ -hash1,AFRC-437,2016C -hash1,ARTH-775,2016C -hash1,GRMN-180,2016C -hash1,ARCH-532,2016C -hash1,BEPP-263,2017A -hash1,NELC-337,2017A -hash1,ANTH-395,2017A -hash1,LGIC-320,2017A -hash1,AFRC-209,2017C -hash1,ANTH-329,2017C -hash1,NELC-346,2017C -hash1,NELC-685,2017C -hash1,LAW-930,2017C -hash1,GPRD-936,2018A -hash1,GEOL-545,2018A -hash1,HCMG-852,2018A -hash1,RELS-129,2018A -hash1,COMM-313,2018C -hash1,EDUC-639,2018C -hash1,DADE-991,2018C -hash1,ENVS-606,2018C -hash1,CHIN-515,2019A -hash1,PHIL-578,2019A -hash1,JPAN-012,2019A -hash1,ASAM-110,2019A -hash2,LAW-695,2018C -hash2,EALC-242,2018C -hash2,AFRC-602,2018C -hash2,PHIL-002,2018C -hash2,REAL-236,2018C -hash2,PPE-402,2019A -hash2,STAT-431,2019A -hash2,ESE-621,2019A -hash2,BEPP-322,2019A -hash2,URBS-277,2019A -hash3,LAW-501,2016C -hash3,HIST-412,2016C -hash3,ENGL-169,2016C -hash3,LALS-397,2016C -hash3,PSYC-439,2016C -hash3,NELC-552,2016C -hash3,SKRT-480,2017A -hash3,LGIC-010,2017A -hash3,JWST-034,2017A -hash3,GSWS-118,2017A -hash3,EDUC-683,2017A -hash3,HCMG-866,2017C -hash3,NELC-332,2017C -hash3,EDUC-598,2017C -hash3,ACCT-613,2017C -hash3,ENVS-699,2017C -hash3,GRMN-101,2018A -hash3,WH-301,2018A -hash3,ASAM-110,2018A -hash3,RELS-129,2018A -hash3,SAST-217,2018C -hash3,WRIT-038,2018C -hash3,LGST-206,2018C -hash3,VBMS-604,2018C -hash3,EDUC-646,2019A -hash3,BMB-567,2019A -hash3,ENGL-156,2019A -hash3,GAFL-571,2019A -hash4,BEPP-452,2017C -hash4,EDUC-639,2017C -hash4,LAW-734,2017C -hash4,GPRD-957,2017C -hash4,ANTH-617,2018A -hash4,WRIT-073,2018A -hash4,HCMG-352,2018A -hash4,MATH-260,2018A -hash4,GRMN-502,2018C -hash4,PSYC-253,2018C -hash4,ANEL-546,2018C -hash4,NURS-609,2018C -hash4,WRIT-030,2018C -hash4,BIBB-240,2019A -hash4,ESE-224,2019A -hash4,PHIL-372,2019A -hash4,NELC-587,2019A -hash4,REES-010,2019C -hash4,NURS-628,2019C -hash4,PHIL-077,2019C -hash4,BEPP-931,2019C -hash4,PPE-402,2020A -hash4,LGST-101,2020A -hash4,EDUC-715,2020A -hash4,GPRD-929,2020A -hash5,CIT-596,2017C -hash5,COML-256,2017C -hash5,MKTG-955,2017C -hash5,BIOL-375,2017C -hash5,OIDD-697,2018A -hash5,COML-094,2018A -hash5,SWRK-614,2018A -hash5,NURS-614,2018A 
-hash5,NURS-749,2018A -hash5,EPID-664,2018A -hash6,PERS-616,2016C -hash6,EAS-512,2016C -hash6,NURS-361,2016C -hash6,EDUC-669,2016C -hash6,EDUC-586,2016C -hash6,BIBB-240,2016C -hash6,SPAN-180,2017A -hash6,NURS-215,2017A -hash6,MKTG-955,2017A -hash6,ANTH-415,2017A -hash6,BIOL-438,2017A -hash6,FNCE-785,2017A -hash6,AFST-251,2017C -hash6,URDU-402,2017C -hash6,CLST-402,2017C -hash6,NELC-137,2017C -hash6,FNAR-222,2017C -hash6,MATH-571,2017C -hash6,ARCH-768,2018A -hash6,BE-553,2018A -hash6,PHIL-002,2018A -hash6,NELC-346,2018A -hash6,EDUC-566,2018A -hash6,KORN-012,2018A -hash6,CBE-480,2018C -hash6,CBE-371,2018C -hash6,BIOL-221,2018C -hash6,SPAN-388,2018C -hash6,CLST-303,2019A -hash6,BEPP-789,2019A -hash6,DYNM-615,2019A -hash6,EALC-633,2019A -hash6,PHIL-002,2019C -hash6,MATH-314,2019C -hash6,MGMT-264,2019C -hash6,PSCI-697,2019C -hash6,FNCE-812,2019C -hash6,OIDD-291,2019C -hash6,NGG-521,2020A -hash6,FNCE-256,2020A -hash6,BE-608,2020A -hash6,PHYS-314,2020A -hash6,ENGL-092,2020A -hash6,PSCI-010,2020A -hash7,DADE-921,2016C -hash7,NURS-652,2016C -hash7,NELC-346,2016C -hash7,CRIM-300,2016C -hash7,NURS-357,2016C -hash7,CAMB-695,2017A -hash7,ACCT-921,2017A -hash7,NURS-587,2017A -hash7,NURS-731,2017A -hash7,DYNM-723,2017A -hash7,PHIL-525,2017C -hash7,LGST-206,2017C -hash7,EDUC-683,2017C -hash7,FNCE-725,2017C -hash7,LAW-967,2018A -hash7,BEPP-789,2018A -hash7,MKTG-211,2018A -hash7,MEAM-529,2018A -hash7,PSYC-435,2018C -hash7,MGMT-729,2018C -hash7,AFST-285,2018C -hash7,SOCI-159,2018C -hash7,FREN-229,2019A -hash7,COML-247,2019A -hash7,JPAN-012,2019A -hash7,TURK-122,2019A -hash7,BEPP-620,2019A -hash7,NURS-648,2019C -hash7,COML-570,2019C -hash7,NURS-357,2019C -hash7,LAW-597,2019C -hash7,GRMN-504,2019C -hash7,HSOC-420,2020A -hash7,LING-610,2020A -hash7,SWRK-760,2020A -hash7,CBE-535,2020A -hash8,EAS-502,2017C -hash8,OIDD-245,2017C -hash8,EDUC-715,2017C -hash8,LAW-974,2017C -hash8,MUSC-135,2017C -hash8,EALC-633,2018A -hash8,MKTG-239,2018A -hash8,CIS-195,2018A -hash8,LING-230,2018A -hash8,EDUC-668,2018A -hash8,LGST-101,2018A -hash8,EAS-897,2018C -hash8,MKTG-806,2018C -hash8,ACCT-613,2018C -hash8,OIDD-934,2018C -hash8,EDUC-663,2018C -hash8,AFRC-491,2018C -hash8,GSWS-344,2019A -hash8,VLST-233,2019A -hash8,JPAN-022,2019A -hash8,HIST-650,2019A -hash8,CPLN-643,2019A -hash8,ACCT-706,2019C -hash8,GRMN-514,2019C -hash8,INTL-BSL,2019C -hash8,LAW-920,2019C -hash8,CPLN-632,2020A -hash8,CIT-594,2020A -hash8,COML-101,2020A -hash8,BE-498,2020A -hash8,WRIT-030,2020A -hash8,ARCH-712,2020A -hash9,PSCI-258,2017C -hash9,BMIN-520,2017C -hash9,GEOL-643,2017C -hash9,BEPP-263,2017C -hash9,DADE-924,2017C -hash9,LING-151,2018A -hash9,KORN-132,2018A -hash9,ANTH-307,2018A -hash9,CIS-399,2018A -hash9,RELS-144,2018A -hash9,MUSC-236,2018C -hash9,HSOC-251,2018C -hash9,EDUC-545,2018C -hash9,CAMB-534,2018C -hash9,MATH-730,2018C -hash9,CAMB-706,2019A -hash9,NURS-648,2019A -hash9,NURS-513,2019A -hash9,LALS-158,2019A -hash9,WRIT-030,2019A -hash9,SWRK-714,2019A -hash9,CIS-197,2019C -hash9,EALC-622,2019C -hash9,VISR-699,2019C -hash9,BE-101,2019C -hash9,LGST-611,2019C -hash9,CLST-223,2019C -hash9,NPLD-750,2020A -hash9,REAL-240,2020A -hash9,SWRK-713,2020A -hash9,PSCI-333,2020A -hash9,GRMN-101,2020A -hash10,PSYC-449,2016C -hash10,BMB-650,2016C -hash10,AFRC-581,2016C -hash10,HCMG-863,2016C -hash10,NURS-757,2017A -hash10,EPID-625,2017A -hash10,NPLD-782,2017A -hash10,PHIL-205,2017A -hash10,MGMT-692,2017A -hash10,DYNM-630,2017A -hash10,PERS-612,2017C -hash10,EDCE-382,2017C -hash10,NURS-708,2017C -hash10,IMPA-606,2017C -hash10,MKTG-352,2017C 
-hash10,MEAM-891,2018A -hash10,COML-096,2018A -hash10,PHYS-016,2018A -hash10,NELC-102,2018A -hash10,SKRT-480,2018A -hash10,LAW-631,2018A -hash10,JWST-053,2018C -hash10,VLST-261,2018C -hash10,MUSC-171,2018C -hash10,COML-006,2018C -hash10,LAW-974,2019A -hash10,PSCI-181,2019A -hash10,ENGL-282,2019A -hash10,PSYC-449,2019A -hash10,BSTA-670,2019A -hash11,LAW-795,2018C -hash11,GSWS-118,2018C -hash11,GSWS-165,2018C -hash11,EDUC-552,2018C -hash11,SPAN-388,2018C -hash11,FNCE-725,2019A -hash11,PSCI-498,2019A -hash11,BE-101,2019A -hash11,COML-555,2019A -hash11,SAST-799,2019A -hash12,FNCE-751,2016C -hash12,BEPP-452,2016C -hash12,MUSC-275,2016C -hash12,GOMD-978,2016C -hash12,RELS-257,2016C -hash12,SWRK-768,2016C -hash12,EDUC-668,2017A -hash12,CIS-195,2017A -hash12,PHIL-372,2017A -hash12,AFST-285,2017A -hash12,INTL-BTM,2017C -hash12,FNCE-254,2017C -hash12,ARCH-728,2017C -hash12,MKTG-350,2017C -hash12,LGIC-320,2017C -hash12,LAW-966,2018A -hash12,PSCI-217,2018A -hash12,GCB-577,2018A -hash12,PSYC-612,2018A -hash12,LAW-987,2018A -hash12,HSOC-411,2018C -hash12,LARP-734,2018C -hash12,EDUC-360,2018C -hash12,DENT-634,2018C -hash12,VLST-261,2019A -hash12,ANTH-595,2019A -hash12,ENVS-699,2019A -hash12,EPID-664,2019A -hash12,LALS-398,2019A -hash12,HCMG-868,2019A -hash13,LAW-631,2016C -hash13,LGIC-320,2016C -hash13,BE-608,2016C -hash13,ENVS-616,2016C -hash13,NSCI-402,2017A -hash13,OIDD-763,2017A -hash13,MKTG-955,2017A -hash13,HEBR-053,2017A -hash13,FNAR-489,2017C -hash13,CHIN-722,2017C -hash13,AFRC-271,2017C -hash13,AFST-285,2017C -hash13,COML-592,2017C -hash13,NURS-357,2017C -hash13,PSYC-170,2018A -hash13,MKTG-350,2018A -hash13,LALS-273,2018A -hash13,ACCT-706,2018A -hash13,CLST-211,2018A -hash13,GPED-915,2018A -hash14,HSPV-747,2017C -hash14,REES-010,2017C -hash14,GRMN-203,2017C -hash14,BIOE-701,2017C -hash14,ENVS-616,2017C -hash14,LAW-734,2018A -hash14,MKTG-352,2018A -hash14,TELU-430,2018A -hash14,MATH-621,2018A -hash14,CBE-460,2018A -hash14,ANEL-546,2018A -hash14,HIST-139,2018C -hash14,BEPP-261,2018C -hash14,HCMG-868,2018C -hash14,ASAM-006,2018C -hash14,MGMT-231,2018C -hash14,ECON-242,2018C -hash14,LAW-968,2019A -hash14,HCMG-215,2019A -hash14,PSYC-435,2019A -hash14,QUEC-120,2019A -hash14,PHIL-029,2019A -hash14,PHYS-140,2019C -hash14,GPRD-959,2019C -hash14,LAW-946,2019C -hash14,LAW-904,2019C -hash14,OIDD-397,2020A -hash14,ARTH-235,2020A -hash14,OIDD-291,2020A -hash14,AFRC-602,2020A +hash1,AFRC-437,2016C +hash1,ARTH-775,2016C +hash1,GRMN-180,2016C +hash1,ARCH-532,2016C +hash1,BEPP-263,2017A +hash1,NELC-337,2017A +hash1,ANTH-395,2017A +hash1,LGIC-320,2017A +hash1,AFRC-209,2017C +hash1,ANTH-329,2017C +hash1,NELC-346,2017C +hash1,NELC-685,2017C +hash1,LAW-930,2017C +hash1,GPRD-936,2018A +hash1,GEOL-545,2018A +hash1,HCMG-852,2018A +hash1,RELS-129,2018A +hash1,COMM-313,2018C +hash1,EDUC-639,2018C +hash1,DADE-991,2018C +hash1,ENVS-606,2018C +hash1,CHIN-515,2019A +hash1,PHIL-578,2019A +hash1,JPAN-012,2019A +hash1,ASAM-110,2019A +hash2,LAW-695,2018C +hash2,EALC-242,2018C +hash2,AFRC-602,2018C +hash2,PHIL-002,2018C +hash2,REAL-236,2018C +hash2,PPE-402,2019A +hash2,STAT-431,2019A +hash2,ESE-621,2019A +hash2,BEPP-322,2019A +hash2,URBS-277,2019A +hash3,LAW-501,2016C +hash3,HIST-412,2016C +hash3,ENGL-169,2016C +hash3,LALS-397,2016C +hash3,PSYC-439,2016C +hash3,NELC-552,2016C +hash3,SKRT-480,2017A +hash3,LGIC-010,2017A +hash3,JWST-034,2017A +hash3,GSWS-118,2017A +hash3,EDUC-683,2017A +hash3,HCMG-866,2017C +hash3,NELC-332,2017C +hash3,EDUC-598,2017C +hash3,ACCT-613,2017C +hash3,ENVS-699,2017C +hash3,GRMN-101,2018A 
+hash3,WH-301,2018A +hash3,ASAM-110,2018A +hash3,RELS-129,2018A +hash3,SAST-217,2018C +hash3,WRIT-038,2018C +hash3,LGST-206,2018C +hash3,VBMS-604,2018C +hash3,EDUC-646,2019A +hash3,BMB-567,2019A +hash3,ENGL-156,2019A +hash3,GAFL-571,2019A +hash4,BEPP-452,2017C +hash4,EDUC-639,2017C +hash4,LAW-734,2017C +hash4,GPRD-957,2017C +hash4,ANTH-617,2018A +hash4,WRIT-073,2018A +hash4,HCMG-352,2018A +hash4,MATH-260,2018A +hash4,GRMN-502,2018C +hash4,PSYC-253,2018C +hash4,ANEL-546,2018C +hash4,NURS-609,2018C +hash4,WRIT-030,2018C +hash4,BIBB-240,2019A +hash4,ESE-224,2019A +hash4,PHIL-372,2019A +hash4,NELC-587,2019A +hash4,REES-010,2019C +hash4,NURS-628,2019C +hash4,PHIL-077,2019C +hash4,BEPP-931,2019C +hash4,PPE-402,2020A +hash4,LGST-101,2020A +hash4,EDUC-715,2020A +hash4,GPRD-929,2020A +hash5,CIT-596,2017C +hash5,COML-256,2017C +hash5,MKTG-955,2017C +hash5,BIOL-375,2017C +hash5,OIDD-697,2018A +hash5,COML-094,2018A +hash5,SWRK-614,2018A +hash5,NURS-614,2018A +hash5,NURS-749,2018A +hash5,EPID-664,2018A +hash6,PERS-616,2016C +hash6,EAS-512,2016C +hash6,NURS-361,2016C +hash6,EDUC-669,2016C +hash6,EDUC-586,2016C +hash6,BIBB-240,2016C +hash6,SPAN-180,2017A +hash6,NURS-215,2017A +hash6,MKTG-955,2017A +hash6,ANTH-415,2017A +hash6,BIOL-438,2017A +hash6,FNCE-785,2017A +hash6,AFST-251,2017C +hash6,URDU-402,2017C +hash6,CLST-402,2017C +hash6,NELC-137,2017C +hash6,FNAR-222,2017C +hash6,MATH-571,2017C +hash6,ARCH-768,2018A +hash6,BE-553,2018A +hash6,PHIL-002,2018A +hash6,NELC-346,2018A +hash6,EDUC-566,2018A +hash6,KORN-012,2018A +hash6,CBE-480,2018C +hash6,CBE-371,2018C +hash6,BIOL-221,2018C +hash6,SPAN-388,2018C +hash6,CLST-303,2019A +hash6,BEPP-789,2019A +hash6,DYNM-615,2019A +hash6,EALC-633,2019A +hash6,PHIL-002,2019C +hash6,MATH-314,2019C +hash6,MGMT-264,2019C +hash6,PSCI-697,2019C +hash6,FNCE-812,2019C +hash6,OIDD-291,2019C +hash6,NGG-521,2020A +hash6,FNCE-256,2020A +hash6,BE-608,2020A +hash6,PHYS-314,2020A +hash6,ENGL-092,2020A +hash6,PSCI-010,2020A +hash7,DADE-921,2016C +hash7,NURS-652,2016C +hash7,NELC-346,2016C +hash7,CRIM-300,2016C +hash7,NURS-357,2016C +hash7,CAMB-695,2017A +hash7,ACCT-921,2017A +hash7,NURS-587,2017A +hash7,NURS-731,2017A +hash7,DYNM-723,2017A +hash7,PHIL-525,2017C +hash7,LGST-206,2017C +hash7,EDUC-683,2017C +hash7,FNCE-725,2017C +hash7,LAW-967,2018A +hash7,BEPP-789,2018A +hash7,MKTG-211,2018A +hash7,MEAM-529,2018A +hash7,PSYC-435,2018C +hash7,MGMT-729,2018C +hash7,AFST-285,2018C +hash7,SOCI-159,2018C +hash7,FREN-229,2019A +hash7,COML-247,2019A +hash7,JPAN-012,2019A +hash7,TURK-122,2019A +hash7,BEPP-620,2019A +hash7,NURS-648,2019C +hash7,COML-570,2019C +hash7,NURS-357,2019C +hash7,LAW-597,2019C +hash7,GRMN-504,2019C +hash7,HSOC-420,2020A +hash7,LING-610,2020A +hash7,SWRK-760,2020A +hash7,CBE-535,2020A +hash8,EAS-502,2017C +hash8,OIDD-245,2017C +hash8,EDUC-715,2017C +hash8,LAW-974,2017C +hash8,MUSC-135,2017C +hash8,EALC-633,2018A +hash8,MKTG-239,2018A +hash8,CIS-195,2018A +hash8,LING-230,2018A +hash8,EDUC-668,2018A +hash8,LGST-101,2018A +hash8,EAS-897,2018C +hash8,MKTG-806,2018C +hash8,ACCT-613,2018C +hash8,OIDD-934,2018C +hash8,EDUC-663,2018C +hash8,AFRC-491,2018C +hash8,GSWS-344,2019A +hash8,VLST-233,2019A +hash8,JPAN-022,2019A +hash8,HIST-650,2019A +hash8,CPLN-643,2019A +hash8,ACCT-706,2019C +hash8,GRMN-514,2019C +hash8,INTL-BSL,2019C +hash8,LAW-920,2019C +hash8,CPLN-632,2020A +hash8,CIT-594,2020A +hash8,COML-101,2020A +hash8,BE-498,2020A +hash8,WRIT-030,2020A +hash8,ARCH-712,2020A +hash9,PSCI-258,2017C +hash9,BMIN-520,2017C +hash9,GEOL-643,2017C +hash9,BEPP-263,2017C 
+hash9,DADE-924,2017C +hash9,LING-151,2018A +hash9,KORN-132,2018A +hash9,ANTH-307,2018A +hash9,CIS-399,2018A +hash9,RELS-144,2018A +hash9,MUSC-236,2018C +hash9,HSOC-251,2018C +hash9,EDUC-545,2018C +hash9,CAMB-534,2018C +hash9,MATH-730,2018C +hash9,CAMB-706,2019A +hash9,NURS-648,2019A +hash9,NURS-513,2019A +hash9,LALS-158,2019A +hash9,WRIT-030,2019A +hash9,SWRK-714,2019A +hash9,CIS-197,2019C +hash9,EALC-622,2019C +hash9,VISR-699,2019C +hash9,BE-101,2019C +hash9,LGST-611,2019C +hash9,CLST-223,2019C +hash9,NPLD-750,2020A +hash9,REAL-240,2020A +hash9,SWRK-713,2020A +hash9,PSCI-333,2020A +hash9,GRMN-101,2020A +hash10,PSYC-449,2016C +hash10,BMB-650,2016C +hash10,AFRC-581,2016C +hash10,HCMG-863,2016C +hash10,NURS-757,2017A +hash10,EPID-625,2017A +hash10,NPLD-782,2017A +hash10,PHIL-205,2017A +hash10,MGMT-692,2017A +hash10,DYNM-630,2017A +hash10,PERS-612,2017C +hash10,EDCE-382,2017C +hash10,NURS-708,2017C +hash10,IMPA-606,2017C +hash10,MKTG-352,2017C +hash10,MEAM-891,2018A +hash10,COML-096,2018A +hash10,PHYS-016,2018A +hash10,NELC-102,2018A +hash10,SKRT-480,2018A +hash10,LAW-631,2018A +hash10,JWST-053,2018C +hash10,VLST-261,2018C +hash10,MUSC-171,2018C +hash10,COML-006,2018C +hash10,LAW-974,2019A +hash10,PSCI-181,2019A +hash10,ENGL-282,2019A +hash10,PSYC-449,2019A +hash10,BSTA-670,2019A +hash11,LAW-795,2018C +hash11,GSWS-118,2018C +hash11,GSWS-165,2018C +hash11,EDUC-552,2018C +hash11,SPAN-388,2018C +hash11,FNCE-725,2019A +hash11,PSCI-498,2019A +hash11,BE-101,2019A +hash11,COML-555,2019A +hash11,SAST-799,2019A +hash12,FNCE-751,2016C +hash12,BEPP-452,2016C +hash12,MUSC-275,2016C +hash12,GOMD-978,2016C +hash12,RELS-257,2016C +hash12,SWRK-768,2016C +hash12,EDUC-668,2017A +hash12,CIS-195,2017A +hash12,PHIL-372,2017A +hash12,AFST-285,2017A +hash12,INTL-BTM,2017C +hash12,FNCE-254,2017C +hash12,ARCH-728,2017C +hash12,MKTG-350,2017C +hash12,LGIC-320,2017C +hash12,LAW-966,2018A +hash12,PSCI-217,2018A +hash12,GCB-577,2018A +hash12,PSYC-612,2018A +hash12,LAW-987,2018A +hash12,HSOC-411,2018C +hash12,LARP-734,2018C +hash12,EDUC-360,2018C +hash12,DENT-634,2018C +hash12,VLST-261,2019A +hash12,ANTH-595,2019A +hash12,ENVS-699,2019A +hash12,EPID-664,2019A +hash12,LALS-398,2019A +hash12,HCMG-868,2019A +hash13,LAW-631,2016C +hash13,LGIC-320,2016C +hash13,BE-608,2016C +hash13,ENVS-616,2016C +hash13,NSCI-402,2017A +hash13,OIDD-763,2017A +hash13,MKTG-955,2017A +hash13,HEBR-053,2017A +hash13,FNAR-489,2017C +hash13,CHIN-722,2017C +hash13,AFRC-271,2017C +hash13,AFST-285,2017C +hash13,COML-592,2017C +hash13,NURS-357,2017C +hash13,PSYC-170,2018A +hash13,MKTG-350,2018A +hash13,LALS-273,2018A +hash13,ACCT-706,2018A +hash13,CLST-211,2018A +hash13,GPED-915,2018A +hash14,HSPV-747,2017C +hash14,REES-010,2017C +hash14,GRMN-203,2017C +hash14,BIOE-701,2017C +hash14,ENVS-616,2017C +hash14,LAW-734,2018A +hash14,MKTG-352,2018A +hash14,TELU-430,2018A +hash14,MATH-621,2018A +hash14,CBE-460,2018A +hash14,ANEL-546,2018A +hash14,HIST-139,2018C +hash14,BEPP-261,2018C +hash14,HCMG-868,2018C +hash14,ASAM-006,2018C +hash14,MGMT-231,2018C +hash14,ECON-242,2018C +hash14,LAW-968,2019A +hash14,HCMG-215,2019A +hash14,PSYC-435,2019A +hash14,QUEC-120,2019A +hash14,PHIL-029,2019A +hash14,PHYS-140,2019C +hash14,GPRD-959,2019C +hash14,LAW-946,2019C +hash14,LAW-904,2019C +hash14,OIDD-397,2020A +hash14,ARTH-235,2020A +hash14,OIDD-291,2020A +hash14,AFRC-602,2020A diff --git a/backend/tests/plan/course_recs_test_data/course_descriptions_test.csv b/backend/tests/plan/course_recs_test_data/course_descriptions_test.csv index 567b433f5..7ca6bec5f 100644 --- 
a/backend/tests/plan/course_recs_test_data/course_descriptions_test.csv +++ b/backend/tests/plan/course_recs_test_data/course_descriptions_test.csv @@ -1,308 +1,308 @@ -AFRC-437,"Why are African Americans and some other minority groups disproportionately incarcerated and subjected to penal sanctions? What are the political, social and economic consequences for individuals, communities, and the wider society of mass incarceration in the United States? What types of reforms of the criminal justice system are desirable and possible? This advanced seminar analyzes the connection between race, crime, punishment, and politics in the United States. The primary focus is on the role of race in explaining why the country's prison population increased six-fold since the early 1970s and why the United States today has the highest incarceration rate in the world. The class will likely take field trips to a maximum-security jail in Philadelphia and to a state prison in the Philadelphia suburbs." -ARTH-775,"Topic varies from semester to semester. For Spring 2020, this course will cover 'Cezanne, Alienation, and Modern Portraiture.' In the process of advancing modern art, Cezanne is often said to have subjugated the modern individual to the painting. Lost in conspicuous brushwork and vibrant coloration, the sitter had a difficult time making their presence felt in his work (not least his wife, Hortense Fiquet, who he portrayed most frequently). With the help of new scholarship on the artist and the period, this course will reassess this old saw of modernist art history, and instead place Cezanne's innovations carefully within the contemporary emergence of psychology and modern urban consciousness. We will discuss the various ways in which the birth of modernist representation coincided with the birth of the modern subject, and develop new means to analyze modernist portraiture more broadly. Along the way, this seminar will look carefully at Cezanne's entire career and oeuvre (and that of several of his colleagues as well), and we will study in particular the ways in which writers, philosophers and art historians--from Zola, Rilke, Heidegger and Merleau-Ponty to Jonathan Crary, Tamar Garb and T. J. Clark more recently--have used the artist to write their histories of modernism and modernity since the turn of the last century." -GRMN-180,"The German House is a half-credit course with concentrations in German conversation, film, and culture. Though many students enroll for credit, others often come to select events. All interested parties are invited, and you do not have to actually live in the house to enroll for credit. Students from all different levels of language proficiency are welcome. Beginners learn from more advanced students, and all enjoy a relaxed environment for maintaining or improving their German language skills." -ARCH-532,"A continuation of Construction I, focusing on light and heavy steel frame construction, concrete construction, light and heavyweight cladding systems and systems building." -BEPP-263,"This course examines environmental and energy issues from an economist's perspective. Over the last several decades, energy markets have become some of the most dynamic markets of the world economy, as they experienced a shift from heavy regulation to market-driven incentives. First, we look at scarcity pricing and market power in electricity and gasoline markets. 
We then study oil and gas markets, with an emphasis on optimal extraction and pricing, and geopolitical risks that investors in hydrocarbon resources face. We then shift gears to the sources of environmental problems, and how policy makers can intervene to solve some of these problems. We talk about the economic rationale for a broad range of possible policies: environmental taxes, subsidies, performance standards and cap-and-trade. In doing so, we discuss fundamental concepts in environmental economics, such as externalities, valuation of the environment and the challenge of designing international agreements. At the end of the course, there will be special attention for the economics and finance of renewable energy and policies to foster its growth. Finally, we discuss the transportation sector, and analyze heavily debated policies such as fuel-economy standards and subsidies for green vehicles. Prerequisites: An introductory microeconomics course (ECON1, or another course approved by the instructor) will be sufficient in most cases; BEPP 250 or an equivalent intermediate microeconomics course is recommended." -NELC-337,"The Hebrew Bible legislates against magic and witchcraft. But Jewish literature is replete with demons, witches, spells and incantations. This course will examine the phenomenon of Jewish magic in the longue duree. We will explore a wide array of sources describing ancient Jewish magical practices, and attempt to reconstruct the various aspects of ancient Jewish magic. We will start with demonology and exorcism in biblical and Second Temple literature. Then we will examine rabbinic attitudes towards magic and sorcery and rabbinic magical recipes. We then turn to material artifacts: late antique Jewish amulets and magic bowls. Finally we will survey the large corpus of magical texts from the Cairo Geniza and Hebrew manuscripts of magic from the middle ages. During the course we will consider broader questions such as the relationships between magic and religion, the identity of the Jewish magicians and their clients, relationship between Jewish and contemporary non-Jewish magic, and the role of women in magical practice." -ANTH-395,"The last 40 years has been a period of unparalleled reappraisal of archaeological theory and practice. We will consider the development of anthropological archaeology in terms of the questions archaeologists have asked, the ideas that have guided those questions, and the procedures that have been used to investigate them. Our discussion will focus on the intellectual heritage of normative or cultural-historical archaeology and its successors in terms of changing archaeological goals and theoretical frameworks, and their importance for contemporary research. The course will be organized around specific examples of archaeological research that have exemplified or challenged theoretical and methodological standards from culture history through the post-processual critique and the emergence of contemporary theorizations." -LGIC-320,"The second semester of a two-semester course on the fundamental results and techniques of mathematical logic. Topics will be drawn from model theory, proof theory, recursion theory, and set theory. Connections between logic and algebra, analysis, combinatorics, computer science, and the foundations of mathematics will be emphasized." 
-AFRC-209,"This selective survey will examine a variety of the circumstances of sub-Saharan African art, ranging from imperial to nomadic cultures and from ancient times to comtemporary participation in the international market. Iconography, themes and style will be considered, as will questions of modernity, religious impact, tradition and colonialism." -ANTH-329,"How do people become who they are, both similar to others and uniquely individual? How might these similarities and differences be shaped by childhood experiences in family, community, and societies around the world? How do children develop emotionally? Morally? What features of human development, expression of emotions, and relational patterns are universal for our species? What features are not universal? And what is and is not known about these questions? In this course, we will consider these and many other questions. We will read about and discuss complex and dynamic interactions between culture and individual psychology, and between nature and nurture from birth to adulthood. We will carefully examine various phases of human development as described by psychoanalysts and anthropologists. The course includes anthropologic and psychoanalytic readings and videotapes, as well as literature, fairy tales, and mythologies from cultures around the world. The instructors are both psychoanalysts, one a psychiatrist and one a pediatrician. The course counts towards the Psychoanalytic Studies (PSYS) Minor." -NELC-346,"This course introduces students to theory and methodology of the geospatial humanities and social sciences, understood broadly as the application of Geographical Information Systems (GIS) and spatial analysis techniques to the study of social and cultural patterns in the past and present. By engaging with spatial theory, spatial analysis case studies, and technical methodologies, students will develop an understanding of the questions driving, and tools available for, humanistic and social science research projects that explore change over space and time. We will use ESRI's ArcGIS software to visualize, analyze, and integrate historical, anthropological, and environmental data. Techniques will be introduced through the discussion of case studies and through demonstration of software skills. During supervised laboratory sessions, the various techniques and analyses covered will be applied to sample data and also to data from a region/topic chosen by the student." -NELC-685,"A one-semester survey of Islamic art and architecture which examines visual culture as it functions within the larger sphere of Islamic culture in general. Particular attention will be given to relationships between visual culture and literature, using specific case studies, sites or objects which may be related to various branches of Islamic literaturem including historical, didactic, philosophical writings, poetry and religous text. All primary sources are available in English translation." -LAW-930, -GPRD-936, -GEOL-545,"Pattern on the Earth's surface arise due to the transport of sediment by water and wind, with energy that is supplied by climate and tectonic deformation of the solid Earth. This course presents a treatment of the processes of erosion and deposition that shape landscapes. 
Emphasis will be placed on using simple physical principles as a tool for (a) understanding landscape patterns including drainage networks, river channels and deltas, desert dunes, and submarine channels, (b) reconstructing past environmental conditions using the sedimentary record, and (c) the management of rivers and landscapes under present and future climate scenarios. The course will conclude with a critical assessment of landscape evolution on other planets, including Mars." -HCMG-852,"The purpose of this course is to apply economics to an analysis of the health care industry, with special emphasis on the unique characteristics of the US healthcare markets, from pre-hospital to post-acute care. This course focuses on salient economic features of health care delivery, including: the role of nonprofit providers, the effects of regulation and antitrust activity on hospitals, the degree of input substitutability within hospitals, the nature of competition in home health care, public versus private provision of emergency medical services, the effect of specialty hospitals and ambulatory surgery centers, defining and improving medical performance in hospitals, specialization and investment in physical and human capital, shifting of services between inpatient and outpatient settings and its effect on health care costs and quality, and innovation in primary care from retail clinics to patient-centered medical homes and retainer-based medicine." -RELS-129,"Course topics will vary; have included The Binding of Isaac, Responses to Catastrophies in Jewish History, Holy Men & Women (Ben-Amos); Rewriting the Bible (Dohrmann); Performing Judaism (Fishman); Jewish Political Thought (Fishman); Jewish Esotericism (Lorberbaum). Democratic culture assumes the democracy of knowledge- the accessibility of knowledge and its transparency. Should this always be the case? What of harmful knowledge? When are secrets necessary? In traditional Jewish thought, approaching the divine has often assumed an aura of danger. Theological knowledge was thought of as restricted. This seminar will explore the ""open"" and ""closed"" in theological knowledge, as presented in central texts of the rabbinic tradition: the Mishnah, Maimonides and the Kabbalah. Primary sources will be available in both Hebrew and English." -COMM-313,"In this 'big data' era, presidents and popes tweet daily. Anyone can broadcast their thoughts and experiences through social media. Speeches, debates and events are recorded in online text archives. The resulting explosion of available textual data means that journalists and marketers summarize ideas and events by visualizing the results of textual analysis (the ubiquitous 'word cloud' just scratches the surface of what is possible). Automated text analysis reveals similarities and differences between groups of people and ideological positions. In this hands-on course students will learn how to manage large textual datasets (e.g. Twitter, YouTube, news stories) to investigate research questions. They will work through a series of steps to collect, organize, analyze and present textual data by using automated tools toward a final project of relevant interest. The course will cover linguistic theory and techniques that can be applied to textual data (particularly from the fields of corpus linguistics and natural language processing). No prior programming experience is required. 
Through this course students will gain skills writing Python programs to handle large amounts of textual data and become familiar with one of the key techniques used by data scientists, which is currently one of the most in-demand jobs." -EDUC-639,"This course examines different theoretical frames and strategies related to the study and design of learning environments in school, community and online contexts. Physical, social and cognitive aspects of learning situations are considered as students evaluate current research and applications in a variety of existing educational learning environments." -DADE-991, -ENVS-606,"This class will explore the foundations of avifaunal biology and ecology using a combination of hands-on classroom and in-the-field experiences. Classroom content includes physiology, anatomy, and morphology of birds. The fall migration of birds in North America is an epic and often tragic event. Sampling birds in migration has resulted in foundational understandings about stopover habitats, species-specific energy budgets and has helped realize the complete life cycle of hundreds of species. We will enter the field and participate in actual ornithological research, explore avifaunal ecology through birdwatching, and meet with regional leaders in the ornithological field." -CHIN-515,"This course surveys the literary movements of the post-Cultural Revolution era (1978-present).The reading consists of fictional works representative of each literary movement. Students will write four short (1-2 pages, double space) ""responding"" papers and two longer critical essays (5-7 pages double spaced). Each student will also give one oral presentation to the class on an assigned story. This course is designed for students who have achieved native or near native level of reading and writing proficiency in Chinese. The class is conducted exclusively in Chinese." -PHIL-578,"This is a topics-based graduate seminar in political philosophy. Examples of topics we can examine in this course include distributive justice, liberty, equality, and global justice. Course readings will be drawn from a combination of seminal and more recent works on the selected topics." -JPAN-012,Textbooks: Genki I (Lesson 8- Lesson 12) and Genki II (Lesson 13- Lesson 14)Kanji: reproduction-approx. 170/recognitio-approx.250 -ASAM-110,Please see our website for more current information: asam.sas.upenn.edu -LAW-695, -EALC-242,"This course explores Chinese medicine and healing culture, its diversity, and its change over time. We will discuss topics including the establishment of canonical medicine, Daoist approaches to healing and longevity, diverse views of the body and disease, the emergence of treatments for women, medical construction of sex difference and imagination of female sexuality, the thriving and decline of female healers, the identity of scholar physicians, the transmission of medical knowledge, domestic and cross-regional drug market, healer-patient relations, and new visions of traditional Chinese medicine in modern China." -AFRC-602,"This course critically examines stereotype threat and impostor phenomenon as they relate to African Americans. Both stereotype threat and impostor phenomenon negatively affect African Americans. The apprehension experienced by African Americans that they might behave in a manner that confirms an existing negative cultural stereotype is stereotype threat, which usually results in reduced effectiveness in African Americans' performance. 
Stereotype threat is linked with impostor phenomenon. Impostor phenomenon is an internal experience of intellectual phoniness in authentically talented individuals, in which they doubt their accomplishments and fear being exposed as a fraud. While stereotype threat relies on broad generalization, the impostor phenomenon describes feelings of personal inadequacy, especially in high-achieving African Americans. This course will explore the evolving meanings connected to both stereotype threat and impostor phenomenon in relation to African Americans." -PHIL-002,"Ethics is the study of right and wrong behavior. This introductory course will introduce students to major ethical theories, the possible sources of normativity, and specific ethical problems and questions. Topics may include euthanasia, abortion, animal rights, the family, sexuality, bioethics, crime and punishment and war." -REAL-236,"This course analyzes housing finance systems and housing market outcomes across the globe. In the US, the course focuses on the development of securitization markets and addresses the current challenges of housing finance reform, including the future of Fannie Mae and Freddie Mac. Internationally, the course covers issues of access to housing and housing informality in developing countries, financial crises arising out of the housing sector, and market-oriented and public policy solutions. The course features a wide array of speakers in finance, government and academia who contribute their perspectives to pressing issues of mortgage market design." -PPE-402,"Led by fellows in the Philosophy, Politics and Economics program, this course teaches students how to conduct research in PPE with an emphasis on creating a well-formed research question, determining what kinds of data or scholarly research bears on that question, and how to carry out an interdisciplinary, research-driven project on that question." -STAT-431,Graphical displays; one- and two-sample confidence intervals; one- and two-sample hypothesis tests; one- and two-way ANOVA; simple and multiple linear least-squares regression; nonlinear regression; variable selection; logistic regression; categorical data analysis; goodness-of-fit tests. A methodology course. This course does not have business applications but has significant overlap with STAT 101 and 102. -ESE-621,"This is a graduate level course on fundamental operating principles and physics of semiconductor devices in reduced or highly scaled dimensions. The course will include topics and concepts covering basic quantum mechanics and solid state physics of nanostructures as well as device transport and characterization, materials and fabrication. A basic knowledge of semiconductor physics and devices is assumed. The course will build upon basic quantum mechanics and solid state physics concepts to understand the operation of nanoscale semiconductor devices and physics of electrons in confined dimensions . The course will also provide a historical perspective on micro and nanoelectronics, discuss the future of semiconductor computing technologies, cutting edge research in nanomaterials, device fabrication as well as provide a perspective on materials and technology challenges. Prerequisite: If course requirement not met, permission of instructor required." -BEPP-322,"This course presents an analysis of overall private wealth management. 
This includes planning for disposition of closely-held business interests; the impact of income taxes and other transfer costs on business interests and other assets; integration of life insurance, disability insurance, medical benefits, and long-term care insurance in the financial plan; planning for concentrated asset (e.g. common stock) positions, diversification techniques, and asset allocation strategies; distribution of retirement assets; lifetime giving and estate planning; and analysis of current developments in the creation, conservation, and distribution of estates. Attention also is given to various executive compensation techniques (including restricted stock and stock options) and planning for various employee benefits. The course also covers sophisticated charitable giving techniques and methods for financing educaton expenses. Reading consist of textbooks, case studies, and bulk pack articles." -URBS-277,"Is urban space gendered? Do we change how it is gendered as we move through it? Does it change us? This course explores gender and sexuality in the contemporary global city through the study of urban spaces. We will consider feminist, queer, and transgender theories of the city, as we investigate how practices of using and making space are gendered and sexualized. Each week of the course will be organized around a type of space, including subway, school, and birthing center, nightclub, suburb, and park. Assignments will include an auto-ethnography, a short critical essay, and a final assignment that asks you to propose an additional type of space in which to study the intersections of sex, gender, and the urban built environment. In each space, we will conduct an interdisciplinary exploration, drawing from sociology, anthropology, geography, city planning history, feminist and queer theory, as well as from fiction, poetry, music videos, photography, and documentary film." -LAW-501, -HIST-412, -ENGL-169,"An advanced course in long-form nonfiction journalistic writing for a select group of experienced and self-starting student writers. (Ideally, each accepted member will have already taken one or two nonfiction seminars within the creative writing program.) The goal will be to tailor a reporting and writing project to your interest, one you may have long wished to take up but never had the opportunity. It could be a project in the arts. It could be a profile of a person or place. It might be documentary in nature, which is to say an extremely close-up observation of your subject. (An example: think of a hospital chaplain at Penn, going on his dreary, redemptive, daily rounds, to visit the sick and anoint the dying. What if you were there, for most of the term, as unobtrusively as possible, at his black-clad elbow?) The group will meet at to-be-determined intervals. In between, the enrollees will be pairing off and in effect serving as each other's editor and coach and fellow (sister) struggler. When we do assemble as a group, we will be reading to each other as well as discussing the works of some long-form heroes--Didion, Talese, Richard Ben Cramer, one or two others you may not have heard of. In essence, this is a kind of master course, limited in enrollment, and devoted to your piece of writing, to be handed in on the final day. It will be in the range of 25 to 30 pages, something above 8,000 words. The course presumes a lot of individual initiative and self-reliance. If you're interested, please email phendric@english.upenn.edu and suggest your qualifications. 
Permission to enroll is required." -LALS-397,Topics vary. Please see the Spanish Department's website for the current course description: https://www.sas.upenn.edu/hispanic-portuguese-studies/pc -PSYC-439,"This course is designed to examine the various roles played by the nervous and endocrine systems in controlling both physiological processes and behavior. First, the course will build a foundation in the concepts of neural and endocrine system function. Then, we will discuss how these mechanisms form the biological underpinnings of various behaviors and their relevant physiological correlates. We will focus on sexual and parental behaviors, stress, metabolism, neuroendocrine-immune interactions, and mental health." -NELC-552,"Course topics will vary; they have included: Holy Men & Women (Ben-Amos); Rewriting the Bible (Dohrmann); Jewish Political Thought & Action (Fishman) When did the Bible become the Bible? What was the nature of canon and authority in early Israel and Judaism, and how did biblical communities think about their sacred texts? How and what did the Bible mean to ancient readers? The answers to these questions are varied and surprising. This course looks at early biblical and Jewish texts that both write and re-write the tradition's own central texts. We will think widely and creatively about ancient textuality, orality, interpretation, composition, and authority. Drawing on literary theory, the course will examine the ways that biblical and post-biblical literature from the Second Temple to the rabbinic period (with some forays into contemporary literature) manifest complex ideas about power, meaning, and religiousity in early Judaism." -SKRT-480,"This course is for advanced students of Sanskrit. Designed as a seminar, the course aims to take students through the primary and secondary sources of Sanskrit literary and phlosophical production. Each semester will focus on a different genre: epic, belles-lettres, lyric poetry, drama, philosophy, shastra, advanced grammar, history, poetics, and epigraphy. We will focus on original sources, secondary scholarship, and theoretical approaches toward the translation and study of Sanskrit texts." -LGIC-010,"This course provides an introduction to some of the fundamental ideas of logic. Topics will include truth functional logic, quantificational logic, and logical decision problems." -JWST-034,Continuation of JWST 033; emphases in reading texts and conversation. -GSWS-118,"This seminar explores Iranian culture, society, history and politics through the medium of film. We will examine a variety of cinematic works that represent the social, political, economic and cultural circumstances of contemporary Iran, as well as the diaspora. Along the way, we will discuss issues pertaining to gender, religion, nationalism, ethnicity, and the role of cinema in Iranian society and beyond. Discussions topics will also include the place of the Iranian diaspora in cinema, as well as the transnational production, distribution, and consumption of Iranian cinema. Films will include those by internationally acclaimed filmmakers, such as Rakhshan Bani-Etemad, Asghar Farhadi, Bahman Ghobadi, Abbas Kiarostami, Mohsen Makhmalbaf, Dariush Mehrjui, Tahmineh Milani, Jafar Panahi, Marjane Satrapi and others. All films will be subtitled in English. No prior knowledge is required." -EDUC-683,"This course covers the methods and design of field surveys in the U.S. and other countries in education, the social sciences, criminal justice research, and other areas. 
It covers methods of eliciting information through household, mail, telephone surveys, methods of assuring privacy, enhancing cooperation rates and related matters. Finally, the fundamentals of statistical sampling and sample design are handled. Much of the course is based on contemporary surveys sponsored by the National Center for Education Statistics and other federal, state and local agencies." -HCMG-866,"This course will introduce students to the main components of Health Information Technology (HIT) and how HIT currently effects, and in the future, may change health care operating models. Although it will not prepare students for primary technology management positions, it will help them understand the role of information technology in the success of the delivery system and other important healthcare processes. It will provide a foundation that will prepare them as managers, investors and consultants to rely upon or manage information technology to accomplish delivery system objectives. The course will give special attention to key health care processes, and topics such as the drive for provider quality and cost improvements, the potential ability to leverage clinical data for care improvement and product development, the growth of new information technologies for consumer directed healthcare and telemedicine, the strategies and economics of individual HIT companies and the role of government. The course relies heavily on industry leaders to share their ideas and experiences with students." -NELC-332,"This interdisciplinary seminar aims to introduce students to the countries of North Africa, with a focus on the Maghreb and Libya (1830-present). It does so while examining the region's close economic and cultural connections to sub-Saharan Africa, Europe, and the Middle East. Readings will include histories, political analyses, anthropological studies, and novels, and will cover a wide range of topics such as colonial and postcolonial experiences, developments in Islamic thought and practice, and labor migration. This class is intended for juniors, seniors, and graduate students. Prerequisite: A university-level survey course in Middle Eastern, African, or Meditterranean history." -EDUC-598,"This ABCS course explores religious pluralism and interfaith dialogue and action on college campuses. It brings together students with diverse faith commitments (including atheism) to engage with and learn from one another in academic study, dialogue, and service." -ACCT-613,"This course provides an introduction to both financial and managerial accounting, and emphasizes the analysis and evaluation of accounting information as part of the managerial processes of planning, decision-making, and control. A large aspect of the course covers the fundamentals of financial accounting. The objective is to provide a basic overview of financial accounting, including basic accounting concepts and principles, as well as the structure of the income statement, balance sheet, and statement of cash flows. The course also introduces elements of managerial accounting and emphasizes the development and use of accounting information for internal decisions. Topics include cost behavior and analysis, product and service costing, and relevant costs for internal decision-making. 
This course is recommended for students who will be using accounting information for managing manufacturing and service operations, controlling costs, and making strategic decisions, as well as those going into general consulting or thinking of starting their own businesses." -ENVS-699,"This course is designed to help students successfully complete their MES Capstone. A set of milestones will be set and regular meetings will be held in groups and individually to aid the student as they complete the research portion of their degree. We will be working together to complete a series of steps towards the final project. These steps fall into five major areas 1) Reviewing the literature; 2) Finding a model; 3) Framing your research; 4) managing data; and 5) Writing your results. Throughout the semester, we will also discuss career goals and the job search. Prerequisite: Project proposal and Online Application required for course registration. See MES Office and ""Guide to the Capstone"" for more information." -GRMN-101,"Designed for the beginning student with no previous knowledge of German. German 101, as the first course in the first-year series, focuses on the development of language competence in listening, speaking, reading, and writing. By the end of the semester, students will be able to engage in simple conversations about familiar things, know greetings and everyday expressions, they will be able to count and tell time, and negate sentences in day-to-day contexts. Furthermore, students will be able to speak about events that happened in the immediate past and express plans for the future. In addition, students will have developed reading strategies that allow them to glean information from simple newspaper and magazine articles and short literary texts. Because cultural knowledge is one of the foci of German 101, students will learn much about practical life in Germany and will explore German-speaking cultures on the Internet." -WH-301,"Organizations emerge because individuals cannot (or do not want to) accomplish their goals alone. Therefore, an organization is most often defined as a collective oriented toward a common goal. Collaboration --in relationships and in teams -- is the building block of organizational effectiveness. That is, much of your work each day will occur in a social context, and will require you to wield influence (and be influenced). Moreover, over 80% of Fortune 1,000 companies now use teams. The ability to work effectively in teams is thus a critical skill. In this course we will use the latest evidence from the science of organizations to understand an array of tactics that can help you work with others (and manage them) as you strive to attain shared goals, especially in the context of teams. You will develop a portable toolkit of ideas related to managing team decision making, team conflict, team diversity, interpersonal influence and emotional intelligence. This is a cross-listed course. Students may enroll in either MGMT 301 or WH 301." -SAST-217,"C.U. in India is a hybrid, domestic/overseas course series which provides students with the opportunity to have an applied learning and cultural experience in India or South East Asia where students participate in 1) 28 classroom hours in the Fall term 2) a 12-day trip to India or South East Asia with the instructor during the winter break visiting key sites and conducting original research (sites vary) 3) 28 classroom hours at Penn in the Spring term and 4) a research paper, due at the end of the Spring term.
Course enrollment is limited to students admitted to the program. For more information and the program application go to http://sites.sas.upenn.edu/cuinindia This is a 2-CU yearlong course" -WRIT-038,"This writing seminar focuses on a scholarly inquiry in the field of Engineering, the use of scientific principles to design and build machines, structures, systems, and other items. This seminar will introduce students to scholarly conversations on a specific topic in this discipline as a basis for understanding discipline- and profession-based writing. This includes types of reasoning, evidence, citation practices, and other means of creating, testing, and sharing knowledge with diverse audiences. Throughout, students are introduced to new writing situations as a means of sharpening their skills and learning how to be adaptive, effective writers." -LGST-206,"This course examines the art and science of negotiation, with additional emphasis on conflict resolution. Students will engage in a number of simulated negotiations ranging from simple one-issue transactions to multi-party joint ventures. Through these exercises and associated readings, students explore the basic theoretical models of bargaining and have an opportunity to test and improve their negotiation skills." -VBMS-604,"This is an introductory course to the neurosciences and assumes a basic background in anatomy, cell biology, histology and biochemistry. At the cell/molecular level the course covers neurocytology, membrane bioelectrical events and their channel protein basis, neurotransmitters and their actions at the synapse. It also covers aspects of neurochemistry, neuropharmacology and focuses on neuroanatomy and function of neural systems. The latter include the somatic and autonomic components of the peripheral nervous system, the spinal cord and reflexes, primary sensory systems, motor pathways and limbic-visceral systems of the brain." -EDUC-646,The term school-to-prison pipeline typically refers to a disturbing trend in which punitive policies have led to children being funneled out of schools and into the criminal justice system at an alarming rate. This course: 1. Examines the historical context and policies that have contributed to the school-to-prison pipeline. -BMB-567,"This course covers selected topics in bioinorganic chemistry. Special emphasis is placed on dioxygen chemistry and electron transfer processes. Course topics include: 1) oxygen uptake and utilization; 2) oxygen transport; 3) oxygen and O atom incorporation into substrates; 4) metalloenzyme-catalyzed C-C bond formation; 5) the metallobiochemistry of DNA; 6) metal-sulfide proteins; 7) manganese containing metalloproteins; 8) photosystem II, light-driven electron transfer and the biological water-splitting reaction; 9) biological electron transfer; 10) electron transfer theory; 11) mechanisms of energy storage and release; and 12) long-distance electron transfer reactions." -ENGL-156,"A creative writing course built entirely around the use of photographs, and the crafting of compelling nonfiction narratives from them. The essential concept will be to employ photographs as storytelling vehicles. So we will be using curling, drugstore printed Kodak shots from our own family albums. We will be using searing and famous images from history books. We will be taking things from yesterday's newspaper. We will even be using pictures that were just made by the workshop participants outside the campus gates. 
In all of this, there will be one overriding aim to achieve memorable, full-bodied stories. To locate the strange, evocative, storytelling universes that are sealed inside the four rectangular walls of photograph. They are always there, if you know how to look. It's about the quality of your noticing, the intensity of your seeing. See the English Department's websitee at www.english.upenn.edu for a description of the current offerings." -GAFL-571, -BEPP-452,"This specialized course is usually only taken by Wharton students who plan to concentrate in actuarial science and Penn students who plan to minor in actuarial mathematics. It provides a comprehensive analysis of advanced life contingencies problems such as reserving, multiple life functions, multiple decrement theory with application to the valuation of pension plans." -LAW-734, -GPRD-957, -ANTH-617,"A critical examination of recent history and theory in cultural and social anthropology. Topics include structural-functionalism; symbolic anthropology; post-modern theory. Emphasis is on major schools and trends in America, Britain, and France." -WRIT-073,"This writing seminar focuses on a scholarly inquiry in the field of Philosophy, the study of questions about existence, knowledge, values, reason, mind, and language. This seminar will introduce students to scholarly conversations on a specific topic in this discipline as a basis for understanding discipline- and profession-based writing. This includes types of reasoning, evidence, citation practices, and other means of creating, testing, and sharing knowledge with diverse audiences. Throughout, students are introduced to new writing situations as a means of sharpening their skills and learning how to be adaptive, effective writers." -HCMG-352,"The purpose of this course is to apply economics to an analysis of the health care industry, with special emphasis on the unique characteristics of the US healthcare markets, from pre-hospital to post-acute care. This course focuses on salient economic features of health care delivery, including: the role of nonprofit providers, the effects of regulation and antitrust activity on hospitals, the degree of input substitutability within hospitals, the nature of competition in home health care, public versus private provision of emergency medical services, the effect of specialty hospitals and ambulatory surgery centers, the economics of direct-to-consumer advertising and its effect on drug safety, defining and improving medical performance in hospitals, specialization and investment in physical and human capital, and shifting of services between inpatient and outpatient settings and its effect on health care costs and quality." -MATH-260,This is an honors version of Math 240 which explores the same topics but with greater mathematical rigor. -GRMN-502, -PSYC-253,"Thinking, judgment, and personal and societal decision making, with emphasis on fallacies and biases. Prerequisite: One semester of Statistics or Microeconomics." -ANEL-546, -NURS-609,"This course explores the perioperative evaluation and advanced anesthetic principles related to patient populations undergoing a broad range of surgical procedures. Emphasis is placed on selection and administration of anesthesia to these populations to ensure optimal patient care, safety, monitoring and implementing interventions to prevent and treat common perioperative emergencies. 
Prerequisite: Enrollment in NANS program - year 2" -WRIT-030,"Open to upperclassmen who have not fulfilled their writing requirement. We will begin by reading and analyzing Richard Toye's A Very Short Introduction to Rhetoric to learn about the art of informing and persuading others, an art that is at the very heart of all civic society and every walk of life, as Toye's examples underscore, from the rhetoric of modern cinema and Churchill's wartime speeches to Islamic preaching. This introduction to rhetoric will be followed by students' own exploration of rhetoric in a topic of their own choosing, which might include the rhetoric engineers use to explain a failed bridge; a fashion designer uses to promote a new collection; or politicians and marketing consultants use to convince us to subscribe to their views. Teachers, doctors, and bill collectors use rhetoric, and so too lovers. Visual rhetoric - the ability of images to wordlessly persuade and explain - can be the most powerful of all. In this course, students will learn to be artful producers and discriminating recipients of rhetoric. Please note that the seats in WRIT 030 351 - WRIT 030 358 are typically reserved for upperclassmen, and that the seats in WRIT 030 601 - WRIT 030 603 are reserved for LPS students. Descriptions of WRIT 030 601 - WRIT 030 603 can be found here: https://apps.sas.upenn.edu/writing/ccs/catalog.php" -BIBB-240,Topics to be covered include basic principles of chronobiology; neuroscience mechanisms of circadian rhythms and sleep; phylogeny and ontogeny of sleep; human sleep and sleep disorders; circadian dysfunction; circadian and sleep homeostatic influences in human health and safety. Students may not receive credit for both BIBB 240 and BIBB 040. -ESE-224,"Introduction to signal and information processing (SIP). In SIP we discern patterns in data and extract the patterns from noise. Foundations of deterministic SIP in the form of frequency domain analysis, sampling, and linear filtering. Random signals and the modifications of deterministic tools that are necessary to deal with them. Multidimensional SIP where the goal is to analyze signals that are indexed by more than one parameter. Includes a hands-on lab component that implements SIP as standalone applications on modern mobile platforms." -PHIL-372,This majors seminar will cover selected topics in ethics. The content will vary from semester to semester. -NELC-587,"This course is designed to provide an in-depth analysis of archaeological metals. Topics to be discussed include: exploitation of ore and its transformation to metal in ancient times, distribution of metal as a raw material, provenance studies, development and organization of early metallurgy, and interdisciplinary investigations of metals and related artifacts like slag and crucibles. Students will become familiar with the full spectrum of analytical procedures, ranging from microscopy for materials characterization to mass spectrometry for geochemical fingerprinting, and will work on individual research projects analyzing archaeological objects following the analytical methodology of archaeometallurgy." -REES-010,"The reappearance of the concept of Central and Eastern Europe is one of the most fascinating results of the collapse of the Soviet empire. The course will provide an introduction into the study of this region - its cultures, histories, and societies - from the foundation of the Holy Roman Empire to the enlargement of the European Union.
Students are encouraged to delve deeper into particular countries, disciplines, and sub-regions, such as Central Europe, Eastern Europe, and the Balkans, through an individual research paper and class presentations. Prerequisite: This course is one of two required core courses for the Russian and East European Studies (REES) major." -NURS-628,"An examination of the psycho-socio-cultural processes which influence the behavior patterns, coping, and adaptation of older adults. The course emphasizes strategies to promote mental health as well as assessment, presentation, and intervention in the major acute and chronic psychiatric disorders affecting the older adult." -PHIL-077,"This course is an introduction to some of the central philosophical problems of law: What is law? What makes law? What is the relationship between law and morality? Can laws be unjust? Is there a moral obligation to obey the law? We will look at different theories of law, such as positivism and natural law theory, and discuss topics like civil disobedience, liberty and the law, and punishment and the law. The third and final section of the course will consider an unusual and particularly significant kind of law: constitutional law. We will consider the purpose(s) of constitutions, how constitutionalism relates to democracy, and how constitutions ought to be understood and interpreted, in light of our answers to these first two questions. Throughout the course, we will engage with both classic and contemporary work, reading work by Michelle Alexander, Jeremy Bentham, Angela Davis, Ronald Dworkin, John Hart Ely, H.L.A. Hart, Thomas Hobbes, John Locke, John Stuart Mill, Robert Nozick, Martha Nussbaum, Richard Posner, Jeremy Waldron, and others." -BEPP-931,"The objective of this course is to introduce graduate students to computational approaches for solving economic models. We will formulate economic problems in computationally tractable form and use techniques from numerical analysis to solve them. Examples of computational techniques in the current economics literature as well as discuss areas where these techniques may be useful in future research will be disclosed. We will pay particular attention to methods for solving dynamic optimization problems and computing equilibria of games. The substantive applications will cover a wide range of problems including industrial organization, game theory, macroecomics, finance, and econometrics." -LGST-101,"This course presents law as an evolving social institution, with special emphasis on the legal regulation of business in the context of social values. It considers basic concepts of law and legal process, in the U.S. and other legal systems, and introduces the fundamentals of rigorous legal analysis. An in-depth examination of contract law is included." -EDUC-715,"This course is designed to enhance understanding of decision making in higher education administration. Based on case studies, students will analyze, propose policies, generate action plans and implementation procedures, and assess the potential consequences of their administrative decisions." -GPRD-929, -CIT-596,"This course focuses primarily on the design and analysis of algorithms. We will begin with sorting and searching algorithms and then spend most of the course on graph algorithms. In order to study graph algorithms, general algorithm design patterns like dynamic programming and greedy algorithms will be introduced. A section of this course is also devoted to understanding NP-Completeness." 
-COML-256,"This course will explore fiction and film in contemporary Japan, from 1945 to the present. Topics will include literary and cinematic representation of Japan s war experience and post-war reconstruction, negotiation with Japanese classics, confrontation with the state, and changing ideas of gender and sexuality. We will explore these and other questions by analyzing texts of various genres, including film and film scripts, novels, short stories, manga, and academic essays. Class sessions will combine lectures, discussion, audio-visual materials, and creative as well as analytical writing exercises. The course is taught in English, although Japanese materials will be made available upon request. No prior coursework in Japanese literature, culture, or film is required or expected; additional secondary materials will be available for students taking the course at the 600 level. Writers and film directors examined may include: Kawabata Yasunari, Hayashi Fumiko, Abe Kobo, Mishima Yukio, Oe Kenzaburo, Yoshimoto Banana, Ozu Yasujiro, Naruse Mikio, Kurosawa Akira, Imamura Shohei, Koreeda Hirokazu, and Beat Takeshi." -MKTG-955,"This is a continuation of MKTG 954. This doctoral seminar reviews analytical models relevant to improving various aspects of marketing decisions such as new product launch, product line design, pricing strategy, advertising decisions, sales force organization and compensation, distribution channel design and promotion decisions. The primary focus will be on analytical models. The seminar will introduce the students to various types of analytical models used in research in marketing, including game theory models for competitive analysis, agency theory models for improving organization design and incentives within organizations, and optimization methods to improve decision making and resource allocation. The course will enable students to become familiar with applications of these techniques in the marketing literature and prepare the students to apply these and other analytical approaches to research problems that are of interest to the students." -BIOL-375,"Microbiology plays a central role in diverse areas of human life such as infectious disease, ecology, and biotechnology. This course will cover aspects of modern microbiology with an emphasis on prokaryotic organisms. The topics will include basic aspects of microbial diversity, genetics, virology, and pathogenesis as well as examples of applied microbiology." -OIDD-697,"This course is highly recommended for students with an interest in pursuing careers in: (1) retailing and retail supply chains; (2) businesses like banking, consulting, information technology, that provides services to retail firms; (3) manufacturing companies (e.g. P&G) that sell their products through retail firms. Retailing is a huge industry that has consistently been an incubator for new business concepts. This course will examine how retailers understand their customers' preferences and respond with appropriate products through effective supply chain management. Supply chain management is vitally important for retailers and has been noted as the source of success for many retailers such as Wal-mart and Home Depot, and as an inhibitor of success for e-tailers as they struggle with delivery reliability. See M. L. Fisher, A. Raman and A. McClelland, ""Rocket Science Retailing is Coming - Are You Ready?,"" Harvard Business Review, July/August 2000 for related research." 
-COML-094,"This course introduces students to major issues in the history of literary theory. Treating the work of Plato and Aristotle as well as contemporary criticism, we will consider the fundamental issues that arise from representation, making meaning, appropriation and adaptation, categorization and genre, historicity and genealogy, and historicity and temporality. We will consider major movements in the history of theory including the ""New"" Criticism of the 1920's and 30's, structuralism and post-structuralism, Marxism and psychoanalysis, feminism, cultural studies, critical race theory, and queer theory. See the Comparative Literature website at http://ccat.sas.upenn.edu/complit/ for a description of the current offerings." -SWRK-614,"This is the second in a four-course sequence and continues to examine varied practice frameworks and methods for service delivery in working with individuals, groups, families and communities. It emphasizes the eradication of institutional racism and other forms of oppression along with the integration of a culturally-sensitive approach to social work practice. Attention is given to understanding client problems in the context of different social work practice approaches and service requirements and to increased use of professional values to guide and inform practice." -NURS-614,"Population specific topics of concern to nurse anesthetists are reviewed and discussed. Seminal works in the field of anesthesia are reviewed and discussed to facilitate a comprehensive review of contemporary anesthesia practice. The gaps between research and its implementation in practice will be considered. Students will focus on completing a comprehensive review of 1) Basic sciences; 2) Equipment, Instrumentation and Technology; 3) Basic Principles of Anesthesia Practice; and 4) Advanced Principles of Anesthesia Practice as described by National Council on Certification and Recertification of Nurse Anesthetists. Prerequisite: Must be enrolled in the Nurse Anesthesia program" -NURS-749,"This course explores the impact of historical ideas, events, and actors on current issues in health and illness care. Topics include the movement from hospitals to health care systems; the changing definitions of professionalism and professional practice patterns; and the ways historical context shapes definitions of leadership roles and theoretical knowledge." -EPID-664,"This course will introduce students to methods and study design principles that are specific or unique to clinical research and trials in neurology, child neurology,neuro-ophthalmology, neurosurgery, and related fields. Prerequisite: Permission of instructor" -PERS-616, -EAS-512,"The goal of this course is to teach students of engineering and applied science to be effective negotiators. It aims to improve the way these students communicate i virtually any human interaction. The course intends to improve the ability of engineers and other technology disciplines to gain more support more quickly for projects, researc product and services development, and marketing. For those wanting to be entrepreneurs o r intrapreneurs, the course is designed essentially to find the most value possible in starting up and running companies. 
Based on Professor Diamond's innovative and renowned model of negotiation, it is intended to assist those for whom technical expertise is not enough to persuade others, internally and externally, to provide resources, promotions and project approvals; or to resolve disputes, solve problems and gain more opportunities. Rejecting the 40-year-old notions of power, leverage and logic, the course focuses on persuasion by making better human connections, uncovering perceptions and emotions, and structuring agreements to be both collaborative and fair. This course is entrepreneurial in nature and can provide many times more value than traditional persuasion. The Getting More book has sold more than 1 million copies around the world and is also used by universities, corporations (Google), and U.S. Special Operations (SEALs, Green Berets, Special Forces, Marines) to save lives and reduce conflict. From the first day, students will do interactive cases based their own engineering-related problems and based on current problems in the news. There will be diagnostics enabling every student to assess his/her skill and improvements." -NURS-361,"Human milk is recognized universally as the optimal diet for newborn infants. The health benefits of breastfeeding are so significant that a National Health Objective set forth by the Surgeon General of the United States for the year 2010 is to increase the proportion of mothers who breastfeed their babies in the postpartum period. Through classroom and clinical experiences, this course will provide an in depth examination of the anatomy and physiology of lactation, essential aspects of establishing and maintaining lactation, and the nurses' role in counseling the breastfeeding family. Emphasis will be placed on current research findings in the content area." -EDUC-669,"This course is designed as a collaborative investigation into practitioner inquiry and the work of inquiry communities in K-16 and graduate/professional school settings, professional networks and community-based organizations. The focus is on conceptual and methodological frameworks and methods of practitioner inquiry and the contexts, purposes and practices of differently situated inquiry communities. Participants will explore a range of practitioner inquiry traditions and texts that go by terms such as action, collaborative, critical, community-based, participatory, autobiographical, emancipatory, narrative and pedagogical. They will also conduct an inquiry based on their particular interests and contexts. The course will emphasize practitioner inquiry that intentionally engages issues of equity, access and culture in educational settings." -EDUC-586,"This ethnographic methodology course considers filmmaking/videography as a tool in conducting ethnographic research as well as a medium for presenting academic research to scholarly and non-scholarly audiences. The course engages the methodological and theoretical implications of capturing data and crafting social scientific accounts/narratives in images and sounds. Students are required to put theory into practice by conducting ethnographic research and producing an ethnographic film as their final project. 
In service to that goal, students will read about ethnography (as a social scientific method and representational genre), learn and utilize ethnographic methods in fieldwork, watch non-fiction films (to be analyzed for formal properties and implicit assumptions about culture/sociality), and acquire rigorous training in the skills and craft of digital video production. This is an ABCS course, and students will produce short ethnographic films with students in Philadelphia high schools as part of a partnership project with the School District of Philadelphia. Due to the time needed for ethnographic film production, this is a year-long course, which will meet periodically in both the fall and spring semesters." -SPAN-180,Must be a resident of the Modern Language College House. Prerequisite: Residence in Modern Language House -NURS-215,"This course emphasizes the child-bearing cycle, and the related issues of maternal and infant mortality and morbidity. It also explores women and infant's health care and health promotion needs across the lifespan. It provides a global perspective, and uses the United Nations' Pillars of Safe Motherhood and World Health Organization's Millennium Development Goals as the vehicles to enable students to understand the interrelationships among issues of health and health promotion; social, economic, political and environmental contexts; and the care of women across the lifespan. Clinical experiences provide opportunities for students to understand the connections between the local and the global; to use their developing knowledge base to affect the health of women and their infants. Students will have opportunities for hospital-based care of child-bearing women and their infants. In addition, community-based experiences with individual women and with groups of women across the life cycle will be provided in order to enhance teaching, interviewing and assessment skills." -ANTH-415,"This course introduces the study of animal bones from archaeological sites. Faunal analysis is an interdisciplinary science which draws methods from archaeology, biology, and paleontology. Bones, shells, and other remains yield evidence for the use of animals by humans, and evidence for the biology of animals and for past environments. The course will focus on research approaches to important transitions in human-animal relationships: the development of human hunting and fishing, animal domestication, early pastoralism, and the emergence of market economies in animal products. Class presentations will include lectures and discussion concerning research design and archaeological case material, with additional videos, slidework with field and laboratory equipment, and supervised work identifying and describing archaeological materials from the University Museum's collections. This class is taught in the Zooarchaeology Laboratory of the Center for the Analysis of Archaeological Materials" -BIOL-438,"The course will focus on muscle function from the level of molecules to whole animal locomotion. At each level of organization, muscle function will be explored from mechanical and energetic viewpoints. The course will include lectures, demonstrations, and several guest expert lectures. Students will also be introduced to realistic musculo-skeletal modelling and forward dynamic simulations to explore integrated function." -FNCE-785,"This course explores strategic, business and legal decision making in a fluid real world corporate context. 
Classes will cover a series of timely financial and legal subjects as well as case studies that deal with topical problems in corporate governance, investment strategy, finance, private equity, executive compensation, and potential corporate and criminal behavior. Press, public market reaction, and governmental/political considerations will be integrated into the discussion. All students will be required to participate in one major and two minor team projects. An equal number of graduate law and business students will be enrolled in this class. The instructor, a 30 year veteran and partner at a major private equity firm, is also an attorney and CPA. No prerequisites." -AFST-251, -URDU-402,"This introductory course builds core proficiency in Hindi-Urdu up to the intermediate level. It is designed for students with little or no prior exposure to Hindi or Urdu. The course covers all four language skills (speaking, listening, reading, and writing) and all three modes of communication (interpersonal, presentational, interpretive). Students will develop literacy skills in the primary script of their choice (Hindi or Urdu script). All written materials will be provided in both scripts. All meetings are interactive and students acquire the language by using it in realistic contexts. Culture is embedded in the activities and is also introduced through various authentic materials." -CLST-402,Intensive Greek reading course for students in the Post-Baccalaureate Program in Classical Studies. Readings are chosen to expose students to a variety of prose and poetry texts during their program experience. The Fall course includes some grammar review and analysis as well as translation. Permission of instructor required for non-Post-Baccalaureate students. -NELC-137,"This course will explore the origins, the history and, most importantly, the literary and cinematic art of the struggle that has endured for a century over the region that some call the Holy Land, some call Eretz Israel and others call Palestine. We will also consider religious motivations and interpretations that have inspired many involved in this conflict as well as the political consequences of world wars that contributed so greatly to the reconfiguration of the Middle East after the fall of the Ottoman Empire, and after the revelations of the Holocaust in Western Europe. While we will rely on a textbook for historical grounding, the most significant material we will use to learn this history will be films, novels, and short stories. Can the arts lead us to a different understanding of the lives lived through what seems like unending crisis?" -FNAR-222,"The history and practice of the contemporary mural movement couples step by step analysis of the process of designing with painting a mural. In addition students will learn to see mural art as a tool for social change. This course combines theory with practice. Students will design and paint a large outdoor mural in West Philadelphia in collaboration with Philadelphia high school students and community groups. The class is co-taught by Jane Golden, director of the Mural Arts Program in Philadelphia, and Shira Walinsky, a mural arts painter and founder of Southeast by Southeast project, a community center for Burmese refugees in South Philadelphia." -MATH-571,Continuation of Math 570. Prerequisite: Permission of instructor if course prerequisites not met -ARCH-768,"This course evaluates ""ground-up"" development as well as re-hab, re-development, and acquisition investments.
We examine raw and developed land and the similarities and differences of traditional real estate product types including office, R & D, retail, warehouses, single family and multi-family residential, mixed use, and land as well as ""specialty"" uses like golf courses, assisted living, and fractional share ownership. Emphasis is on concise analysis and decision making. We discuss the development process with topics including market analysis, site acquisition, due diligence, zoning, entitlements, approvals, site planning, building design, construction, financing, leasing, and ongoing management and disposition. Special topics like workouts and running a development company are also discussed. Course lessons apply to all markets but the class discusses U.S. markets only. Throughout the course, we focus on risk management and leadership issues. Numerous guest lecturers who are leaders in the real estate industry participate in the learning process. Format: predominantly case analysis and discussion, some lectures, project visits." -BE-553,"Tissue engineering demonstrates enormous potential for improving human health. This course explores principles of tissue engineering, drawing upon diverse fields such as developmental biology, cell biology, physiology, transport phenomena, material science, and polymer chemistry. Current and developing methods of tissue engineering, as well as specific applications, will be discussed in the context of these principles. A significant component of the course will involve review of current literature within this developing field." -EDUC-566,"This course provides students with experiential and cognitive awareness through affective exercises and readings. It explores issues of living in a diverse society through a variety of educational strategies including workshops, small group process, guest lectures, etc. It represents the seminar portion of P.A.C.E. (Programs for Awareness in Cultural Education): An ""Educating the Peer Educator"" Program." -KORN-012,"This is a continuation of KORN 011. This course aims to further develop the four language skills of students to the novice-high level by building on materials covered in KORN 011. Students will learn how to use three speech styles (polite formal, informal, and intimate) appropriately in a given context. Upon successful completion of this course, students should be able to handle simple and elementary needs of daily life and talk (and write) about a variety of topics such as family, college life, birthday celebration, shopping, Korean food, etc." -CBE-480,"The laboratory methods covered include molecular cloning techniques, cell transformation, DNA gel electrophoresis, ImageJ, PCR, DNA sequencing, SDS-PAGE, mammalian cell culture and enzyme assays. Culture techniques for bacteria, yeast and mammalian cells are taught and practiced. The students write several individual lab reports and keep a weekly lab notebook during the semester. A group presentation and report on a proposal for a new lab experiment is the final assignment for the lab." -CBE-371,The design of industrial methods for separating mixtures. Distillation; liquid-liquid extraction; membranes; absorption. Computer simulations of the processes. -BIOL-221,"This course will survey the discipline of molecular genetics. Two broad areas will be considered: 1) Molecular Biology: DNA replication, transcription, translation, regulation of gene expression in both prokaryotic and eukaryotic systems, and genomics and 2) Genetics: basic Mendelian & molecular genetics."
-SPAN-388,Topics vary. Please see the Spanish Department's website for the current course description: https://www.sas.upenn.edu/hispanic-portuguese-studies/pc -CLST-303,"This course presents an introduction to the history, theory and modern practice of museums. Using the resources of the University Museum, the course will introduce students to curatorial practice, education, exhibition design and conservation, while exploring the theoretical and ethical issues confronted by museums. Particularly relevant for those interested in archaeology, anthropology, art history, cultural heritage and public education." -BEPP-789,"This course is intended to deepen understanding of the major contemporary issues in the world economy. The focus is on the ""big picture"" of global economic developments and the evolution of economic thought over the last one hundred years. The topics include: financial market booms and busts; business cycles; monetary and fiscal policies; inequality; the social welfare state; technological change and economic growth; and international trade and financial arrangements. The time period covers: the Roaring Twenties; the Great Depression; the postwar Golden Age (1945-1973); the stagflation of the 1970s; the Washington Consensus era of market liberalization (1980-2007); the 2008 financial crisis and ensuing Great Recession; and the recent rise of populism. This course also explores different schools of thought. The course will chronicle and compare economic policy and performance of the United States, Europe, Japan and emerging markets (Asia, Latin America, Africa)." -DYNM-615,"This class will operate in a hybrid format. We will have four 90-minute virtual class sessions (via Zoom) on four Thursdays (6 PM to 7:30 PM) -- January 23, February 13, March 5th, and March 26th. These sessions will cover key material on sustainable cities and will help guide students in their thinking for course deliverables. The course also has an independent study element similar to a capstone course - as students will have considerable independent time to complete course material and conduct research for their projects throughout the term. Environmentalist Paul Hawken challenged a class of 2009 college graduates that they would have to ""figure out what it means to be a human being on earth at a time when every living system is declining, and the rate of decline is accelerating."" That theme is at the heart of this course. While we have seen the notion of sustainability gaining traction in recent years, our quality of life in the near future hinges on the development and implementation of sustainable solutions to enormously complex global environmental and social problems. This course is designed to foster the thinking that is needed to address those enormous problems. It involves focusing on a critical global problem with sustainability and social dimensions - in this case, the rapid shift of an increasing global population to cities - and providing the framework for students to take a deep dive into evaluating and recommending solutions to meet that challenge. The World Health Organization (WHO) estimates that in 1990, less than 40% of the global population lived in cities, and that figure is expected to grow to 60% by 2030 and 70% by 2050.
Such growth brings tremendous sustainability and design challenges in both developed and developing countries in terms of resource consumption, food security, water security, energy security, air quality, transportation, infrastructure, waste processing, recycling, and public health. It also brings opportunity. In 2020, students will conduct research on the challenges and opportunities created by increasing urbanization, drawing on readings from thought leaders in conjunction with their own creative insights, with a focus on advancing development of sustainable cities to ensure the long-term health of people and planet. Prerequisite: Non-DYNM students must complete a course permit request: https://www.sas.upenn.edu/lps/graduate/dynamics/course-permit" -EALC-633,"This seminar investigates classical Chinese conceptions of art and beauty as exemplified in philosophy, literature, music, painting, calligraphy, and architecture. All readings will be in English, and no knowledge of Chinese is presumed. Graduate students should see the instructor to discuss requirements for graduate credit." -MATH-314,"Topics will include: Vector spaces, Basis and dimension, quotients; Linear maps and matrices; Determinants, Dual spaces and maps; Invariant subspaces, Canonical forms; Scalar products: Euclidean, unitary and symplectic spaces; Orthogonal and Unitary operators; Tensor products and polylinear maps; Symmetric and skew-symmetric tensors and exterior algebra. Prerequisite: Math 314/514 covers Linear Algebra at the advanced level with a theoretical approach. Students can receive credit for at most one of Math 312 or Math 314." -MGMT-264,"This course focuses on venture capital management issues in the context of a high-growth potential start-up company. The course is motivated by rapid increases in both the supply of and demand for private equity over the past two decades. The topic is addressed from two distinct perspectives: issues that relate to the demand for private equity and venture capital (the entrepreneur's perspective) on the one hand, and issues that relate to the supply of capital (the investor's perspective) on the other. As well, we will address management issues that relate to how the VC and the entrepreneur work together once an investment has been made, compensation issues, and governance issues in the privately held venture-capital-backed company. Format: Case/discussion format, supplemented by lectures and guest speakers." -PSCI-697, -FNCE-812,"The focus will be on the causes of fiscal crises, a careful detailing of who wins and who loses, and then on how such crises might be resolved and, perhaps most importantly, how they might be prevented in the future. The course will draw upon the fiscal experiences of US local governments (New York, Philadelphia, Detroit, Orange County, Puerto Rico), utilities (WPPSS) and states (Illinois), and the international experience from such countries as Greece, Brazil, and Argentina. The costs of such crises for citizens, pensioners, and bond holders can be significant. We seek to understand the underlying economic, political, and legal/regulatory causes of such events so that they may be prevented in the future. The importance of private information and public regulation for disciplining the fiscal performance of democratically elected governments will be a central concern.
We believe strongly that diagnosing and treating the ""disease"" of fiscal mismanagement is an interdisciplinary endeavor drawing on finance, economics, political science, and the law. Students with backgrounds in any of these disciplines are welcome." -OIDD-291,"This course examines the art and science of negotiation, with additional emphasis on conflict resolution. Students will engage in a number of simulated negotiations ranging from simple one-issue transactions to multi-party joint ventures. Through these exercises and associated readings, students explore the basic theoretical models of bargaining and have an opportunity to test and improve their negotiation skills." -NGG-521,"The course is geared to advanced undergraduate and graduate students interested in understanding the basics of implantable neuro-devices, their design, practical implementation, approval, and use. Reading will cover the basics of neuro signals, recording, analysis, classification, modulation, and fundamental principles of Brain-Machine Interfaces. The course will be based upon twice-weekly lectures and ""hands-on"" weekly assignments that teach basic signal recording, feature extraction, classification and practical implementation in clinical systems. Assignments will build incrementally toward constructing a complete, functional BMI system. Fundamental concepts in neurosignals, hardware and software will be reinforced by practical examples and in-depth study. Guest lecturers and demonstrations will supplement regular lectures." -FNCE-256,"The objective of this course is to provide students with detailed knowledge of corporate structures, valuation methods, project finance, risk management practices, corporate governance issues, and geo-political risks in the energy industry. In general, this course seeks to provide students with an overall context for understanding energy issues and risks, and how these might affect financing and investment decisions for both providers of energy and end-users of energy." -BE-608,"To provide an in-depth view of the process by which scientific discoveries are commercialized. This course covers discovery in the laboratory, technology transfer, regulatory, financial, and managerial issues involved in moving a technology from the lab into the marketplace. The course contents fall into three broad categories: (1) examples of scientific discoveries that are candidates for commercialization, (2) fundamental elements of technology transfer, such as intellectual property protection and licensing, and (3) aspects of commercialization, such as regulatory approval, financing, and startup formation. In using this structure, the course provides parallel coverage of both the science and the commercialization process, in such a way that the elements of one contribute to the student's experience in learning the other. Prerequisite: Undergraduates and graduate students in other departments are welcome. Please contact mmaxwell@upenn.edu to request permission to register." -PHYS-314,"This course covers the fundamentals of atmosphere and ocean dynamics, and aims to put these in the context of climate change in the 21st century. Topics include large-scale atmospheric and oceanic circulation, the global energy balance, and the global hydrological cycle. We will introduce concepts of fluid dynamics and we will apply these to the vertical and horizontal motions in the atmosphere and ocean.
Concepts covered include: hydrostatic law, buoyancy and convection, basic equations of fluid motions, Hadley and Ferrel cells in the atmosphere, thermohaline circulation, Sverdrup ocean flow, modes of climate variability (El-Nino, North Atlantic Oscillation, Southern Annular Mode). The course will incorporate student-led discussions based on readings of the 2007 Intergovernmental Panel on Climate Change (IPCC) report and recent literature on climate change. Aimed at undergraduate or graduate students who have no prior knowledge of meteorology or oceanography or training in fluid mechanics. Previous background in calculus and/or introductory physics is helpful. This is a general course which spans many subdisciplines (fluid mechanics, atmospheric science, oceanography, hydrology)." -ENGL-092,"This course is an introduction to the analysis of film as both a textual practice and a cultural practice. We will examine a variety of films--from Fritz Lang's M (1931) to Julie Dash's DAUGHTERS OF THE DUST (1991)--in order to demonstrate the tools and skills of ""close reading."" We will concentrate on those specifically filmic features of the movies, such as mise-en-scene, cinematography, editing and sound strategies, as well as those larger organizational forms, such as narrative and non-narrative structures and movie genres. Because our responses to the movies always extend beyond the film frame, we will additionally look closely at the complex business of film distribution, promotion, and exhibition to show how the less visible machinery of the movie business also shapes our understanding and enjoyment of particular films. Along the way, we will discuss some of the most influential and productive critical schools of thought informing film analysis today, including realism, auteurism, feminism, postmodernism, and others. Screenings are mandatory. See the English Department's website at www.english.upenn.edu for a description of the current offerings." -PSCI-010,"Freshmen seminars are small, substantive courses taught by members of the faculty and open only to freshmen. These seminars offer an excellent opportunity to explore areas not represented in high school curricula and to establish relationships with faculty members around areas of mutual interest. See www.college.upenn.edu/admissions/freshmen.php" -DADE-921,"Participants in this workshop will assess their beliefs and awareness around cross-cultural communication and diversity and inclusiveness. Through lecture, group participation, skill practice, role-play, case studies, and coaching, they learn to advance their skill levels and take communication to the next level. Participants will also be recorded at the opening and conclusion of the workshop to assess skill development." -NURS-652,"This course focuses on the management of financial resources in the healthcare industry, particularly in inpatient and ambulatory care settings. Specific emphasis is on applied accounting, budgeting, capital planning, nursing staffing/scheduling and variance analysis. Additionally, students will apply concepts in developing a business/program plan including completion of an environmental scan, cost-benefit analysis and marketing plan. Students will engage in strategic planning, stakeholder analysis and benchmarking efforts." -CRIM-300,"This course explores constitutional criminal procedure, or the law of the Fourth, Fifth, and Sixth Amendments to the United States Constitution.
Topics include the laws and rules associated with search and seizure, arrest, interrogation, the exclusionary rule, and deprivation of counsel. Social science evidence that supports or raises questions about legal doctrine will be examined. No prerequisites are required." -NURS-357,"Innovation, defined as a hypothesis-driven, testable, and disciplined strategy, is important to improve health & healthcare. Employing new ways of thinking, such as with design thinking, will help open up possibilities of ways to improve health & the process of healthcare. Incorporating current & emerging social & digital technologies such as mobile apps, wearables, remote sensing, and 3D printing, affords new opportunities for innovation. This course provides foundational content & a disciplined approach to innovation as it applies to health & healthcare. A flipped classroom approach has the in-class component focusing on group learning through design thinking activities. The course is open to undergraduate nursing students as a case study & upper-level undergraduates and graduate students from across the Penn campus. The course provides a theoretical foundation in design thinking & may provide an overview of innovation technology & digital strategies as well as social & process change strategies. To enhance the didactic component, students will actively participate in a design case study. Students will be matched by interest and skill level with teams & will work with community-based organizations, healthcare providers and/or innovation partners. Student teams will meet their partners to identify & refine a health or healthcare problem to tackle. Students will work throughout the semester to create an innovative solution that will be pitched to their community-based organization, healthcare provider, and/or innovation partner at the end of the semester. Prerequisite: Completion of freshman & sophomore level courses" -CAMB-695,"This 7-week course is designed to introduce students to basic scientific writing skills and is based upon the premise that clear writing, giving feedback, and receiving feedback are all essential tools for professional development. While this is not strictly a prelim preparatory course, applying the principles of this course will help improve your prelim writing and your scientific writing in general. Structure: An initial introductory lecture for the entire class is followed by 6 weekly small group sessions. These sessions are conducted as workshops designed to enhance student and faculty engagement to improve scientific writing. During the course, participants review the principles of clear, persuasive writing, and apply these principles to writing for a scientific audience. Particular emphasis is placed on conveying the significance of your research, outlining your aims, and discussing your results. Classes are highly interactive, and the majority of class time will be spent discussing student scientific writing. In order to focus on the techniques of scientific writing, in-class discussion and critiques will not address scientific methodology or interpretations of results." -ACCT-921,"This is an empirical literature survey course covering topics that include corporate disclosure, cost of capital, incentives, compensation, governance, financial intermediation, financial reporting, tax, agency theory, cost accounting, capital structure, international financial reporting, analysts, and market efficiency."
-NURS-587,"Grounded in a social justice perspective, this course aims to provide the student with a foundational overview of the field of community health and leadership skills in public health advocacy. The course encourages critical thinking about health outcomes framed by the broad context of the political and social environment. This course analyzes the range of roles and functions carried out by leaders in healthcare advocacy for marginalized communities; integrates knowledge of health policy and the key influence of government and financing on health outcomes; explores community-based participatory research and interventions as tools for change; and discusses ways to develop respectful partnerships with community organizations. An assets-based approach that draws upon the strengths of communities and their leaders provides a foundation for community-engagement skill building. The course emphasizes the development of skills and techniques to lead effective, collaborative, health-focused interventions for disenfranchised groups, including residents of urban neighborhoods. Prerequisite: Undergraduates with permission of the instructor" -NURS-731,This course focuses on the care of high-risk neonates within the context of the family unit. The biological and psychosocial aspects are studied as a basis for nursing practice. Emphasis is placed on the role of the Advanced Practice nurse in improving services to high-risk neonates with the purpose of decreasing mortality and morbidity rates and improving the quality of life of high-risk newborns and infants. -DYNM-723,"Participants learn to be coaches by being coaches to one another. Over a two-month period, cohort members expand their repertoire of skills and tools, share their experiences, and together scrutinize the client/coach relationship." -PHIL-525,"For the last four centuries, scientific research has provided our most reliable understanding of the world. Although the scientific revolution started modestly with attempts to understand stellar movement, we now know the age and constitution of the universe, the basis of heredity, and we can make and break chemical bonds at will. By all appearances, science seems to have made substantial progress from the scientific revolution to the global scientific enterprise of the 21st century. This course is about how science has generated this knowledge, and whether it has been as progressive and reliable as it seems. We will consider methodological issues such as the sources of scientific knowledge, objectivity, the growing importance of computation in the natural sciences, and the nature of modeling. We will examine products of scientific research: explanations, models, theories, and laws of nature. And we will discuss questions about science and values, including whether non-scientific values can and should enter scientific research, the relationship between science and religion, and the role of the public in guiding the scientific enterprise." -FNCE-725,"This course covers fixed income securities (including fixed income derivatives) and provides an introduction to the markets in which they are traded, as well as to the tools that are used to value these securities and to assess and manage their risk. Quantitative models play a key role in the valuation and risk management of these securities.
As a result, although every effort will be made to introduce the various pricing models and techniques as intuitively as possible and the technical requirements are limited to basic calculus and statistics, the class is by its nature quantitative and will require a steady amount of work. In addition, some computer proficiency will be required for the assignments, although familiarity with a spreadsheet program (such as Microsoft Excel) will suffice." -LAW-967, -MKTG-211,"This course is concerned with how and why people behave as consumers. Its goals are to: (1) provide conceptual understanding of consumer behavior, (2) provide experience in the application of buyer behavior concepts to marketing management decisions and social policy decision-making; and (3) to develop analytical capability in using behavioral research." -MEAM-529,"Introduction to MEMS and NEMS technologies: MEMS/NEMS applications and key commercial success stories (accelerometers, gyroscopes, digital light projectors, resonators). Review of micromachining techniques and MEMS/NEMS fabrication approaches. Actuation methods in MEMS and NEMS, MEMS/NEMS design and modeling. Examples of MEMS/NEMS components from industry and academia. Case studies: MEMS inertial sensors, microscale mirrors, micro and nano resonators, micro and nano switches, MEMS/NEMS chem/bio sensors, MEMS gyroscopes, MEMS microphones." -PSYC-435, -MGMT-729,"Announcing the first iPhone at Macworld 2007, Apple CEO Steve Jobs famously boasted: ""And boy, have we patented it!"" How, and to what extent, do patents and intellectual property really provide competitive advantage for innovative technology companies? What makes an IP asset strategically powerful? How do patents impact, and even drive, major corporate decisions including M&A, venture funding and exits, and entry into new markets? In this course, students will learn to critically analyze and answer these questions, gaining insights they can leverage in their future roles as innovation industry executives, entrepreneurs, strategists, and investors. The course includes three major units. In Unit 1, Patents and Innovation Value, we examine closely the relationship between competitive advantage, value proposition, and intellectual property (particularly patents). We will apply our understanding of that relationship to critique and sharpen patent strategy to protect examples of cutting-edge technologies. In Unit 2, Patent Leverage and the Corporate Playbook, we study theory and examples of how intellectual property leverage strategically informs corporate transactions and decisions, for established companies as well as for start-ups. In Unit 3, Limits and Alternatives to Patents, we confront the recent legal trend toward reining in the power and scope of patents. We also consider the growing importance of data as a proprietary technology asset, and discuss options for adapting intellectual property strategy appropriately. Throughout, students will learn and practice applying these concepts to decision-making in examples based on innovative real-world technologies and businesses."
-AFST-285,"The objectives are to continue to strengthen students' knowledge of speaking, listening, reading, and writing Swahili and to compare it with the language of the students; to continue learning about the cultures of East Africa and to continue making comparisons with the culture(s) of the students; to continue to consider the relationship between that knowledge and the knowledge of other disciplines; and using that knowledge, to continue to unite students with communities outside of class. Level 3 on the ILR (Interagency Language Roundtable) scale." -SOCI-159,"Since the collapse of communism in 1989 in Eastern Europe (and 1991 in the Soviet Union), many of the countries in the region have experienced public health crises and demographic catastrophe. Below replacement fertility rates and massive out migration have decimated the populations of these countries even as populations age and place unsustainable strains on pension systems and medical services. The demographic collapse has also been accompanied by falling male life expectancy and the rise of alcoholism, depression, domestic violence, and suicide. The economic exigencies of the transition from communism to capitalism dismantled welfare states at the exact moment when health services were most needed, leaving charities and nongovernmental organizations to try to fill in the gaps. Through a combination of readings from the fields of epidemiology, demography, and medical anthropology, this course examines the public health implications of poverty and social dislocation in post-communist states. All readings and assignments are in English." -FREN-229,"Where and how is French spoken in the world? Which variety (or varieties) of French represents ""good"" or standard language use? What does it mean to have an accent or to experience linguistic insecurity? To what extent have political forces and movements historically affected the evolution of French? How do language attitudes differ among French- and English-speaking regions of the world and what is the status of French in an era of globalization? In what ways does language shape our identities? Le Francais dans le monde/French in the World examines these questions by providing a survey of the sociolinguistics of the French language in the contemporary world. We will explore how societal changes influence the manner and the contexts in which the French language is spoken. Case studies focus on various parts of the Francophone world, including Europe (Belgium, Switzerland), New World (Quebec, Caribbean, Louisiana), Africa (North Africa, Sub-Saharan Africa), etc. Readings and class discussions are in French. Prerequisite: Two 200-level French courses taken at Penn or equivalent." -COML-247,"""A spectre is haunting Europe--the spectre of Communism"": This, the famous opening line of The Communist Manifesto, will guide this course's exploration of the history, legacy, and potential future of Karl Marx's most important texts and ideas, even long after Communism has been pronounced dead. Contextualizing Marx within a tradition of radical thought regarding politics, religion, and sexuality, we will focus on the philosophical, political, and cultural origins and implications of his ideas. Our work will center on the question of how his writings seek to counter or exploit various tendencies of the time; how they align with the work of Nietzsche, Freud, and other radical thinkers to follow; and how they might continue to haunt us today.
We will begin by discussing key works by Marx himself, examining ways in which he is both influenced by and appeals to many of the same fantasies, desires, and anxieties encoded in the literature, arts and intellectual currents of the time. In examining his legacy, we will focus on elaborations or challenges to his ideas, particularly within cultural criticism, postwar protest movements, and the cultural politics of the Cold War. In conclusion, we will turn to the question of Marxism or Post-Marxism today, asking what promise Marx's ideas might still hold in a world vastly different from his own. All readings and lectures in English." -TURK-122,"Similar to TURK 212, Advanced Turkish Culture & Media I, in this course students will also be exposed to social Turkish clubs and will establish their own. They will arrange their Turkish tea parties and learn about Turkish cuisine. Turkish daily news and media will be discussed in class. Students will have the chance to interview Turkish businesspeople, writers, and journalists in class and/or via Skype or Zoom in Turkish. They will compare Turkish team spirit and ethics with those of the United States. Students will prepare and present a drama. Mainly, students will create and decide their activities and discussions, and the instructor will mostly monitor them. They will continue watching Turkish movies and be exposed to Turkish culture through these films. After each movie, discussions and essay writing will be expected." -BEPP-620,"Behavioral economics has revealed a variety of systematic ways in which people deviate from being perfectly selfish, rational, optimizing agents. These findings have important implications for government policy and firm behavior. This course will explore these implications by answering two main questions: (1) what does behavioral economics imply for when and how the government should intervene in markets? (2) What does behavioral economics imply for firms' pricing and production decisions? The course will present the standard economic approaches to answering these questions and then explore how answers change when we consider that people act in behavioral ways. Towards the end of the course, we will investigate specific policy questions, allowing us to debate solutions while hearing from policy makers operating in a world of behavioral agents." -NURS-648,"This course will build on concepts presented in the Diagnosis and Management of Adults across the Lifespan (NURS 646) course. The focus is on refining health assessment skills, interpreting findings, developing and implementing appropriate plans of care to meet common health maintenance needs of adults and to promote the health of adults with more complex health problems with an emphasis on the frail adult. The student will gain increased expertise in communication skills, health assessment skills, interpreting findings, epidemiological concepts and developing and implementing plans of care. The emphasis will be placed upon managing an aging population with complex, chronic healthcare needs and promoting healthy behaviors across the lifespan." -COML-570,"Topic for Fall 2017: ""Object Theory"". This seminar will investigate the rise of and ongoing scholarly concern with ""objects"" and ""things,"" which has emerged from fields such as anthropology and art history as a category of renewed interest for literary scholars, too.
We will investigate key contributions to theories of the object by thinkers such as: Mauss, Barthes, Heidegger, Latour, Benjamin, Bill Brown, Jane Bennett, among others. Literary readings will accompany these theoretical texts." -LAW-597, -GRMN-504, -HSOC-420,"This course is designed to provide HSOC students with the tools necessary to undertake original research, guiding them through the research and writing process. Students will produce either a polished proposal for a senior thesis project, or, if there is room in the course, a completed research paper by the end of term. Students work individually, in small groups and under the close supervision of a faculty member to establish feasible research topics, develop effective research and writing strategies, analyze primary and secondary sources, and provide critiques of classmates' drafts. Students must apply for this course by December 1." -LING-610,Selected topics either in Indo-European comparative linguistics or in historical and comparative method. -SWRK-760,"This course familiarizes students with mental health and mental disorders within the context of the life cycle, viewed from a biopsychosocial perspective. Prevalent categories of psychiatric disorders are considered with respect to their differentiating characteristics, explanatory theories, and relevance for social work practice, according to the DSM and other diagnostic tools. The course includes biological information and addresses the impact of race, ethnicity, social class, age, gender, and other sociocultural variables on diagnostic processes." -CBE-535,"This course provides an overview of fundamental concepts in colloid and interface science. Topics include the thermodynamics of interfaces, interfacial interactions (e.g. van der Waals interactions, electrostatics, steric interactions), adsorption, the hydrodynamics and stability of interfacial systems, self-assembly, etc. Connections to self-assembly and directed assembly of nanomaterials and emerging topics are explored. Prerequisites: undergraduate thermodynamics, some familiarity with concepts of transport phenomena (including fluid flow and mass transfer) and differential equations" -EAS-502,"The objective is to introduce students to the major aspects of renewable energy, with its foundations in technology, association to economics, and impacts on ecology and society. This introduction is intended both for general education and awareness and for preparation for careers related to this field. The course spans from basic principles to applications. A review of solar, wind, biomass, hydroelectric, geothermal energy, and prospects for future energy systems such as renewable power generation in space." -OIDD-245,"Students who take this course will engage with the world of data science using tools such as Tableau and R that are becoming increasingly popular in industry. The first half of the course is designed for students with limited experience with data projects, and while familiarity with R, via courses such as STAT 405 or STAT 470, will be ideal preparation, students with other programming exposure can pick up the required skills via review sessions and self-instruction. The second half of the course extends students' experience to industry applications of text mining and machine learning and requires students to work with more unstructured data. Each week of the course will be devoted to analysis of a data set from a particular industry (e.g.
HR, sports, fashion, real estate, music, education, politics, restaurants, non-profit work), which we will use to answer business questions by applying analytic techniques. The course is very hands-on, and students will be expected to become proficient at applying data to business decisions and at effectively analyzing large data sets to inform decisions about business problems." -LAW-974, -MUSC-135,"This course surveys American musical life from the colonial period to the present. Beginning with the music of Native Americans, the European legacy, and the African Diaspora, the course treats the singular social and political milieu that forged the profile of America's musical landscape. Attention will be given to the establishment of the culture industry and to various activities such as sacred music, parlor music, concert and theater music, the cultivation of oral traditions, the appearance of jazz, the trajectory of western art music in the United States, and the eventual global dominance of American popular music. Music 070 prerequisite. Preference given to music Majors and Minors. Fulfills the Cultural Diversity in the U.S. College Requirement." -MKTG-239,"As consumers, we are constantly exposed to advertisements and experience visual messages from product packages in stores, retail displays, and products already owned. In essence, visual marketing collateral is omnipresent and is an essential part of corporate visual identity, strategy, branding, and communication. Some of this falls to creative graphic design, but advertising, design, and marketing can also be significantly enhanced by knowledge of how visual information and its presentation context can be optimized to deliver desirable and advantageous messages and experiences. This course will emphasize how to measure, interpret, and optimize visual marketing. This course will use lectures, discussions, exercises and a group project to help students understand the underlying processes that influence our visual perception and visual cognition. Students will learn about the theoretical processes and models that influence attention and visual fluency. Students will also be exposed to eye-tracking instruments that help measure eye movement. Finally, we will explore how visual stimuli can influence consumer memory, persuasion, and choice. We will examine practical applications in marketing, advertising, packaging, retail, and design contexts." -CIS-195,"This project-oriented course is centered around application development on current mobile platforms like iOS and Android. The first half of the course will involve fundamentals of mobile app development, where students learn about mobile app lifecycles, event-based programming, efficient resource management, and how to interact with the range of sensors available on modern mobile devices. In the second half of the course, students work in teams to conceptualize and develop a significant mobile application. Creativity and originality are highly encouraged! Prerequisite: CIS 120 or previous programming experience." -LING-230,An introduction to phonetics and phonology.
Topics include articulatory phonetics (the anatomy of the vocal tract; how speech sounds are produced); transcription (conventions for representing the sounds of the world's languages); classification (how speech sounds are classified and represented cognitively through distinctive features); phonology (the grammar of speech sounds in various languages: their patterning and interaction) and syllable structure and its role in phonology. Prerequisite: A prior course in linguistics or permission of instructor. -EDUC-668,"This seminar explores key foundational questions for graduate-level work: How is academic knowledge formed and reproduced? How do we engage with and interrogate the scholarly research? And, how do we participate in the academic conversation around a topic? The Master's Paper Seminar introduces students to academic discourse, disciplinary writing conventions, and research practices. As part of this course, students are guided through preparing a literature review of a topic of their choice. This review, in turn, forms the foundation of their 30-40 page Master's Paper that is required for the completion of the M.S.Ed degree." -EAS-897, -MKTG-806,"RETAIL MERCHANDISING; This course introduces the role of merchandising at various retailers with an emphasis on apparel and soft-line businesses. Selected topics will include product development, line planning, sourcing, product lifecycle, forecasting, buying, planning and vendor relations. Special emphasis will be placed on current trends in retail merchandising through current articles and industry guest speakers. The objective of this course is to familiarize students with merchandising theory and strategies considered to be current best practices in retailing." -OIDD-934,"The course goal is to provide a brief but fairly rigorous introduction to the formulation and solution of dynamic programs. Its focus is primarily methodological. We will cover discrete state space problems, over finite or infinite time horizon, with and without discounting. Structured policies and their theoretical foundation will be of particular interest. Computational methods and approximation methods will be addressed. Applications are presented throughout the course, such as inventory policies, production control, financial decisions, and scheduling." -EDUC-663,"The course provides an understanding of sociocultural concepts essential to the work of counselors and providers of psychological services. This course provides a contextual and applied understanding of working with socioculturally diverse clients. The purpose of this course is to expand one's understanding of the impact of sociocultural and contextual factors, social-psychological influences, the role of values, and the interaction of identities in counseling and psychological services. Both intervention and prevention strategies will be addressed. The student will be required to demonstrate a working knowledge of key concepts in sociocultural psychology and the topical areas addressed in the course." -AFRC-491,Continuation of AFST 490. Offered through Penn Language Center. Prerequisite: Permission of Penn Language Center. -GSWS-344,"Intellectual, emotional and behavioral development in the college years. Illustrative topics: developing intellectual and social competence; developing personal and career goals; managing interpersonal relationships; values and behavior. Recommended for submatriculation in Psychological Services Master's Degree program." 
-VLST-233,"Introduction to major artistic traditions of China and Japan and to the methodological practices of art history. Attention given to key cultural concepts and ways of looking, in such topics as: concepts of the afterlife and its representation; Buddhist arts and iconography; painting styles and subjects; and more broadly at the transmission of styles and cultural practices across East Asia. Serves as an introduction to upper-level lecture courses in East Asian art history and cultures. If size of class permits, certain sessions will be held in the University Museum or the Philadelphia Museum of Art." -JPAN-022,"PREREQUISITES: Completion of JPAN 021, JPAN 012, or the equivalent. This is equivalent to JPAN 111 and JPAN 112 in one semester, 2CU, and completes the College language requirement. Textbooks: Genki II (Lesson 13- Lesson 23) and Tobira: Gateway to Advanced Japanese (Unit 1-Unit 3) Kanji: Approximately 140 new Kanji will be introduced. Overall Kanji knowledge will be approximately 400." -HIST-650,Reading and discussion course on selected topics in African history -CPLN-643,"This newly reconstituted course will introduce designers and planners to practical methods of design and development for major real estate product types. Topics will include product archetypes, site selection and obtaining entitlements, basic site planning, programming, and conceptual and basic design principles. Project types will include, among others, infill and suburban office parks, all retail forms, campus and institutional projects. Two-person teams of developers and architects will present and discuss actual development projects." -ACCT-706,"This course covers managerial accounting and cost management practices that can be strategically applied across the various functions of a business organization to improve organizational performance. The course emphasizes the methods available to measure and evaluate costs for decision-making and performance evaluation purposes. It reviews a number of cost management issues relating to the design and implementation of strategic, marketing, value analysis, and other management models in modern firms; and identifies major contemporary issues in managerial accounting and financial decision-making. A variety of case studies in different industries and decision contexts are used to examine the application of these concepts." -GRMN-514, -INTL-BSL, -LAW-920, -CPLN-632,This course explores the nature and use of raster-oriented (i.e. image-based) Geographic Information Systems (GIS) for the analysis and synthesis of spatial patterns and processes. Previous experience in GIS is not required. -CIT-594,"This course will focus on data structures, software design, and advanced Java. The course starts off with an introduction to data structures and basics of the analysis of algorithms. Important data structures covered will include arrays, lists, stacks, queues, trees, hash maps, and graphs. The course will also focus on software design and advanced Java topics such as software architectures, design patterns, networking, multithreading, and graphics. We will use Java for the entire course." -COML-101,"The purpose of the course is to introduce you to the subjects of the discipline of Folklore, their occurrence in social life and the scholarly analysis of their use in culture.
As a discipline, folklore explores the manifestations of expressive forms in both traditional and modern societies, in small-scale groups where people interact with each other face-to-face, and in large-scale, often industrial societies, in which the themes, symbols, and forms that permeate traditional life occupy new positions, or occur on different occasions in everyday life. For some of you, the distinction between low and high culture, or artistic and popular art, will be helpful in placing folklore forms in modern societies. For others, these distinctions will not be helpful. In traditional societies, and within social groups that define themselves ethnically, professionally, or culturally, within modern heterogeneous societies, and traditional societies in the Americas, Africa, Asia, Europe and Australia, folklore plays a more prominent role in society than it appears to play in literati cultures on the same continents. Consequently, the study of folklore and the analysis of its forms are appropriate in traditional as well as modern societies and any society that is in a transitional phase." -BE-498,Second semester of a year-long project. -ARCH-712,A seminar on advanced topics in architectural design and theory. Topics and instructors will vary. -PSCI-258,"What exactly should be considered a fundamental ""human right""? What is the basis for claiming that something is a fundamental human right? This course will examine not only broad conceptual debates, but will also focus on specific issue areas (e.g., civil rights, economic rights, women's rights), as well as the question of how new rights norms emerge in international relations." -BMIN-520, -GEOL-643,"The evaluation of technical, social and economic constraints on the design of water supply and sanitation projects. The focus on sustainable design emphasizes how technical solutions fit within the appropriate social context. Case studies are used to demonstrate these principles across a range of examples from developed and developing countries including detailed studies from rural communities with limited resources." -DADE-924,"There are numerous conditions that affect the oral and maxillofacial region, including oral mucosal diseases, temporomandibular joint disorders, orofacial pain syndromes and salivary gland dysfunction. Patients presenting with these disorders can be challenging to diagnose and manage. Several techniques are available for evaluation of these conditions and will guide the clinician toward proper diagnosis. Management protocols vary based upon the specific condition affecting the oral and maxillofacial region. This course will highlight the etiology, clinical presentation, diagnostic techniques, and management protocols of several conditions, including oral mucosal diseases, temporomandibular joint disorders, orofacial pain syndromes, and salivary gland disorders." -LING-151,"This course describes current theorizing on how the human mind achieves high-level cognitive processes such as using language, thinking, and reasoning. The course discusses issues such as whether the language ability is unique to humans, whether there is a critical period to the acquisition of a language, the nature of conceptual knowledge, how people perform deductive reasoning and induction, and how linguistic and conceptual knowledge interact." -KORN-132,"This course is a continuation of KORN 131 and aims to further develop students' linguistic and cultural competence by building on materials covered in KORN 131.
In addition to gaining a deeper understanding of Korean culture, the course focuses on enhancing linguistic accuracy and fluency in both spoken and written Korean. Particular emphasis will be placed on building a meaningful Korean-speaking community, as well as consolidation of grammar structures, and expansion and enhancement of vocabulary. Topics include preparing for a trip to Korea, finding housing, college culture in Korea, entertainment and participating in various social events. Upon completion of this course, students will be able to express themselves more accurately and participate in Korea-related communities more meaningfully. This course completes the College language requirement." -ANTH-307,This course examines the social and political lives of contemporary Native American Indians in the United States and Canada. Topics include: Indigenous identity; homelands and natural resources; popular culture and media; Indigenous arts and cultural expression; museum representations; athletics; gender relations; tribal recognition and sovereignty; and resistance movements. We will consider the origins of federal programs and legislation that have become essential to the protection of Native American freedoms. Students can expect to gain an appreciation of the complexity and cultural diversity of Native communities and tribal nations and insights into their interactions with other cultures over time. -CIS-399,Visit the CIS department website for descriptions of available Special Topics classes. -RELS-144,"""Jesus and Muhammad walk into a bar..."" We can think about multiple ways to complete the joke. They could talk about prophecy and prophetic succession, God's word, women, pagans and Jews, state authority, among others. This course traces the long arc of religious history, from the Jesus movement to the rise of Islam. Through texts, objects, buildings, and artistic representations we will study the time period that connects these two significant developments that majorly changed world history. Lectures and discussions will consist of close reading, analysis, and discussion of primary sources, analysis of non-literary media, and engagement with modern scholarship. We will raise questions about ancient and modern perspectives on religious practice, representation, authority, gender, race/ethnicity, memory, and interreligious encounters." -MUSC-236,"Participation in the course is contingent upon a successful audition. This course must be taken for a letter grade (pass/fail option may not be utilized for this course). This weekly seminar will explore music from the past and present through class discussions of performance, historical context, and analytical aspects of the music led by a professor and/or performer. One example of a class in this number will be an in-depth study of chamber music repertoire led by the Daedalus Quartet. Students will prepare for a final performance at the end of the semester as well as a paper/presentation. Students interested in this applied approach to music may also wish to take 256 and/or 276. Prerequisite: Students must successfully audition to be in the course; previous private study on an instrument is required. Basic fluency in rudiments of music theory is also required." -HSOC-251,"Many factors have shaped, and continue to shape, population health and public health policy. This course will explore the concept, mission, and core functions of public health.
Students will have a chance to learn about its key methodological (epidemiology, biostatistics) and content (environmental health, social and behavioral sciences, health policy) areas. In addition, we will focus on topics of particular relevance to the current health of the public; topics likely will include the basics of life (food, water, and shelter) and topics of current interest (e.g., motor vehicle crashes, mental health, violence)." -EDUC-545, -CAMB-534,"An advanced seminar course emphasizing genetic research in model organisms and how it informs modern medicine. Each week a student will present background on a specific human disease. This is followed by an intense discussion by the entire class of 2 recent papers in which model organisms have been used to address the disease mechanism and/or treatment. As a final assignment, students will have the opportunity to write, edit, and publish a ""News & Views"" style article in the journal ""Disease Models and Mechanisms"". Offered spring semester. Prerequisite: If course requirements not met, permission of instructor required." -MATH-730,Topics from the literature. The specific subjects will vary from year to year. -CAMB-706,"This is a year-long course for the incoming CAMB-MVP students and others wishing to gain a broad overview of pathogens and their interactions with hosts. The course will provide students with key fundamental knowledge of Microbiology, Virology and Parasitology. The course starts with introductory lectures on Concepts of Host-Pathogen interactions. The rest of the course is divided into sections on Bacteriology, Virology and Parasitology. Each week there are three 1-hour class slots that are either lectures on a specific topic or discussions of a relevant paper presented by students. Classes are led by faculty from across the campus and are highly interactive. Evaluation is based on mid and final take-home essay topics for each of the three sections. Regular attendance and active participation in the discussions is also part of the evaluation." -NURS-513,"This course will examine obesity from scientific, cultural, psychological, and economic perspectives. The complex matrix of factors that contribute to obesity and established treatment options will be explored. Prerequisite: Undergraduates by permission of the instructor. This course satisfies the Society & Social Structures Sector for Nursing Class of 2012 and Beyond." -LALS-158,"This survey course considers Latin American musics within a broad cultural and historical framework. Latin American musical practices are explored by illustrating the many ways that aesthetics, ritual, communication, religion, and social structure are embodied in and contested through performance. These initial inquiries open onto an investigation of a range of theoretical concepts that become particularly pertinent in Latin American contexts--concepts such as post-colonialism, migration, ethnicity, and globalization. Throughout the course, we will listen to many different styles and repertories of music and then work to understand them not only in relation to the readings that frame our discussions but also in relation to our own, North American contexts of music consumption and production. (Formerly Music 158)." -SWRK-714,"The focus of learning in this semester is theories and skills related to clinical practice with individuals and groups, differential intervention, and the broadening of the professional role and repertoire.
The course content and assignments are closely linked with the students' learning objectives and experiences in the field. Students extend and refine their practice knowledge and skills and learn to intervene with cognitive, behavioral, and narrative modalities. This semester focuses also on work with complex trauma across systems and populations. Students consolidate their identification as professionals and learn to constructively use the environment to effect systems changes." -CIS-197,"This course provides an introduction to modern web development frameworks, techniques, and practices used to deliver robust client side applications on the web. The emphasis will be on developing JavaScript programs that run in the browser. Topics covered include the JavaScript language, web browser internals, the Document Object Model (DOM), HTML5, client-side app architecture and compile-to-JS languages like (Coffeescript, TypeScript, etc.). This course is most useful for students who have some programming and web development experience and want to develop moderate JavaScript skills to be able to build complex, interactive applications in the browser." -EALC-622,"Continuation of CHIN491 EALC221/621, which is the only prerequisite for this course. Upon completion of Shadick, readings in a wide selection of texts with Chinese commentaries may be taken up. These readings are in part chosen to reflect student interest. This is the second half of a year-long course. Those who enroll must take both semesters." -VISR-699,"This course enables student to undertake a self-directed study on a topic in Veterinary Medicine, under the supervision of a faculty member. Students are required to submit an Independent Study & Research (ISR) application to the Registrar Manager in the Office for Students. Credit may vary." -BE-101,Introduction to Bioengineering II. Continuation of the freshman introductory bioengineering course. This course introduces students to the design process and emphasizes its role in engineering. -LGST-611,"This course uses the global business context to introduce students to important legal, ethical and cultural challenges they will face as business leaders. Cases and materials will address how business leaders, constrained by law and motivated to act responsibly in a global context, should analyze relevant variables to make wise decisions. Topics will include an introduction to the basic theoretical frameworks used in the analysis of ethical issues, such as right-based, consequentialist-based, and virtue-based reasoning, and conflicting interpretations of corporate responsibility. The course will include materials that introduce students to basic legal (common law vs. civil law) and normative (human rights) regimes at work in the global economy as well as sensitize them to the role of local cultural traditions in global business activity. Topics may also include such issues as comparative forms of corporate governance, bribery and corruption in global markets, human rights issues, diverse legal compliance systems, corporate responses to global poverty, global environmental responsibilities, and challenges arising when companies face conflicting ethical demands between home and local, host country mores. The pedagogy emphasizes globalized cases, exercises, and theoretical materials from the fields of legal studies, business ethics and social responsibility." -CLST-223,"Did you ever wonder what the world of the Iliad and Odyssey was really like? 
This illustrated lecture course surveys the prehistory and early history of the Greek world through texts and material remains, with the aim of bringing to life the society, economy, and politics of this ancient era. Among the topics are the rise and fall of the great Bronze Age civilizations of the Aegean area, the Minoans of Crete and the Mycenaeans of the Greek mainland; the cataclysmic volcanic eruption on the island of Thera (modern Santorini) and its long-term consequences; the Trojan War (myth or history??); the world of the Dark Age that followed the collapse of the Mycenaean palaces; and the Greek renaissance of the eighth century B.C. - including the adoption of the alphabet, the great colonizing movement, and the great Panhellenic sanctuaries like Olympia and Delphi - that laid the foundation for the Classical world to come. There are no prerequisites, and no prior knowledge of archaeology or the Greek world is assumed." -NPLD-750,"Businesses performing philanthropic activity often use their platform of CSR activities to engage with society directly, via a corporate foundation, or through partnerships with nonprofit organizations. Although such philanthropic activities are not directly related to profit-making ventures, they may boost a firm's reputation, support product marketing and talent recruitment, and increase employee engagement and commitment, and thus contribute to profit indirectly. Many businesses undertake their CSR-related philanthropic activities using strategic partnerships with nonprofits or public sector organizations to meet their goals. This provides opportunities for nonprofit and public sector leaders to achieve social and sustainable change." -REAL-240,"This course is designed for majors in Real Estate, but is also open to finance-oriented students who wish a deeper analysis of real estate investment and investment analysis issues than that offered in REAL 209. The class will contain a mixture of lectures, guest speakers and case discussions. Academic research is paired with recent industry analysis of key issues in order to marry sound theory and empirical results with current events and practices. Several classes will include lectures outlining what economics and finance tell us about a number of topics. Generally, these will be followed by guest lectures from industry professionals who will focus on a specific application of the principles introduced in the lectures." -SWRK-713,"This course builds upon the foundation of historical, psychological, sociological, economic, political, and personal knowledge about institutionalized forms of racism and discrimination developed in SWRK 603, American Racism and Social Work Practice. The course uses an understanding of the elements of oppression to critically examine strategies for addressing racism and sexism in organizations and communities through systematic assessment and planning for social change. The course examines change at three levels: organizations, communities, and social movements." -PSCI-333,"Political polls are a central feature of elections and are ubiquitously employed to understand and explain voter intentions and public opinion. This course will examine political polling by focusing on four main areas of consideration. First, what is the role of political polls in a functioning democracy? This area will explore the theoretical justifications for polling as a representation of public opinion.
Second, the course will explore the business and use of political polling, including media coverage of polls, use by politicians for political strategy and messaging, and the impact polls have on elections specifically and politics more broadly. The third area will focus on the nuts and bolts of election and political polls, specifically with regard to exploring traditional questions and scales used for political measurement; the construction and considerations of likely voter models; measurement of the horserace; and samples and modes used for election polls. The course will additionally cover a fourth area of special topics, which will include exit polling, prediction markets, polling aggregation, and other topics. It is not necessary for students to have any specialized mathematical or statistical background for this course." -PSYC-449,"Topics vary each semester. PSYC 449 (Gerstein) Neuroscience for Policymakers: This seminar will provide an overview of the neuroscience behind some of the most relevant issues in public health policy today. We will examine the primary scientific literature as well as delve into lay articles about the science and policy surrounding each issue. /PSYC 449 (Epstein) Consciousness: Consciousness is our subjective experience of the world, including both perceptions and felt internal states. In this seminar, we will explore the burgeoning scientific literature on the neural basis of consciousness. We will focus in particular on three topics: What are the neural systems underlying visual awareness? What are the mechanisms that control the progression of conscious contents to create our stream of thought? What is the relationship between consciousness and behavior? /PSYC 449 (Jenkins) The Social Brain: This seminar examines the cognitive and neural mechanisms that enable humans to predict and understand people's behavior. We will be propelled throughout the course by fundamental questions about the human social brain. For example, why are humans so social? Does the human brain have specialized processes for social thought? Consideration of these questions will involve advanced treatment of a range of topics. Prerequisite: PSYC 449, 601 are LPS courses. PSYC 449, 301, 303 are Psych Department courses." -BMB-650,"Participation in the ""Dr. George W. Raiziss Biochemical Rounds"", a weekly seminar program sponsored by the Department of Biochemistry and Biophysics. Program deals with a wide range of modern biochemical and biophysical topics presented by established investigators selected from our faculty, and by leading scientists from other institutions. Prerequisite: Permission needed from Department" -AFRC-581,"James Baldwin, one of the greatest writers of the twentieth century, spoke to the issues of his times as well as to our own. This class will examine the intellectual legacy that Baldwin left to present-day writers such as Toni Morrison, Charles Johnson, Ta-Nehisi Coates, Thulani Davis, Caryl Phillips and others. We will spend time reading and discussing Baldwin's novels, short stories, plays and essays. In doing so, we will be considering the complex assumptions and negotiations that we make in our day-to-day lives around our identities and experiences built upon gender, sexual preference, the social-constructs called ""race,"" and more. James Baldwin's life and work will be the touchstone that grounds our discussions.
We will read Go Tell It on the Mountain, Another Country, The Fire Next Time, and Giovanni's Room and see films I Am Not Your Negro, The Price of the Ticket and The Murder of Emmett Till. Students will research subjects of their own choosing about Baldwin's life and art. For example, they may focus on the shaping influences of Pentecostalism; segregation; racism; homophobia; exile in Paris; the Civil Rights Movement; Black Power, Baldwin's faith, or his return to America." -HCMG-863,"This course provides an overview of the management, economic and policy issues facing the pharmaceutical and biotechnology industries. The course perspective is global, but with emphasis on the U.S. as the largest and most profitable market. Critical issues we will examine include: R&D intensive cost structure with regulation and rapid technological change; strategic challenges of biotech startups; pricing and promotion in a complex global marketplace where customers include governments and insurers, as well as physicians and consumers; intense and evolving M&A, joint ventures, and complex alliances; government regulation of all aspects of business including market access, pricing, promotion, and manufacturing. We use Wharton and industry experts from various disciplines to address these issues." -NURS-757,"This course is the second of four residencies that provide the nurse anesthetist student the opportunity to attain competencies within the Certified Registered Nurse Anesthesia (CRNA) scope of practice. Throughout the residency, the nurse anesthesia resident will utilize appropriate clinical judgment to manage the complex medical, physical and psychosocial needs of clients in the perioperative phases. Further refinement of the patient assessment, anesthesia administration, and critical thinking skills is emphasized. Students progress by providing anesthesia care for patients throughout the continuum of health care services. The guidance of CRNA faculty preceptors contributes to the development of the independence of the CRNA student. Collaborative practice within a care team model is emphasized and the student assumes more overall responsibility for the quality of care for the patients throughout the perioperative experience, with clinical support as required. Prerequisite: Enrollment in NANS program, year 2" -EPID-625,"This course is an introduction to statistical methods that can be used to evaluate biomarker prognostic studies and multivariate prediction models. It is designed for advanced MS and PhD-level students in epidemiology and related fields (nursing, health policy, social work, demography). Topics will include biostatistical evaluation of biomarkers, predictive models based on various regression modeling strategies and classification trees, assessing the predictive ability of a model; internal and external validation of models; and updating prognostic models with new variables or for use in different populations. Students will learn about the statistical methods that are required by current reporting guidelines for biomarker prognostic studies or the reporting guidelines for multivariable prediction models. Prerequisite: Working knowledge of either Stata, SAS or R to fit regression, logistic regression and/or Cox regression models. Permission of course director for students outside of School of Medicine graduate programs." 
-NPLD-782,"Studying the behavior of groups and the actions/inactions of people within groups provides a doorway to deeper understanding of our selves, our families, our friends, our colleagues, our organizations, and our communitites. This half credit course is designed for Penn Graduate students eager to generate constructive group processes when chairing a committee, managing a work group, teaching in a classroom, serving on a jury, conducting a support/therapy group or facilitating strategy formulation. It is easy to see what is going well or poorly when observing what others are doing. But tuning into and gaining a comprehensive grasp of these processes when they are happening in the groups we belong to and learning how to take constructive actions in the here and now when it can have a meaningful impact requires a high level of cognitive capability combined with a special form of relational artistry. This course is an amalgam of experiential activities and energizing ways to internalize the rich concepts developed during many decades of applied-academic research. Participants are required to be fully present and fully engaged for the whole weekend, read the major book and a number of articles, pus write a paper. This course occupies a full weekend and runs from 6pm Fri. to 6pm Sun. This is a 48-hour intensive weekend; you go home to sleep. Participants applying for this course are required to take a Primer, which provides potential participants with a common conceptual base for engaging in the essential learning and lays out the intellectual foundations of the course. Permits will be issued soon after participants have taken the Primer. Contact the NPLD program for more information on primer and course dates." -PHIL-205,"This course will survey several central topics in philosophy of mind and language, as well as investigate how these areas of philosophy interact with the scientific study of the mind. Questions addressed may include: What is it to have a mind? What is consciousness? What is it to think, to perceive, to act, to communicate, to feel emotions? What is the relationship between the mind and the brain? Can there be a science of the mind? Of language? What can it tell us? What can philosophy contribute to cognitive science? We will look for more precise ways of asking these questions, and we will study some canonical answers to them." -MGMT-692,"This is a course the builds on the basic Negotiation course. In this course, we explore a wide range of negotiation topics from crisis and hostage negotiations, to the role of emotions including anxiety, envy and anger in negotiations, to backlash effects for women in negotiations, and the role of alcohol in negotiations. We will survey many aspects of current negotiation research, discuss historic negotiation cases, and students will participate in role-play exercises. Many of the role play exercises will involve multi-party negotiations and afford opportunities to hone skills in team-based negotiations." -DYNM-630,"According to newest research, over 70% of organizations worldwide have started digital transformation initiatives or at least plan for them. New digital tools are available almost daily - and many of them have the potential of a major business impact. They enable high performance practices and often even new business models. 
The resulting transformation of business processes leads to superior customer or supplier experience, and organizations become more efficient and agile, meet compliance requirements, or improve the quality of products and services. They help achieve a level of process performance you would never have thought of before. Robotic Process Automation (RPA), Blockchain, Artificial Intelligence, the Internet of Things and Cloud-based software architectures with next-generation automation approaches are some examples. However, many organizations underestimate the challenges of digital business transformation, resulting in initiatives delivering little or no business value. A major financial organization, for example, stopped the use of over 1000 robots since that digital workforce had created severe issues: processes changed more frequently than expected - the robots didn't, which led to numerous exception cases that had to be handled manually. The elimination of bottlenecks created more severe issues downstream. Value-driven Digital Business Transformation addresses those challenges. It proposes approaches, methods and tools that help to focus on the right sub-processes to transform and improve those areas considering the end-to-end business context as well as sustain the results through appropriate governance. The systematic use of digital technologies requires hybrid workforce management, aligning people, robots and other technologies through appropriate business process management practices. A value-driven digital transformation prepares for this situation. The course discusses design and execution principles as well as related methods and tools to realize the full business value of digital business transformations, delivering results fast and at minimal risk. It combines the newest case studies with current research findings to master business impacts of digitalization." -PERS-612,"This course is designed to help you build upon what you have learned in Elementary Persian I. Emphasis is placed on using the language for interpersonal, interpretive, and presentational modes of communication. Therefore, use of English is restricted. Listening, speaking, reading, and writing-as well as culture, vocabulary, grammar, and pronunciation-are integrated into the course. Students must either have successfully completed PERS 611, or take the departmental exam." -EDCE-382,"The three PLN CDA courses build on the experiential knowledge of the early childhood practitioner (candidate). The courses connect child development theory and practice, health, safety, nutrition, and family community relations with the significant responsibilities of the candidate's ethical practice with children, families, and the community. The CDA six competency goals and thirteen functional areas are integrated into course content and discussions. Part II -Understanding and Supporting Children's Development : Modules 5-10 - To advance physical and intellectual competence" -NURS-708,"This course will explore the philosophy and growth of public policy that has directed the American Health Care System in its ever expanding movement toward universal health care for all citizens. Analysis of health policy and systems content will assist the students to identify the knowledge and skills needed for the health or human service provider to assume leadership roles in the formulation of public policy for change; this includes system restructuring, service delivery and funding of health care.
Emphasis will be on the effect of policy on the individual/family user of health care services rather than the effect on professional health care providers or health care delivery systems. Special attention will be given to the effect of policy on populations, both urban and rural, living near and below the poverty level." -IMPA-606,"From Confucius to Kant; from Machiavelli's The Prince to Loyola's The Spiritual Exercises to John Stuart Mill's On Liberty; from ideological tenets of Marxist-Leninist communism to catechetical tenets of post-Vatican II Catholicism; from the 20th-century prophet of pluralism Sir Isaiah Berlin to the ancient Hebrew prophet Isaiah; there is no shortage of ideas, beliefs and guiding principles to help leaders recognize and address the inevitable ethical questions related to public problem-solving. Through classic and contemporary readings and case studies, this course covers the basics of contemporary game theory as it relates to the empirics and ethics of negotiation. You are also exposed to several different philosophical, civic, ideological and religious traditions in moral reasoning as they relate to leadership ethics and effective public problem-solving. **For IMPA Students Only**" -MKTG-352,"MARKETING ANALYTICS: Companies are currently spending millions of dollars on data-gathering initiatives - but few are successfully capitalizing on all this data to generate revenue and increase profit. Moving from collecting data to analysis to profitable results requires the ability to forecast and develop a business rationale based on identified data patterns. Marketing Analytics will cover the three pillars of analytics - descriptive, predictive and prescriptive. Descriptive Analytics examines different types of data and how they can be visualized, ultimately helping you leverage your findings and strengthen your decision making. Predictive Analytics explores the potential uses of data once collected and interpreted. You will learn to utilize different tools, such as regression analysis, and estimate relationships among variables to predict future behavior. Prescriptive Analytics takes you through the final step - formulating concrete recommendations. These recommendations can be directed toward a variety of efforts including pricing and social-platform outreach." -MEAM-891,"Intended for graduate students conducting research. Building upon the fundamentals of mechanical design, this hands-on, project-based course provides participants with the knowledge and skills necessary to design, analyze, manufacture, and test fully functional subtractive manufacturing processes and part components. Topics covered include an introduction to machine elements, analysis of the mechanics of machining, manufacturing technology, precision fabrication (milling, turning, and computer-controlled machining), metrology, tolerances, cutting-tool fundamentals and engineering materials." -COML-096,"What makes men and women different? What is the nature of desire? This course introduces students to a long history of speculation about the meaning and nature of gender and sexuality -- a history fundamental to literary representation and the business of making meaning. We will consider theories from Aristophanes' speech in Plato's Symposium to recent feminist and queer theory. Authors treated might include: Plato, Shakespeare, J. S.
Mill, Mary Wollstonecraft, Sigmund Freud, Virginia Woolf, Simone de Beauvoir, Adrienne Rich, Audre Lorde, Michel Foucault, Gayle Rubin, Catherine MacKinnon, Eve Kosofsky Sedgwick, Judith Butler, bell hooks, Leo Bersani, Gloria Anzaldua, David Halperin, Cherrie Moraga, Donna Haraway, Gayatri Spivak, Diana Fuss, Rosemary Hennesy, Chandra Talpade Mohanty, and Susan Stryker. See the English Department's website at www.english.upenn.edu for a description of the current offerings." -PHYS-016,"The developed world's dependence on fossil fuels for energy production has extremely undesirable economic, environmental, and political consequences, and is likely to be mankind's greatest challenge in the 21st century. We describe the physical principles of energy, its production and consumption, and environmental consequences, including the greenhouse effect. We will examine a number of alternative modes of energy generation - fossil fuels, biomass, wind, solar, hydro, and nuclear - and study the physical and technological aspects of each, and their societal, environmental and economic impacts over the construction and operational lifetimes. No previous study of physics is assumed. Prerequisites: Algebra and Trigonometry. May be counted as Science Studies for students in Class of 2009 and prior. Target audience: Non-science majors (although science/engineering students are welcome)." -NELC-102,"This is the second half of the Near East sequence. This course surveys Islamic civilization from circa 600 (the rise of Islam) to the start of the modern era and concentrates on political, social, and cultural trends. Although the emphasis will be on Middle Eastern societies, we will occasionally consider developments in other parts of the world, such as sub-Saharan Africa, Central Asia, and Spain, where Islamic civilization was or has been influential. Our goal is to understand the shared features that have distinguished Islamic civilization as well as the varieties of experience that have endowed it with so much diversity." -LAW-631, -JWST-053,"Development of the skills of reading, writing, and conversing in modern Hebrew on an intermediate level. Open to all students who have completed two semesters of Hebrew at Penn with a grade of B- or above and new students with equivalent competency." -VLST-261,"In this studio-based course, students are introduced to video production and postproduction as well as to selected historical and theoretical texts addressing the medium of video. Students will be taught basic camera operation, sound recording and lighting, as well as basic video and sound editing and exporting using various screening and installation formats. In addition to a range of short assignment-based exercises, students will be expected to complete three short projects over the course of the semester. Critiques of these projects are crucial to the course as students are expected to speak at length about the formal, technical, critical and historical dimensions of their works. Weekly readings in philosophy, critical theory, artist statements and literature are assigned. The course will also include weekly screenings of films and videos, introducing students to the history of video art as well as to other contemporary practices." -MUSC-171,"Continuation of techniques established in Theory and Musicianship I. Explores chromatic harmony. Concepts will be developed through analysis and model composition. Musicianship component will include sight singing, clef reading, harmonic dictation and keyboard harmony.
Prerequisite: Required of music majors." -COML-006,"Premodern India produced some of the world's greatest myths and stories: tales of gods, goddesses, heroes, princesses, kings and lovers that continue to capture the imaginations of millions of readers and hearers. In this course, we will look closely at some of these stories especially as found in Purana-s, great compendia composed in Sanskrit, including the chief stories of the central gods of Hinduism: Visnu, Siva, and the Goddess. We will also consider the relationship between these texts and the earlier myths of the Vedas and the Indian Epics, the diversity of the narrative and mythic materials within and across different texts, and the re-imagining of these stories in the modern world." -PSCI-181,"This course will provide an overview of major figures and themes of modern political thought. We will focus on themes and questions pertinent to political theory in the modern era, particularly focusing on the relationship of the individual to community, society, and state. Although the emergence of the individual as a central moral, political, and conceptual category arguably began in earlier eras, it is in the seventeenth century that it takes firm hold in defining the state, political institutions, moral thinking, and social relations. The centrality of ""the individual"" has created difficulties, even paradoxes, for community and social relations, and political theorists have struggled to reconcile those throughout the modern era. We will consider the political forms that emerged out of those struggles, as well as the changed and distinctly ""modern"" conceptualizations of political theory such as freedom, responsibility, justice, rights and obligations, as central categories for organizing moral and political life." -ENGL-282,"This course examines the coming to pass of trap music from several perspectives: 1) that of its technological foundations and innovations (the Roland 808, Auto-tune, FL Studio (FruityLoops), etc.); 2) that of its masters/mastery (its transformation of stardom through the figures of the producer (Metro Boomin) and the rock star (Future)); 3) that of its interpretability and effects (what does the music say and do to us). We will thus engage with this music as a practice of art and form of techno-sociality that manifests uncanny and maximal attunement with the now." -BSTA-670,"This course concentrates on computational tools, which are useful for statistical research and for computationally intensive statistics. Through this course you will develop a knowledge base and skill set of a wide range of computational tools needed for statistical research. Topics include computer storage, architecture and arithmetic; random number generation; numerical optimization methods; spline smoothing and penalized likelihood; numerical integration; simulation design; Gibbs sampling; bootstrap methods; and the EM algorithm. Prerequisite: If course requirements not met, permission of instructor required." -LAW-795, -GSWS-165,"This course complicates prevailing understandings of the Caribbean and extends the boundaries of Asian America by exploring the histories, experiences, and contributions of Asians in the Caribbean. In particular, we will focus on the migrations of Chinese and Indian individuals to Cuba, Trinidad, and Guyana as well as how their descendants are immigrating to the United States.
We will examine the legal and social debates surrounding their labor in the 19th century, how they participated in the decolonization of the region, and how their migration to the United States complicates our understandings of ethnicity and race. Ultimately, through our comparative race approach, we will appreciate that the Caribbean is more than the Black Caribbean, it is also the Asian Caribbean." -EDUC-552,"Drawing on work from the education, psychology, communication, and the growing field of games studies, we will examine the history of video games, research on game play and players, review how researchers from different disciplines have conceptualized and investigated learning in playing and designing games, and what we know about possible outcomes. We will also address issues of gender, race and violence that have been prominent in discussions about the impact of games." -PSCI-498,Consult department for detailed descriptions. Recent topics include: Globalization; Race & Criminal Justice; Democracy & Markets in Postcommunist Europe. -COML-555,"This seminar will examine contemporary affect theory and its relationship with Michel Foucault's theory of power. We will begin by mapping out Foucault's ""analytics of power,"" from his early work on power knowledge to his late work on embodiment, desire, and the care of the self. We will then turn to affect theory, an approach which centralizes the non-rational, emotive force of power. No previous knowledge of theory is required." -SAST-799,Optional directed study course for PhD students in the last semester of coursework to prepare for candidacy exam to directly follow the end of this semester. -FNCE-751,"The focus of this course is on buying (or acquiring controlling stakes in) firms. The main topics to be covered are mergers and friendly acquisitions, hostile takeovers and buyouts. Using case studies, the course surveys the drivers of success in the transactions. While issues regarding motive and strategy will be discussed, financial theory would be the main lens used to view these control acquiring transactions. This will allow students to (1) evaluate transactions through valuation approaches and (2) structure deals employing financial innovation as a response to legal framework and economic frictions. This course should be of interest to students interested in pursuing careers as private equity investors, advisors in investment banking and corporate managers that deal with these issues. This course assumes familiarity with valuation analysis. During the spring semester students are not permitted to take this course pass fail." -MUSC-275,"MUSC275 offers an introduction to electronic music/sound production with a focus on analogue systems and performance. Guest artists will join us for in-class visits and performances during the semester. Meetings will take place in the classroom, in concert spaces and in the studio. Preference given to Music majors and minors for registration." -GOMD-978,"This course will allow academic discussion of disease, disease processes and therapeutic management of a wide range of topics related to oral medicine." -RELS-257, -SWRK-768, -INTL-BTM, -FNCE-254,"This course explores Impact Investing, a discipline that seeks to generate social benefits as well as financial returns. 
From tiny beginnings, the Impact Investment space has expanded and now commands significant attention from policymakers, wealthy and public-spirited individuals, academia and, not least, the world's largest asset managers and philanthropic foundations. Evangelists believe it may be the key to freeing the world from poverty. Skeptics think it will remain confined to the boutique. Regardless, Impact Investing is becoming a distinct career specialization for finance professionals despite the diverse skillset each must have and the uncertainty of the new field's growth." -ARCH-728,"Personalization is quickly becoming the norm for mass production in a variety of consumer-centric industries. From retail to food, the idea of designing and making custom-made products tailored to fit one's lifestyle will be our exploration. Utilizing digital design innovations, we are able to incubate ideas, prototype, test and be entrepreneurial in design to create these individualized products. Cues from these industries will be used to shift both cultural and experiential product design from a regional discovery to a global focus. This course will embrace digital design and utilize its engagement with manufacturing solutions for a physical output. Through research and a series of design exercises, the approach will be built upon several strategies including adaptability, materiality, fabrication, modularity, and human-centric design. The final project will interpret the research and result in the creation of a design strategy for a mass customized product or system. This course will explore product design solutions through a combination of physical and digital design methods. Beginning with an examination of case studies, students will gain a sense of the breadth of product and interaction design practice as it applies to smart objects. Through a series of lectures and hands-on studio exercises, students will explore all aspects of smart object design including expressive behaviors (light, sound and movement), interaction systems, ergonomics, data networks and contexts of use. The course will culminate in a final project that considers all aspects of smart object design within the context of a larger theme." -MKTG-350,"CONSUMER NEUROSCIENCE: How can studying the brain improve our understanding of consumer behavior? While neuroscience made tremendous strides throughout the 20th century, rarely were meaningful applications developed outside of medicine. Recently, however, breakthroughs in measurement and computation have accelerated brain science and created a dizzying array of opportunities in business and technology. Currently, applications to marketing research and product development are experiencing explosive growth that has been met with both excitement and skepticism. This mini-course provides an overview of the neuroscience behind and the potential for these developments. Topics will range from well-known and widely used applications, such as eye-tracking measures in the lab and field, to emerging methods and measures, such as mobile technologies, face-reading algorithms, and neural predictors of marketing response. The course will also discuss applications in branding and product development, including wearable physiological devices and apps, sensory branding for foods and fragrances, pharmaceuticals and medical devices, and neuroscience-based products designed to enhance cognitive functions. 
These applications stem from many subfields of cognitive neuroscience, including attention, emotion, memory, and decision making. This course is self-contained and has no prerequisites. However, students with some background in business, economics, psychology, and/or neuroscience are likely to find the material covered in this course complementary to their existing knowledge." -LAW-966, -PSCI-217,"This course will present an in-depth examination of political, economic and social change in post-Soviet Russia within a historical context. After a brief discussion of contemporary problems in Russia, the first half of the course will delve into the rise of communism in 1917, the evolution of the Soviet regime, and the tensions between ideology and practice over the seventy years of communist rule up until 1985. The second part of the course will begin with an examination of the Gorbachev period and the competing interpretations of how the events between 1985 and 1991 may have contributed to the collapse of the Soviet Union. We will then proceed to make sense of the continuities and changes in politics, economics and society in contemporary Russia. Important topics will include the confrontations accompanying the adoption of a new constitution, the emergence of competing ideologies and parties, the struggle over economic privatization, the question of federalism and nationalism, social and political implications of economic reform, and prospects for Russia's future in the Putin and post-Putin era." -GCB-577,"Second-year students in GCB, CAMB (G&E), or IGG programs using genomics methods to measure transcriptomics and epigenomics changes in their experimental systems. The goal is to familiarize students with the latest cutting-edge genomics tools and cover solutions to major experimental and computational challenges in the investigation of genome-wide epigenetic data sets. Students will develop competence in (i) variations of experimental techniques improving resolution and throughput, (ii) issues related to the computational analyses closely related to the various genome-wide assays used to probe epigenetic processes and signals, (iii) computational approaches useful to overcome pitfalls associated with the analysis of a given epigenetic data modality, (iv) methods, techniques and studies on the integration of multi-layer epigenetic data sets." -PSYC-612,"An applied graduate level course for students who have completed an undergraduate course in basic statistical methods. Covers two unrelated topics: loglinear and logit models for discrete data and nonparametric methods for nonnormal data. Emphasis is on practical methods of data analysis and their interpretation. Primarily for doctoral students in the managerial, behavioral, social and health sciences. May be taken before STAT 500 with permission of instructor." -LAW-987, -HSOC-411,"Why did Lance Armstrong get caught? Why do Kenyans win marathons? Does Gatorade really work? In this course, we won't answer these questions ourselves but will rely upon the methods of history, sociology, and anthropology to explore the world of the sport scientists who do. Sport scientists produce knowledge about how human bodies work and the intricacies of human performance. They bring elite (world-class) athletes to their laboratories-or their labs to the athletes.
Through readings, discussions, and original research, we will find out how these scientists determine the boundary between ""natural"" and ""performance-enhanced,"" work to conquer the problem of fatigue, and establish the limits and potential of human beings. Course themes include: technology in science and sport, the lab vs. the field, genetics and race, the politics of the body, and doping. Course goals include: 1) reading scientific and medical texts critically, and assessing their social, cultural, and political origins and ramifications; 2) pursuing an in-depth The course fulfills the Capstone requirement for the HSOC/STSC majors. Semester-long research projects will focus on ""un-black-boxing"" the metrics sport scientists and physicians use to categorize athletes' bodies as ""normal"" or ""abnormal."" For example, you may investigate the test(s) used to define whether an athlete is male or female, establish whether an athlete's blood is ""too"" oxygenated, or assess whether an athlete is ""too"" fast (false start). Requirements therefore include: weekly readings and participation in online and in-class discussions; sequenced research assignments; peer review; and a final 20+page original research paper and presentation." -LARP-734,"This advanced social science and design seminar is about mobilizing expert knowledge to develop transformative policy ideas to make the Green New Deal come alive. We'll look at cutting edge social science and design scholarship on the problems we're trying to solve, and the successes and failures of past efforts at transformative policy. And we'll focus in particular on the built environment. How might a Green New Deal make the physical changes to our infrastructures, homes, energy landscapes, transportation systems, public recreation amenities, care facilities, and more, in ways that slash carbon emissions, increase resiliency, and abolish inequalities of race, class, gender, and nation? That's not a rhetorical question: in this class, we'll assemble knowledge, get into teams, and come up with concrete proposals." -EDUC-360,"A life-span (infancy to adulthood) approach to development. Topics include: biological, physical, social and cognitive basis of development. Films and guest speakers are often included." -DENT-634,"A combination of lectures, seminars and laboratory exercises provide the dental student with a fundamental understanding of the partially edentulous condition. Topics covered include classification, diagnosis, treatment planning and treatment of partially edentulous patients with RPDs. This course is designed to provide students with the terminology, concepts and principles necessary for case selection, design, construction of, and patient therapy with conventional RPDs. Upon completion of this course students will have the neccessary didactic knowledge to successfully understand and treat removable partial denture cases in conjunction with the clinical faculty during their third and fourth years." -ANTH-595,"The last 40 years has been a period of unparalleled reappraisal of archaeological theory and practice. We will consider the development of anthropological archaeology in terms of the questions archaeologists have asked, the ideas that have guided those questions, and the procedures that have been used to investigate them. 
Our discussion will focus on the intellectual heritage of normative or cultural-historical archaeology and its successors in terms of changing archaeological goals and theoretical frameworks, and their importance for contemporary research. The course will be organized around specific examples of archaeological research that have exemplified or challenged theoretical and methodological standards from culture history through the post-processual critique and the emergence of contemporary theorizations." -LALS-398,Topics vary. Please see the Spanish Department's website for the current course description: https://www.sas.upenn.edu/hispanic-portuguese-studies/pc -HCMG-868,"Issues surrounding global health have captivated the attention of the public sector and foundations for many decades. Many of their initiatives are realizing progress on the health-related Millennium Development Goals. The private sector has been less engaged in global health, but has a significant role to play in providing resources and in building infrastructure, human resource capacity and sustainability. This course explores entrepreneurial and other private sector solutions for health services and access to medicines and technologies in the developing world and other underserved areas. The course also encompasses study of creative programs to engage the private sector in development of vaccines and medicines for tropical and neglected diseases. Furthermore, the course addresses novel care systems and therapeutic strategies for the rapidly growing burden of chronic, non-communicable diseases in the developing world. In short, the course builds on the content of conventional global health courses from a managerial and entrepreneurial perspective. Learning is driven through readings, class discussion and a series of guest speakers representing a wide range of global health issues. Evaluation is largely based on a student group project." -ENVS-616,"How do government policy-makers make decisions about potential threats to human health and the environment in the face of scientific uncertainty? The course develops the concept of Risk Assessment from the publication of the 1983 National Research Council (NRC) report commonly known as the ""Red Book"" which was used to rank the initial hazardous waste sites under the Superfund program. Using a variety of teaching tools, including lectures, panel discussions, and case studies, the course examines how public policy decisions regarding environmental risk are made and how effective those decisions are at reducing risks to affected populations. The course focuses on the complex interaction of science, economics, politics, laws, and regulations in dealing with environmental and public health risks. The course will begin with a review of the policy process and methods used in evaluating human health and environmental risks, including the traditional steps in the risk assessment process, including quantitative and qualitative aspects of hazard identification, dose-response assessment, exposure assessment, and risk characterization. The course will then focus on how scientific uncertainty, risk perceptions, socio-economic disparities, risk communication, and politics influence environmental risk-based decision-making. Issues such as special populations (e.g., children, elderly, immune-compromised, woman of pregnancy age, etc.) must be considered when developing risk reduction strategies. 
The use of the ""precautionary principle"" will be discussed in the context of different types of environmental stressors (e.g., pesticides, chemicals, climate change, air pollution, water quality, and land use) and how this important controversial principle is applied differently in contrasting national and European risk management policies." -NSCI-402,"The capstone course of the NROTC curriculum, this course is intended to provide the midshipman with the ethical foundation and basic leadership tools to be effective junior officers. Topics such as responsibility, accoutability, ethics, the law of armed conflict, military law, division organization and training, and discipline are introduced through practical exercises, group discussion, and case studies." -OIDD-763,"Over the last several decades, energy markets have become some of the most dynamic markets of the world economy. Traditional fossil fuel and electricity markets have been seen a partial shift from heavy regulation to market-driven incentives, while rising environmental concerns have led to a wide array of new regulations and ""environmental markets"". The growth of renewable energy could be another source of rapid change, but brings with it a whole new set of technological and policy challenges. This changing energy landscape requires quick adaptation from energy companies, but also offers opportunities to turn regulations into new business. The objective of this course is to provide students with the economist's perspective on a broad range of topics that professionals in the energy industry will encounter. Topics include the effect of competition, market power and scarcity on energy prices, the impact of deregulation on electricity and fossil fuel markets, extraction and pricing of oil and gas, geopolitical uncertainty and risk in hydrocarbon investments, the environmental impact and policies related to the energy sector, environmental cap-and-trade markets, energy efficiency, the economics and finance of renewable energy, and recent developments in the transportation sector." -HEBR-053,"Development of the skills of reading, writing, and conversing in modern Hebrew on an intermediate level. Open to all students who have completed two semesters of Hebrew at Penn with a grade of B- or above and new students with equivalent competency." -FNAR-489,"The Spring semester seminar culminates in a senior thesis exhibition for each graduating student. These exhibitions have traditionally been held as a small group exhibition featuring a few students in one group, or as a larger end of semester exhibition with each student installing a series of works. The format of the exhibition will be determined during the fall semester by the senior faculty. The process of preparing, installing, and promoting the thesis exhibition is covered in detail throughout the semester. Students will work in their on-campus studio spaces to produce dynamic, thoughtful and well-crafted work that will serve as their final portfolio. They will present their portfolio of work during a final critique before graduation." -CHIN-722, -AFRC-271, -COML-592, -PSYC-170,"An overview of theories and research across the range of social behavior from intra-individual to the group level including the effects of culture, social environment, and groups on social interaction." -LALS-273,"This course focuses on immigrant communities in United States cities and suburbs. 
We survey migration and community experiences among a broad range of ethnic groups in different city and suburban neighborhoods. Class readings, discussions, and visits to Philadelphia neighborhoods explore themes including labor markets, commerce, housing, civil society, racial and ethnic relations, integration, refugee resettlement, and local, state, and national immigration policies. The class introduces students to a variety of social science approaches to studying social groups and neighborhoods, including readings in sociology, geography, anthropology, social history, and political science. Ultimately, the class aims to help students develop: 1) a broad knowledge of immigration and its impacts on U.S. cities and regions; 2) a comparative understanding of diverse migrant and receiving communities; and 3) familiarity with policies and institutions that seek to influence immigration and immigrant communities." -CLST-211,"A survey of the ethical theories debated by philosophers in Classical Greece and Rome. Plato, Aristotle, Stoics, Epicureans and Pyrrhonist Sceptics offer competing answers to the fundamental question raised by Socrates: How are we to live? That is, what is the best life for a human being? These philosophers generally agree that virtue is an important part of the best human life, but disagree about whether it is the greatest good (Epicurus, for example claims that pleasure is the highest good), or whether there are any other goods (for example, health, wealth, family). Much attention is paid in their theories to accounts of the virtues of character, and to the place of wisdom in the best sort of human life." -GPED-915,A comprehensive course to provide an in depth knowledge of all areas of Pediatric Dentistry -HSPV-747,"This seminar will address the history, theories, principles, and practices of the preservation and interpretation of archaeological sites and landscapes. The course will draw from a wide range of published material and experiences representing both national and international contexts. Topics will include site and landscape documentation and recording; site formation and degradation; intervention strategies including interpretation and display, legislation, policy, and contemporary issues of descendent community ownership and global heritage. Depending on the site, students will study specific issues leading toward the critique or development of a conservation and management program in accordance with guidelines established by ICOMOS/ ICAHM and other official agencies." -GRMN-203,"In this course, you will explore themes of cultural and historical significance in contemporary German-speaking countries through literature and nonfiction, through film and current event media coverage. Whether you wish to dive deeply into historical or political contexts, explore untranslatable cultural phenomena or the aesthetic rhythm and semantic complexity of the German language, GRMN 203 Texts and Contexts will inspire your imagination and deepen your understanding of German language, culture and literature. This is a required course for all courses taught in German at or above the 200 level." -BIOE-701, -TELU-430,"This course is designed to expand the students' basic language skills in Telugu in order to allow them to function adequately in a Telugu-speaking environment, to immerse themselves in the rich Andhra culture, and to accomplish a more advanced competency in an interesting foreign language. 
This course is also aimed at students planning to conduct scholarly research in Telugu history, literature or society, or humanities or social science fieldwork in Telugu-speaking areas." -MATH-621,Continuation of Math 620. -CBE-460,"Dynamics and control of linear single-input, single-output (SISO) systems in chemical processes. Laplace transforms. Dynamic responses of linear systems to various inputs. Frequency domain analysis. Feedback control strategies. Stability. Controller tuning. Advanced control, including cascade and feedforward control. Introduction to multiple-input, multiple-output (MIMO) control. Inverse response." -HIST-139,"A broad introduction to the history of Jewish civilization from its Biblical beginnings to the Middle Ages, with the main focus on the formative period of classical rabbinic Judaism and on the symbiotic relationship between Judaism, Christianity, and Islam." -BEPP-261,"This course is designed to introduce students to the role of risk assessment, risk perception and risk management in dealing with uncertain health, safety and environmental risks including the threat of terrorism. It explores the role of decision analysis as well as the use of scenarios for dealing with these problems. The course will evaluate the role of policy tools such as risk communication, economic incentives, insurance, regulation and private-public partnerships in developing strategies for managing these risks. A project will enable students to apply the concepts discussed in the course to a concrete problem." -ASAM-006,"This course will focus on race and ethnicity in the United States. We begin with a brief history of racial categorization and immigration to the U.S. The course continues by examining a number of topics including racial and ethnic identity, interracial and interethnic friendships and marriage, racial attitudes, mass media images, residential segregation, educational stratification, and labor market outcomes. The course will include discussions of African Americans, Whites, Hispanics, Asian Americans, and multiracials." -MGMT-231,"This advanced course on entrepreneurship focuses on developing a validated opportunity or concept into a venture that is ready for seed financing and/or launching the product or service. Participants in this course must previously have developed a validated opportunity, either in a previous course or through independent efforts. Students may participate as a team of up to three people. Ideally, participants are committed to pursuing their opportunity commercially, or at least to seriously explore that possibility. The course provides practical guidance for developing the product or service, forming the entity, raising capital, building the team, establishing partnerships, and sourcing professional services. After completing the course, you will be ""pitch ready"" - whether submitting to campus venture competitions or to outside investors. Most coursework is focused on applying concepts and frameworks to project tasks in developing the venture. Students must have successfully completed MGMT 801 before enrolling in this course. Format: Readings, discussion, and developing an implementation plan for a real venture." -ECON-242,"This course covers topics of interest in macroeconomics. Two sections are offered: Markets with Frictions.
This course studies allocations in markets with frictions, as described by the difficulty in finding a trading partner, private information problems, commitment issues, and so on. Applications to labor markets, monetary economics, and the marriage market will be discussed. The main technical tool will be search theory, but a liberal amount of calculus and other mathematics will be used. Numerical Methods for Macroeconomists. This course will study some of the numerical methods that are used in modern macroeconomics. This class will learn how to solve nonlinear equations, difference equations, interpolate functions, smooth data, and conduct Monte Carlo simulations on the computer. This will be done while studying economic problems, such as the determination of labor supply, economic growth and business cycle analysis. Calculus is an integral part of the course and some elementary probability theory will be drawn upon. The MATLAB programming language will be used." -LAW-968, -HCMG-215,"This course provides an overview of the management, economic and policy issues facing the pharmaceutical and biotechnology industries. The course perspective is global, but with emphasis on the U.S. as the largest and most profitable market. Critical issues we will examine include: R&D intensive cost structure and rapid technological change; biotech startups and alliances with the pharma industry; pricing and promotion in a complex global marketplace where customers include governments and insurers, as well as physicians, pharmacists and consumers. We use Wharton and industry experts from various disciplines to address these issues." -QUEC-120,"Quechua, the language of the Inca Empire and still spoken by approximately 6 million people throughout the Andes, is the most popular indigenous language of South America. The program focuses on the development of written and oral communicative abilities in Quechua through an interactive activity-based approach. Course includes an introduction to Quechua and Andean culture. Students will participate in pair, small-group and whole-class activities. Assessment is based on both students' ability to use the language in written and oral tasks and understanding the language and culture. This beginning level Quechua course is designed for students who have little or no previous knowledge of the language. Lectures will be delivered in English and Quechua." -PHIL-029,This is an introductory philosophy course that uses philosophical tools to understand and answer questions that arise in and about sports. Is there a principled basis for determining which methods of performance enhancement are acceptable? Developing a framework to answer this question will take us through: 1) questions about rules: what is their point in sports and what are appropriate reasons to change them; 2) questions about the point of participation in a sport; 3) questions about the kinds of virtues sports participants can demonstrate; and 4) questions about integrity of participants and a sport itself. A related set of questions concerns the appropriate competitors in sporting events: Should competition be restricted to single-sex categories? Should competition be divided into disabled and non-disabled categories? -PHYS-140,"The topics of this calculus-based course are: Classical laws of motion; interactions between particles; conservation laws and symmetry principles; particle and rigid body motion; gravitation, harmonic motion, and applications of mechanics to real-world problems. Engineering students only. 
Prerequisite: For Engineering students whose course of study does not require a physics laboratory course. Those who are enrolled in a dual degree program with the college must register for the lab-based version of this course, PHYS 150." -GPRD-959, -LAW-946, -LAW-904, -OIDD-397,"This course is highly recommended for students with an interest in pursuing careers in: (1) retailing and retail supply chains; (2) businesses like banking, consulting, information technology, that provide services to retail firms; (3) manufacturing companies (e.g. P&G) that sell their products through retail firms. Retailing is a huge industry that has consistently been an incubator for new business concepts. This course will examine how retailers understand their customers' preferences and respond with appropriate products through effective supply chain management. Supply chain management is vitally important for retailers and has been noted as the source of success for many retailers such as Wal-mart and Home Depot, and as an inhibitor of success for e-tailers as they struggle with delivery reliability. See M. L. Fisher, A. Raman and A. McClelland, ""Rocket Science Retailing is Coming - Are You Ready?,"" Harvard Business Review, July/August 2000 for related research." -ARTH-235,"A one-semester survey of Islamic art and architecture which examines visual culture as it functions within the larger sphere of Islamic culture in general. Particular attention will be given to relationships between visual culture and literature, using specific case studies, sites or objects which may be related to various branches of Islamic literature, including historical, didactic, philosophical writings, poetry and religious texts. All primary sources are available in English translation." -CIS-120,"A fast-paced introduction to the fundamental concepts of programming and software design. This course assumes some previous programming experience, at the level of a high school computer science class or CIS110. (If you got at least 4 in the AP Computer Science A or AB exam, you will do great.) No specific programming language background is assumed: basic experience with any language (for instance Java, C, C++, VB, Python, Perl, or Scheme) is fine. If you have never programmed before, you should take CIS 110 first." -CIS-121,"This is a course about Algorithms and Data Structures using the Java programming language. We introduce the basic concepts about complexity of an algorithm and methods on how to compute the running time of algorithms. Then, we describe data structures like stacks, queues, maps, trees, and graphs, and we construct efficient algorithms based on these representations. The course builds upon existing implementations of basic data structures in Java and extends them for structures like trees, studying the performance of operations on such structures, and their efficiency when used in real-world applications. A large project introducing students to the challenges of software engineering concludes the course." -CIS-262,"This course explores questions fundamental to computer science such as which problems cannot be solved by computers, can we formalize computing as a mathematical concept without relying upon the specifics of programming languages and computing platforms, and which problems can be solved efficiently. The topics include finite automata and regular languages, context-free grammars and pushdown automata, Turing machines and undecidability, tractability and NP-completeness. 
The course emphasizes rigorous mathematical reasoning as well as connections to practical computing problems such as test processing, parsing, XML query languages, and program verification." +AFRC-437,"Why are African Americans and some other minority groups disproportionately incarcerated and subjected to penal sanctions? What are the political, social and economic consequences for individuals, communities, and the wider society of mass incarceration in the United States? What types of reforms of the criminal justice system are desirable and possible? This advanced seminar analyzes the connection between race, crime, punishment, and politics in the United States. The primary focus is on the role of race in explaining why the country's prison population increased six-fold since the early 1970s and why the United States today has the highest incarceration rate in the world. The class will likely take field trips to a maximum-security jail in Philadelphia and to a state prison in the Philadelphia suburbs." +ARTH-775,"Topic varies from semester to semester. For Spring 2020, this course will cover 'Cezanne, Alienation, and Modern Portraiture.' In the process of advancing modern art, Cezanne is often said to have subjugated the modern individual to the painting. Lost in conspicuous brushwork and vibrant coloration, the sitter had a difficult time making their presence felt in his work (not least his wife, Hortense Fiquet, who he portrayed most frequently). With the help of new scholarship on the artist and the period, this course will reassess this old saw of modernist art history, and instead place Cezanne's innovations carefully within the contemporary emergence of psychology and modern urban consciousness. We will discuss the various ways in which the birth of modernist representation coincided with the birth of the modern subject, and develop new means to analyze modernist portraiture more broadly. Along the way, this seminar will look carefully at Cezanne's entire career and oeuvre (and that of several of his colleagues as well), and we will study in particular the ways in which writers, philosophers and art historians--from Zola, Rilke, Heidegger and Merleau-Ponty to Jonathan Crary, Tamar Garb and T. J. Clark more recently--have used the artist to write their histories of modernism and modernity since the turn of the last century." +GRMN-180,"The German House is a half-credit course with concentrations in German conversation, film, and culture. Though many students enroll for credit, others often come to select events. All interested parties are invited, and you do not have to actually live in the house to enroll for credit. Students from all different levels of language proficiency are welcome. Beginners learn from more advanced students, and all enjoy a relaxed environment for maintaining or improving their German language skills." +ARCH-532,"A continuation of Construction I, focusing on light and heavy steel frame construction, concrete construction, light and heavyweight cladding systems and systems building." +BEPP-263,"This course examines environmental and energy issues from an economist's perspective. Over the last several decades, energy markets have become some of the most dynamic markets of the world economy, as they experienced a shift from heavy regulation to market-driven incentives. First, we look at scarcity pricing and market power in electricity and gasoline markets. 
We then study oil and gas markets, with an emphasis on optimal extraction and pricing, and geopolitical risks that investors in hydrocarbon resources face. We then shift gears to the sources of environmental problems, and how policy makers can intervene to solve some of these problems. We talk about the economic rationale for a broad range of possible policies: environmental taxes, subsidies, performance standards and cap-and-trade. In doing so, we discuss fundamental concepts in environmental economics, such as externalities, valuation of the environment and the challenge of designing international agreements. At the end of the course, there will be special attention for the economics and finance of renewable energy and policies to foster its growth. Finally, we discuss the transportation sector, and analyze heavily debated policies such as fuel-economy standards and subsidies for green vehicles. Prerequisites: An introductory microeconomics course (ECON1, or another course approved by the instructor) will be sufficient in most cases; BEPP 250 or an equivalent intermediate microeconomics course is recommended." +NELC-337,"The Hebrew Bible legislates against magic and witchcraft. But Jewish literature is replete with demons, witches, spells and incantations. This course will examine the phenomenon of Jewish magic in the longue duree. We will explore a wide array of sources describing ancient Jewish magical practices, and attempt to reconstruct the various aspects of ancient Jewish magic. We will start with demonology and exorcism in biblical and Second Temple literature. Then we will examine rabbinic attitudes towards magic and sorcery and rabbinic magical recipes. We then turn to material artifacts: late antique Jewish amulets and magic bowls. Finally we will survey the large corpus of magical texts from the Cairo Geniza and Hebrew manuscripts of magic from the middle ages. During the course we will consider broader questions such as the relationships between magic and religion, the identity of the Jewish magicians and their clients, relationship between Jewish and contemporary non-Jewish magic, and the role of women in magical practice." +ANTH-395,"The last 40 years has been a period of unparalleled reappraisal of archaeological theory and practice. We will consider the development of anthropological archaeology in terms of the questions archaeologists have asked, the ideas that have guided those questions, and the procedures that have been used to investigate them. Our discussion will focus on the intellectual heritage of normative or cultural-historical archaeology and its successors in terms of changing archaeological goals and theoretical frameworks, and their importance for contemporary research. The course will be organized around specific examples of archaeological research that have exemplified or challenged theoretical and methodological standards from culture history through the post-processual critique and the emergence of contemporary theorizations." +LGIC-320,"The second semester of a two-semester course on the fundamental results and techniques of mathematical logic. Topics will be drawn from model theory, proof theory, recursion theory, and set theory. Connections between logic and algebra, analysis, combinatorics, computer science, and the foundations of mathematics will be emphasized." 
+AFRC-209,"This selective survey will examine a variety of the circumstances of sub-Saharan African art, ranging from imperial to nomadic cultures and from ancient times to comtemporary participation in the international market. Iconography, themes and style will be considered, as will questions of modernity, religious impact, tradition and colonialism." +ANTH-329,"How do people become who they are, both similar to others and uniquely individual? How might these similarities and differences be shaped by childhood experiences in family, community, and societies around the world? How do children develop emotionally? Morally? What features of human development, expression of emotions, and relational patterns are universal for our species? What features are not universal? And what is and is not known about these questions? In this course, we will consider these and many other questions. We will read about and discuss complex and dynamic interactions between culture and individual psychology, and between nature and nurture from birth to adulthood. We will carefully examine various phases of human development as described by psychoanalysts and anthropologists. The course includes anthropologic and psychoanalytic readings and videotapes, as well as literature, fairy tales, and mythologies from cultures around the world. The instructors are both psychoanalysts, one a psychiatrist and one a pediatrician. The course counts towards the Psychoanalytic Studies (PSYS) Minor." +NELC-346,"This course introduces students to theory and methodology of the geospatial humanities and social sciences, understood broadly as the application of Geographical Information Systems (GIS) and spatial analysis techniques to the study of social and cultural patterns in the past and present. By engaging with spatial theory, spatial analysis case studies, and technical methodologies, students will develop an understanding of the questions driving, and tools available for, humanistic and social science research projects that explore change over space and time. We will use ESRI's ArcGIS software to visualize, analyze, and integrate historical, anthropological, and environmental data. Techniques will be introduced through the discussion of case studies and through demonstration of software skills. During supervised laboratory sessions, the various techniques and analyses covered will be applied to sample data and also to data from a region/topic chosen by the student." +NELC-685,"A one-semester survey of Islamic art and architecture which examines visual culture as it functions within the larger sphere of Islamic culture in general. Particular attention will be given to relationships between visual culture and literature, using specific case studies, sites or objects which may be related to various branches of Islamic literaturem including historical, didactic, philosophical writings, poetry and religous text. All primary sources are available in English translation." +LAW-930, +GPRD-936, +GEOL-545,"Pattern on the Earth's surface arise due to the transport of sediment by water and wind, with energy that is supplied by climate and tectonic deformation of the solid Earth. This course presents a treatment of the processes of erosion and deposition that shape landscapes. 
Emphasis will be placed on using simple physical principles as a tool for (a) understanding landscape patterns including drainage networks, river channels and deltas, desert dunes, and submarine channels, (b) reconstructing past environmental conditions using the sedimentary record, and (c) the management of rivers and landscapes under present and future climate scenarios. The course will conclude with a critical assessment of landscape evolution on other planets, including Mars." +HCMG-852,"The purpose of this course is to apply economics to an analysis of the health care industry, with special emphasis on the unique characteristics of the US healthcare markets, from pre-hospital to post-acute care. This course focuses on salient economic features of health care delivery, including: the role of nonprofit providers, the effects of regulation and antitrust activity on hospitals, the degree of input substitutability within hospitals, the nature of competition in home health care, public versus private provision of emergency medical services, the effect of specialty hospitals and ambulatory surgery centers, defining and improving medical performance in hospitals, specialization and investment in physical and human capital, shifting of services between inpatient and outpatient settings and its effect on health care costs and quality, and innovation in primary care from retail clinics to patient-centered medical homes and retainer-based medicine." +RELS-129,"Course topics will vary; have included The Binding of Isaac, Responses to Catastrophies in Jewish History, Holy Men & Women (Ben-Amos); Rewriting the Bible (Dohrmann); Performing Judaism (Fishman); Jewish Political Thought (Fishman); Jewish Esotericism (Lorberbaum). Democratic culture assumes the democracy of knowledge- the accessibility of knowledge and its transparency. Should this always be the case? What of harmful knowledge? When are secrets necessary? In traditional Jewish thought, approaching the divine has often assumed an aura of danger. Theological knowledge was thought of as restricted. This seminar will explore the ""open"" and ""closed"" in theological knowledge, as presented in central texts of the rabbinic tradition: the Mishnah, Maimonides and the Kabbalah. Primary sources will be available in both Hebrew and English." +COMM-313,"In this 'big data' era, presidents and popes tweet daily. Anyone can broadcast their thoughts and experiences through social media. Speeches, debates and events are recorded in online text archives. The resulting explosion of available textual data means that journalists and marketers summarize ideas and events by visualizing the results of textual analysis (the ubiquitous 'word cloud' just scratches the surface of what is possible). Automated text analysis reveals similarities and differences between groups of people and ideological positions. In this hands-on course students will learn how to manage large textual datasets (e.g. Twitter, YouTube, news stories) to investigate research questions. They will work through a series of steps to collect, organize, analyze and present textual data by using automated tools toward a final project of relevant interest. The course will cover linguistic theory and techniques that can be applied to textual data (particularly from the fields of corpus linguistics and natural language processing). No prior programming experience is required. 
Through this course students will gain skills writing Python programs to handle large amounts of textual data and become familiar with one of the key techniques used by data scientists, which is currently one of the most in-demand jobs." +EDUC-639,"This course examines different theoretical frames and strategies related to the study and design of learning environments in school, community and online contexts. Physical, social and cognitive aspects of learning situations are considered as students evaluate current research and applications in a variety of existing educational learning environments." +DADE-991, +ENVS-606,"This class will explore the foundations of avifaunal biology and ecology using a combination of hands-on classroom and in-the-field experiences. Classroom content includes physiology, anatomy, and morphology of birds. The fall migration of birds in North America is an epic and often tragic event. Sampling birds in migration has resulted in foundational understandings about stopover habitats, species-specific energy budgets and has helped realize the complete life cycle of hundreds of species. We will enter the field and participate in actual ornithological research, explore avifaunal ecology through birdwatching, and meet with regional leaders in the ornithological field." +CHIN-515,"This course surveys the literary movements of the post-Cultural Revolution era (1978-present).The reading consists of fictional works representative of each literary movement. Students will write four short (1-2 pages, double space) ""responding"" papers and two longer critical essays (5-7 pages double spaced). Each student will also give one oral presentation to the class on an assigned story. This course is designed for students who have achieved native or near native level of reading and writing proficiency in Chinese. The class is conducted exclusively in Chinese." +PHIL-578,"This is a topics-based graduate seminar in political philosophy. Examples of topics we can examine in this course include distributive justice, liberty, equality, and global justice. Course readings will be drawn from a combination of seminal and more recent works on the selected topics." +JPAN-012,Textbooks: Genki I (Lesson 8- Lesson 12) and Genki II (Lesson 13- Lesson 14)Kanji: reproduction-approx. 170/recognitio-approx.250 +ASAM-110,Please see our website for more current information: asam.sas.upenn.edu +LAW-695, +EALC-242,"This course explores Chinese medicine and healing culture, its diversity, and its change over time. We will discuss topics including the establishment of canonical medicine, Daoist approaches to healing and longevity, diverse views of the body and disease, the emergence of treatments for women, medical construction of sex difference and imagination of female sexuality, the thriving and decline of female healers, the identity of scholar physicians, the transmission of medical knowledge, domestic and cross-regional drug market, healer-patient relations, and new visions of traditional Chinese medicine in modern China." +AFRC-602,"This course critically examines stereotype threat and impostor phenomenon as they relate to African Americans. Both stereotype threat and impostor phenomenon negatively affect African Americans. The apprehension experienced by African Americans that they might behave in a manner that confirms an existing negative cultural stereotype is stereotype threat, which usually results in reduced effectiveness in African Americans' performance. 
Stereotype threat is linked with impostor phenomenon. Impostor phenomenon is an internal experience of intellectual phoniness in authentically talented individuals, in which they doubt their accomplishments and fear being exposed as a fraud. While stereotype threat relies on broad generalization, the impostor phenomenon describes feelings of personal inadequacy, especially in high-achieving African Americans. This course will explore the evolving meanings connected to both stereotype threat and impostor phenomenon in relation to African Americans." +PHIL-002,"Ethics is the study of right and wrong behavior. This introductory course will introduce students to major ethical theories, the possible sources of normativity, and specific ethical problems and questions. Topics may include euthanasia, abortion, animal rights, the family, sexuality, bioethics, crime and punishment and war." +REAL-236,"This course analyzes housing finance systems and housing market outcomes across the globe. In the US, the course focuses on the development of securitization markets and addresses the current challenges of housing finance reform, including the future of Fannie Mae and Freddie Mac. Internationally, the course covers issues of access to housing and housing informality in developing countries, financial crises arising out of the housing sector, and market-oriented and public policy solutions. The course features a wide array of speakers in finance, government and academia who contribute their perspectives to pressing issues of mortgage market design." +PPE-402,"Led by fellows in the Philosophy, Politics and Economics program, this course teaches students how to conduct research in PPE with an emphasis on creating a well-formed research question, determining what kinds of data or scholarly research bears on that question, and how to carry out an interdisciplinary, research-driven project on that question." +STAT-431,Graphical displays; one- and two-sample confidence intervals; one- and two-sample hypothesis tests; one- and two-way ANOVA; simple and multiple linear least-squares regression; nonlinear regression; variable selection; logistic regression; categorical data analysis; goodness-of-fit tests. A methodology course. This course does not have business applications but has significant overlap with STAT 101 and 102. +ESE-621,"This is a graduate level course on fundamental operating principles and physics of semiconductor devices in reduced or highly scaled dimensions. The course will include topics and concepts covering basic quantum mechanics and solid state physics of nanostructures as well as device transport and characterization, materials and fabrication. A basic knowledge of semiconductor physics and devices is assumed. The course will build upon basic quantum mechanics and solid state physics concepts to understand the operation of nanoscale semiconductor devices and physics of electrons in confined dimensions . The course will also provide a historical perspective on micro and nanoelectronics, discuss the future of semiconductor computing technologies, cutting edge research in nanomaterials, device fabrication as well as provide a perspective on materials and technology challenges. Prerequisite: If course requirement not met, permission of instructor required." +BEPP-322,"This course presents an analysis of overall private wealth management. 
This includes planning for disposition of closely-held business interests; the impact of income taxes and other transfer costs on business interests and other assets; integration of life insurance, disability insurance, medical benefits, and long-term care insurance in the financial plan; planning for concentrated asset (e.g. common stock) positions, diversification techniques, and asset allocation strategies; distribution of retirement assets; lifetime giving and estate planning; and analysis of current developments in the creation, conservation, and distribution of estates. Attention also is given to various executive compensation techniques (including restricted stock and stock options) and planning for various employee benefits. The course also covers sophisticated charitable giving techniques and methods for financing educaton expenses. Reading consist of textbooks, case studies, and bulk pack articles." +URBS-277,"Is urban space gendered? Do we change how it is gendered as we move through it? Does it change us? This course explores gender and sexuality in the contemporary global city through the study of urban spaces. We will consider feminist, queer, and transgender theories of the city, as we investigate how practices of using and making space are gendered and sexualized. Each week of the course will be organized around a type of space, including subway, school, and birthing center, nightclub, suburb, and park. Assignments will include an auto-ethnography, a short critical essay, and a final assignment that asks you to propose an additional type of space in which to study the intersections of sex, gender, and the urban built environment. In each space, we will conduct an interdisciplinary exploration, drawing from sociology, anthropology, geography, city planning history, feminist and queer theory, as well as from fiction, poetry, music videos, photography, and documentary film." +LAW-501, +HIST-412, +ENGL-169,"An advanced course in long-form nonfiction journalistic writing for a select group of experienced and self-starting student writers. (Ideally, each accepted member will have already taken one or two nonfiction seminars within the creative writing program.) The goal will be to tailor a reporting and writing project to your interest, one you may have long wished to take up but never had the opportunity. It could be a project in the arts. It could be a profile of a person or place. It might be documentary in nature, which is to say an extremely close-up observation of your subject. (An example: think of a hospital chaplain at Penn, going on his dreary, redemptive, daily rounds, to visit the sick and anoint the dying. What if you were there, for most of the term, as unobtrusively as possible, at his black-clad elbow?) The group will meet at to-be-determined intervals. In between, the enrollees will be pairing off and in effect serving as each other's editor and coach and fellow (sister) struggler. When we do assemble as a group, we will be reading to each other as well as discussing the works of some long-form heroes--Didion, Talese, Richard Ben Cramer, one or two others you may not have heard of. In essence, this is a kind of master course, limited in enrollment, and devoted to your piece of writing, to be handed in on the final day. It will be in the range of 25 to 30 pages, something above 8,000 words. The course presumes a lot of individual initiative and self-reliance. If you're interested, please email phendric@english.upenn.edu and suggest your qualifications. 
Permission to enroll is required." +LALS-397,Topics vary. Please see the Spanish Department's website for the current course description: https://www.sas.upenn.edu/hispanic-portuguese-studies/pc +PSYC-439,"This course is designed to examine the various roles played by the nervous and endocrine systems in controlling both physiological processes and behavior. First, the course will build a foundation in the concepts of neural and endocrine system function. Then, we will discuss how these mechanisms form the biological underpinnings of various behaviors and their relevant physiological correlates. We will focus on sexual and parental behaviors, stress, metabolism, neuroendocrine-immune interactions, and mental health." +NELC-552,"Course topics will vary; they have included: Holy Men & Women (Ben-Amos); Rewriting the Bible (Dohrmann); Jewish Political Thought & Action (Fishman) When did the Bible become the Bible? What was the nature of canon and authority in early Israel and Judaism, and how did biblical communities think about their sacred texts? How and what did the Bible mean to ancient readers? The answers to these questions are varied and surprising. This course looks at early biblical and Jewish texts that both write and re-write the tradition's own central texts. We will think widely and creatively about ancient textuality, orality, interpretation, composition, and authority. Drawing on literary theory, the course will examine the ways that biblical and post-biblical literature from the Second Temple to the rabbinic period (with some forays into contemporary literature) manifest complex ideas about power, meaning, and religiousity in early Judaism." +SKRT-480,"This course is for advanced students of Sanskrit. Designed as a seminar, the course aims to take students through the primary and secondary sources of Sanskrit literary and phlosophical production. Each semester will focus on a different genre: epic, belles-lettres, lyric poetry, drama, philosophy, shastra, advanced grammar, history, poetics, and epigraphy. We will focus on original sources, secondary scholarship, and theoretical approaches toward the translation and study of Sanskrit texts." +LGIC-010,"This course provides an introduction to some of the fundamental ideas of logic. Topics will include truth functional logic, quantificational logic, and logical decision problems." +JWST-034,Continuation of JWST 033; emphases in reading texts and conversation. +GSWS-118,"This seminar explores Iranian culture, society, history and politics through the medium of film. We will examine a variety of cinematic works that represent the social, political, economic and cultural circumstances of contemporary Iran, as well as the diaspora. Along the way, we will discuss issues pertaining to gender, religion, nationalism, ethnicity, and the role of cinema in Iranian society and beyond. Discussions topics will also include the place of the Iranian diaspora in cinema, as well as the transnational production, distribution, and consumption of Iranian cinema. Films will include those by internationally acclaimed filmmakers, such as Rakhshan Bani-Etemad, Asghar Farhadi, Bahman Ghobadi, Abbas Kiarostami, Mohsen Makhmalbaf, Dariush Mehrjui, Tahmineh Milani, Jafar Panahi, Marjane Satrapi and others. All films will be subtitled in English. No prior knowledge is required." +EDUC-683,"This course covers the methods and design of field surveys in the U.S. and other countries in education, the social sciences, criminal justice research, and other areas. 
It covers methods of eliciting information through household, mail, telephone surveys, methods of assuring privacy, enhancing cooperation rates and related matters. Finally, the fundamentals of statistical sampling and sample design are handled. Much of the course is based on contemporary surveys sponsored by the National Center for Education Statistics and other federal, state and local agencies." +HCMG-866,"This course will introduce students to the main components of Health Information Technology (HIT) and how HIT currently effects, and in the future, may change health care operating models. Although it will not prepare students for primary technology management positions, it will help them understand the role of information technology in the success of the delivery system and other important healthcare processes. It will provide a foundation that will prepare them as managers, investors and consultants to rely upon or manage information technology to accomplish delivery system objectives. The course will give special attention to key health care processes, and topics such as the drive for provider quality and cost improvements, the potential ability to leverage clinical data for care improvement and product development, the growth of new information technologies for consumer directed healthcare and telemedicine, the strategies and economics of individual HIT companies and the role of government. The course relies heavily on industry leaders to share their ideas and experiences with students." +NELC-332,"This interdisciplinary seminar aims to introduce students to the countries of North Africa, with a focus on the Maghreb and Libya (1830-present). It does so while examining the region's close economic and cultural connections to sub-Saharan Africa, Europe, and the Middle East. Readings will include histories, political analyses, anthropological studies, and novels, and will cover a wide range of topics such as colonial and postcolonial experiences, developments in Islamic thought and practice, and labor migration. This class is intended for juniors, seniors, and graduate students. Prerequisite: A university-level survey course in Middle Eastern, African, or Meditterranean history." +EDUC-598,"This ABCS course explores religious pluralism and interfaith dialogue and action on college campuses. It brings together students with diverse faith commitments (including atheism) to engage with and learn from one another in academic study, dialogue, and service." +ACCT-613,"This course provides an introduction to both financial and managerial accounting, and emphasizes the analysis and evaluation of accounting information as part of the managerial processes of planning, decision-making, and control. A large aspect of the course covers the fundamentals of financial accounting. The objective is to provide a basic overview of financial accounting, including basic accounting concepts and principles, as well as the structure of the income statement, balance sheet, and statement of cash flows. The course also introduces elements of managerial accounting and emphasizes the development and use of accounting information for internal decisions. Topics include cost behavior and analysis, product and service costing, and relevant costs for internal decision-making. 
This course is recommended for students who will be using accounting information for managing manufacturing and service operations, controlling costs, and making strategic decisions, as well as those going into general consulting or thinking of starting their own businesses." +ENVS-699,"This course is designed to help students successfully complete their MES Capstone. A set of milestones will be set and regular meetings will be held in groups and individually to aid the student as they complete the research portion of their degree. We will be working together to complete a series of steps towards the final project. These steps fall into five major areas: 1) Reviewing the literature; 2) Finding a model; 3) Framing your research; 4) Managing data; and 5) Writing your results. Throughout the semester, we will also discuss career goals and the job search. Prerequisite: Project proposal and Online Application required for course registration. See MES Office and ""Guide to the Capstone"" for more information." +GRMN-101,"Designed for the beginning student with no previous knowledge of German. German 101, as the first course in the first-year series, focuses on the development of language competence in listening, speaking, reading, and writing. By the end of the semester, students will be able to engage in simple conversations about familiar things, know greetings and everyday expressions, they will be able to count and tell time, and negate sentences in day-to-day contexts. Furthermore, students will be able to speak about events that happened in the immediate past and express plans for the future. In addition, students will have developed reading strategies that allow them to glean information from simple newspaper and magazine articles and short literary texts. Because cultural knowledge is one of the foci of German 101, students will learn much about practical life in Germany and will explore German-speaking cultures on the Internet." +WH-301,"Organizations emerge because individuals cannot (or do not want to) accomplish their goals alone. Therefore, an organization is most often defined as a collective oriented toward a common goal. Collaboration -- in relationships and in teams -- is the building block of organizational effectiveness. That is, much of your work each day will occur in a social context, and will require you to wield influence (and be influenced). Moreover, over 80% of Fortune 1,000 companies now use teams. The ability to work effectively in teams is thus a critical skill. In this course we will use the latest evidence from the science of organizations to understand an array of tactics that can help you work with others (and manage them) as you strive to attain shared goals, especially in the context of teams. You will develop a portable toolkit of ideas related to managing team decision making, team conflict, team diversity, interpersonal influence and emotional intelligence. This is a cross-listed course. Students may enroll in either MGMT 301 or WH 301." +SAST-217,"C.U. in India is a hybrid, domestic/overseas course series which provides students with the opportunity to have an applied learning and cultural experience in India or South East Asia where students participate in 1) 28 classroom hours in the Fall term 2) a 12-day trip to India or South East Asia with the instructor during the winter break visiting key sites and conducting original research (sites vary) 3) 28 classroom hours at Penn in the Spring term and 4) a research paper, due at the end of the Spring term. 
Course enrollment is limited to students admitted to the program. For more information and the program application go to http://sites.sas.upenn.edu/cuinindia This is a 2-CU yearlong course" +WRIT-038,"This writing seminar focuses on a scholarly inquiry in the field of Engineering, the use of scientific principles to design and build machines, structures, systems, and other items. This seminar will introduce students to scholarly conversations on a specific topic in this discipline as a basis for understanding discipline- and profession-based writing. This includes types of reasoning, evidence, citation practices, and other means of creating, testing, and sharing knowledge with diverse audiences. Throughout, students are introduced to new writing situations as a means of sharpening their skills and learning how to be adaptive, effective writers." +LGST-206,"This course examines the art and science of negotiation, with additional emphasis on conflict resolution. Students will engage in a number of simulated negotiations ranging from simple one-issue transactions to multi-party joint ventures. Through these exercises and associated readings, students explore the basic theoretical models of bargaining and have an opportunity to test and improve their negotiation skills." +VBMS-604,"This is an introductory course to the neurosciences and assumes a basic background in anatomy, cell biology, histology and biochemistry. At the cell/molecular level the course covers neurocytology, membrane bioelectrical events and their channel protein basis, neurotransmitters and their actions at the synapse. It also covers aspects of neurochemistry, neuropharmacology and focuses on neuroanatomy and function of neural systems. The latter include the somatic and autonomic components of the peripheral nervous system, the spinal cord and reflexes, primary sensory systems, motor pathways and limbic-visceral systems of the brain." +EDUC-646,The term school-to-prison pipeline typically refers to a disturbing trend in which punitive policies have led to children being funneled out of schools and into the criminal justice system at an alarming rate. This course: 1. Examines the historical context and policies that have contributed to the school-to-prison pipeline. +BMB-567,"This course covers selected topics in bioinorganic chemistry. Special emphasis is placed on dioxygen chemistry and electron transfer processes. Course topics include: 1) oxygen uptake and utilization; 2) oxygen transport; 3) oxygen and O atom incorporation into substrates; 4) metalloenzyme-catalyzed C-C bond formation; 5) the metallobiochemistry of DNA; 6) metal-sulfide proteins; 7) manganese containing metalloproteins; 8) photosystem II, light-driven electron transfer and the biological water-splitting reaction; 9) biological electron transfer; 10) electron transfer theory; 11) mechanisms of energy storage and release; and 12) long-distance electron transfer reactions." +ENGL-156,"A creative writing course built entirely around the use of photographs, and the crafting of compelling nonfiction narratives from them. The essential concept will be to employ photographs as storytelling vehicles. So we will be using curling, drugstore printed Kodak shots from our own family albums. We will be using searing and famous images from history books. We will be taking things from yesterday's newspaper. We will even be using pictures that were just made by the workshop participants outside the campus gates. 
In all of this, there will be one overriding aim to achieve memorable, full-bodied stories. To locate the strange, evocative, storytelling universes that are sealed inside the four rectangular walls of photograph. They are always there, if you know how to look. It's about the quality of your noticing, the intensity of your seeing. See the English Department's websitee at www.english.upenn.edu for a description of the current offerings." +GAFL-571, +BEPP-452,"This specialized course is usually only taken by Wharton students who plan to concentrate in actuarial science and Penn students who plan to minor in actuarial mathematics. It provides a comprehensive analysis of advanced life contingencies problems such as reserving, multiple life functions, multiple decrement theory with application to the valuation of pension plans." +LAW-734, +GPRD-957, +ANTH-617,"A critical examination of recent history and theory in cultural and social anthropology. Topics include structural-functionalism; symbolic anthropology; post-modern theory. Emphasis is on major schools and trends in America, Britain, and France." +WRIT-073,"This writing seminar focuses on a scholarly inquiry in the field of Philosophy, the study of questions about existence, knowledge, values, reason, mind, and language. This seminar will introduce students to scholarly conversations on a specific topic in this discipline as a basis for understanding discipline- and profession-based writing. This includes types of reasoning, evidence, citation practices, and other means of creating, testing, and sharing knowledge with diverse audiences. Throughout, students are introduced to new writing situations as a means of sharpening their skills and learning how to be adaptive, effective writers." +HCMG-352,"The purpose of this course is to apply economics to an analysis of the health care industry, with special emphasis on the unique characteristics of the US healthcare markets, from pre-hospital to post-acute care. This course focuses on salient economic features of health care delivery, including: the role of nonprofit providers, the effects of regulation and antitrust activity on hospitals, the degree of input substitutability within hospitals, the nature of competition in home health care, public versus private provision of emergency medical services, the effect of specialty hospitals and ambulatory surgery centers, the economics of direct-to-consumer advertising and its effect on drug safety, defining and improving medical performance in hospitals, specialization and investment in physical and human capital, and shifting of services between inpatient and outpatient settings and its effect on health care costs and quality." +MATH-260,This is an honors version of Math 240 which explores the same topics but with greater mathematical rigor. +GRMN-502, +PSYC-253,"Thinking, judgment, and personal and societal decision making, with emphasis on fallacies and biases. Prerequisite: One semester of Statistics or Microeconomics." +ANEL-546, +NURS-609,"This course explores the perioperative evaluation and advanced anesthetic principles related to patient populations undergoing a broad range of surgical procedures. Emphasis is placed on selection and administration of anesthesia to these populations to ensure optimal patient care, safety, monitoring and implementing interventions to prevent and treat common perioperative emergencies. 
Prerequisite: Enrollment in NANS program - year 2" +WRIT-030,"Open to upperclassmen who have not fulfilled their writing requirement. We will begin by reading and analyzing Richard Toye's A Very Short Introduction to Rhetoric to learn about the art of informing and persuading others, an art that is at the very heart of all civic society and every walk of life, as Toye's examples underscore, from the rhetoric of modern cinema and Churchill's wartime speeches to Islamic preaching. This introduction to rhetoric will be followed by students' own exploration of rhetoric in a topic of their own choosing, which might include the rhetoric engineers use to explain a failed bridge; a fashion designer uses to promote a new collection; or politicians and marketing consultants use to convince us to subscribe to their views. Teachers, doctors, and bill collectors use rhetoric, and so too lovers. Visual rhetoric - the ability of images to wordlessly persuade and explain - can be the most powerful of all. In this course, students will learn to be artful producers and discriminating recipients of rhetoric. Please note that the seats in WRIT 030 351 - WRIT 030 358 are typically reserved for upperclassmen, and that the seats in WRIT 030 601 - WRIT 030 603 are reserved for LPS students. Descriptions of WRIT 030 601 - WRIT 030 603 can be found here: https://apps.sas.upenn.edu/writing/ccs/catalog.php" +BIBB-240,Topics to be covered include basic principles of chronobiology; neuroscience mechanisms of circadian rhythms and sleep; phylogeny and ontogeny of sleep; human sleep and sleep disorders; circadian dysfunction; circadian and sleep homeostatic influences in human health and safety. Students may not receive credit for both BIBB 240 and BIBB 040. +ESE-224,"Introduction to signal and information processing (SIP). In SIP we discern patterns in data and extract the patterns from noise. Foundations of deterministic SIP in the form of frequency domain analysis, sampling, and linear filtering. Random signals and the modifications of deterministic tools that are necessary to deal with them. Multidimensional SIP where the goal is to analyze signals that are indexed by more than one parameter. Includes a hands-on lab component that implements SIP as standalone applications on modern mobile platforms." +PHIL-372,This majors seminar will cover selected topics in ethics. The content will vary from semester to semester. +NELC-587,"This course is designed to provide an in-depth analysis of archaeological metals. Topics to be discussed include: exploitation of ore and its transformation to metal in ancient times, distribution of metal as a raw material, provenance studies, development and organization of early metallurgy, and interdisciplinary investigations of metals and related artifacts like slag and crucibles. Students will become familiar with the full spectrum of analytical procedures, ranging from microscopy for materials characterization to mass spectrometry for geochemical fingerprinting, and will work on individual research projects analyzing archaeological objects following the analytical methodology of archaeometallurgy." +REES-010,"The reappearance of the concept of Central and Eastern Europe is one of the most fascinating results of the collapse of the Soviet empire. The course will provide an introduction to the study of this region - its cultures, histories, and societies - from the foundation of the Holy Roman Empire to the enlargement of the European Union. 
Students are encouraged to delve deeper into particular countries, disciplines, and sub-regions, such as Central Europe, Eastern Europe, and the Balkans, through an individual research paper and class presentations. Prerequisite: This course is one of two required core courses for the Russian and East European Studies (REES) major." +NURS-628,"An examination of the psycho-socio-cultural processes which influence the behavior patterns, coping, and adaptation of older adults. The course emphasizes strategies to promote mental health as well as assessment, presentation, and intervention in the major acute and chronic psychiatric disorders affecting the older adult." +PHIL-077,"This course is an introduction to some of the central philosophical problems of law: What is law? What makes law? What is the relationship between law and morality? Can laws be unjust? Is there a moral obligation to obey the law? We will look at different theories of law, such as positivism and natural law theory, and discuss topics like civil disobedience, liberty and the law, and punishment and the law. The third and final section of the course will consider an unusual and particularly significant kind of law: constitutional law. We will consider the purpose(s) of constitutions, how constitutionalism relates to democracy, and how constitutions ought to be understood and interpreted, in light of our answers to these first two questions. Throughout the course, we will engage with both classic and contemporary work, reading work by Michelle Alexander, Jeremy Bentham, Angela Davis, Ronald Dworkin, John Hart Ely, H.L.A. Hart, Thomas Hobbes, John Locke, John Stuart Mill, Robert Nozick, Martha Nussbaum, Richard Posner, Jeremy Waldron, and others." +BEPP-931,"The objective of this course is to introduce graduate students to computational approaches for solving economic models. We will formulate economic problems in computationally tractable form and use techniques from numerical analysis to solve them. We will discuss examples of computational techniques in the current economics literature as well as areas where these techniques may be useful in future research. We will pay particular attention to methods for solving dynamic optimization problems and computing equilibria of games. The substantive applications will cover a wide range of problems including industrial organization, game theory, macroeconomics, finance, and econometrics." +LGST-101,"This course presents law as an evolving social institution, with special emphasis on the legal regulation of business in the context of social values. It considers basic concepts of law and legal process, in the U.S. and other legal systems, and introduces the fundamentals of rigorous legal analysis. An in-depth examination of contract law is included." +EDUC-715,"This course is designed to enhance understanding of decision making in higher education administration. Based on case studies, students will analyze, propose policies, generate action plans and implementation procedures, and assess the potential consequences of their administrative decisions." +GPRD-929, +CIT-596,"This course focuses primarily on the design and analysis of algorithms. We will begin with sorting and searching algorithms and then spend most of the course on graph algorithms. In order to study graph algorithms, general algorithm design patterns like dynamic programming and greedy algorithms will be introduced. A section of this course is also devoted to understanding NP-Completeness." 
+COML-256,"This course will explore fiction and film in contemporary Japan, from 1945 to the present. Topics will include literary and cinematic representation of Japan s war experience and post-war reconstruction, negotiation with Japanese classics, confrontation with the state, and changing ideas of gender and sexuality. We will explore these and other questions by analyzing texts of various genres, including film and film scripts, novels, short stories, manga, and academic essays. Class sessions will combine lectures, discussion, audio-visual materials, and creative as well as analytical writing exercises. The course is taught in English, although Japanese materials will be made available upon request. No prior coursework in Japanese literature, culture, or film is required or expected; additional secondary materials will be available for students taking the course at the 600 level. Writers and film directors examined may include: Kawabata Yasunari, Hayashi Fumiko, Abe Kobo, Mishima Yukio, Oe Kenzaburo, Yoshimoto Banana, Ozu Yasujiro, Naruse Mikio, Kurosawa Akira, Imamura Shohei, Koreeda Hirokazu, and Beat Takeshi." +MKTG-955,"This is a continuation of MKTG 954. This doctoral seminar reviews analytical models relevant to improving various aspects of marketing decisions such as new product launch, product line design, pricing strategy, advertising decisions, sales force organization and compensation, distribution channel design and promotion decisions. The primary focus will be on analytical models. The seminar will introduce the students to various types of analytical models used in research in marketing, including game theory models for competitive analysis, agency theory models for improving organization design and incentives within organizations, and optimization methods to improve decision making and resource allocation. The course will enable students to become familiar with applications of these techniques in the marketing literature and prepare the students to apply these and other analytical approaches to research problems that are of interest to the students." +BIOL-375,"Microbiology plays a central role in diverse areas of human life such as infectious disease, ecology, and biotechnology. This course will cover aspects of modern microbiology with an emphasis on prokaryotic organisms. The topics will include basic aspects of microbial diversity, genetics, virology, and pathogenesis as well as examples of applied microbiology." +OIDD-697,"This course is highly recommended for students with an interest in pursuing careers in: (1) retailing and retail supply chains; (2) businesses like banking, consulting, information technology, that provides services to retail firms; (3) manufacturing companies (e.g. P&G) that sell their products through retail firms. Retailing is a huge industry that has consistently been an incubator for new business concepts. This course will examine how retailers understand their customers' preferences and respond with appropriate products through effective supply chain management. Supply chain management is vitally important for retailers and has been noted as the source of success for many retailers such as Wal-mart and Home Depot, and as an inhibitor of success for e-tailers as they struggle with delivery reliability. See M. L. Fisher, A. Raman and A. McClelland, ""Rocket Science Retailing is Coming - Are You Ready?,"" Harvard Business Review, July/August 2000 for related research." 
+COML-094,"This course introduces students to major issues in the history of literary theory. Treating the work of Plato and Aristotle as well as contemporary criticism, we will consider the fundamental issues that arise from representation, making meaning, appropriation and adaptation, categorization and genre, historicity and genealogy, and historicity and temporality. We will consider major movements in the history of theory including the ""New"" Criticism of the 1920's and 30's, structuralism and post-structuralism, Marxism and psychoanalysis, feminism, cultural studies, critical race theory, and queer theory. See the Comparative Literature website at http://ccat.sas.upenn.edu/complit/ for a description of the current offerings." +SWRK-614,"This is the second in a four-course sequence and continues to examine varied practice frameworks and methods for service delivery in working with individuals, groups, families and communities. It emphasizes the eradication of institutional racism and other forms of oppression along with the integration of a culturally-sensitive approach to social work practice. Attention is given to understanding client problems in the context of different social work practice approaches and service requirements and to increased use of professional values to guide and inform practice." +NURS-614,"Population specific topics of concern to nurse anesthetists are reviewed and discussed. Seminal works in the field of anesthesia are reviewed and discussed to facilitate a comprehensive review of contemporary anesthesia practice. The gaps between research and its implementation in practice will be considered. Students will focus on completing a comprehensive review of 1) Basic sciences; 2) Equipment, Instrumentation and Technology; 3) Basic Principles of Anesthesia Practice; and 4) Advanced Principles of Anesthesia Practice as described by National Council on Certification and Recertification of Nurse Anesthetists. Prerequisite: Must be enrolled in the Nurse Anesthesia program" +NURS-749,"This course explores the impact of historical ideas, events, and actors on current issues in health and illness care. Topics include the movement from hospitals to health care systems; the changing definitions of professionalism and professional practice patterns; and the ways historical context shapes definitions of leadership roles and theoretical knowledge." +EPID-664,"This course will introduce students to methods and study design principles that are specific or unique to clinical research and trials in neurology, child neurology,neuro-ophthalmology, neurosurgery, and related fields. Prerequisite: Permission of instructor" +PERS-616, +EAS-512,"The goal of this course is to teach students of engineering and applied science to be effective negotiators. It aims to improve the way these students communicate i virtually any human interaction. The course intends to improve the ability of engineers and other technology disciplines to gain more support more quickly for projects, researc product and services development, and marketing. For those wanting to be entrepreneurs o r intrapreneurs, the course is designed essentially to find the most value possible in starting up and running companies. 
Based on Professor Diamond's innovative and renowned model of negotiation, it is intended to assist those for whom technical expertise is not enough to persuade others, internally and externally, to provide resources, promotions and project approvals; or to resolve disputes, solve problems and gain more opportunities. Rejecting the 40-year-old notions of power, leverage and logic, the course focuses on persuasion by making better human connections, uncovering perceptions and emotions, and structuring agreements to be both collaborative and fair. This course is entrepreneurial in nature and can provide many times more value than traditional persuasion. The Getting More book has sold more than 1 million copies around the world and is also used by universities, corporations (Google), and U.S. Special Operations (SEALs, Green Berets, Special Forces, Marines) to save lives and reduce conflict. From the first day, students will do interactive cases based their own engineering-related problems and based on current problems in the news. There will be diagnostics enabling every student to assess his/her skill and improvements." +NURS-361,"Human milk is recognized universally as the optimal diet for newborn infants. The health benefits of breastfeeding are so significant that a National Health Objective set forth by the Surgeon General of the United States for the year 2010 is to increase the proportion of mothers who breastfeed their babies in the postpartum period. Through classroom and clinical experiences, this course will provide an in depth examination of the anatomy and physiology of lactation, essential aspects of establishing and maintaining lactation, and the nurses' role in counseling the breastfeeding family. Emphasis will be placed on current research findings in the content area." +EDUC-669,"This course is designed as a collaborative investigation into practitioner inquiry and the work of inquiry communities in K-16 and graduate/professional school settings, professional networks and community-based organizations. The focus is on conceptual and methodological frameworks and methods of practitioner inquiry and the contexts, purposes and practices of differently situated inquiry communities. Participants will explore a range of practitioner inquiry traditions and texts that go by terms such as action, collaborative, critical, community-based, participatory, autobiographical, emancipatory, narrative and pedagogical. They will also conduct an inquiry based on their particular interests and contexts. The course will emphasize practitioner inquiry that intentionally engages issues of equity, access and culture in educational settings." +EDUC-586,"This ethnographic methodology course considers filmmaking/videography as a tool in conducting ethnographic research as well as a medium for presenting academic research to scholarly and non-scholarly audiences. The course engages the methodological and theoretical implications of capturing data and crafting social scientific accounts/narratives in images and sounds. Students are required to put theory into practice by conducting ethnographic research and producing an ethnographic film as their final project. 
In service to that goal, students will read about ethnography (as a social scientific method and representational genre), learn and utilize ethnographic methods in fieldwork, watch non-fiction films (to be analyzed for formal properties and implicit assumptions about culture/sociality), and acquire rigorous training in the skills and craft of digital video production. This is an ABCS course, and students will produce short ethnographic films with students in Philadelphia high schools as part of a partnership project with the School District of Philadelphia. Due to the time needed for ethnographic film production, this is a year-long course, which will meet periodically in both the fall and spring semesters." +SPAN-180,Must be a resident of the Modern Language College House. Prerequisite: Residence in Modern Language House +NURS-215,"This course emphasizes the child-bearing cycle, and the related issues of maternal and infant mortality and morbidity. It also explores women and infant's health care and health promotion needs across the lifespan. It provides a global perspective, and uses the United Nations' Pillars of Safe Motherhood and World Health Organization's Millennium Development Goals as the vehicles to enable students to understand the interrelationships among issues of health and health promotion; social, economic, political and environmental contexts; and the care of women across the lifespan. Clinical experiences provide opportunities for students to understand the connections between the local and the global; to use their developing knowledge base to affect the health of women and their infants. Students will have opportunities for hospital-based care of child-bearing women and their infants. In addition, community-based experiences with individual women and with groups of women across the life cycle will be provided in order to enhance teaching, interviewing and assessment skills." +ANTH-415,"This course introduces the study of animal bones from archaeological sites. Faunal analysis is an interdisciplinary science which draws methods from archaeology, biology, and paleontology. Bones, shells, and other remains yield evidence for the use of animals by humans, and evidence for the biology of animals and for past environments. The course will focus on research approaches to important transitions in human-animal relationships: the development of human hunting and fishing, animal domestication, early pastoralism, and the emergence of market economies in animal products. Class presentations will include lectures and discussion concerning research design and archaeological case material, with additional videos, slidework with field and laboratory equipment, and supervised work identifying and describing archaeological materials from the University Museum's collections. This class is taught in the Zooarchaeology Laboratory of the Center for the Analysis of Archaeological Materials" +BIOL-438,"The course will focus on muscle function from the level of molecules to whole animal locomotion. At each level of organization, muscle function will be explored from mechanical and energetic viewpoints. The course will include lectures, demonstrations, and several guest expert lectures. Students will also be introduced to realistic musculo-skeletal modelling and forward dynamic simulations to explore integrated function." +FNCE-785,"This course explores strategic, business and legal decision making in a fluid real world corporate context. 
Classes will cover a series of timely financial and legal subjects as well as case studies that deal with topical problems in corporate governance, investment strategy, finance, private equity, executive compensation, and potential corporate and criminal behavior. Press, public market reaction, and governmental/political considerations will be integrated into the discussion. All students will be required to participate in one major and two minor team projects. An equal number of graduate law and business students will be enrolled in this class. The instructor, a 30-year veteran and partner at a major private equity firm, is also an attorney and CPA. No prerequisites." +AFST-251, +URDU-402,"This introductory course develops core proficiency in Hindi-Urdu up to the intermediate level. It is designed for students with little or no prior exposure to Hindi or Urdu. The course covers all four language skills (speaking, listening, reading, and writing) and all three modes of communication (interpersonal, presentational, interpretive). Students will develop literacy skills in the primary script of their choice (Hindi or Urdu script). All written materials will be provided in both scripts. All meetings are interactive and students acquire the language by using it in realistic contexts. Culture is embedded in the activities and is also introduced through various authentic materials." +CLST-402,Intensive Greek reading course for students in the Post-Baccalaureate Program in Classical Studies. Readings are chosen to expose students to a variety of prose and poetry texts during their program experience. The Fall course includes some grammar review and analysis as well as translation. Permission of instructor required for non-Post-Baccalaureate students. +NELC-137,"This course will explore the origins, the history and, most importantly, the literary and cinematic art of the struggle that has endured for a century over the region that some call the Holy Land, some call Eretz Israel and others call Palestine. We will also consider religious motivations and interpretations that have inspired many involved in this conflict as well as the political consequences of world wars that contributed so greatly to the reconfiguration of the Middle East after the fall of the Ottoman Empire, and after the revelations of the Holocaust in Western Europe. While we will rely on a textbook for historical grounding, the most significant material we will use to learn this history will be films, novels, and short stories. Can the arts lead us to a different understanding of the lives lived through what seems like unending crisis?" +FNAR-222,"The history and practice of the contemporary mural movement is coupled with step-by-step analysis of the process of designing and painting a mural. In addition, students will learn to see mural art as a tool for social change. This course combines theory with practice. Students will design and paint a large outdoor mural in West Philadelphia in collaboration with Philadelphia high school students and community groups. The class is co-taught by Jane Golden, director of the Mural Arts Program in Philadelphia, and Shira Walinsky, a mural arts painter and founder of Southeast by Southeast project, a community center for Burmese refugees in South Philadelphia." +MATH-571,Continuation of Math 570. Prerequisite: Permission of instructor if course prerequisites not met +ARCH-768,"This course evaluates ""ground-up"" development as well as re-hab, re-development, and acquisition investments. 
We examine raw and developed land and the similarities and differences of traditional real estate product types including office, R & D, retail, warehouses, single family and multi-family residential, mixed use, and land as well as ""specialty"" uses like golf courses, assisted living, and fractional share ownership. Emphasis is on concise analysis and decision making. We discuss the development process with topics including market analysis, site acquisition, due diligence, zoning, entitlements, approvals, site planning, building design, construction, financing, leasing, and ongoing management and disposition. Special topics like workouts and running a development company are also discussed. Course lessons apply to all markets but the class discusses U.S. markets only. Throughout the course, we focus on risk management and leadership issues. Numerous guest lecturers who are leaders in the real estate industry participate in the learning process. Format: predominately case analysis and discussion, some lectures, project visits." +BE-553,"Tissue engineering demonstrates enormous potential for improving human health. This course explores principles of tissue engineering, drawing upon diverse fields such as developmental biology, cell biology, physiology, transport phenomena, material science, and polymer chemistry. Current and developing methods of tissue engineering, as well as specific applications will be discussed in the context of these principles. A significant component of the course will involve review of current literature within this developing field." +EDUC-566,"This course provides students experiential and cognitive awareness through affective exercises and readings. It explores issues of living in a diverse society through a variety of educational strategies including workshops, small group process, guest lectures, etc. It represents the seminar portion of P.A.C.E. (Programs for Awareness in Cultural Education): An ""Educating the Peer Educator"" Program." +KORN-012,"This is a continuation of KORN 011. This course aims to further develop the four language skills of students to the novice-high level by building on materials covered in KORN 011. Students will learn how to use three speech styles (polite formal, informal, and intimate) appropriately in a given context. Upon successful completion of this course, students should be able to handle simple and elementary needs of daily lives and talk (and write) about a variety of topics such as family, college life, birthday celebration, shopping, Korean food, etc." +CBE-480,"The laboratory methods covered include molecular cloning techniques, cell transformation, DNA gel electrophoresis, ImageJ, PCR, DNA sequencing, SDS?PAGE, mammalian cell culture and enzyme assays. Culture techniques for bacteria, yeast and mammalian cells are taught and practiced. The students write several individual lab reports and keep a weekly lab notebook during the semester. A group presentation and report on a proposal for a new lab experiment is the final assignment for the lab." +CBE-371,The design of industrial methods for separating mixtures. Distillation; liquid-liquid extraction; membranes; absorption. Computer simulations of the processes. +BIOL-221,"This course will survey the discipline of molecular genetics. Two broad areas will be considered 1) Molecular Biology: DNA replication, transcription, translation, regulation of gene expression in both prokaryotic and eukaryotic systems, and genomics and 2) Genetics: basic Mendelian & molecular genetics." 
+SPAN-388,Topics vary. Please see the Spanish Department's website for the current course description: https://www.sas.upenn.edu/hispanic-portuguese-studies/pc +CLST-303,"This course Presents an introduction to the history, theory and modern practice of museums. Using the resources of the University Museum, the course will introduce students to curatorial practice, education, exhibition design and conservation, while exploring the theoretical and ethical issues confronted by museums. Particularly relevant for those interested in archaeology, anthropology, art history, cultural heritage and public education." +BEPP-789,"This course is intended to deepen understanding of the major contemporary issues in the world economy. The focus is on the ""big picture"" of global economic developments and the evolution of economic thought over the last one hundred years. The topics include: financial market booms and busts; business cycles; monetary and fiscal policies; inequality; the social welfare state; technological change and economic growth; and international trade and financial arrangements. The time period covers: the Roaring Twenties; the Great Depression, the post war Golden Age (1945-1973); the stagflation of the 1970s; the Washington Consensus era of the market liberalization (1980-2007); and the 2008 financial crisis and ensuing Great Recession; and the recent rise of populism. This course also explores different schools of thought. The course will chronicle and compare economic policy and performance of the United States, Europe, Japan and emerging markets (Asia, Latin America, Africa)." +DYNM-615,"This class will operate in a hybrid format. We will have four 90-minute virtual class sessions (via Zoom) on four Thursdays (6 PM to 7:30 PM) -- January 23, February 13, March 5th, and March 26th. These sessions will cover key material on sustainable cities and will help guide students in their thinking for course deliverables. The course also has an independent study element similar to a capstone course - as students will have considerable independent time to complete course material and conduct research for their projects throughout the term. Environmentalist Paul Hawken challenged a class of 2009 college graduates that they would have to ""figure out what it means to be a human being on earth at a time when every living system is declining, and the rate of decline is accelerating."" That theme is at the heart of this course. While we have seen the notion of sustainability gaining traction in recent years, our quality of life in the near future hinges on the development and implementation of sustainable solutions to enormously complex global environmental and social problems. This course is designed to foster the thinking that is needed to address those enormous problems. It involves focusing on a critical global problem with sustainability and social dimensions - in this case, the rapid shift of an increasing global population to cities - and providing the framework for students to take a deep dive into evaluating and recommending solutions to meet that challenge. The World Health Organization (WHO) estimates that in 1990, less than 40% of the global population lived in cities, and that figure is expected to grow to 60% by 2030 and 70% by 2050. 
Such growth brings tremendous sustainability and design challenges in both developed and developing countries in terms of resource consumption, food security, water security, energy security, air quality, transportation, infrastructure, waste processing, recycling, and public health. It also brings opportunity. In 2020, students will conduct research on the challenges and opportunities created by increasing urbanization, drawing on readings from thought leaders in conjunction with their own creative insights, with a focus on advancing development of sustainable cities to ensure the long-term health of people and planet. Prerequisite: Non-DYNM students must complete a course permit request: https://www.sas.upenn.edu/lps/graduate/dynamics/course-permit" +EALC-633,"This seminar investigates classical Chinese conceptions of art and beauty as exemplified in philosophy, literature, music, painting, calligraphy, and architecture. All readings will be in English, and no knowledge of Chinese is presumed. Graduate students should see the instructor to discuss requirements for graduate credit." +MATH-314,"Topics will include: Vector spaces, Basis and dimension, quotients; Linear maps and matrices; Determinants, Dual spaces and maps; Invariant subspaces, Cononical forms; Scalar products: Euclidean, unitary and symplectic spaces; Orthogonal and Unitary operators; Tensor products and polylinear maps; Symmetric and skew-symmetric tensors and exterior algebra. Prerequisite: Math 314/514 covers Linear Algebra at the advanced level with a theoretical approach. Students can receive credit for at most one of Math 312 or Math 314." +MGMT-264,"This course focuses on venture capital management issues in the context of a high-growth potential start-up company. The course is motivated by rapid increases in both the supply of and demand for private equity over the past two decades. The topic is addressed from two distinct perspectives: issues that relate to the demand for private equity and venture capital (the entrepreneur's perspective) on the one hand, and issues that relate to the supply of capital (the investor's perspective) on the other. As well, we will address management issues that relate to how the VC and the entrepreneur work together once an investment has been made, compensation issues, and governance issues in the privately held venture capital backed company. Format: Case/discussion format, supplemented by lectures and guest speakers." +PSCI-697, +FNCE-812,"The focus will be on the causes of fiscal crises, a careful detailing of who wins and who loses, and then on how such crises might be resolved and, perhaps most importantly, how they might be prevented in the future. The course will draw upon the fiscal experiences of US local governments (New York, Philadelphia, Detroit, Orange County, Puerto Rico), utilities (WPPSS) and states (Illinois), and the international experience from such countries as Greece, Brazil, and Argentina. The costs of such crises for citizens, pensioners, and bond holders can be significant. We seek to understand the underlying economic, political, and legal/regulatory causes of such events so that they may be prevented in the future. The importance of private information and public regulation for disciplining the fiscal performance of democratically elected governments will be a central concern. 
We believe strongly that diagnosing and treating the ""disease"" of fiscal mismanagement is an interdisciplinary endeavor drawing on finance, economics, political science, and the law. Students with backgrounds in any of these disciplines are welcome." +OIDD-291,"This course examines the art and science of negotiation, with additional emphasis on conflict resolution. Students will engage in a number of simulated negotiations ranging from simple one-issue transactions to multi-party joint ventures. Through these exercises and associated readings, students explore the basic theoretical models of bargaining and have an opportunity to test and improve their negotiation skills." +NGG-521,"The course is geared to advanced undergraduate and graduate students interested in understanding the basics of implantable neuro-devices, their design, practical implementation, approval, and use. Reading will cover the basics of neuro signals, recording, analysis, classification, modulation, and fundamental principles of Brain-Machine Interfaces. The course will be based upon twice weekly lectures and ""hands-on"" weekly assignments that teach basic signal recording, feature extraction, classification and practical implementation in clinical systems. Assignments will build incrementally toward constructing a complete, functional BMI system. Fundamental concepts in neurosignals, hardware and software will be reinforced by practical examples and in-depth study. Guest lecturers and demonstrations will supplement regular lectures." +FNCE-256,"The objective of this course is to provide students with detailed knowledge of corporate structures, valuation methods, project finance, risk management practices, corporate governance issues, and geo-political risks in the energy industry. In general, this course seeks to provide students with an overall context for understanding energy issues and risks, and how these might affect financing and investment decisions for both providers of energy and end-users of energy." +BE-608,"To provide an in-depth view of the process by which scientific discoveries are commercialized. This course covers discovery in the laboratory, technology transfer, regulatory, financial, and managerial issues involved in moving a technology from the lab into the market place. The course contents fall into three broad categories: (1) examples of scientific discoveries that are candidates for commercialization, (2) fundamental elements of technology transfer, such as intellectual property protection and licensing, and (3) aspects of commercialization, such as regulatory approval, financing, and startup formation. In using this structure, the course provides parallel coverage of both the science and the commercialization process, in such a way that the elements of one contribute to the student's experience in learning the other. Prerequisite: Undergraduates and graduate students in other departments are welcome. Please contact mmaxwell@upenn.edu to request permission to register." +PHYS-314,"This course covers the fundamentals of atmosphere and ocean dynamics, and aims to put these in the context of climate change in the 21st century. Large-scale atmospheric and oceanic circulation, the global energy balance, and the global hydrological cycle. We will introduce concepts of fluid dynamics and we will apply these to the vertical and horizontal motions in the atmosphere and ocean. 
Concepts covered include: hydrostatic law, buoyancy and convection, basic equations of fluid motions, Hadley and Ferrel cells in the atmosphere, thermohaline circulation, Sverdrup ocean flow, modes of climate variability (El-Nino, North Atlantic Oscillation, Southern Annular Mode). The course will incorporate student-led discussions based on readings of the 2007 Intergovernmental Panel on Climate Change (IPCC) report and recent literature on climate change. Aimed at undergraduate or graduate students who have no prior knowledge of meteorology or oceanography or training in fluid mechanics. Previous background in calculus and/or introductory physics is helpful. This is a general course which spans many subdisciplines (fluid mechanics, atmospheric science, oceanography, hydrology)." +ENGL-092,"This course is an introduction to the analysis of film as both a textual practice and a cultural practice. We will examine a variety of films--from Fritz Lang's M (1931) to Julie Dash's DAUGHTERS OF THE DUST (1991)--in order to demonstrate the tools and skills of ""close reading."" We will concentrate on those specifically filmic features of the movies, such as mise-en-scene, cinematography, editing and sound strategies, as well as those larger organizational forms, such as narrative and non-narrative structures and movie genres. Because our responses to the movies always extend beyond the film frame, we will additionally look closely at the complex business of film distribution, promotion, and exhibition to show how the less visible machinery of the movie business also shapes our understanding and enjoyment of particular films. Along the way, we will discuss some of the most influential and productive critical schools of thought informing film analysis today, including realism, auteurism, feminism, postmodernism, and others. Screenings are mandatory. See the English Department's website at www.english.upenn.edu for a description of the current offerings." +PSCI-010,"Freshmen seminars are small, substantive courses taught by members of the faculty and open only to freshmen. These seminars offer an excellent opportunity to explore areas not represented in high school curricula and to establish relationships with faculty members around areas of mutual interest. See www.college.upenn.edu/admissions/freshmen.php" +DADE-921,"Participants in this workshop will assess their beliefs and awareness around cross-cultural communication and diversity and inclusiveness. Through lecture, group participation, skill practice, role-play, case studies, and coaching, they learn to advance their skill levels and take communication to the next level. Participants will also be recorded at the opening and conclusion of the workshop to assess skill development." +NURS-652,"This course focuses on the management of financial resources in the healthcare industry particularly in inpatient and ambulatory care settings. Specific emphasis is on applied accounting, budgeting, capital planning, nursing staffing/scheduling and variance analysis. Additionally, students will apply concepts in developing a business/program plan including completion of an environmental scan, cost-benefit analysis and marketing plan. Students will engage in strategic planning, stakeholder analysis and benchmarking efforts." +CRIM-300,"This course explores constitutional criminal procedure or the law of the Fourth, Fifth, and Sixth Amendments to the United States Constitution. 
Topics included the laws and rules associated with search and seizure, arrest, interrogation, the exclusionary rule, and deprivation of counsel. Social science evidence that supports or raises questions about legal doctrine will be examined. No prerequisites are required." +NURS-357,"Innovation, defined as a hypothesis-driven, testable, and disciplined strategy, is important to improve health & healthcare. Employing new ways of thinking, such as with design thinking, will help open up possibilities of ways to improve health & the process of healthcare. Incorporating current & emerging social & digital technologies such as mobile apps, wearables, remote sensing, and 3D printing, affords new opportunities for innovation. This course provides foundational content & a disciplined approach to innovation as it applies to health & healthcare. A flipped classroom approach has the in-class component focusing on group learning through design thinking activities. The course is open to undergraduate nursing students as a case study & upper-level undergraduates and graduate students from across the Penn campus. The course provides a theoretical foundation in design thinking & may provide an overview of innovation technology & digital strategies as well as social & process change strategies. To enhance the didactic component, students will actively participate in a design case study. Students will be matched by interest and skill level with teams & will work with community-based organizations, healthcare providers and/or innovation partners. Student teams will meet their partners to identify & refine a health or healthcare problem to tackle. Students will work throughout the semester to create an innovative solution that will be pitched to their community-based organization, healthcare provider, and/or innovation partner at the end of the semester. Prerequiste: Completion of freshman & sophomore level courses" +CAMB-695,"This 7-week course is designed to introduce students to basic scientific writing skills and is based upon the premise that clear writing, giving feedback, and receiving feedback are all essential tools for professional development. While this is not strictly a prelim preparatory course, applying the principles of this course will help improve your prelim writing and your scientific writing in general. Structure: An initial introductory lecture for the entire class is followed by 6 weekly small group sessions. These sessions are conducted as workshops designed to enhance student and faculty engagement to improve scientific writing. During the course, participants review the princinples of clear, persuasive writing, and apply these principles to writing for a scientific audience. Particular emphasis is placed on conveying the significance of your research, outlining your aims, and discussing your results. Classes are highly interactive, and the majority of class time will be spent discussing student scientific writing. In order to focus on the techniques of scientific writing, in-class discussion and critiques will not address scientific methodology or interpretations of results." +ACCT-921,"This is an empirical literature survey course covering topics that include corporate disclosure, cost of capital, incentives, compensation, governance, financial intermediation, financial reporting, tax, agency theory, cost accounting, capital structure, international financial reporting, analysts, and market efficiency." 
+NURS-587,"Grounded in a social justice perspective, this course aims to provide the student with a foundational overview of the field of community health and leadership skills in public health advocacy. The course encourages critical thinking about health outcomes framed by the broad context of the political and social environment. This course analyzes the range of roles and functions carried out by leaders in healthcare advocacy for marginalized communities; integrates knowledge of health policy and the key influence of government and financing on health outcomes; explores community-based participatory research and interventions as tools for change; and discusses ways to develop respectful partnerships with community organizations. An assets-based approach that draws upon the strengths of communities and their leaders provides a foundation for community-engagement skill building. The course emphasizes the development of skills and techniques to lead effective, collaborative, health-focused interventions for disenfranchised groups, including residents of urban neighborhoods. Prerequisite: Undergraduates with permission of the instructor" +NURS-731,This course focuses on the care of high-risk neonates within the context of the family unit. The biological and psychosocial aspects are studied as a basis for nursing practice. Emphasis is placed on the role of the Advanced Practice nurse in improving services to high-risk neonates with the purpose of decreasing mortality and morbidity rates and improving the quality of life of high-risk newborns and infants. +DYNM-723,"Participants learn to be coaches by being coaches to one another. Over a two-month period, cohort members expand their repertoire of skills and tools, share their experiences, and together scrutinize the client/coach relationship." +PHIL-525,"For the last four centuries, scientific research has provided our most reliable understanding of the world. Although the scientific revolution started modestly with attempts to understand stellar movement, we now know the age and constitution of the universe, the basis of heredity, and we can make and break chemical bonds at will. By all appearances, science seems to have made substantial progress from the scientific revolution to the global scientific enterprise of the 21st centry. This course is about how science has generated this knowledge, and whether it has been as progressive and reliable as it seems. We will consider methodological issues such as the sources of scientific knowledge, objectivity, the growing importance of computation in the natural sciences, and the nature of modeling. We will examine products of scientific research: explanations, models, theories, and laws of nature. And we will discuss questions about science and values, including whether non-scientific values can and should enter scientific research, the relationship between science and religion, and the role of the public in guiding the scientific enterprise." +FNCE-725,"This course covers fixed income securities (including fixed income derivatives) and provides an introduction to the markets in which they are traded, as well as to the tools that are used to value these securities and to assess and manage their risk. Quantitative models play a key role in the valuation and risk management of these securities. 
As a result, although every effort will be made to introduce the various pricing models and techniques as intuitively as possible and the technical requirements are limited to basic calculus and statistics, the class is by its nature quantitative and will require a steady amount of work. In addition, some computer proficiency will be required for the assignments, although familiarity with a spreadsheet program (such as Microsoft Excel) will suffice." +LAW-967, +MKTG-211,"This course is concerned with how and why people behave as consumers. Its goals are to: (1) provide conceptual understanding of consumer behavior, (2) provide experience in the application of buyer behavior concepts to marketing management decisions and social policy decision-making; and (3) to develop analytical capability in using behavioral research." +MEAM-529,"Introduction to MEMS and NEMS technologies: MEMS/NEMS applications and key commercial success stories (accelerometers, gyroscopes, digital light projectors, resonators). Review of micromachining techniques and MEMS/NEMS fabrication approaches. Actuation methods in MEMS and NEMS, MEMS/NEMS design and modeling. Examples of MEMS/NEMS components from industry and academia. Case studies: MEMS inertial sensors, microscale mirrors, micro and nano resonators, micro and nano switches, MEMS/NEMS chem/bio sensors, MEMS gyroscopes, MEMS microphones." +PSYC-435, +MGMT-729,"Announcing the first iPhone at Macworld 2007, Apple CEO Steve Jobs famously boasted: ""And boy, have we patented it!"" How, and to what extent, do patents and intellectual property really provide competitive advantage for innovative technology companies? What makes an IP asset strategically powerful? How do patents impact, and even drive, major corporate decisions including M&A, venture funding and exits, and entry into new markets? In this course, students will learn to critically analyze and answer these questions, gaining insights they can leverage in their future roles as innovation industry executives, entrepreneurs, strategist and investors. The course includes three major units. In Unit 1, Patents and Innovation Value, we examine closely the relationship between competitive advantage, value proposition, and intellectual property (particularly patents). We will apply our understanding of that relationship to critique and sharpen patent strategy to protect examples of cutting-edge technologies. In Unit 2, Patent Leverage and the Corporate Playbook, we study theory and examples of how intellectual property leverage strategically informs corporate transactions and decisions, for established companies as well as for start-ups. In unit 3, Limits and Alternatives to Patents, we confront the recent legal trend toward reigning in the power and scope of patents. We also consider the growing importance of data as a proprietary technology asset, and discuss options for adapting intellectual property strategy appropriately. Throughout, students will learn and practice applying the concepts we learn to decision-making in examples based on innovative real-world technologies and businesses." 
+AFST-285,"The objectives are to continue to strengthen students' knowledge of speaking, listening, reading, and writing Swahili and to compare it with the language of the students; to continue learning about the cultures of East Africa and to continue making comparisons with the culture(s) of the students; to continue to consider the relationship between that knowledge and the knowledge of other disciplines; and using that knowledge, to continue to unite students with communities outside of class. Level 3 on the ILR (Interagency Language Roundtable) scale." +SOCI-159,"Since the collapse of communism in 1989 in Eastern Europe (and 1991 in the Soviet Union), many of the countries in the region have experienced public health crises and demographic catastrophe. Below replacement fertility rates and massive out migration have decimated the populations of these countries even as populations age and place unsustainable strains on pension systems and medical services. The demographic collapse has also been accompanied by falling male life expectancy and the rise of alcoholism, depression, domestic violence, and suicide. The economic exigencies of the transition from communism to capitalism dismantled welfare states at the exact moment when health services were most needed, leaving charities and nongovernmental organization to try to fill in the gaps. Through a combination of readings from the fields of epidemiology, demography, and medical anthropology, this course examines the public health implications of poverty and social dislocation in post-communist states. All readings and assignments are in English." +FREN-229,"Where adn how is French spoken in the world? Which variety (or varieties) of French represents ""good"" or standard language use? What does it mean to have an accent or to experience linguistic insecurity? To what extent have political forces and movements historically affected the evolution of French? How do language attitudes differ among French- and English-speaking regions of the world and what is the status of French in an era of globalization? In what ways does language shape our identities? Le Francais dans le monde/French in the World examines these questions by providing a survey of the sociolinguistics of the French language in the contemporary world. We will explore how societal changges influence the manner and the contexts in which the French language is spoken. Case studies focus on various parts of the Francophone world, including Europe (Belgium, Switzerland), New World (Quebec, Caribbean, Louisiana), Africa (North Africa, Sub-Saharan Africa), etc. Readings and class discussions are in French. Prerequisite: Two 200-level French courses taken at Penn or equivalent." +COML-247,"""A spectre is haunting Europe--the spectre of Communism"": This, the famous opening line of The Communist Manifesto, will guide this course's exploration of the history, legacy, and potential future of Karl Marx's most important texts and ideas, even long after Communism has been pronounced dead. Contextualizing Marx within a tradition of radical thought regarding politics, religion, and sexuality, we will focus on the philosophical, political, and cultural origins and implications of his ideas. Our work will center on the question of how his writings seek to counter or exploit various tendencies of the time; how they align with the work of Nietzsche, Freud, and other radical thinkers to follow; and how they might continue to haunt us today. 
We will begin by discussing key works by Marx himself, examining ways in which he is both influenced by and appeals to many of the same fantasies, desires, and anxieties encoded in the literature, arts and intellectual currents of the time. In examining his legacy, we will focus on elaborations or challenges to his ideas, particularly within cultural criticism, postwar protest movements, and the cultural politics of the Cold War. In conclusion, we will turn to the question of Marxism or Post-Marxism today, asking what promise Marx's ideas might still hold in a world vastly different from his own. All readings and lectures in English." +TURK-122,"Similar to TURK 212, Advanced Turkish Culture & Media I, in this course students will also have exposure to social Turkish clubs and will establish their own. They will arrange Turkish tea parties and learn about Turkish cuisine. Turkish daily news and media will be discussed in class. Students will have the chance to interview Turkish businesspeople, writers, and journalists in class and/or over Skype or Zoom in Turkish. Turkish team spirit and ethics will be compared with those of the United States. Students will prepare and present a drama. Students will largely create and decide their own activities and discussions, and the instructor will mostly monitor them. They will continue watching Turkish movies and be exposed to Turkish culture through these films. After each movie, discussions and essay writing will be expected." +BEPP-620,"Behavioral economics has revealed a variety of systematic ways in which people deviate from being perfectly selfish, rational, optimizing agents. These findings have important implications for government policy and firm behavior. This course will explore these implications by answering two main questions: (1) what does behavioral economics imply for when and how the government should intervene in markets? (2) What does behavioral economics imply for firms' pricing and production decisions? The course will present the standard economic approaches to answering these questions and then explore how answers change when we consider that people act in behavioral ways. Towards the end of the course, we will investigate specific policy questions, allowing us to debate solutions while hearing from policy makers operating in a world of behavioral agents." +NURS-648,"This course will build on concepts presented in the Diagnosis and Management of Adults across the Lifespan (NURS 646) course. The focus is on refining health assessment skills, interpreting findings, developing and implementing appropriate plans of care to meet common health maintenance needs of adults and to promote the health of adults with more complex health problems with an emphasis on the frail adult. The student will gain increased expertise in communication skills, health assessment skills, interpreting findings, epidemiological concepts and developing and implementing plans of care. The emphasis will be placed upon managing an aging population with complex, chronic healthcare needs and promoting healthy behaviors across the lifespan." +COML-570,"Topic for Fall 2017: ""Object Theory"". This seminar will investigate the rise of and ongoing scholarly concern with ""objects"" and ""things,"" which has emerged from fields such as anthropology and art history as a category of renewed interest for literary scholars, too. 
We will investigate key contributions to theories of the object by thinkers such as: Mauss, Barthes, Heidegger, Latour, Benjamin, Bill Brown, Jane Bennett, among others. Literary readings will accompany these theoretical texts." +LAW-597, +GRMN-504, +HSOC-420,"This course is designed to provide HSOC students with the tools necessary to undertake original research, guiding them through the research and writing process. Students will produce either a polished proposal for a senior thesis project, or, if there is room in the course, a completed research paper by the end of term. Students work individually, in small groups and under the close supervision of a faculty member to establish feasible research topics, develop effective research and writing strategies, analyze primary and secondary sources, and provide critiques of classmates' drafts. Students must apply for this course by December 1." +LING-610,Selected topics either in Indo-European comparative linguistics or in historical and comparative method. +SWRK-760,"This course familiarizes students with mental health and mental disorders within the context of the life cycle, viewed from a biopsychosocial perspective. Prevalent categories of psychiatric disorders are considered with respect to their differentiating characteristics, explanatory theories, and relevance for social work practice, according to the DSM and other diagnostic tools. The course includes biological information and addresses the impact of race, ethnicity, social class, age, gender, and other sociocultural variables on diagnostic processes." +CBE-535,"This course provides an overview of fundamental concepts in colloid and interface science. Topics include the thermodynamics of interfaces, interfacial interactions (e.g. van der Waals interactions, electrostatics, steric interactions), adsorption, the hydrodynamics and stability of interfacial systems, self-assembly, etc. Connections to self-assembly and directed assembly of nanomaterials and emerging topics are explored. Pre-requisites: undergraduate thermodynamics, some familiarity with concepts of transport phenomena (including fluid flow and mass transfer) and differential equations" +EAS-502,"The objective is to introduce students to the major aspects of renewable energy, with its foundations in technology, connections to economics, and impacts on ecology and society. This introduction is intended both for general education and awareness and for preparation for careers related to this field. The course spans from basic principles to applications. A review of solar, wind, biomass, hydroelectric, geothermal energy, and prospects for future energy systems such as renewable power generation in space." +OIDD-245,"Students who take this course will engage with the world of data science using tools such as Tableau and R that are becoming increasingly popular in industry. The first half of the course is designed for students with limited experience with data projects, and while familiarity with R, via courses such as STAT 405 or STAT 470, will be ideal preparation, students with other programming exposure can pick up the required skills via review sessions and self-instruction. The second half of the course extends students' experience to industry applications of text mining and machine learning and requires students to work with more unstructured data. Each week of the course will be devoted to analysis of a data set from a particular industry (e.g. 
HR, sports, fashion, real estate, music, education, politics, restaurants, non-profit work), which we will use to answer business questions by applying analytic techniques. The course is very hands-on, and students will be expected to become proficient at applying data to business decisions and at effectively analyzing large data sets to inform decisions about business problems." +LAW-974, +MUSC-135,"This course surveys American musical life from the colonial period to the present. Beginning with the music of Native Americans, the European legacy, and the African Diaspora, the course treats the singular social and political milieu that forged the profile of America's musical landscape. Attention will be given to the establishment of the culture industry and to various activities such as sacred music, parlor music, concert and theater music, the cultivation of oral traditions, the appearance of jazz, the trajectory of western art music in the United States, and the eventual global dominance of American popular music. Music 070 prerequisite. Preference given to music Majors and Minors. Fulfills the Cultural Diversity in the U.S. College Requirement." +MKTG-239,"As consumers, we are constantly exposed to advertisements and experience visual messages from product packages in stores, retail displays, and products already owned. In essence, visual marketing collateral is omnipresent and is an essential part of corporate visual identity, strategy, branding, and communication. Some of this falls to creative graphic design, but advertising, design, and marketing can also be significantly enhanced by knowledge of how visual information and its presentation context can be optimized to deliver desirable and advantageous messages and experiences. This course will emphasize how to measure, interpret, and optimize visual marketing. This course will use lectures, discussions, exercises and a group project, to help students understand the underlying processes that influence our visual perception and visual cognition. Students will learn about the theoretical processes and models that influence, attention and visual fluency. Students will also be exposed to eye-tracking instruments that help measure eye movement. Finally, we will explore how visual stimuli can influence consumer memory, persuasion, and choice. We will examine practical applications in marketing, advertising, packaging, retail, and design contexts." +CIS-195,"This project-oriented course is centered around application development on current mobile platforms like iOS and Android. The first half of the course will involve fundamentals of mobile app development, where students learn about mobile app lifecycles, event-based programming, efficient resource management, and how to interact with the range of sensors available on modern mobile devices. In the second half of the course, students work in teams to conceptualize and develop a significant mobile application. Creativity and originality are highly encouraged! Prerequisite: CIS 120 or previous programming experience." +LING-230,An introduction to phonetics and phonology. 
Topics include articulatory phonetics (the anatomy of the vocal tract; how speech sounds are produced); transcription (conventions for representing the sounds of the world's languages); classification (how speech sounds are classified and represented cognitively through distinctive features); phonology (the grammar of speech sounds in various languages: their patterning and interaction) and syllable structure and its role in phonology. Prerequisite: A prior course in linguistics or permission of instructor. +EDUC-668,"This seminar explores key foundational questions for graduate-level work: How is academic knowledge formed and reproduced? How do we engage with and interrogate the scholarly research? And, how do we participate in the academic conversation around a topic? The Master's Paper Seminar introduces students to academic discourse, disciplinary writing conventions, and research practices. As part of this course, students are guided through preparing a literature review of a topic of their choice. This review, in turn, forms the foundation of their 30-40 page Master's Paper that is required for the completion of the M.S.Ed degree." +EAS-897, +MKTG-806,"RETAIL MERCHANDISING; This course introduces the role of merchandising at various retailers with an emphasis on apparel and soft-line businesses. Selected topics will include product development, line planning, sourcing, product lifecycle, forecasting, buying, planning and vendor relations. Special emphasis will be placed on current trends in retail merchandising through current articles and industry guest speakers. The objective of this course is to familiarize students with merchandising theory and strategies considered to be current best practices in retailing." +OIDD-934,"The course goal is to provide a brief but fairly rigorous introduction to the formulation and solution of dynamic programs. Its focus is primarily methodological. We will cover discrete state space problems, over finite or infinite time horizon, with and without discounting. Structured policies and their theoretical foundation will be of particular interest. Computational methods and approximation methods will be addressed. Applications are presented throughout the course, such as inventory policies, production control, financial decisions, and scheduling." +EDUC-663,"The course provides an understanding of sociocultural concepts essential to the work of counselors and providers of psychological services. This course provides a contextual and applied understanding of working with socioculturally diverse clients. The purpose of this course is to expand one's understanding of the impact of sociocultural and contextual factors, social-psychological influences, the role of values, and the interaction of identities in counseling and psychological services. Both intervention and prevention strategies will be addressed. The student will be required to demonstrate a working knowledge of key concepts in sociocultural psychology and the topical areas addressed in the course." +AFRC-491,Continuation of AFST 490. Offered through Penn Language Center. Prerequisite: Permission of Penn Language Center. +GSWS-344,"Intellectual, emotional and behavioral development in the college years. Illustrative topics: developing intellectual and social competence; developing personal and career goals; managing interpersonal relationships; values and behavior. Recommended for submatriculation in Psychological Services Master's Degree program." 
+VLST-233,"Introduction to major artistic traditions of China and Japan and to the methodological practices of art history. Attention given to key cultural concepts and ways of looking, in such topics as: concepts of the afterlife and its representation; Buddhist arts and iconography; painting styles and subjects; and more broadly at the transmission of styles and cultural practices across East Asia. Serves as an introduction to upper level lecture courses in East Asian art history cultures. If size of class permits, certain sessions will be held in the University Museum or the Philadelphia Museum of Art." +JPAN-022,"PREREQUISITES Completion of JPAN 021, JPAN012 or the equivalent. This is equivalent to JPAN 111 and JPAN 112 in one semester, 2CU, and completes the College language requirement. Textbooks: Genki II (Lesson 13- Lesson 23) and Tobira: Gate way to Advanced Japanese (Unit 1-Unit 3) Kanji: Approximately 140 new Kanji will be introduced. Overall Kanji knowledge will be about approx. 400." +HIST-650,Reading and discussion course on selected topics in African history +CPLN-643,"This newly reconstituted course will introduce designers and planners to practical methods of design and development for major real estate product types. Topics will include product archetypes, site selection and obtaining entitlements, basic site planning, programming, and conceptual and basic design principles. Project types will include, among others; infill and suburban office parks, all retail forms, campus and institutional projects. Two-person teams of developers and architects will present and discuss actual development projects." +ACCT-706,"This course covers managerial accounting and cost management practices that can be strategically applied across the various functions of a business organization to improve organizational performance. The course emphasizes the methods available to measure and evaluate costs for decision-making and performance evaluation purposes. It reviews a number of cost management issues relating to the design and implementation of strategic, marketing, value analysis, and other management models in modern firms; and identifies major contemporary issues in managerial accounting and financial decision- making. A variety of case studies in different industries and decision contexts are used to examine the application of these concepts." +GRMN-514, +INTL-BSL, +LAW-920, +CPLN-632,This course explores the nature and use of raster-oriented (i.e. image-based) Geographic Information Systems (GIS) for the analysis and synthesis of spatial patterns and processes. Previous experience in GIS is not required. +CIT-594,"This course will focus on data structures, software design, and advanced Java. The course starts off with an introduction to data structures and basics of the analysis of algorithms. Important data structures covered will include arrays, lists, stacks, queues, trees, hash maps, and graphs. The course will also focus on software design and advanced Java topics such as software architectures, design patterns, networking, multithreading, and graphics. We will use Java for the entire course." +COML-101,"The purpose of the course is to introduce you to the subjects of the discipline of Folklore, their occurrence in social life and the scholarly analysis of their use in culture. 
As a discipline folklore explores the manifestations of expressive forms in both traditional and modern societies, in small-scale groups where people interact with each other face-to-face, and in large-scale, often industrial societies, in which the themes, symbols, and forms that permeate traditional life, occupy new positions, or occur on different occasions in everyday life. For some of you the distinction between low and high culture, or artistic and popular art will be helpful in placing folklore forms in modern societies. For others, these distinctions will not be helpful. In traditional societies, and within social groups that define themselves ethnically, professionally, or culturally, within modern heterogeneous societies, and traditional societies in the Americas, Africa, Asia, Europe and Australia, folklore plays a more prominent role in society than it appears to play in literati cultures on the same continents. Consequently, the study of folklore and the analysis of its forms are appropriate in traditional as well as modern societies and any society that is in a transitional phase." +BE-498,Second semester of a year-long project. +ARCH-712,A seminar on advanced topics in architectural design and theory. Topics and instructors will vary. +PSCI-258,"What exactly should be considered a fundamental ""human right""? What is the basis for considering something a fundamental human right? This course will examine not only broad conceptual debates, but will also focus on specific issue areas (e.g., civil rights, economic rights, women's rights), as well as the question of how new rights norms emerge in international relations." +BMIN-520, +GEOL-643,"The evaluation of technical, social and economic constraints on the design of water supply and sanitation projects. The focus on sustainable design emphasizes how technical solutions fit within the appropriate social context. Case studies are used to demonstrate these principles across a range of examples from developed and developing countries including detailed studies from rural communities with limited resources." +DADE-924,"There are numerous conditions that affect the oral and maxillofacial region, including oral mucosal diseases, temporomandibular joint disorders, orofacial pain syndromes and salivary gland dysfunction. Patients presenting with these disorders can be challenging to diagnose and manage. Several techniques are available for evaluation of these conditions and will guide the clinician toward proper diagnosis. Management protocols vary based upon the specific condition affecting the oral and maxillofacial region. This course will highlight the etiology, clinical presentation, diagnostic techniques, and management protocols of several conditions, including oral mucosal diseases, temporomandibular joint disorders, orofacial pain syndromes, and salivary gland disorders." +LING-151,"This course describes current theorizing on how the human mind achieves high-level cognitive processes such as using language, thinking, and reasoning. The course discusses issues such as whether the language ability is unique to humans, whether there is a critical period to the acquisition of a language, the nature of conceptual knowledge, how people perform deductive reasoning and induction, and how linguistic and conceptual knowledge interact." +KORN-132,"This course is a continuation of KORN 131 and aims to further develop students' linguistic and cultural competence by building on materials covered in KORN 131. 
In addition to gaining a deeper understanding of Korean culture, the course focuses on enhancing linguistic accuracy and fluency in both spoken and written Korean. Particular emphasis will be placed on building a meaningful Korean-speaking community, as well as consolidation of grammar structures, and expansion and enhancement of vocabulary. Topics include preparing for a trip to Korea, finding housing, college culture in Korea, entertainment and participating in various social events. Upon completion of this course, students will be able to express themselves more accurately and participate in Korea-related communities more meaningfully. This course completes the College language requirement." +ANTH-307,This course examines the social and political lives of contemporary Native American Indians in the United States and Canada. Topics include: Indigenous identity; homelands and natural resources; popular culture and media; Indigenous arts and cultural expression; museum representations; athletics; gender relations; tribal recognition and sovereignty; and resistance movements. We will consider the origins of federal programs and legislation that have become essential to the protection of Native American freedoms. Students can expect to gain an appreciation of the complexity and cultural diversity of Native communities and tribal nations and insights into their interactions with other cultures over time. +CIS-399,Visit the CIS department website for descriptions of available Special Topics classes. +RELS-144,"""Jesus and Muhammad walk into a bar..."" We can think about multiple ways to complete the joke. They could talk about prophecy and prophetic succession, God's word, women, pagans and Jews, state authority, among others. This course traces the long arc of religious history, from the Jesus movement to the rise of Islam. Through texts, objects, buildings, and artistic representations we will study the time period that connects these two significant developments that profoundly changed world history. Lectures and discussions will consist of close reading, analysis, and discussion of primary sources, analysis of non-literary media, and engagement with modern scholarship. We will raise questions about ancient and modern perspectives on religious practice, representation, authority, gender, race/ethnicity, memory, and interreligious encounters." +MUSC-236,"Participation in the course is contingent upon a successful audition. This course must be taken for a letter grade (pass/fail option may not be utilized for this course). This weekly seminar will explore music from the past and present through class discussions of performance, historical context, and analytical aspects of the music led by a professor and/or performer. One example of a class in this number will be an in-depth study of chamber music repertoire led by the Daedalus Quartet. Students will prepare for a final performance at the end of the semester as well as a paper/presentation. Students interested in this applied approach to music may also wish to take 256 and/or 276. Prerequisite: Students must successfully audition to be in the course; previous private study in an instrument is required. Basic fluency in rudiments of music theory is also required." +HSOC-251,"Many factors have shaped, and continue to shape, population health and public health policy. This course will explore the concept, mission, and core functions of public health. 
Students will have a chance to learn about its key methodological (epidemiology, biostatistics) and content (environmental health, social and behavioral sciences, health policy) areas. In addition, we will focus on topics of particular relevance to the current health of the public; topics likely will include the basics of life (food, water, and shelter) and topics of current interest (e.g., motor vehicle crashes, mental health, violence)." +EDUC-545, +CAMB-534,"An advanced seminar course emphasizing genetic research in model organisms and how it informs modern medicine. Each week a student will present background on a specific human disease. This is followed by an intense discussion by the entire class of 2 recent papers in which model organisms have been used to address the disease mechanism and/or treatment. As a final assignment, students will have the opportunity to write, edit, and publish a ""News & Views"" style article in the journal ""Disease Models and Mechanisms"". Offered spring semester. Prerequisite: If course requirements not met, permission of instructor required." +MATH-730,Topics from the literature. The specific subjects will vary from year to year. +CAMB-706,"This is a year-long course for the incoming CAMB-MVP students and others wishing to gain a broad overview of pathogens and their interactions with hosts. The course will provide students with key fundamental knowledge of Microbiology, Virology and Parasitology. The course starts with introductory lectures on Concepts of Host-Pathogen interactions. The rest of the course is divided into sections on Bacteriology, Virology and Parasitology. Each week there are three 1 hour class slots that are either lectures on a specific topic or discussions of a relevant paper presented by students. Classes are led by faculty from across the campus and are highly interactive. Evaluation is based on mid and final take home essay topics for each of the three sections. Regular attendance and active participation in the discussions is also part of the evaluation." +NURS-513,"This course will examine obesity from scientific, cultural, psychological, and economic perspectives. The complex matrix of factors that contribute to obesity and established treatment options will be explored. Prerequisite: Undergraduate by permission of instructor This course satisfies the Society & Social Structures Sector for Nursing Class of 2012 and Beyond." +LALS-158,"This survey course considers Latin American musics within a broad cultural and historical framework. Latin American musical practices are explored by illustrating the many ways that aesthetics, ritual, communication, religion, and social structure are embodied in and contested through performance. These initial inquiries open onto an investigation of a range of theoretical concepts that become particularly pertinent in Latin American contexts--concepts such as post-colonialism, migration, ethnicity, and globalization. Throughout the course, we will listen to many different styles and repertories of music and then work to understand them not only in relation to the readings that frame our discussions but also in relation to our own, North American contexts of music consumption and production. (Formerly Music 158)." +SWRK-714,"The focus of learning in this semester is theories and skills related to clinical practice with individuals and groups, differential intervention, and the broadening of the professional role and repertoire. 
The course content and assignments are closely linked with the students' learning objectives and experiences in the field. Students extend and refine their practice knowledge and skills and learn to intervene with cognitive, behavioral, and narrative modalities. This semester focuses also on work with complex trauma across systems and populations. Students consolidate their identification as professionals and learn to constructively use the environment to effect systems changes." +CIS-197,"This course provides an introduction to modern web development frameworks, techniques, and practices used to deliver robust client-side applications on the web. The emphasis will be on developing JavaScript programs that run in the browser. Topics covered include the JavaScript language, web browser internals, the Document Object Model (DOM), HTML5, client-side app architecture and compile-to-JS languages (CoffeeScript, TypeScript, etc.). This course is most useful for students who have some programming and web development experience and want to develop moderate JavaScript skills to be able to build complex, interactive applications in the browser." +EALC-622,"Continuation of CHIN491 EALC221/621, which is the only prerequisite for this course. Upon completion of Shadick, readings in a wide selection of texts with Chinese commentaries may be taken up. These readings are in part chosen to reflect student interest. This is the second half of a year-long course. Those who enroll must take both semesters." +VISR-699,"This course enables students to undertake a self-directed study on a topic in Veterinary Medicine, under the supervision of a faculty member. Students are required to submit an Independent Study & Research (ISR) application to the Registrar Manager in the Office for Students. Credit may vary." +BE-101,Introduction to Bioengineering II. Continuation of the freshman introductory bioengineering course. This course introduces students to the design process and emphasizes its role in engineering. +LGST-611,"This course uses the global business context to introduce students to important legal, ethical and cultural challenges they will face as business leaders. Cases and materials will address how business leaders, constrained by law and motivated to act responsibly in a global context, should analyze relevant variables to make wise decisions. Topics will include an introduction to the basic theoretical frameworks used in the analysis of ethical issues, such as rights-based, consequentialist-based, and virtue-based reasoning, and conflicting interpretations of corporate responsibility. The course will include materials that introduce students to basic legal (common law vs. civil law) and normative (human rights) regimes at work in the global economy as well as sensitize them to the role of local cultural traditions in global business activity. Topics may also include such issues as comparative forms of corporate governance, bribery and corruption in global markets, human rights issues, diverse legal compliance systems, corporate responses to global poverty, global environmental responsibilities, and challenges arising when companies face conflicting ethical demands between home and local, host country mores. The pedagogy emphasizes globalized cases, exercises, and theoretical materials from the fields of legal studies, business ethics and social responsibility." +CLST-223,"Did you ever wonder what the world of the Iliad and Odyssey was really like? 
This illustrated lecture course surveys the prehistory and early history of the Greek world through texts and material remains, with the aim of bringing to life the society, economy, and politics of this ancient era. Among the topics are the rise and fall of the great Bronze Age civilizations of the Aegean area, the Minoans of Crete and the Mycenaeans of the Greek mainland; the cataclysmic volcanic eruption on the island of Thera (modern Santorini) and its long-term consequences; the Trojan War (myth or history??); the world of the Dark Age that followed the collapse of the Mycenaean palaces; and the Greek renaissance of the eighth century B.C. - including the adoption of the alphabet, the great colonizing movement, and the great Panhellenic sanctuaries like Olympia and Delphi - that laid the foundation for the Classical world to come. There are no prerequisites, and no prior knowledge of archaeology or the Greek world is assumed." +NPLD-750,"Businesses performing philanthropic activity often use their platform of CSR activities to engage with society directly, via a corporate foundation, or through partnerships with nonprofit organizations. Although such philanthropic activities are not directly related to profit-making ventures, they may boost a company's reputation, aid in marketing its products and recruiting talent, increase employee engagement and commitment, and thus contribute to profit indirectly. Many businesses undertake their CSR-related philanthropic activities using strategic partnerships with nonprofits or public sector organizations to meet their goals. This provides opportunities for nonprofit and public sector leaders to achieve social and sustainable change." +REAL-240,"This course is designed for majors in Real Estate, but is also open to finance-oriented students who want a deeper analysis of real estate investment and investment analysis issues than that offered in REAL 209. The class will contain a mixture of lectures, guest speakers and case discussions. Academic research is paired with recent industry analysis of key issues in order to marry sound theory and empirical results with current events and practices. Several classes will include lectures outlining what economics and finance tell us about a number of topics. Generally, these will be followed by guest lectures from industry professionals who will focus on a specific application of the principles introduced in the lectures." +SWRK-713,"This course builds upon the foundation of historical, psychological, sociological, economic, political, and personal knowledge about institutionalized forms of racism and discrimination developed in SWRK 603, American Racism and Social Work Practice. The course uses an understanding of the elements of oppression to critically examine strategies for addressing racism and sexism in organizations and communities through systematic assessment and planning for social change. The course examines change at three levels: organizations, communities, and social movements." +PSCI-333,"Political polls are a central feature of elections and are ubiquitously employed to understand and explain voter intentions and public opinion. This course will examine political polling by focusing on four main areas of consideration. First, what is the role of political polls in a functioning democracy? This area will explore the theoretical justifications for polling as a representation of public opinion. 
Second, the course will explore the business and use of political polling, including media coverage of polls, use by politicians for political strategy and messaging, and the impact polls have on elections specifically and politics more broadly. The third area will focus on the nuts and bolts of election and political polls, specifically with regard to exploring traditional questions and scales used for political measurement; the construction and considerations of likely voter models; measurement of the horserace; and samples and modes used for election polls. The course will additionally cover a fourth area of special topics, which will include exit polling, prediction markets, polling aggregation, and other topics. It is not necessary for students to have any specialized mathematical or statistical background for this course." +PSYC-449,"Topics vary each semester. PSYC 449 (Gerstein) Neuroscience for Policymakers: This seminar will provide an overview of the neuroscience behind some of the most relevant issues in public health policy today. We will examine the primary scientific literature as well as delve into lay articles about the science and policy surrounding each issue. /PSYC 449 (Epstein) Consciousness: Consciousness is our subjective experience of the world, including both perceptions and felt internal states. In this seminar, we will explore the burgeoning scientific literature on the neural basis of consciousness. We will focus in particular on three topics: What are the neural systems underlying visual awareness? What are the mechanisms that control the progression of conscious contents to create our stream of thought? What is the relationship between consciousness and behavior? /PSYC 449 (Jenkins) The Social Brain: This seminar examines the cognitive and neural mechanisms that enable humans to predict and understand people's behavior. We will be propelled throughout the course by fundamental questions about the human social brain. For example, why are humans so social? Does the human brain have specialized processes for social thought? Consideration of these questions will involve advanced treatment of a range of topics. Prerequisite: PSYC 449, 601 are LPS courses. PSYC 449, 301, 303 are Psych Department courses." +BMB-650,"Participation in the ""Dr. George W. Raiziss Biochemical Rounds"", a weekly seminar program sponsored by the Department of Biochemistry and Biophysics. Program deals with a wide range of modern biochemical and biophysical topics presented by established investigators selected from our faculty, and by leading scientists from other institutions. Prerequisite: Permission needed from Department" +AFRC-581,"James Baldwin, one of the greatest writers of the twentieth century, spoke to the issues of his times as well as to our own. This class will examine the intellectual legacy that Baldwin left to present-day writers such as Toni Morrison, Charles Johnson, Ta-Nehisi Coates, Thulani Davis, Caryl Phillips and others. We will spend time reading and discussing Baldwin's novels, short stories, plays and essays. In doing so, we will be considering the complex assumptions and negotiations that we make in our day-to-day lives around our identities and experiences built upon gender, sexual preference, the social-constructs called ""race,"" and more. James Baldwin's life and work will be the touchstone that grounds our discussions. 
We will read Go Tell It on the Mountain, Another Country, The Fire Next Time, and Giovanni's Room and see films I Am Not Your Negro, The Price of the Ticket and The Murder of Emmett Till. Students will research subjects of their own choosing about Baldwin's life and art. For example, they may focus on the shaping influences of Pentecostalism; segregation; racism; homophobia; exile in Paris; the Civil Rights Movement; Black Power, Baldwin's faith, or his return to America." +HCMG-863,"This course provides an overview of the management, economic and policy issues facing the pharmaceutical and biotechnology industries. The course perspective is global, but with emphasis on the U.S. as the largest and most profitable market. Critical issues we will examine include: R&D intensive cost structure with regulation and rapid technological change; strategic challenges of biotech startups; pricing and promotion in a complex global marketplace where customers include governments and insurers, as well as physicians and consumers; intense and evolving M&A, joint ventures, and complex alliances; government regulation of all aspects of business including market access, pricing, promotion, and manufacturing. We use Wharton and industry experts from various disciplines to address these issues." +NURS-757,"This course is the second of four residencies that provide the nurse anesthetist student the opportunity to attain competencies within the Certified Registered Nurse Anesthesia (CRNA) scope of practice. Throughout the residency, the nurse anesthesia resident will utilize appropriate clinical judgment to manage the complex medical, physical and psychosocial needs of clients in the perioperative phases. Further refinement of the patient assessment, anesthesia administration, and critical thinking skills is emphasized. Students progress by providing anesthesia care for patients throughout the continuum of health care services. The guidance of CRNA faculty preceptors contributes to the development of the independence of the CRNA student. Collaborative practice within a care team model is emphasized and the student assumes more overall responsibility for the quality of care for the patients throughout the perioperative experience, with clinical support as required. Prerequisite: Enrollment in NANS program, year 2" +EPID-625,"This course is an introduction to statistical methods that can be used to evaluate biomarker prognostic studies and multivariate prediction models. It is designed for advanced MS and PhD-level students in epidemiology and related fields (nursing, health policy, social work, demography). Topics will include biostatistical evaluation of biomarkers, predictive models based on various regression modeling strategies and classification trees, assessing the predictive ability of a model; internal and external validation of models; and updating prognostic models with new variables or for use in different populations. Students will learn about the statistical methods that are required by current reporting guidelines for biomarker prognostic studies or the reporting guidelines for multivariable prediction models. Prerequisite: Working knowledge of either Stata, SAS or R to fit regression, logistic regression and/or Cox regression models. Permission of course director for students outside of School of Medicine graduate programs." 
+NPLD-782,"Studying the behavior of groups and the actions/inactions of people within groups provides a doorway to deeper understanding of our selves, our families, our friends, our colleagues, our organizations, and our communitites. This half credit course is designed for Penn Graduate students eager to generate constructive group processes when chairing a committee, managing a work group, teaching in a classroom, serving on a jury, conducting a support/therapy group or facilitating strategy formulation. It is easy to see what is going well or poorly when observing what others are doing. But tuning into and gaining a comprehensive grasp of these processes when they are happening in the groups we belong to and learning how to take constructive actions in the here and now when it can have a meaningful impact requires a high level of cognitive capability combined with a special form of relational artistry. This course is an amalgam of experiential activities and energizing ways to internalize the rich concepts developed during many decades of applied-academic research. Participants are required to be fully present and fully engaged for the whole weekend, read the major book and a number of articles, pus write a paper. This course occupies a full weekend and runs from 6pm Fri. to 6pm Sun. This is a 48-hour intensive weekend; you go home to sleep. Participants applying for this course are required to take a Primer, which provides potential participants with a common conceptual base for engaging in the essential learning and lays out the intellectual foundations of the course. Permits will be issued soon after participants have taken the Primer. Contact the NPLD program for more information on primer and course dates." +PHIL-205,"This course will survey several central topics in philosophy of mind and language, as well as investigate how these areas of philosophy interact with the scientific study of the mind. Questions addressed may include: What is it to have a mind? What is consciousness? What is it to think, to perceive, to act, to communicate, to feel emotions? What is the relationship between the mind and the brain? Can there be a science of the mind? Of language? What can it tell us? What can philosophy contribute to cognitive science? We will look for more precise ways of asking these questions, and we will study some canonical answers to them." +MGMT-692,"This is a course the builds on the basic Negotiation course. In this course, we explore a wide range of negotiation topics from crisis and hostage negotiations, to the role of emotions including anxiety, envy and anger in negotiations, to backlash effects for women in negotiations, and the role of alcohol in negotiations. We will survey many aspects of current negotiation research, discuss historic negotiation cases, and students will participate in role-play exercises. Many of the role play exercises will involve multi-party negotiations and afford opportunities to hone skills in team-based negotiations." +DYNM-630,"According to newest research, over 70% of organizations worldwide have started digital transformation initiatives or at least plan for them. New digital tools are available almost daily - and many of them have the potential of a major business impact. They enable high performance practices and often even new business models. 
The resulting transformation of business processes leads to superior customer or supplier experience, and organizations become more efficient and agile, meet compliance requirements, and improve the quality of products and services. They help achieve a level of process performance you would never have thought of before. Robotic Process Automation (RPA), Blockchain, Artificial Intelligence, the Internet of Things and Cloud-based software architectures with next-generation automation approaches are some examples. However, many organizations underestimate the challenges of digital business transformation, resulting in initiatives delivering little or no business value. A major financial organization, for example, stopped the use of over 1000 robots since that digital workforce had created severe issues: processes changed more frequently than expected - the robots didn't, which led to numerous exception cases that had to be handled manually. The elimination of bottlenecks created more severe issues downstream. Value-driven Digital Business Transformation addresses those challenges. It proposes approaches, methods and tools that help to focus on the right sub-processes to transform and improve those areas considering the end-to-end business context, as well as sustain the results through appropriate governance. The systematic use of digital technologies requires hybrid workforce management, aligning people, robots and other technologies through appropriate business process management practices. A value-driven digital transformation prepares for this situation. The course discusses design and execution principles as well as related methods and tools to realize the full business value of digital business transformations, delivering results fast and at minimal risk. It combines the newest case studies with current research findings to master business impacts of digitalization." +PERS-612,"This course is designed to help you build upon what you have learned in Elementary Persian I. Emphasis is placed on using the language for interpersonal, interpretive, and presentational modes of communication. Therefore, use of English is restricted. Listening, speaking, reading, and writing, as well as culture, vocabulary, grammar, and pronunciation, are integrated into the course. Students must either have successfully completed PERS 611, or take the departmental exam." +EDCE-382,"The three PLN CDA courses build on the experiential knowledge of the early childhood practitioner (candidate). The courses connect child development theory and practice, health, safety, nutrition, and family community relations with the significant responsibilities of the candidate's ethical practice with children, families, and the community. The CDA six competency goals and thirteen functional areas are integrated into course content and discussions. Part II - Understanding and Supporting Children's Development: Modules 5-10 - To advance physical and intellectual competence" +NURS-708,"This course will explore the philosophy and growth of public policy that has directed the American Health Care System in its ever-expanding movement toward universal health care for all citizens. Analysis of health policy and systems content will assist students in identifying the knowledge and skills needed for the health or human service provider to assume leadership roles in the formulation of public policy for change; this includes system restructuring, service delivery and funding of health care. 
Emphasis will be on the effect of policy on the individual/family user of health care services rather than the effect on professional health care providers or health care delivery systems. Special attention will be given to the effect of policy on populations, both urban and rural, living near and below the poverty level." +IMPA-606,"From Confucius to Kant; from Machiavelli's The Prince to Loyola's The Spiritual Exercises to John Stuart Mill's On Liberty; from ideological tenets of Marxist-Leninist communism to catechetical tenets of post-Vatican II Catholicism; from the 20th-century prophet of pluralism Sir Isaiah Berlin to the ancient Hebrew prophet Isaiah; there is no shortage of ideas, beliefs and guiding principles to help leaders recognize and address the inevitable ethical questions related to public problem-solving. Through classic and contemporary readings and case studies, this course covers the basics of contemporary game theory as it relates to the empirics and ethics of negotiation. You are also exposed to several different philosophical, civic, ideological and religious traditions in moral reasoning as they relate to leadership ethics and effective public problem-solving. **For IMPA Students Only**" +MKTG-352,"MARKETING ANALYTICS: Companies are currently spending millions of dollars on data-gathering initiatives - but few are successfully capitalizing on all this data to generate revenue and increase profit. Moving from collecting data to analysis to profitable results requires the ability to forecast and develop a business rationale based on identified data patterns. Marketing Analytics will cover the three pillars of analytics - descriptive, predictive and prescriptive. Descriptive Analytics examines different types of data and how they can be visualized, ultimately helping you leverage your findings and strengthen your decision making. Predictive Analytics explores the potential uses of data once collected and interpreted. You will learn to utilize different tools, such as regression analysis, and estimate relationships among variables to predict future behavior. Prescriptive Analytics takes you through the final step - formulating concrete recommendations. These recommendations can be directed toward a variety of efforts including pricing and social-platform outreach." +MEAM-891,"Intended for graduate students conducting research. Building upon the fundamentals of mechanical design, this hands-on, project-based course provides participants with the knowledge and skills necessary to design, analyze, manufacture, and test fully functional subtractive manufacturing processes and part components. Topics covered include an introduction to machine elements, analysis of the mechanics of machining, manufacturing technology, precision fabrication (milling, turning, and computer-controlled machining), metrology, tolerances, cutting-tool fundamentals and engineering materials." +COML-096,"What makes men and women different? What is the nature of desire? This course introduces students to a long history of speculation about the meaning and nature of gender and sexuality -- a history fundamental to literary representation and the business of making meaning. We will consider theories from Aristophanes' speech in Plato's Symposium to recent feminist and queer theory. Authors treated might include: Plato, Shakespeare, J. S. 
Mill, Mary Wollstonecraft, Sigmund Freud, Virginia Woolf, Simone de Beauvoir, Adrienne Rich, Audre Lorde, Michel Foucault, Gayle Rubin, Catherine MacKinnon, Eve Kosofsky Sedgwick, Judith Butler, bell hooks, Leo Bersani, Gloria Anzaldua, David Halperin, Cherrie Moraga, Donna Haraway, Gayatri Spivak, Diana Fuss, Rosemary Hennessy, Chandra Talpade Mohanty, and Susan Stryker. See the English Department's website at www.english.upenn.edu for a description of the current offerings." +PHYS-016,"The developed world's dependence on fossil fuels for energy production has extremely undesirable economic, environmental, and political consequences, and is likely to be mankind's greatest challenge in the 21st century. We describe the physical principles of energy, its production and consumption, and environmental consequences, including the greenhouse effect. We will examine a number of alternative modes of energy generation - fossil fuels, biomass, wind, solar, hydro, and nuclear - and study the physical and technological aspects of each, and their societal, environmental and economic impacts over the construction and operational lifetimes. No previous study of physics is assumed. Prerequisites: Algebra and Trigonometry. May be counted as Science Studies for students in Class of 2009 and prior. Target audience: Non-science majors (although science/engineering students are welcome)." +NELC-102,"This is the second half of the Near East sequence. This course surveys Islamic civilization from circa 600 (the rise of Islam) to the start of the modern era and concentrates on political, social, and cultural trends. Although the emphasis will be on Middle Eastern societies, we will occasionally consider developments in other parts of the world, such as sub-Saharan Africa, Central Asia, and Spain, where Islamic civilization was or has been influential. Our goal is to understand the shared features that have distinguished Islamic civilization as well as the varieties of experience that have endowed it with so much diversity." +LAW-631, +JWST-053,"Development of the skills of reading, writing, and conversing in modern Hebrew on an intermediate level. Open to all students who have completed two semesters of Hebrew at Penn with a grade of B- or above and new students with equivalent competency." +VLST-261,"In this studio-based course, students are introduced to video production and postproduction as well as to selected historical and theoretical texts addressing the medium of video. Students will be taught basic camera operation, sound recording and lighting, as well as basic video and sound editing and exporting using various screening and installation formats. In addition to a range of short assignment-based exercises, students will be expected to complete three short projects over the course of the semester. Critiques of these projects are crucial to the course as students are expected to speak at length about the formal, technical, critical and historical dimensions of their works. Weekly readings in philosophy, critical theory, artist statements and literature are assigned. The course will also include weekly screenings of films and videos, introducing students to the history of video art as well as to other contemporary practices." +MUSC-171,"Continuation of techniques established in Theory and Musicianship I. Explores chromatic harmony. Concepts will be developed through analysis and model composition. Musicianship component will include sight singing, clef reading, harmonic dictation and keyboard harmony. 
Prerequisite: Required of music majors." +COML-006,"Premodern India produced some of the world's greatest myths and stories: tales of gods, goddesses, heroes, princesses, kings and lovers that continue to capture the imaginations of millions of readers and hearers. In this course, we will look closely at some of these stories, especially as found in Purana-s, great compendia composed in Sanskrit, including the chief stories of the central gods of Hinduism: Visnu, Siva, and the Goddess. We will also consider the relationship between these texts and the earlier myths of the Vedas and the Indian Epics, the diversity of the narrative and mythic materials within and across different texts, and the re-imagining of these stories in the modern world." +PSCI-181,"This course will provide an overview of major figures and themes of modern political thought. We will focus on themes and questions pertinent to political theory in the modern era, particularly focusing on the relationship of the individual to community, society, and state. Although the emergence of the individual as a central moral, political, and conceptual category arguably began in earlier eras, it is in the seventeenth century that it takes firm hold in defining the state, political institutions, moral thinking, and social relations. The centrality of ""the individual"" has created difficulties, even paradoxes, for community and social relations, and political theorists have struggled to reconcile those throughout the modern era. We will consider the political forms that emerged out of those struggles, as well as the changed and distinctly ""modern"" conceptualizations of political theory such as freedom, responsibility, justice, rights and obligations, as central categories for organizing moral and political life." +ENGL-282,"This course examines the coming to pass of trap music from several perspectives: 1) that of its technological foundations and innovations (the Roland 808, Auto-tune, FL Studio (FruityLoops), etc.); 2) that of its masters/mastery (its transformation of stardom through the figures of the producer (Metro Boomin) and the rock star (Future)); 3) that of its interpretability and effects (what does the music say and do to us). We will thus engage with this music as a practice of art and form of techno-sociality that manifests uncanny and maximal attunement with the now." +BSTA-670,"This course concentrates on computational tools, which are useful for statistical research and for computationally intensive statistics. Through this course you will develop a knowledge base and skill set of a wide range of computational tools needed for statistical research. Topics include computer storage, architecture and arithmetic; random number generation; numerical optimization methods; spline smoothing and penalized likelihood; numerical integration; simulation design; Gibbs sampling; bootstrap methods; and the EM algorithm. Prerequisite: If course requirements not met, permission of instructor required." +LAW-795, +GSWS-165,"This course complicates prevailing understandings of the Caribbean and extends the boundaries of Asian America by exploring the histories, experiences, and contributions of Asians in the Caribbean. In particular, we will focus on the migrations of Chinese and Indian individuals to Cuba, Trinidad, and Guyana as well as how their descendants are immigrating to the United States. 
We will examine the legal and social debates surrounding their labor in the 19th century, how they participated in the decolonization of the region, and how their migration to the United States complicates our understandings of ethnicity and race. Ultimately, through our comparative race approach, we will appreciate that the Caribbean is more than the Black Caribbean, it is also the Asian Caribbean." +EDUC-552,"Drawing on work from the education, psychology, communication, and the growing field of games studies, we will examine the history of video games, research on game play and players, review how researchers from different disciplines have conceptualized and investigated learning in playing and designing games, and what we know about possible outcomes. We will also address issues of gender, race and violence that have been prominent in discussions about the impact of games." +PSCI-498,Consult department for detailed descriptions. Recent topics include: Globalization; Race & Criminal Justice; Democracy & Markets in Postcommunist Europe. +COML-555,"This seminar will examine contemporary affect theory and its relationship with Michel Foucault's theory of power. We will begin by mapping out Foucault's ""analytics of power,"" from his early work on power knowledge to his late work on embodiment, desire, and the care of the self. We will then turn to affect theory, an approach which centralizes the non-rational, emotive force of power. No previous knowledge of theory is required." +SAST-799,Optional directed study course for PhD students in the last semester of coursework to prepare for candidacy exam to directly follow the end of this semester. +FNCE-751,"The focus of this course is on buying (or acquiring controlling stakes in) firms. The main topics to be covered are mergers and friendly acquisitions, hostile takeovers and buyouts. Using case studies, the course surveys the drivers of success in the transactions. While issues regarding motive and strategy will be discussed, financial theory would be the main lens used to view these control acquiring transactions. This will allow students to (1) evaluate transactions through valuation approaches and (2) structure deals employing financial innovation as a response to legal framework and economic frictions. This course should be of interest to students interested in pursuing careers as private equity investors, advisors in investment banking and corporate managers that deal with these issues. This course assumes familiarity with valuation analysis. During the spring semester students are not permitted to take this course pass fail." +MUSC-275,"MUSC275 offers an introduction to electronic music/sound production with a focus on analogue systems and performance. Guest artists will join us for in-class visits and performances during the semester. Meetings will take place in the classroom, in concert spaces and in the studio. Preference given to Music majors and minors for registration." +GOMD-978,"This course will allow academic discussion of disease, disease processes and therapeutic management of a wide range of topics related to oral medicine." +RELS-257, +SWRK-768, +INTL-BTM, +FNCE-254,"This course explores Impact Investing, a discipline that seeks to generate social benefits as well as financial returns. 
From tiny beginnings, the Impact Investment space has expanded and now commands significant attention from policymakers, wealthy and public-spirited individuals, academia and, not least, the world's largest asset managers and philanthropic foundations. Evangelists believe it may be the key to freeing the world from poverty. Skeptics think it will remain confined to the boutique. Regardless, Impact Investing is becoming a distinct career specialization for finance professionals despite the diverse skillset each must have and the uncertainty of the new field's growth." +ARCH-728,"Personalization is quickly becoming the norm for mass production in a variety of consumer-centric industries. From retail to food, the idea of designing and making custom-made products tailored to fit one's lifestyle will be our exploration. Utilizing digital design innovations, we are able to incubate ideas, prototype, test and be entrepreneurial in design to create these individualized products. Cues from these industries will be used to shift both cultural and experiential product design from a regional discovery to a global focus. This course will embrace digital design and utilize its engagement with manufacturing solutions for a physical output. Through research and a series of design exercises, the approach will be built upon several strategies including adaptability, materiality, fabrication, modularity, and human-centric design. The final project will interpret the research and result in the creation of a design strategy for a mass customized product or system. This course will explore product design solutions through a combination of physical and digital design methods. Beginning with an examination of case studies, students will gain a sense of the breadth of product and interaction design practice as it applies to smart objects. Through a series of lectures and hands-on studio exercises, students will explore all aspects of smart object design including expressive behaviors (light, sound and movement), interaction systems, ergonomics, data networks and contexts of use. The course will culminate in a final project that considers all aspects of smart object design within the context of a larger theme." +MKTG-350,"CONSUMER NEUROSCIENCE: How can studying the brain improve our understanding of consumer behavior? While neuroscience made tremendous strides throughout the 20th century, rarely were meaningful applications developed outside of medicine. Recently, however, breakthroughs in measurement and computation have accelerated brain science and created a dizzying array of opportunities in business and technology. Currently, applications to marketing research and product development are experiencing explosive growth that has been met with both excitement and skepticism. This mini-course provides an overview of the neuroscience behind and the potential for these developments. Topics will range from well-known and widely used applications, such as eye-tracking measures in the lab and field, to emerging methods and measures, such as mobile technologies, face-reading algorithms, and neural predictors of marketing response. The course will also discuss applications in branding and product development, including wearable physiological devices and apps, sensory branding for foods and fragrances, pharmaceuticals and medical devices, and neuroscience-based products designed to enhance cognitive functions. 
These applications stem from many subfields of cognitive neuroscience, including attention, emotion, memory, and decision making. This course is self-contained and has no prerequisites. However, students with some background in business, economics, psychology, and/or neuroscience are likely to find the material covered in this course complementary to their existing knowledge." +LAW-966, +PSCI-217,"This course will present an in-depth examination of political, economic and social change in post-Soviet Russia within a historical context. After a brief discussion of contemporary problems in Russia, the first half of the course will delve into the rise of communism in 1917, the evolution of the Soviet regime, and the tensions between ideology and practice over the seventy years of communist rule up until 1985. The second part of the course will begin with an examination of the Gorbachev period and the competing interpretations of how the events between 1985 and 1991 may have contributed to the collapse of the Soviet Union. We will then proceed to make sense of the continuities and changes in politics, economics and society in contemporary Russia. Important topics will include the confrontations accompanying the adoption of a new constitution, the emergence of competing ideologies and parties, the struggle over economic privatization, the question of federalism and nationalism, social and political implications of economic reform, and prospects for Russia's future in the Putin and post-Putin era." +GCB-577,"For second-year students in GCB, CAMB (G&E), or IGG programs using genomics methods to measure transcriptomic and epigenomic changes in their experimental systems. The goal is to familiarize students with the latest cutting-edge genomics tools and cover solutions to major experimental and computational challenges in the investigation of genome-wide epigenetic data sets. Students will develop competence in (i) variations of experimental techniques improving resolution and throughput, (ii) issues related to the computational analyses closely related to the various genome-wide assays used to probe epigenetic processes and signals, (iii) computational approaches useful to overcome pitfalls associated with the analysis of a given epigenetic data modality, (iv) methods, techniques and studies on the integration of multi-layer epigenetic data sets." +PSYC-612,"An applied graduate-level course for students who have completed an undergraduate course in basic statistical methods. Covers two unrelated topics: loglinear and logit models for discrete data and nonparametric methods for nonnormal data. Emphasis is on practical methods of data analysis and their interpretation. Primarily for doctoral students in the managerial, behavioral, social and health sciences. May be taken before STAT 500 with permission of instructor." +LAW-987, +HSOC-411,"Why did Lance Armstrong get caught? Why do Kenyans win marathons? Does Gatorade really work? In this course, we won't answer these questions ourselves but will rely upon the methods of history, sociology, and anthropology to explore the world of the sport scientists who do. Sport scientists produce knowledge about how human bodies work and the intricacies of human performance. They bring elite (world-class) athletes to their laboratories - or their labs to the athletes. 
Through readings, discussions, and original research, we will find out how these scientists determine the boundary between ""natural"" and ""performance-enhanced,"" work to conquer the problem of fatigue, and establish the limits and potential of human beings. Course themes include: technology in science and sport, the lab vs. the field, genetics and race, the politics of the body, and doping. Course goals include: 1) reading scientific and medical texts critically, and assessing their social, cultural, and political origins and ramifications; 2) pursuing an in-depth research project. The course fulfills the Capstone requirement for the HSOC/STSC majors. Semester-long research projects will focus on ""un-black-boxing"" the metrics sport scientists and physicians use to categorize athletes' bodies as ""normal"" or ""abnormal."" For example, you may investigate the test(s) used to define whether an athlete is male or female, establish whether an athlete's blood is ""too"" oxygenated, or assess whether an athlete is ""too"" fast (false start). Requirements therefore include: weekly readings and participation in online and in-class discussions; sequenced research assignments; peer review; and a final 20+ page original research paper and presentation." +LARP-734,"This advanced social science and design seminar is about mobilizing expert knowledge to develop transformative policy ideas to make the Green New Deal come alive. We'll look at cutting-edge social science and design scholarship on the problems we're trying to solve, and the successes and failures of past efforts at transformative policy. And we'll focus in particular on the built environment. How might a Green New Deal make the physical changes to our infrastructures, homes, energy landscapes, transportation systems, public recreation amenities, care facilities, and more, in ways that slash carbon emissions, increase resiliency, and abolish inequalities of race, class, gender, and nation? That's not a rhetorical question: in this class, we'll assemble knowledge, get into teams, and come up with concrete proposals." +EDUC-360,"A life-span (infancy to adulthood) approach to development. Topics include: biological, physical, social and cognitive basis of development. Films and guest speakers are often included." +DENT-634,"A combination of lectures, seminars and laboratory exercises provide the dental student with a fundamental understanding of the partially edentulous condition. Topics covered include classification, diagnosis, treatment planning and treatment of partially edentulous patients with RPDs. This course is designed to provide students with the terminology, concepts and principles necessary for case selection, design, construction of, and patient therapy with conventional RPDs. Upon completion of this course students will have the necessary didactic knowledge to successfully understand and treat removable partial denture cases in conjunction with the clinical faculty during their third and fourth years." +ANTH-595,"The last 40 years have been a period of unparalleled reappraisal of archaeological theory and practice. We will consider the development of anthropological archaeology in terms of the questions archaeologists have asked, the ideas that have guided those questions, and the procedures that have been used to investigate them. 
Our discussion will focus on the intellectual heritage of normative or cultural-historical archaeology and its successors in terms of changing archaeological goals and theoretical frameworks, and their importance for contemporary research. The course will be organized around specific examples of archaeological research that have exemplified or challenged theoretical and methodological standards from culture history through the post-processual critique and the emergence of contemporary theorizations." +LALS-398,Topics vary. Please see the Spanish Department's website for the current course description: https://www.sas.upenn.edu/hispanic-portuguese-studies/pc +HCMG-868,"Issues surrounding global health have captivated the attention of the public sector and foundations for many decades. Many of their initiatives are realizing progress on the health-related Millennium Development Goals. The private sector has been less engaged in global health, but has a significant role to play in providing resources and in building infrastructure, human resource capacity and sustainability. This course explores entrepreneurial and other private sector solutions for health services and access to medicines and technologies in the developing world and other underserved areas. The course also encompasses study of creative programs to engage the private sector in development of vaccines and medicines for tropical and neglected diseases. Furthermore, the course addresses novel care systems and therapeutic strategies for the rapidly growing burden of chronic, non-communicable diseases in the developing world. In short, the course builds on the content of conventional global health courses from a managerial and entrepreneurial perspective. Learning is driven through readings, class discussion and a series of guest speakers representing a wide range of global health issues. Evaluation is largely based on a student group project." +ENVS-616,"How do government policy-makers make decisions about potential threats to human health and the environment in the face of scientific uncertainty? The course develops the concept of Risk Assessment from the publication of the 1983 National Research Council (NRC) report commonly known as the ""Red Book"" which was used to rank the initial hazardous waste sites under the Superfund program. Using a variety of teaching tools, including lectures, panel discussions, and case studies, the course examines how public policy decisions regarding environmental risk are made and how effective those decisions are at reducing risks to affected populations. The course focuses on the complex interaction of science, economics, politics, laws, and regulations in dealing with environmental and public health risks. The course will begin with a review of the policy process and methods used in evaluating human health and environmental risks, including the traditional steps in the risk assessment process, including quantitative and qualitative aspects of hazard identification, dose-response assessment, exposure assessment, and risk characterization. The course will then focus on how scientific uncertainty, risk perceptions, socio-economic disparities, risk communication, and politics influence environmental risk-based decision-making. Issues such as special populations (e.g., children, elderly, immune-compromised, woman of pregnancy age, etc.) must be considered when developing risk reduction strategies. 
The use of the ""precautionary principle"" will be discussed in the context of different types of environmental stressors (e.g., pesticides, chemicals, climate change, air pollution, water quality, and land use) and how this important controversial principle is applied differently in contrasting national and European risk management policies." +NSCI-402,"The capstone course of the NROTC curriculum, this course is intended to provide the midshipman with the ethical foundation and basic leadership tools to be effective junior officers. Topics such as responsibility, accoutability, ethics, the law of armed conflict, military law, division organization and training, and discipline are introduced through practical exercises, group discussion, and case studies." +OIDD-763,"Over the last several decades, energy markets have become some of the most dynamic markets of the world economy. Traditional fossil fuel and electricity markets have been seen a partial shift from heavy regulation to market-driven incentives, while rising environmental concerns have led to a wide array of new regulations and ""environmental markets"". The growth of renewable energy could be another source of rapid change, but brings with it a whole new set of technological and policy challenges. This changing energy landscape requires quick adaptation from energy companies, but also offers opportunities to turn regulations into new business. The objective of this course is to provide students with the economist's perspective on a broad range of topics that professionals in the energy industry will encounter. Topics include the effect of competition, market power and scarcity on energy prices, the impact of deregulation on electricity and fossil fuel markets, extraction and pricing of oil and gas, geopolitical uncertainty and risk in hydrocarbon investments, the environmental impact and policies related to the energy sector, environmental cap-and-trade markets, energy efficiency, the economics and finance of renewable energy, and recent developments in the transportation sector." +HEBR-053,"Development of the skills of reading, writing, and conversing in modern Hebrew on an intermediate level. Open to all students who have completed two semesters of Hebrew at Penn with a grade of B- or above and new students with equivalent competency." +FNAR-489,"The Spring semester seminar culminates in a senior thesis exhibition for each graduating student. These exhibitions have traditionally been held as a small group exhibition featuring a few students in one group, or as a larger end of semester exhibition with each student installing a series of works. The format of the exhibition will be determined during the fall semester by the senior faculty. The process of preparing, installing, and promoting the thesis exhibition is covered in detail throughout the semester. Students will work in their on-campus studio spaces to produce dynamic, thoughtful and well-crafted work that will serve as their final portfolio. They will present their portfolio of work during a final critique before graduation." +CHIN-722, +AFRC-271, +COML-592, +PSYC-170,"An overview of theories and research across the range of social behavior from intra-individual to the group level including the effects of culture, social environment, and groups on social interaction." +LALS-273,"This course focuses on immigrant communities in United States cities and suburbs. 
We survey migration and community experiences among a broad range of ethnic groups in different city and suburban neighborhoods. Class readings, discussions, and visits to Philadelphia neighborhoods explore themes including labor markets, commerce, housing, civil society, racial and ethnic relations, integration, refugee resettlement, and local, state, and national immigration policies. The class introduces students to a variety of social science approaches to studying social groups and neighborhoods, including readings in sociology, geography, anthropology, social history, and political science. Ultimately, the class aims to help students develop: 1) a broad knowledge of immigration and its impacts on U.S. cities and regions; 2) a comparative understanding of diverse migrant and receiving communities; and 3) familiarity with policies and institutions that seek to influence immigration and immigrant communities." +CLST-211,"A survey of the ethical theories debated by philosophers in Classical Greece and Rome. Plato, Aristotle, Stoics, Epicureans and Pyrrhonist Sceptics offer competing answers to the fundamental question raised by Socrates: How are we to live? That is, what is the best life for a human being? These philosophers generally agree that virtue is an important part of the best human life, but disagree about whether it is the greatest good (Epicurus, for example claims that pleasure is the highest good), or whether there are any other goods (for example, health, wealth, family). Much attention is paid in their theories to accounts of the virtues of character, and to the place of wisdom in the best sort of human life." +GPED-915,A comprehensive course to provide an in depth knowledge of all areas of Pediatric Dentistry +HSPV-747,"This seminar will address the history, theories, principles, and practices of the preservation and interpretation of archaeological sites and landscapes. The course will draw from a wide range of published material and experiences representing both national and international contexts. Topics will include site and landscape documentation and recording; site formation and degradation; intervention strategies including interpretation and display, legislation, policy, and contemporary issues of descendent community ownership and global heritage. Depending on the site, students will study specific issues leading toward the critique or development of a conservation and management program in accordance with guidelines established by ICOMOS/ ICAHM and other official agencies." +GRMN-203,"In this course, you will explore themes of cultural and historical significance in contemporary German-speaking countries through literature and nonfiction, through film and current event media coverage. Whether you wish to dive deeply into historical or political contexts, explore untranslatable cultural phenomena or the aesthetic rhythm and semantic complexity of the German language, GRMN 203 Texts and Contexts will inspire your imagination and deepen your understanding of German language, culture and literature. This is a required course for all courses taught in German at or above the 200 level." +BIOE-701, +TELU-430,"This course is designed to expand the students' basic language skills in Telugu in order to allow them to function adequately in a Telugu-speaking environment, to immerse themselves in the rich Andhra culture, and to accomplish a more advanced competency in an interesting foreign language. 
This course is also aimed at students planning to conduct scholarly research in Telugu history, literature or society, or humanities or social science fieldwork in Telugu speaking areas." +MATH-621,Continuation of Math 620. +CBE-460,"Dynamics and control of linear single-input, single output (SISO) systems in chemical processes. Laplace transforms. Dynamic responses of linear systems to various inputs. Frequency domain analysis. Feedback control strategies. Stability. Controller tuning. Advanced control, including cascade and feed forward control. Introduction to multiple-input, multiple-output (MIMO) control. Inverse response." +HIST-139,"A broad introduction to the history of Jewish civilization from its Biblical beginnings to the Middle Ages, with the main focus on the formative period of classical rabbinic Judaism and on the symbiotic relationship between Judaism, Christianity, and Islam." +BEPP-261,"This course is designed to introduce students to the role of risk assessment, risk perception and risk management in dealing with uncertain health, safety and environmental risks including the threat of terrorism. It explores the role of decision analysis as well as the use of scenarios for dealing with these problems. The course will evaluate the role of policy tools such as risk communication, economic incentives, insurance, regulation and private-public partnerships in developing strategies for managing these risks. A project will enable students to apply the concepts discussed in the course to a concrete problem." +ASAM-006,"This course will focus on race and ethnicty in the United States. We begin with a brief history of racial categorization and immigration to the U.S. The course continues by examining a number of topics including racial and ethnic identity, interracial and interethnic friendships and marriage, racial attitudes, mass media iages, residential segregation, educational stritification, and labot market outcomes. The course will inlcude discussions of African Americans, Whites, Hispanics, Asian Ameriacns, and multiracials." +MGMT-231,"This advanced course on entrepreneurship focuses on developing a validated opportunity or concept into a venture that is ready for seed financing and/or launching the product or service. Participants in this course must previously have developed a validated opportunity, either in a previous course or through independent efforts. Students may participate as a team of up to three people. Ideally, participants are commited to pursuing their opportunity commercially, or at least to seriously explore that possibility. The course provides a practical guidance for developing the product or service, forming the entity, raising capital building the team, establishing partnerships, and sourcing professional services. After completing the course, you will be ""pitch ready"" - whether submitting to campus venture competitions or to outside investors. Most coursework is focused on applying concepts and frameworks to project tasks in developing the venture. Students must have successfully completed MGMT 801 before enrolling in this course. Students must have successfully completed MGMT801 before enrolling in this course. Format: Readings, discussion, and developing an implementation plan for a real venture." +ECON-242,"This course covers topics of interest in macroeconomics. Two sections are offered: Markets with Frictions. 
This course studies allocations in markets with frictions, as described by the difficulty in finding a trading partner, private information problems, commitment issues, and so on. Applications to labor markets, monetary economics, the marriage market will be discused. The main technical tool will be search theory, but a liberal amount of calculus and other mathematics will be used. Numerical Methods for Macroeconmists. This course will study some of the numerical methods that are used in modern macroeconomics. This class will learn how to solve nonlinear equations, difference equations, interpolate functions, smooth data, and conduct Monte Carlo simulations on the computer. This will be done while studying economic problems, such as the determination of labor supply, economic growth and business cycle analysis. Calculus is an integral part of the course and some elementary probability theory will be drawn upon. The MATLAB programming language will be used." +LAW-968, +HCMG-215,"This course provides an overview of the management, economic and policy issues facing the pharmaceutical and biotechnology industries. The course perspective is global, but with emphasis on the U.S. as the largest and and most profitable market. Critical issues we will examine include: R&D intensive cost structure and rapid technological change; biotech startups and alliances with the pharma industry; pricing and promotion in a complex global marketplace where customers include governments and insurers, as well as physicians, pharmacists and consumers. We use Wharton and industry experts from various disciplines to address these issues." +QUEC-120,"Quechua, the language of the Inca Empire and still spoken by approximately 6 million people throughout the Andes, is the most popular indigenous language of South America. The program focuses on the development of written and oral communicative abilities in Quechua through an interactive activity-based approach. Course includes an introduction to Quechua and Andean culture. Students will participate in pair, small-group and whole-class activities. Assessment is based on both students ability to use the language in written and oral tasks and understanding the language and culture. This beginning level Quechua course is designed for students who have little or no previous knowledge of the language. Lectures will be delivered in English and Quechua" +PHIL-029,This is an introductory philosophy course that uses philosophical tools to understand and answer questions that arise in and about sports. Is there a principled basis for determining which methods of performance enhancement are acceptable? Developing a framework to answer this question will take us through: 1) questions about rules: what is their point in sports and what are appropriate reasons to change them; 2) questions about the point of participation in a sport; 3) questions about the kinds of virtues sports participants can demonstrate; and 4) questions about integrity of participants and a sport itself. A related set of questions concerns the appropriate competitors in sporting events: Should competition be restricted to single sex categories; Should competition be divided into disabled and non-disabled categories? +PHYS-140,"The topics of this calculus-based course are: Classical laws of motions; interactions between particles; conservation laws and symmetry principles; particle and rigid body motion; gravitation, harmonic motion, and applications of mechanics to real-world problems. Engineering students only. 
Prereqisite: For Engineering students whose course of study does not require a physics laboratory course. Those who are enrolled in a dual degree program with the college must register for the lab-based version of this course, PHYS 150." +GPRD-959, +LAW-946, +LAW-904, +OIDD-397,"This course is highly recommended for students with an interest in pursuing careers in: (1) retailing and retail supply chains; (2) businesses like banking, consulting, information technology, that provides services to retail firms; (3) manufacturing companies (e.g. P&G) that sell their products through retail firms. Retailing is a huge industry that has consistently been an incubator for new business concepts. This course will examine how retailers understand their customers' preferences and respond with appropriate products through effective supply chain management. Supply chain management is vitally important for retailers and has been noted as the source of success for many retailers such as Wal-mart and Home Depot, and as an inhibitor of success for e-tailers as they struggle with delivery reliability. See M. L. Fisher, A. Raman and A. McClelland, ""Rocket Science Retailing is Coming - Are You Ready?,"" Harvard Business Review, July/August 2000 for related research." +ARTH-235,"A one-semester survey of Islamic art and architecture which examines visual culture as it functions within the larger sphere of Islamic culture in general. Particular attention will be given to relationships between visual culture and literature, using specific case studies, sites or objects which may be related to various branches of Islamic literature, including historical, didactic, philosophical writings, poetry and religious texts. All primary sources are available in English translation." +CIS-120,"A fast-paced introduction to the fundamental concepts of programming and software design. This course assumes some previous programming experience, at the level of a high school computer science class or CIS110. (If you got at least 4 in the AP Computer Science A or AB exam, you will do great.) No specific programming language background is assumed: basic experience with any language (for instance Java, C, C++, VB, Python, Perl, or Scheme) is fine. If you have never programmed before, you should take CIS 110 first." +CIS-121,"This is a course about Algorithms and Data Structures using the JAVA programming language. We introduce the basic concepts about complexity of an algorithm and methods on how to compute the running time of algorithms. Then, we describe data structures like stacks, queues, maps, trees, and graphs, and we construct efficient algorithms based on these representations. The course builds upon existing implementations of basic data structures in JAVA and extends them for the structures like trees, studying the performance of operations on such structures, and theiefficiency when used in real-world applications. A large project introducing students to the challenges of software engineering concludes the course." +CIS-262,"This course explores questions fundamental to computer science such as which problems cannot be solved by computers, can we formalize computing as a mathematical concept without relying upon the specifics of programming languages and computing platforms, and which problems can be solved efficiently. The topics include finite automata and regular languages, context-free grammars and pushdown automata, Turing machines and undecidability, tractability and NP-completeness. 
The course emphasizes rigorous mathematical reasoning as well as connections to practical computing problems such as test processing, parsing, XML query languages, and program verification." diff --git a/backend/tests/plan/test_api.py b/backend/tests/plan/test_api.py index 865191f6c..4f0d1e1bd 100644 --- a/backend/tests/plan/test_api.py +++ b/backend/tests/plan/test_api.py @@ -1,902 +1,902 @@ -from django.contrib.auth.models import User -from django.db.models.signals import post_save -from django.test import TestCase -from django.urls import reverse -from options.models import Option -from rest_framework.test import APIClient - -from alert.management.commands.recomputestats import recompute_precomputed_fields -from alert.models import AddDropPeriod -from courses.models import Instructor, PreNGSSRequirement, Section -from courses.util import invalidate_current_semester_cache, set_meetings -from plan.models import Schedule -from review.models import Review -from tests.courses.util import create_mock_async_class, create_mock_data - - -TEST_SEMESTER = "2021C" -assert TEST_SEMESTER >= "2021C", "Some tests assume TEST_SEMESTER >= 2021C" - - -def set_semester(): - post_save.disconnect( - receiver=invalidate_current_semester_cache, - sender=Option, - dispatch_uid="invalidate_current_semester_cache", - ) - Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() - AddDropPeriod(semester=TEST_SEMESTER).save() - - -class CreditUnitFilterTestCase(TestCase): - def setUp(self): - self.course, self.section = create_mock_data("CIS-120-001", TEST_SEMESTER) - _, self.section2 = create_mock_data("CIS-120-201", TEST_SEMESTER) - self.section.credits = 1.0 - self.section2.credits = 0.0 - self.section.save() - self.section2.save() - self.client = APIClient() - set_semester() - - def test_include_course(self): - response = self.client.get(reverse("courses-search", args=["current"]), {"cu": "1.0"}) - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - - def test_include_multiple(self): - response = self.client.get(reverse("courses-search", args=["current"]), {"cu": "0.5,1.0"}) - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - - def test_exclude_course(self): - response = self.client.get(reverse("courses-search", args=["current"]), {"cu": ".5,1.5"}) - self.assertEqual(200, response.status_code) - self.assertEqual(0, len(response.data)) - - -class PreNGSSRequirementFilterTestCase(TestCase): - def setUp(self): - self.course, self.section = create_mock_data("CIS-120-001", TEST_SEMESTER) - self.math, self.math1 = create_mock_data("MATH-114-001", TEST_SEMESTER) - self.different_math, self.different_math1 = create_mock_data( - "MATH-116-001", ("2019A" if TEST_SEMESTER == "2019C" else "2019C") - ) - self.req = PreNGSSRequirement(semester=TEST_SEMESTER, code="REQ", school="SAS") - self.req.save() - self.req.courses.add(self.math) - print(self.req.satisfying_courses) - self.client = APIClient() - set_semester() - - def test_return_all_courses(self): - response = self.client.get(reverse("courses-search", args=["current"])) - self.assertEqual(200, response.status_code) - self.assertEqual(2, len(response.data)) - - def test_filter_for_req(self): - response = self.client.get( - reverse("courses-search", args=["current"]), {"pre_ngss_requirements": "REQ@SAS"} - ) - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEqual("MATH-114", response.data[0]["id"]) - - def 
test_filter_for_req_dif_sem(self): - req2 = PreNGSSRequirement( - semester=("2019A" if TEST_SEMESTER == "2019C" else "2019C"), code="REQ", school="SAS" - ) - req2.save() - req2.courses.add(self.different_math) - response = self.client.get( - reverse("courses-search", args=["current"]), {"pre_ngss_requirements": "REQ@SAS"} - ) - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEqual("MATH-114", response.data[0]["id"]) - self.assertEqual(TEST_SEMESTER, response.data[0]["semester"]) - - def test_multi_req(self): - course3, section3 = create_mock_data("CIS-240-001", TEST_SEMESTER) - req2 = PreNGSSRequirement(semester=TEST_SEMESTER, code="REQ2", school="SEAS") - req2.save() - req2.courses.add(course3) - - response = self.client.get( - reverse("courses-search", args=["current"]), - {"pre_ngss_requirements": "REQ@SAS,REQ2@SEAS"}, - ) - self.assertEqual(0, len(response.data)) - - def test_double_count_req(self): - req2 = PreNGSSRequirement(semester=TEST_SEMESTER, code="REQ2", school="SEAS") - req2.save() - req2.courses.add(self.math) - response = self.client.get( - reverse("courses-search", args=["current"]), - {"pre_ngss_requirements": "REQ@SAS,REQ2@SEAS"}, - ) - self.assertEqual(1, len(response.data)) - self.assertEqual("MATH-114", response.data[0]["id"]) - - -class IsOpenFilterTestCase(TestCase): - def setUp(self): - - _, self.cis_160_001 = create_mock_data( - code="CIS-160-001", semester=TEST_SEMESTER, meeting_days="TR" - ) - - _, self.cis_160_201 = create_mock_data( - code="CIS-160-201", semester=TEST_SEMESTER, meeting_days="M" - ) - self.cis_160_201.activity = "REC" - self.cis_160_201.save() - - _, self.cis_160_202 = create_mock_data( - code="CIS-160-202", semester=TEST_SEMESTER, meeting_days="W" - ) - self.cis_160_202.activity = "REC" - self.cis_160_202.save() - - def save_all(): - for section in [self.cis_160_001, self.cis_160_201, self.cis_160_202]: - section.save() - - self.save_all = save_all - self.all_codes = {"CIS-160"} - self.non_open_statuses = [ - status[0] for status in Section.STATUS_CHOICES if status[0] not in ["O"] - ] - - recompute_precomputed_fields() - - self.client = APIClient() - set_semester() - - def test_lec_open_all_rec_open(self): - response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"is-open": ""}) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_lec_open_one_rec_not_open(self): - for status in self.non_open_statuses: - self.cis_160_202.status = status - self.save_all() - - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"is_open": ""} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_lec_open_all_rec_not_open(self): - for status in self.non_open_statuses: - self.cis_160_202.status = status - self.cis_160_201.status = status - self.save_all() - - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"is_open": ""} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 0) - self.assertEqual({res["id"] for res in response.data}, set()) - - def test_rec_open_lec_not_open(self): - for status in self.non_open_statuses: - self.cis_160_001.status = status - self.save_all() - - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), 
{"is_open": ""} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 0) - self.assertEqual({res["id"] for res in response.data}, set()) - - def test_lec_not_open_all_rec_not_open(self): - for status in self.non_open_statuses: - self.cis_160_202.status = status - self.cis_160_201.status = status - self.cis_160_001.status = status - self.save_all() - - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"is_open": ""} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 0) - self.assertEqual({res["id"] for res in response.data}, set()) - - -class CourseReviewAverageTestCase(TestCase): - def setUp(self): - self.course, self.section = create_mock_data("CIS-120-001", TEST_SEMESTER) - _, self.section2 = create_mock_data("CIS-120-002", TEST_SEMESTER) - self.instructor = Instructor(name="Person1") - self.instructor.save() - self.rev1 = Review( - section=create_mock_data("CIS-120-003", "2005C")[1], - instructor=self.instructor, - responses=100, - ) - self.rev1.save() - self.rev1.set_averages( - { - "course_quality": 4, - "instructor_quality": 4, - "difficulty": 4, - } - ) - self.instructor2 = Instructor(name="Person2") - self.instructor2.save() - self.rev2 = Review( - section=create_mock_data("CIS-120-002", "2015A")[1], - instructor=self.instructor2, - responses=100, - ) - self.rev2.instructor = self.instructor2 - self.rev2.save() - self.rev2.set_averages( - { - "course_quality": 2, - "instructor_quality": 2, - "difficulty": 2, - } - ) - - self.section.instructors.add(self.instructor) - self.section2.instructors.add(self.instructor2) - self.client = APIClient() - set_semester() - - def test_course_average(self): - response = self.client.get(reverse("courses-detail", args=["current", "CIS-120"])) - self.assertEqual(200, response.status_code) - self.assertEqual(3, response.data["course_quality"]) - self.assertEqual(3, response.data["instructor_quality"]) - self.assertEqual(3, response.data["difficulty"]) - - def test_section_reviews(self): - response = self.client.get(reverse("courses-detail", args=["current", "CIS-120"])) - self.assertEqual(200, response.status_code) - self.assertEqual(2, len(response.data["sections"])) - - def test_section_no_duplicates(self): - instructor3 = Instructor(name="person3") - instructor3.save() - rev3 = Review( - section=self.rev2.section, - instructor=instructor3, - responses=100, - ) - rev3.save() - rev3.set_averages( - { - "course_quality": 1, - "instructor_quality": 1, - "difficulty": 1, - } - ) - self.section2.instructors.add(instructor3) - response = self.client.get(reverse("courses-detail", args=["current", "CIS-120"])) - self.assertEqual(200, response.status_code) - self.assertEqual(2, len(response.data["sections"])) - self.assertEqual( - 1.5, response.data["sections"][1]["course_quality"], response.data["sections"][1] - ) - - def test_filter_courses_by_review_included(self): - response = self.client.get( - reverse("courses-search", args=["current"]), {"difficulty": "2.5-3.5"} - ) - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - - def test_filter_courses_by_review_excluded(self): - response = self.client.get( - reverse("courses-search", args=["current"]), {"difficulty": "0-2"} - ) - self.assertEqual(200, response.status_code) - self.assertEqual(0, len(response.data)) - - -class DayFilterTestCase(TestCase): - def setUp(self): - _, self.cis_120_001 = create_mock_data("CIS-120-001", TEST_SEMESTER) # days MWF - - _, 
self.cis_120_002 = create_mock_data( - code="CIS-120-002", semester=TEST_SEMESTER, meeting_days="TR" - ) - - _, self.cis_160_001 = create_mock_data( - code="CIS-160-001", semester=TEST_SEMESTER, meeting_days="TR" - ) - - _, self.cis_160_201 = create_mock_data( - code="CIS-160-201", semester=TEST_SEMESTER, meeting_days="M" - ) - self.cis_160_201.activity = "REC" - self.cis_160_201.save() - - _, self.cis_160_202 = create_mock_data( - code="CIS-160-202", semester=TEST_SEMESTER, meeting_days="W" - ) - self.cis_160_202.activity = "REC" - self.cis_160_202.save() - - _, self.cis_121_001 = create_mock_data(code="CIS-121-001", semester=TEST_SEMESTER) - set_meetings( - self.cis_121_001, - [ - { - "building_code": "LLAB", - "room_code": "10", - "days": "MT", - "begin_time_24": 900, - "begin_time": "9:00 AM", - "end_time_24": 1000, - "end_time": "10:00 AM", - }, - { - "building_code": "LLAB", - "room_code": "10", - "days": "WR", - "begin_time_24": 1330, - "begin_time": "1:30 PM", - "end_time_24": 1430, - "end_time": "2:30 PM", - }, - ], - ) - - _, self.cis_262_001 = create_mock_async_class(code="CIS-262-001", semester=TEST_SEMESTER) - - recompute_precomputed_fields() - - self.all_codes = {"CIS-120", "CIS-160", "CIS-121", "CIS-262"} - - self.client = APIClient() - set_semester() - - def test_only_async(self): - response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": ""}) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) # only async - - def test_all_days(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"days": "MTWRFSU"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_illegal_characters(self): - response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": "M-R"}) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_lec_no_rec(self): - response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": "TR"}) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 2) - self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) - - def test_rec_no_lec(self): - response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": "MW"}) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) - - def test_lec_and_rec(self): - response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": "TWR"}) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 3) - self.assertEqual({res["id"] for res in response.data}, {"CIS-160", "CIS-120", "CIS-262"}) - - def test_partial_match(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"days": "T"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) - - def test_contains_rec_no_sec(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"days": "W"}, - ) - 
self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) - - def test_partial_multi_meeting_match(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"days": "MT"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) - - def test_full_multi_meeting_match(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"days": "MTWR"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 4) - self.assertEqual( - {res["id"] for res in response.data}, {"CIS-120", "CIS-121", "CIS-160", "CIS-262"} - ) - - -class TimeFilterTestCase(TestCase): - def setUp(self): - _, self.cis_120_001 = create_mock_data("CIS-120-001", TEST_SEMESTER) # time 11.0-12.0 - - _, self.cis_120_002 = create_mock_data( - code="CIS-120-002", semester=TEST_SEMESTER, start=1200, end=1330 - ) - - _, self.cis_160_001 = create_mock_data( - code="CIS-160-001", semester=TEST_SEMESTER, start=500, end=630 - ) - - _, self.cis_160_201 = create_mock_data( - code="CIS-160-201", semester=TEST_SEMESTER, start=1100, end=1200 - ) - self.cis_160_201.activity = "REC" - self.cis_160_201.save() - - _, self.cis_160_202 = create_mock_data( - code="CIS-160-202", semester=TEST_SEMESTER, start=1400, end=1500 - ) - self.cis_160_202.activity = "REC" - self.cis_160_202.save() - - _, self.cis_121_001 = create_mock_data(code="CIS-121-001", semester=TEST_SEMESTER) - set_meetings( - self.cis_121_001, - [ - { - "building_code": "LLAB", - "room_code": "10", - "days": "MT", - "begin_time_24": 900, - "begin_time": "9:00 AM", - "end_time_24": 1000, - "end_time": "10:00 AM", - }, - { - "building_code": "LLAB", - "room_code": "10", - "days": "WR", - "begin_time_24": 1330, - "begin_time": "1:30 PM", - "end_time_24": 1430, - "end_time": "2:30 PM", - }, - ], - ) - - _, self.cis_262_001 = create_mock_async_class(code="CIS-262-001", semester=TEST_SEMESTER) - - recompute_precomputed_fields() - - self.all_codes = {"CIS-120", "CIS-160", "CIS-121", "CIS-262"} - - self.client = APIClient() - set_semester() - - def test_empty_time_all(self): - response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"time": ""}) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_whole_day(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "0.0-23.59"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_no_dashes(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "11.00"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_too_many_dashes(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "-1.00-3.00"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_non_numeric(self): 
- response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "11.00am-3.00pm"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_crossover_times(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "15.0-2.0"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) # only async - - def test_start_end_same(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "5.5-5.5"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) # only async - - def test_lec_no_rec(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "4.59-6.30"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) # only async - - def test_one_match(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "11.30-13.30"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 2) - self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) - - def test_lec_and_rec(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "5.0-12.0"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 3) - self.assertEqual({res["id"] for res in response.data}, {"CIS-160", "CIS-120", "CIS-262"}) - - def test_contains_parts_of_two_sec(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"time": "11.30-13.0"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) - - def test_contains_rec_no_sec(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"time": "11.30-16"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 2) - self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) - - def test_unbounded_right(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"time": "11.30-"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 2) - self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) - - def test_unbounded_left(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"time": "-12.00"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 3) - self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-160", "CIS-262"}) - - def test_multi_meeting_match(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"time": "9.00-15.00"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 3) - self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-121", "CIS-262"}) - - -class DayTimeFilterTestCase(TestCase): - def setUp(self): - _, self.cis_120_001 = create_mock_data( - 
"CIS-120-001", TEST_SEMESTER - ) # time 11.0-12.0, days MWF - - _, self.cis_120_002 = create_mock_data( - code="CIS-120-002", semester=TEST_SEMESTER, start=1200, end=1330, meeting_days="TR" - ) - - _, self.cis_160_001 = create_mock_data( - code="CIS-160-001", semester=TEST_SEMESTER, start=500, end=630, meeting_days="TR" - ) - - _, self.cis_160_201 = create_mock_data( - code="CIS-160-201", semester=TEST_SEMESTER, start=1100, end=1200, meeting_days="M" - ) - self.cis_160_201.activity = "REC" - self.cis_160_201.save() - - _, self.cis_160_202 = create_mock_data( - code="CIS-160-202", semester=TEST_SEMESTER, start=1400, end=1500, meeting_days="W" - ) - self.cis_160_202.activity = "REC" - self.cis_160_202.save() - - _, self.cis_121_001 = create_mock_data(code="CIS-121-001", semester=TEST_SEMESTER) - set_meetings( - self.cis_121_001, - [ - { - "building_code": "LLAB", - "room_code": "10", - "days": "MT", - "begin_time_24": 900, - "begin_time": "9:00 AM", - "end_time_24": 1000, - "end_time": "10:00 AM", - }, - { - "building_code": "LLAB", - "room_code": "10", - "days": "WR", - "begin_time_24": 1330, - "begin_time": "1:30 PM", - "end_time_24": 1430, - "end_time": "2:30 PM", - }, - ], - ) - - _, self.cis_262_001 = create_mock_async_class(code="CIS-262-001", semester=TEST_SEMESTER) - - recompute_precomputed_fields() - - self.all_codes = {"CIS-120", "CIS-160", "CIS-121", "CIS-262"} - - self.client = APIClient() - set_semester() - - def test_all_match(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "0-23.59", "days": "MTWRFSU"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_days_match_not_time(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"time": "1.00-2.00", "days": "MTWRFSU"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) - - def test_time_matches_not_days(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "1.00-", "days": "F"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) - - def test_days_time_partial_match(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "12.0-15.0", "days": "TWR"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 2) - self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) - - def test_multi_meeting_partial_match(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "9.00-10.00", "days": "MTWR"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) - - def test_multi_meeting_full_match(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), {"time": "9.00-14.30", "days": "MTWR"} - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 3) - self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-121", "CIS-262"}) - - -class ScheduleFilterTestCase(TestCase): - def setUp(self): - _, self.cis_120_001 = create_mock_data( - "CIS-120-001", 
TEST_SEMESTER - ) # time 11.0-12.0, days MWF - - _, self.cis_120_002 = create_mock_data( - code="CIS-120-002", semester=TEST_SEMESTER, start=1200, end=1330, meeting_days="TR" - ) - - _, self.cis_160_001 = create_mock_data( - code="CIS-160-001", semester=TEST_SEMESTER, start=500, end=630, meeting_days="TR" - ) - - _, self.cis_160_201 = create_mock_data( - code="CIS-160-201", semester=TEST_SEMESTER, start=1100, end=1200, meeting_days="M" - ) - self.cis_160_201.activity = "REC" - self.cis_160_201.save() - - _, self.cis_160_202 = create_mock_data( - code="CIS-160-202", semester=TEST_SEMESTER, start=1400, end=1500, meeting_days="W" - ) - self.cis_160_202.activity = "REC" - self.cis_160_202.save() - - _, self.cis_121_001 = create_mock_data(code="CIS-121-001", semester=TEST_SEMESTER) - set_meetings( - self.cis_121_001, - [ - { - "building_code": "LLAB", - "room_code": "10", - "days": "MT", - "begin_time_24": 900, - "begin_time": "9:00 AM", - "end_time_24": 1000, - "end_time": "10:00 AM", - }, - { - "building_code": "LLAB", - "room_code": "10", - "days": "WR", - "begin_time_24": 1330, - "begin_time": "1:30 PM", - "end_time_24": 1430, - "end_time": "2:30 PM", - }, - ], - ) - - _, self.cis_262_001 = create_mock_async_class(code="CIS-262-001", semester=TEST_SEMESTER) - - recompute_precomputed_fields() - - self.all_codes = {"CIS-120", "CIS-160", "CIS-121", "CIS-262"} - - self.user = User.objects.create_user( - username="jacob", email="jacob@example.com", password="top_secret" - ) - - self.empty_schedule = Schedule( - person=self.user, - semester=TEST_SEMESTER, - name="Empty Schedule", - ) - self.empty_schedule.save() - - self.all_available_schedule = Schedule( - person=self.user, - semester=TEST_SEMESTER, - name="All Classes Available Schedule", - ) - self.all_available_schedule.save() - self.all_available_schedule.sections.set([self.cis_120_001]) - - self.only_120_262_available_schedule = Schedule( - person=self.user, - semester=TEST_SEMESTER, - name="Only CIS-120 and CIS-262 Available Schedule", - ) - self.only_120_262_available_schedule.save() - self.only_120_262_available_schedule.sections.set([self.cis_120_001, self.cis_121_001]) - - self.only_262_available_schedule = Schedule( - person=self.user, - semester=TEST_SEMESTER, - name="Only CIS-262 Available Schedule", - ) - self.only_262_available_schedule.save() - self.only_262_available_schedule.sections.set( - [self.cis_120_001, self.cis_120_002, self.cis_121_001] - ) - - self.client = APIClient() - set_semester() - - def test_not_authenticated(self): - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"schedule-fit": str(self.only_262_available_schedule.id)}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_different_authenticated(self): - User.objects.create_user( - username="charley", email="charley@example.com", password="top_secret" - ) - client2 = APIClient() - client2.login(username="charley", password="top_secret") - response = client2.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"schedule-fit": str(self.only_262_available_schedule.id)}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_invalid_schedule(self): - self.client.login(username="jacob", password="top_secret") - response = self.client.get( - 
reverse("courses-search", args=[TEST_SEMESTER]), - {"schedule-fit": "invalid"}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_empty_schedule(self): - self.client.login(username="jacob", password="top_secret") - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"schedule-fit": str(self.empty_schedule.id)}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_all_available_schedule(self): - self.client.login(username="jacob", password="top_secret") - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"schedule-fit": str(self.all_available_schedule.id)}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), len(self.all_codes)) - self.assertEqual({res["id"] for res in response.data}, self.all_codes) - - def test_only_120_262_available_schedule(self): - self.client.login(username="jacob", password="top_secret") - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"schedule-fit": str(self.only_120_262_available_schedule.id)}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 2) - self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) - - def test_only_262_available_schedule(self): - self.client.login(username="jacob", password="top_secret") - response = self.client.get( - reverse("courses-search", args=[TEST_SEMESTER]), - {"schedule-fit": str(self.only_262_available_schedule.id)}, - ) - self.assertEqual(response.status_code, 200) - self.assertEqual(len(response.data), 1) - self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) +from django.contrib.auth.models import User +from django.db.models.signals import post_save +from django.test import TestCase +from django.urls import reverse +from options.models import Option +from rest_framework.test import APIClient + +from alert.management.commands.recomputestats import recompute_precomputed_fields +from alert.models import AddDropPeriod +from courses.models import Instructor, PreNGSSRequirement, Section +from courses.util import invalidate_current_semester_cache, set_meetings +from plan.models import Schedule +from review.models import Review +from tests.courses.util import create_mock_async_class, create_mock_data + + +TEST_SEMESTER = "2021C" +assert TEST_SEMESTER >= "2021C", "Some tests assume TEST_SEMESTER >= 2021C" + + +def set_semester(): + post_save.disconnect( + receiver=invalidate_current_semester_cache, + sender=Option, + dispatch_uid="invalidate_current_semester_cache", + ) + Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() + AddDropPeriod(semester=TEST_SEMESTER).save() + + +class CreditUnitFilterTestCase(TestCase): + def setUp(self): + self.course, self.section = create_mock_data("CIS-120-001", TEST_SEMESTER) + _, self.section2 = create_mock_data("CIS-120-201", TEST_SEMESTER) + self.section.credits = 1.0 + self.section2.credits = 0.0 + self.section.save() + self.section2.save() + self.client = APIClient() + set_semester() + + def test_include_course(self): + response = self.client.get(reverse("courses-search", args=["current"]), {"cu": "1.0"}) + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) 
+ + def test_include_multiple(self): + response = self.client.get(reverse("courses-search", args=["current"]), {"cu": "0.5,1.0"}) + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + + def test_exclude_course(self): + response = self.client.get(reverse("courses-search", args=["current"]), {"cu": ".5,1.5"}) + self.assertEqual(200, response.status_code) + self.assertEqual(0, len(response.data)) + + +class PreNGSSRequirementFilterTestCase(TestCase): + def setUp(self): + self.course, self.section = create_mock_data("CIS-120-001", TEST_SEMESTER) + self.math, self.math1 = create_mock_data("MATH-114-001", TEST_SEMESTER) + self.different_math, self.different_math1 = create_mock_data( + "MATH-116-001", ("2019A" if TEST_SEMESTER == "2019C" else "2019C") + ) + self.req = PreNGSSRequirement(semester=TEST_SEMESTER, code="REQ", school="SAS") + self.req.save() + self.req.courses.add(self.math) + print(self.req.satisfying_courses) + self.client = APIClient() + set_semester() + + def test_return_all_courses(self): + response = self.client.get(reverse("courses-search", args=["current"])) + self.assertEqual(200, response.status_code) + self.assertEqual(2, len(response.data)) + + def test_filter_for_req(self): + response = self.client.get( + reverse("courses-search", args=["current"]), {"pre_ngss_requirements": "REQ@SAS"} + ) + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEqual("MATH-114", response.data[0]["id"]) + + def test_filter_for_req_dif_sem(self): + req2 = PreNGSSRequirement( + semester=("2019A" if TEST_SEMESTER == "2019C" else "2019C"), code="REQ", school="SAS" + ) + req2.save() + req2.courses.add(self.different_math) + response = self.client.get( + reverse("courses-search", args=["current"]), {"pre_ngss_requirements": "REQ@SAS"} + ) + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEqual("MATH-114", response.data[0]["id"]) + self.assertEqual(TEST_SEMESTER, response.data[0]["semester"]) + + def test_multi_req(self): + course3, section3 = create_mock_data("CIS-240-001", TEST_SEMESTER) + req2 = PreNGSSRequirement(semester=TEST_SEMESTER, code="REQ2", school="SEAS") + req2.save() + req2.courses.add(course3) + + response = self.client.get( + reverse("courses-search", args=["current"]), + {"pre_ngss_requirements": "REQ@SAS,REQ2@SEAS"}, + ) + self.assertEqual(0, len(response.data)) + + def test_double_count_req(self): + req2 = PreNGSSRequirement(semester=TEST_SEMESTER, code="REQ2", school="SEAS") + req2.save() + req2.courses.add(self.math) + response = self.client.get( + reverse("courses-search", args=["current"]), + {"pre_ngss_requirements": "REQ@SAS,REQ2@SEAS"}, + ) + self.assertEqual(1, len(response.data)) + self.assertEqual("MATH-114", response.data[0]["id"]) + + +class IsOpenFilterTestCase(TestCase): + def setUp(self): + + _, self.cis_160_001 = create_mock_data( + code="CIS-160-001", semester=TEST_SEMESTER, meeting_days="TR" + ) + + _, self.cis_160_201 = create_mock_data( + code="CIS-160-201", semester=TEST_SEMESTER, meeting_days="M" + ) + self.cis_160_201.activity = "REC" + self.cis_160_201.save() + + _, self.cis_160_202 = create_mock_data( + code="CIS-160-202", semester=TEST_SEMESTER, meeting_days="W" + ) + self.cis_160_202.activity = "REC" + self.cis_160_202.save() + + def save_all(): + for section in [self.cis_160_001, self.cis_160_201, self.cis_160_202]: + section.save() + + self.save_all = save_all + self.all_codes = {"CIS-160"} + self.non_open_statuses = [ 
+ status[0] for status in Section.STATUS_CHOICES if status[0] not in ["O"] + ] + + recompute_precomputed_fields() + + self.client = APIClient() + set_semester() + + def test_lec_open_all_rec_open(self): + response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"is-open": ""}) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_lec_open_one_rec_not_open(self): + for status in self.non_open_statuses: + self.cis_160_202.status = status + self.save_all() + + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"is_open": ""} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_lec_open_all_rec_not_open(self): + for status in self.non_open_statuses: + self.cis_160_202.status = status + self.cis_160_201.status = status + self.save_all() + + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"is_open": ""} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 0) + self.assertEqual({res["id"] for res in response.data}, set()) + + def test_rec_open_lec_not_open(self): + for status in self.non_open_statuses: + self.cis_160_001.status = status + self.save_all() + + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"is_open": ""} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 0) + self.assertEqual({res["id"] for res in response.data}, set()) + + def test_lec_not_open_all_rec_not_open(self): + for status in self.non_open_statuses: + self.cis_160_202.status = status + self.cis_160_201.status = status + self.cis_160_001.status = status + self.save_all() + + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"is_open": ""} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 0) + self.assertEqual({res["id"] for res in response.data}, set()) + + +class CourseReviewAverageTestCase(TestCase): + def setUp(self): + self.course, self.section = create_mock_data("CIS-120-001", TEST_SEMESTER) + _, self.section2 = create_mock_data("CIS-120-002", TEST_SEMESTER) + self.instructor = Instructor(name="Person1") + self.instructor.save() + self.rev1 = Review( + section=create_mock_data("CIS-120-003", "2005C")[1], + instructor=self.instructor, + responses=100, + ) + self.rev1.save() + self.rev1.set_averages( + { + "course_quality": 4, + "instructor_quality": 4, + "difficulty": 4, + } + ) + self.instructor2 = Instructor(name="Person2") + self.instructor2.save() + self.rev2 = Review( + section=create_mock_data("CIS-120-002", "2015A")[1], + instructor=self.instructor2, + responses=100, + ) + self.rev2.instructor = self.instructor2 + self.rev2.save() + self.rev2.set_averages( + { + "course_quality": 2, + "instructor_quality": 2, + "difficulty": 2, + } + ) + + self.section.instructors.add(self.instructor) + self.section2.instructors.add(self.instructor2) + self.client = APIClient() + set_semester() + + def test_course_average(self): + response = self.client.get(reverse("courses-detail", args=["current", "CIS-120"])) + self.assertEqual(200, response.status_code) + self.assertEqual(3, response.data["course_quality"]) + self.assertEqual(3, response.data["instructor_quality"]) + self.assertEqual(3, response.data["difficulty"]) + + def 
test_section_reviews(self): + response = self.client.get(reverse("courses-detail", args=["current", "CIS-120"])) + self.assertEqual(200, response.status_code) + self.assertEqual(2, len(response.data["sections"])) + + def test_section_no_duplicates(self): + instructor3 = Instructor(name="person3") + instructor3.save() + rev3 = Review( + section=self.rev2.section, + instructor=instructor3, + responses=100, + ) + rev3.save() + rev3.set_averages( + { + "course_quality": 1, + "instructor_quality": 1, + "difficulty": 1, + } + ) + self.section2.instructors.add(instructor3) + response = self.client.get(reverse("courses-detail", args=["current", "CIS-120"])) + self.assertEqual(200, response.status_code) + self.assertEqual(2, len(response.data["sections"])) + self.assertEqual( + 1.5, response.data["sections"][1]["course_quality"], response.data["sections"][1] + ) + + def test_filter_courses_by_review_included(self): + response = self.client.get( + reverse("courses-search", args=["current"]), {"difficulty": "2.5-3.5"} + ) + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + + def test_filter_courses_by_review_excluded(self): + response = self.client.get( + reverse("courses-search", args=["current"]), {"difficulty": "0-2"} + ) + self.assertEqual(200, response.status_code) + self.assertEqual(0, len(response.data)) + + +class DayFilterTestCase(TestCase): + def setUp(self): + _, self.cis_120_001 = create_mock_data("CIS-120-001", TEST_SEMESTER) # days MWF + + _, self.cis_120_002 = create_mock_data( + code="CIS-120-002", semester=TEST_SEMESTER, meeting_days="TR" + ) + + _, self.cis_160_001 = create_mock_data( + code="CIS-160-001", semester=TEST_SEMESTER, meeting_days="TR" + ) + + _, self.cis_160_201 = create_mock_data( + code="CIS-160-201", semester=TEST_SEMESTER, meeting_days="M" + ) + self.cis_160_201.activity = "REC" + self.cis_160_201.save() + + _, self.cis_160_202 = create_mock_data( + code="CIS-160-202", semester=TEST_SEMESTER, meeting_days="W" + ) + self.cis_160_202.activity = "REC" + self.cis_160_202.save() + + _, self.cis_121_001 = create_mock_data(code="CIS-121-001", semester=TEST_SEMESTER) + set_meetings( + self.cis_121_001, + [ + { + "building_code": "LLAB", + "room_code": "10", + "days": "MT", + "begin_time_24": 900, + "begin_time": "9:00 AM", + "end_time_24": 1000, + "end_time": "10:00 AM", + }, + { + "building_code": "LLAB", + "room_code": "10", + "days": "WR", + "begin_time_24": 1330, + "begin_time": "1:30 PM", + "end_time_24": 1430, + "end_time": "2:30 PM", + }, + ], + ) + + _, self.cis_262_001 = create_mock_async_class(code="CIS-262-001", semester=TEST_SEMESTER) + + recompute_precomputed_fields() + + self.all_codes = {"CIS-120", "CIS-160", "CIS-121", "CIS-262"} + + self.client = APIClient() + set_semester() + + def test_only_async(self): + response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": ""}) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) # only async + + def test_all_days(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"days": "MTWRFSU"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_illegal_characters(self): + response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": "M-R"}) + 
self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_lec_no_rec(self): + response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": "TR"}) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 2) + self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) + + def test_rec_no_lec(self): + response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": "MW"}) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) + + def test_lec_and_rec(self): + response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"days": "TWR"}) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 3) + self.assertEqual({res["id"] for res in response.data}, {"CIS-160", "CIS-120", "CIS-262"}) + + def test_partial_match(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"days": "T"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) + + def test_contains_rec_no_sec(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"days": "W"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) + + def test_partial_multi_meeting_match(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"days": "MT"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) + + def test_full_multi_meeting_match(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"days": "MTWR"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 4) + self.assertEqual( + {res["id"] for res in response.data}, {"CIS-120", "CIS-121", "CIS-160", "CIS-262"} + ) + + +class TimeFilterTestCase(TestCase): + def setUp(self): + _, self.cis_120_001 = create_mock_data("CIS-120-001", TEST_SEMESTER) # time 11.0-12.0 + + _, self.cis_120_002 = create_mock_data( + code="CIS-120-002", semester=TEST_SEMESTER, start=1200, end=1330 + ) + + _, self.cis_160_001 = create_mock_data( + code="CIS-160-001", semester=TEST_SEMESTER, start=500, end=630 + ) + + _, self.cis_160_201 = create_mock_data( + code="CIS-160-201", semester=TEST_SEMESTER, start=1100, end=1200 + ) + self.cis_160_201.activity = "REC" + self.cis_160_201.save() + + _, self.cis_160_202 = create_mock_data( + code="CIS-160-202", semester=TEST_SEMESTER, start=1400, end=1500 + ) + self.cis_160_202.activity = "REC" + self.cis_160_202.save() + + _, self.cis_121_001 = create_mock_data(code="CIS-121-001", semester=TEST_SEMESTER) + set_meetings( + self.cis_121_001, + [ + { + "building_code": "LLAB", + "room_code": "10", + "days": "MT", + "begin_time_24": 900, + "begin_time": "9:00 AM", + "end_time_24": 1000, + "end_time": "10:00 AM", + }, + { + "building_code": "LLAB", + "room_code": "10", + "days": "WR", + "begin_time_24": 1330, + "begin_time": "1:30 PM", + "end_time_24": 1430, + "end_time": "2:30 PM", + }, + ], + ) + + _, self.cis_262_001 = 
create_mock_async_class(code="CIS-262-001", semester=TEST_SEMESTER) + + recompute_precomputed_fields() + + self.all_codes = {"CIS-120", "CIS-160", "CIS-121", "CIS-262"} + + self.client = APIClient() + set_semester() + + def test_empty_time_all(self): + response = self.client.get(reverse("courses-search", args=[TEST_SEMESTER]), {"time": ""}) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_whole_day(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "0.0-23.59"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_no_dashes(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "11.00"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_too_many_dashes(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "-1.00-3.00"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_non_numeric(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "11.00am-3.00pm"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_crossover_times(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "15.0-2.0"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) # only async + + def test_start_end_same(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "5.5-5.5"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) # only async + + def test_lec_no_rec(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "4.59-6.30"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) # only async + + def test_one_match(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "11.30-13.30"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 2) + self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) + + def test_lec_and_rec(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "5.0-12.0"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 3) + self.assertEqual({res["id"] for res in response.data}, {"CIS-160", "CIS-120", "CIS-262"}) + + def test_contains_parts_of_two_sec(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"time": "11.30-13.0"}, + ) + self.assertEqual(response.status_code, 
200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) + + def test_contains_rec_no_sec(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"time": "11.30-16"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 2) + self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) + + def test_unbounded_right(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"time": "11.30-"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 2) + self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) + + def test_unbounded_left(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"time": "-12.00"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 3) + self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-160", "CIS-262"}) + + def test_multi_meeting_match(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"time": "9.00-15.00"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 3) + self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-121", "CIS-262"}) + + +class DayTimeFilterTestCase(TestCase): + def setUp(self): + _, self.cis_120_001 = create_mock_data( + "CIS-120-001", TEST_SEMESTER + ) # time 11.0-12.0, days MWF + + _, self.cis_120_002 = create_mock_data( + code="CIS-120-002", semester=TEST_SEMESTER, start=1200, end=1330, meeting_days="TR" + ) + + _, self.cis_160_001 = create_mock_data( + code="CIS-160-001", semester=TEST_SEMESTER, start=500, end=630, meeting_days="TR" + ) + + _, self.cis_160_201 = create_mock_data( + code="CIS-160-201", semester=TEST_SEMESTER, start=1100, end=1200, meeting_days="M" + ) + self.cis_160_201.activity = "REC" + self.cis_160_201.save() + + _, self.cis_160_202 = create_mock_data( + code="CIS-160-202", semester=TEST_SEMESTER, start=1400, end=1500, meeting_days="W" + ) + self.cis_160_202.activity = "REC" + self.cis_160_202.save() + + _, self.cis_121_001 = create_mock_data(code="CIS-121-001", semester=TEST_SEMESTER) + set_meetings( + self.cis_121_001, + [ + { + "building_code": "LLAB", + "room_code": "10", + "days": "MT", + "begin_time_24": 900, + "begin_time": "9:00 AM", + "end_time_24": 1000, + "end_time": "10:00 AM", + }, + { + "building_code": "LLAB", + "room_code": "10", + "days": "WR", + "begin_time_24": 1330, + "begin_time": "1:30 PM", + "end_time_24": 1430, + "end_time": "2:30 PM", + }, + ], + ) + + _, self.cis_262_001 = create_mock_async_class(code="CIS-262-001", semester=TEST_SEMESTER) + + recompute_precomputed_fields() + + self.all_codes = {"CIS-120", "CIS-160", "CIS-121", "CIS-262"} + + self.client = APIClient() + set_semester() + + def test_all_match(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "0-23.59", "days": "MTWRFSU"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_days_match_not_time(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"time": "1.00-2.00", "days": "MTWRFSU"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + 
self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) + + def test_time_matches_not_days(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "1.00-", "days": "F"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) + + def test_days_time_partial_match(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "12.0-15.0", "days": "TWR"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 2) + self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) + + def test_multi_meeting_partial_match(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "9.00-10.00", "days": "MTWR"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) + + def test_multi_meeting_full_match(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), {"time": "9.00-14.30", "days": "MTWR"} + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 3) + self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-121", "CIS-262"}) + + +class ScheduleFilterTestCase(TestCase): + def setUp(self): + _, self.cis_120_001 = create_mock_data( + "CIS-120-001", TEST_SEMESTER + ) # time 11.0-12.0, days MWF + + _, self.cis_120_002 = create_mock_data( + code="CIS-120-002", semester=TEST_SEMESTER, start=1200, end=1330, meeting_days="TR" + ) + + _, self.cis_160_001 = create_mock_data( + code="CIS-160-001", semester=TEST_SEMESTER, start=500, end=630, meeting_days="TR" + ) + + _, self.cis_160_201 = create_mock_data( + code="CIS-160-201", semester=TEST_SEMESTER, start=1100, end=1200, meeting_days="M" + ) + self.cis_160_201.activity = "REC" + self.cis_160_201.save() + + _, self.cis_160_202 = create_mock_data( + code="CIS-160-202", semester=TEST_SEMESTER, start=1400, end=1500, meeting_days="W" + ) + self.cis_160_202.activity = "REC" + self.cis_160_202.save() + + _, self.cis_121_001 = create_mock_data(code="CIS-121-001", semester=TEST_SEMESTER) + set_meetings( + self.cis_121_001, + [ + { + "building_code": "LLAB", + "room_code": "10", + "days": "MT", + "begin_time_24": 900, + "begin_time": "9:00 AM", + "end_time_24": 1000, + "end_time": "10:00 AM", + }, + { + "building_code": "LLAB", + "room_code": "10", + "days": "WR", + "begin_time_24": 1330, + "begin_time": "1:30 PM", + "end_time_24": 1430, + "end_time": "2:30 PM", + }, + ], + ) + + _, self.cis_262_001 = create_mock_async_class(code="CIS-262-001", semester=TEST_SEMESTER) + + recompute_precomputed_fields() + + self.all_codes = {"CIS-120", "CIS-160", "CIS-121", "CIS-262"} + + self.user = User.objects.create_user( + username="jacob", email="jacob@example.com", password="top_secret" + ) + + self.empty_schedule = Schedule( + person=self.user, + semester=TEST_SEMESTER, + name="Empty Schedule", + ) + self.empty_schedule.save() + + self.all_available_schedule = Schedule( + person=self.user, + semester=TEST_SEMESTER, + name="All Classes Available Schedule", + ) + self.all_available_schedule.save() + self.all_available_schedule.sections.set([self.cis_120_001]) + + self.only_120_262_available_schedule = Schedule( + person=self.user, + semester=TEST_SEMESTER, + name="Only CIS-120 and CIS-262 Available Schedule", + ) + 
self.only_120_262_available_schedule.save() + self.only_120_262_available_schedule.sections.set([self.cis_120_001, self.cis_121_001]) + + self.only_262_available_schedule = Schedule( + person=self.user, + semester=TEST_SEMESTER, + name="Only CIS-262 Available Schedule", + ) + self.only_262_available_schedule.save() + self.only_262_available_schedule.sections.set( + [self.cis_120_001, self.cis_120_002, self.cis_121_001] + ) + + self.client = APIClient() + set_semester() + + def test_not_authenticated(self): + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"schedule-fit": str(self.only_262_available_schedule.id)}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_different_authenticated(self): + User.objects.create_user( + username="charley", email="charley@example.com", password="top_secret" + ) + client2 = APIClient() + client2.login(username="charley", password="top_secret") + response = client2.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"schedule-fit": str(self.only_262_available_schedule.id)}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_invalid_schedule(self): + self.client.login(username="jacob", password="top_secret") + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"schedule-fit": "invalid"}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_empty_schedule(self): + self.client.login(username="jacob", password="top_secret") + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"schedule-fit": str(self.empty_schedule.id)}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_all_available_schedule(self): + self.client.login(username="jacob", password="top_secret") + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"schedule-fit": str(self.all_available_schedule.id)}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), len(self.all_codes)) + self.assertEqual({res["id"] for res in response.data}, self.all_codes) + + def test_only_120_262_available_schedule(self): + self.client.login(username="jacob", password="top_secret") + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"schedule-fit": str(self.only_120_262_available_schedule.id)}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 2) + self.assertEqual({res["id"] for res in response.data}, {"CIS-120", "CIS-262"}) + + def test_only_262_available_schedule(self): + self.client.login(username="jacob", password="top_secret") + response = self.client.get( + reverse("courses-search", args=[TEST_SEMESTER]), + {"schedule-fit": str(self.only_262_available_schedule.id)}, + ) + self.assertEqual(response.status_code, 200) + self.assertEqual(len(response.data), 1) + self.assertEqual({res["id"] for res in response.data}, {"CIS-262"}) diff --git a/backend/tests/plan/test_course_recs.py 
b/backend/tests/plan/test_course_recs.py index 57568c980..4846a3740 100644 --- a/backend/tests/plan/test_course_recs.py +++ b/backend/tests/plan/test_course_recs.py @@ -1,677 +1,677 @@ -import csv -import json -import os -from unittest.mock import patch - -import numpy as np -from django.conf import settings -from django.contrib.auth import get_user_model -from django.contrib.auth.hashers import make_password -from django.core.management import call_command -from django.db.models.signals import post_save -from django.test import TestCase -from django.urls import reverse -from options.models import Option -from rest_framework.renderers import JSONRenderer -from rest_framework.test import APIClient - -from alert.models import AddDropPeriod -from courses.models import Course, Department, Section -from courses.util import invalidate_current_semester_cache -from plan.management.commands.recommendcourses import retrieve_course_clusters -from plan.management.commands.trainrecommender import ( - generate_course_vectors_dict, - group_courses, - train_recommender, -) -from plan.models import Schedule - - -TEST_SEMESTER = "2021C" -assert TEST_SEMESTER >= "2021C", "Some tests assume TEST_SEMESTER >= 2021C" - - -def set_semester(): - post_save.disconnect( - receiver=invalidate_current_semester_cache, - sender=Option, - dispatch_uid="invalidate_current_semester_cache", - ) - Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() - AddDropPeriod(semester=TEST_SEMESTER).save() - - -User = get_user_model() - - -@patch("plan.views.retrieve_course_clusters") -class CourseRecommendationsTestCase(TestCase): - @classmethod - def setUpTestData(cls): - """ - Creates departments, courses, sections and schedules from - `/tests/plan/course_recs_test_data/course_data_test.csv` - and `/tests/plan/course_recs_test_data/course_descriptions_test.csv`. - - The contents of `/tests/plan/course_recs_test_data/course_data_test.csv` are 3 columns: - - a `person_id` (used when creating schedules) - - a course `full_code` column (ie "PSCI-498") - - a semester column (ranging between 2016C and 2020A). - - Courses are created with approximately the following specification: - - `department_id`: Corresponds to the department code embedded in the `full_code` - - `full_code` : corresponds to the course code column in - `/tests/plan/course_recs_test_data/course_data_test.csv` - - `semester` : corresponds to the semester column in - `/tests/plan/course_recs_test_data/course_data_test.csv`. Additionally, if the value of - the semester column in `/tests/plan/course_recs_test_data/course_data_test.csv` - for a course is not "2020A" or "2017A" and the course `full_code` is not "HIST-650" - another course entry is created with `semester` equal to `TEST_SEMESTER` as defined - at the top of this file (2021C at the time of writing.) 
- - `description` : corresponds to the entry in - `/tests/plan/course_recs_test_data/course_descriptions_test.csv` - - Sections corresponding to each created course are created with approximately this - specification - - `code` : "001" - - `full_code` : the course's `full_code` + "-001" - """ - - course_data_path = ( - settings.BASE_DIR + "/tests/plan/course_recs_test_data/course_data_test.csv" - ) - - # Setting up test courses in the db - test_descriptions = dict() - with open( - settings.BASE_DIR + "/tests/plan/course_recs_test_data/course_descriptions_test.csv" - ) as course_desc_file: - desc_reader = csv.reader(course_desc_file) - for course, description in desc_reader: - test_descriptions[course] = description - courses = set() - with open(course_data_path) as course_data_file: - course_data_reader = csv.reader(course_data_file) - for _, course_code, semester in course_data_reader: - courses.add((course_code, semester)) - - departments_to_save = [] - department_obs = dict() - courses_to_save = [] - course_obs = dict() - sections_to_save = [] - - def create_dont_save(course_code, semester, iter_num): - dept_code = course_code.split("-")[0] - if iter_num == 0: - if dept_code not in department_obs: - dept = Department(code=dept_code, name=dept_code) - department_obs[dept_code] = dept - departments_to_save.append(dept) - elif iter_num == 1: - dept_id = department_obs[dept_code] - course = Course( - code=course_code.split("-")[1], - semester=semester, - full_code=course_code, - description=test_descriptions[course_code], - ) - course.department_id = dept_id - courses_to_save.append(course) - elif iter_num == 2: - course_id = course_obs[course_code, semester] - section = Section( - code="001", - full_code=course_code + "-001", - credits=1, - status="O", - activity="LEC", - ) - section.course_id = course_id - sections_to_save.append(section) - - curr_courses = set() - for i in range(3): - for course_code, semester in courses: - assert semester != TEST_SEMESTER - create_dont_save(course_code, semester, i) - for course_code, semester in courses: - curr_courses.add(course_code) - for course_code, semester in courses: - if semester in ["2017A", "2020A"] or course_code in ["HIST-650"]: - curr_courses.remove(course_code) - for course_code in curr_courses: - create_dont_save(course_code, TEST_SEMESTER, i) - for extra_course_code in ["CIS-121", "CIS-262"]: - create_dont_save(extra_course_code, TEST_SEMESTER, i) - if i == 0: - Department.objects.bulk_create(departments_to_save) - department_obs = dict() - for dept in Department.objects.all(): - department_obs[dept.code] = dept.id - elif i == 1: - for course in courses_to_save: - course.save() - course_obs[course.full_code, course.semester] = course.id - elif i == 2: - Section.objects.bulk_create(sections_to_save) - - section_obs = dict() - for section in Section.objects.all(): - section_obs[section.full_code, section.course.semester] = section.id - cls.section_obs = section_obs - - schedules = dict() - with open(course_data_path) as course_data_file: - course_data_reader = csv.reader(course_data_file) - for person_id, course_code, semester in course_data_reader: - if person_id not in schedules: - schedules[person_id] = dict() - if semester not in schedules[person_id]: - schedules[person_id][semester] = set() - schedules[person_id][semester].add(course_code) - - User.objects.bulk_create( - [ - User( - username=person_id, - email=person_id + "@example.com", - password=make_password(person_id + "_password"), - is_active=True, - ) - for person_id in 
schedules.keys() - ] - + [ - User( - username=person_id, - email=person_id + "@example.com", - password=make_password(person_id + "_password"), - is_active=True, - ) - for person_id in ["freshman", "gapsem", "noshow", "repeat"] - ] - ) - - user_obs = dict() - for user in User.objects.all(): - user_obs[user.username] = user.id - - # Create past schedules - schedules_list = [] - for username in schedules.keys(): - for semester in schedules[username].keys(): - schedule = Schedule( - semester=semester, - name=username + " main schedule", - ) - schedule.person_id = user_obs[username] - schedules_list.append(schedule) - Schedule.objects.bulk_create(schedules_list) - schedule_obs = dict() - for schedule in Schedule.objects.all(): - schedule_obs[schedule.person_id, schedule.semester] = schedule - for username in schedules.keys(): - for semester in schedules[username].keys(): - schedule = schedule_obs[user_obs[username], semester] - for course_code in schedules[username][semester]: - if course_code in ["AFRC-437", "GRMN-180", "CIS-262"]: - continue - schedule.sections.add(section_obs[course_code + "-001", semester]) - - schedule = Schedule( - person=get_user_model().objects.get(username="hash1"), - semester=TEST_SEMESTER, - name="My Test Schedule", - ) - schedule.save() - for course_code in ["AFRC-437", "GRMN-180", "CIS-262"]: - schedule.sections.add(section_obs[course_code + "-001", TEST_SEMESTER]) - - cls.course_clusters = train_recommender( - course_data_path=course_data_path, output_path=os.devnull - ) - - cls.course_clusters_with_schedules = train_recommender( - course_data_path=None, output_path=os.devnull - ) - - def setUp(self): - self.client = APIClient() - self.client.login(username="hash1", password="hash1_password") - set_semester() - response = self.client.get(reverse("courses-list", args=[TEST_SEMESTER])) - self.assertEqual(response.status_code, 200, response.content) - self.course_objects = dict() - for course_ob in response.data: - self.course_objects[course_ob["id"]] = course_ob - - def subtest_with_user(self): - response = self.client.post(reverse("recommend-courses")) - self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(len(response.data), 5) - - def test_with_user(self, mock): - mock.return_value = self.course_clusters - self.subtest_with_user() - - def test_with_user_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_with_user() - - def subtest_with_edge_case_users(self): - freshman = User.objects.get(username="freshman") - freshman_client = APIClient() - freshman_client.login(username="freshman", password="freshman_password") - freshman_schedule = Schedule( - person=freshman, - semester=TEST_SEMESTER, - name="Current schedule", - ) - freshman_schedule.save() - for course_code in ["GRMN-502", "GEOL-545", "MUSC-275"]: - freshman_schedule.sections.add(self.section_obs[course_code + "-001", TEST_SEMESTER]) - response = freshman_client.post(reverse("recommend-courses")) - self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(len(response.data), 5) - - gapsem = User.objects.get(username="gapsem") - gapsem_client = APIClient() - gapsem_client.login(username="gapsem", password="gapsem_password") - gapsem_schedule = Schedule( - person=gapsem, - semester="2017A", - name="Previous schedule", - ) - gapsem_schedule.save() - for course_code in ["LGIC-320", "ANTH-395", "NELC-337"]: - gapsem_schedule.sections.add(self.section_obs[course_code + "-001", "2017A"]) - response = 
gapsem_client.post(reverse("recommend-courses")) - self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(len(response.data), 5) - - noshow = User.objects.get(username="noshow") - noshow_client = APIClient() - noshow_client.login(username="noshow", password="noshow_password") - noshow_schedule = Schedule( - person=noshow, - semester=TEST_SEMESTER, - name="Empty schedule", - ) - noshow_schedule.save() - noshow_previous_schedule = Schedule( - person=noshow, - semester="2017C", - name="Empty previous schedule", - ) - noshow_previous_schedule.save() - response = noshow_client.post(reverse("recommend-courses")) - self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(len(response.data), 5) - - repeat = User.objects.get(username="repeat") - repeat_client = APIClient() - repeat_client.login(username="repeat", password="repeat_password") - repeat_schedule_old = Schedule( - person=repeat, - semester="2016C", - name="Old schedule", - ) - repeat_schedule_old.save() - for course_code in ["MUSC-275"]: - repeat_schedule_old.sections.add(self.section_obs[course_code + "-001", "2016C"]) - repeat_schedule = Schedule( - person=repeat, - semester=TEST_SEMESTER, - name="New schedule", - ) - repeat_schedule.save() - for course_code in ["GRMN-502", "GEOL-545", "MUSC-275"]: - repeat_schedule.sections.add(self.section_obs[course_code + "-001", TEST_SEMESTER]) - response = repeat_client.post(reverse("recommend-courses")) - self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(len(response.data), 5) - - def test_with_edge_case_users(self, mock): - mock.return_value = self.course_clusters - self.subtest_with_edge_case_users() - - def test_with_edge_case_users_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_with_edge_case_users() - - def subtest_bad_data_courses(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps({"curr_courses": ["CIS1233"]}), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - - def test_bad_data_courses(self, mock): - mock.return_value = self.course_clusters - self.subtest_bad_data_courses() - - def test_bad_data_courses_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_bad_data_courses() - - def subtest_bad_data_past(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps({"past_courses": ["CIS1233"]}), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - - def test_bad_data_past(self, mock): - mock.return_value = self.course_clusters - self.subtest_bad_data_past() - - def test_bad_data_past_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_bad_data_past() - - def subtest_bad_data_past_current(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps({"past_courses": ["CIS1233"], "curr_courses": ["CIS123123"]}), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - - def test_bad_data_past_current(self, mock): - mock.return_value = self.course_clusters - self.subtest_bad_data_past_current() - - def test_bad_data_past_current_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_bad_data_past_current() - - def check_response_data(self, data): - for course_ob in data: - should_be = 
self.course_objects[course_ob["id"]] - should_be_str = ( - JSONRenderer().render(should_be, renderer_context={"indent": 4}).decode("UTF-8") - ) - course_ob_str = ( - JSONRenderer().render(course_ob, renderer_context={"indent": 4}).decode("UTF-8") - ) - error_msg = "\n\nresponse=" + course_ob_str + "\n\nshould be=" + should_be_str + "\n\n" - self.assertEqual(should_be, course_ob, error_msg) - - def subtest_only_past_courses(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps({"past_courses": ["BEPP-263", "GRMN-180"]}), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200, response.content) - self.check_response_data(response.data) - self.assertEqual(len(response.data), 5) - - def test_only_past_courses(self, mock): - mock.return_value = self.course_clusters - self.subtest_only_past_courses() - - def test_only_past_courses_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_only_past_courses() - - def subtest_only_current(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps({"curr_courses": ["AFRC-437", "GRMN-180"]}), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200, response.content) - self.check_response_data(response.data) - self.assertEqual(len(response.data), 5) - - def test_only_current(self, mock): - mock.return_value = self.course_clusters - self.subtest_only_current() - - def test_only_current_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_only_current() - - def subtest_past_and_current(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", "GRMN-180", "CIS-262"], - "past_courses": ["ARTH-775", "EDUC-715"], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200, response.content) - self.check_response_data(response.data) - self.assertEqual(len(response.data), 5) - - def test_past_and_current(self, mock): - mock.return_value = self.course_clusters - self.subtest_past_and_current() - - def test_past_and_current_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_past_and_current() - - def subtest_custom_num_recommendations(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", "GRMN-180", "CIS-121"], - "past_courses": ["ARTH-775", "EDUC-715"], - "n_recommendations": 20, - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200, response.content) - self.check_response_data(response.data) - self.assertEqual(len(response.data), 20) - - def test_custom_num_recommendations(self, mock): - mock.return_value = self.course_clusters - self.subtest_custom_num_recommendations() - - def test_custom_num_recommendations_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_custom_num_recommendations() - - def subtest_invalid_num_recommendations(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", "GRMN-180", "CIS-121"], - "past_courses": ["ARTH-775", "EDUC-715"], - "n_recommendations": 0, - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", 
"GRMN-180", "CIS-121"], - "past_courses": ["ARTH-775", "EDUC-715"], - "n_recommendations": -1, - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", "GRMN-180", "CIS-121"], - "past_courses": ["ARTH-775", "EDUC-715"], - "n_recommendations": "test", - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - - def test_invalid_num_recommendations(self, mock): - mock.return_value = self.course_clusters - self.subtest_invalid_num_recommendations() - - def test_invalid_num_recommendations_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_invalid_num_recommendations() - - def subtest_non_current_course_in_curr_courses(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", "GRMN-180", "HIST-650"], - "past_courses": ["ARTH-775", "CIS-262"], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - - def test_non_current_course_in_curr_courses(self, mock): - mock.return_value = self.course_clusters - self.subtest_non_current_course_in_curr_courses() - - def test_non_current_course_in_curr_courses_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_non_current_course_in_curr_courses() - - def subtest_repeated_courses(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", "GRMN-180", "AFRC-437"], - "past_courses": ["ARTH-775", "CIS-262"], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", "GRMN-180"], - "past_courses": ["ARTH-775", "CIS-262", "CIS-262"], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - - def test_repeated_courses(self, mock): - mock.return_value = self.course_clusters - self.subtest_repeated_courses() - - def test_repeated_courses_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_repeated_courses() - - def subtest_overlapping_courses(self): - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", "GRMN-180", "CIS-262"], - "past_courses": ["ARTH-775", "CIS-262"], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400, response.content) - - def test_overlapping_courses(self, mock): - mock.return_value = self.course_clusters - self.subtest_overlapping_courses() - - def test_overlapping_courses_from_schedules(self, mock): - mock.return_value = self.course_clusters_with_schedules - self.subtest_overlapping_courses() - - def test_generate_course_vectors_dict_one_class_one_user_no_desc(self, mock): - expected = ( - {"CIS-120": np.array([0.4472136, 0.89442719])}, - {"CIS-120": np.array([0.4472136, 0.89442719])}, - ) - actual = generate_course_vectors_dict([(0, "CIS-120", "2020A")], False) - # self.assertEqual does not work with np arrays - self.assertTrue( - actual is not None and isinstance(actual[0], dict) and "CIS-120" in actual[0] - ) - self.assertTrue(np.linalg.norm(actual[0]["CIS-120"] - expected[0]["CIS-120"]) < 
1e-8) - self.assertTrue(np.linalg.norm(actual[1]["CIS-120"] - expected[1]["CIS-120"]) < 1e-8) - - def test_group_courses_course_multiple_times_one_semester(self, mock): - actual = group_courses([(0, "CIS-120", "2020A"), (0, "CIS-120", "2020A")]) - expected = {0: {"2020A": {"CIS-120": 2}}} - self.assertEqual(expected, actual) - - def subtest_recommend_courses_command_user(self): - call_command("recommendcourses", username="hash1", stdout=os.devnull) - - @patch("plan.management.commands.recommendcourses.retrieve_course_clusters") - def test_recommend_courses_command_user(self, mock1, mock2): - mock1.return_value = self.course_clusters - mock2.return_value = self.course_clusters - self.subtest_recommend_courses_command_user() - - @patch("plan.management.commands.recommendcourses.retrieve_course_clusters") - def test_recommend_courses_command_user_from_schedules(self, mock1, mock2): - mock1.return_value = self.course_clusters_with_schedules - mock2.return_value = self.course_clusters_with_schedules - self.subtest_recommend_courses_command_user() - - def subtest_recommend_courses_command_lists(self): - call_command( - "recommendcourses", - curr_courses="AFRC-437,GRMN-180,CIS-262", - past_courses="ARTH-775,EDUC-715", - stdout=os.devnull, - ) - - @patch("plan.management.commands.recommendcourses.retrieve_course_clusters") - def test_recommend_courses_command_lists(self, mock1, mock2): - mock1.return_value = self.course_clusters - mock2.return_value = self.course_clusters - self.subtest_recommend_courses_command_lists() - - @patch("plan.management.commands.recommendcourses.retrieve_course_clusters") - def test_recommend_courses_command_lists_from_schedules(self, mock1, mock2): - mock1.return_value = self.course_clusters_with_schedules - mock2.return_value = self.course_clusters_with_schedules - self.subtest_recommend_courses_command_lists() - - def test_retrieve_course_clusters_dev(self, mock): - with patch.dict( - "plan.management.commands.recommendcourses.os.environ", - {"DJANGO_SETTINGS_MODULE": "PennCourses.settings.development"}, - ): - clusters = retrieve_course_clusters() - mock.return_value = clusters - response = self.client.post( - reverse("recommend-courses"), - json.dumps( - { - "curr_courses": ["AFRC-437", "GRMN-180", "CIS-262"], - "past_courses": ["ARTH-775", "EDUC-715"], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200, response.content) - self.check_response_data(response.data) - self.assertEqual(len(response.data), 5) +import csv +import json +import os +from unittest.mock import patch + +import numpy as np +from django.conf import settings +from django.contrib.auth import get_user_model +from django.contrib.auth.hashers import make_password +from django.core.management import call_command +from django.db.models.signals import post_save +from django.test import TestCase +from django.urls import reverse +from options.models import Option +from rest_framework.renderers import JSONRenderer +from rest_framework.test import APIClient + +from alert.models import AddDropPeriod +from courses.models import Course, Department, Section +from courses.util import invalidate_current_semester_cache +from plan.management.commands.recommendcourses import retrieve_course_clusters +from plan.management.commands.trainrecommender import ( + generate_course_vectors_dict, + group_courses, + train_recommender, +) +from plan.models import Schedule + + +TEST_SEMESTER = "2021C" +assert TEST_SEMESTER >= "2021C", "Some tests assume TEST_SEMESTER >= 2021C" + + +def 
set_semester(): + post_save.disconnect( + receiver=invalidate_current_semester_cache, + sender=Option, + dispatch_uid="invalidate_current_semester_cache", + ) + Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() + AddDropPeriod(semester=TEST_SEMESTER).save() + + +User = get_user_model() + + +@patch("plan.views.retrieve_course_clusters") +class CourseRecommendationsTestCase(TestCase): + @classmethod + def setUpTestData(cls): + """ + Creates departments, courses, sections and schedules from + `/tests/plan/course_recs_test_data/course_data_test.csv` + and `/tests/plan/course_recs_test_data/course_descriptions_test.csv`. + + The contents of `/tests/plan/course_recs_test_data/course_data_test.csv` are 3 columns: + - a `person_id` (used when creating schedules) + - a course `full_code` column (ie "PSCI-498") + - a semester column (ranging between 2016C and 2020A). + + Courses are created with approximately the following specification: + - `department_id`: Corresponds to the department code embedded in the `full_code` + - `full_code` : corresponds to the course code column in + `/tests/plan/course_recs_test_data/course_data_test.csv` + - `semester` : corresponds to the semester column in + `/tests/plan/course_recs_test_data/course_data_test.csv`. Additionally, if the value of + the semester column in `/tests/plan/course_recs_test_data/course_data_test.csv` + for a course is not "2020A" or "2017A" and the course `full_code` is not "HIST-650" + another course entry is created with `semester` equal to `TEST_SEMESTER` as defined + at the top of this file (2021C at the time of writing.) + - `description` : corresponds to the entry in + `/tests/plan/course_recs_test_data/course_descriptions_test.csv` + + Sections corresponding to each created course are created with approximately this + specification + - `code` : "001" + - `full_code` : the course's `full_code` + "-001" + """ + + course_data_path = ( + settings.BASE_DIR + "/tests/plan/course_recs_test_data/course_data_test.csv" + ) + + # Setting up test courses in the db + test_descriptions = dict() + with open( + settings.BASE_DIR + "/tests/plan/course_recs_test_data/course_descriptions_test.csv" + ) as course_desc_file: + desc_reader = csv.reader(course_desc_file) + for course, description in desc_reader: + test_descriptions[course] = description + courses = set() + with open(course_data_path) as course_data_file: + course_data_reader = csv.reader(course_data_file) + for _, course_code, semester in course_data_reader: + courses.add((course_code, semester)) + + departments_to_save = [] + department_obs = dict() + courses_to_save = [] + course_obs = dict() + sections_to_save = [] + + def create_dont_save(course_code, semester, iter_num): + dept_code = course_code.split("-")[0] + if iter_num == 0: + if dept_code not in department_obs: + dept = Department(code=dept_code, name=dept_code) + department_obs[dept_code] = dept + departments_to_save.append(dept) + elif iter_num == 1: + dept_id = department_obs[dept_code] + course = Course( + code=course_code.split("-")[1], + semester=semester, + full_code=course_code, + description=test_descriptions[course_code], + ) + course.department_id = dept_id + courses_to_save.append(course) + elif iter_num == 2: + course_id = course_obs[course_code, semester] + section = Section( + code="001", + full_code=course_code + "-001", + credits=1, + status="O", + activity="LEC", + ) + section.course_id = course_id + sections_to_save.append(section) + + curr_courses = set() + for i in range(3): + for 
course_code, semester in courses: + assert semester != TEST_SEMESTER + create_dont_save(course_code, semester, i) + for course_code, semester in courses: + curr_courses.add(course_code) + for course_code, semester in courses: + if semester in ["2017A", "2020A"] or course_code in ["HIST-650"]: + curr_courses.remove(course_code) + for course_code in curr_courses: + create_dont_save(course_code, TEST_SEMESTER, i) + for extra_course_code in ["CIS-121", "CIS-262"]: + create_dont_save(extra_course_code, TEST_SEMESTER, i) + if i == 0: + Department.objects.bulk_create(departments_to_save) + department_obs = dict() + for dept in Department.objects.all(): + department_obs[dept.code] = dept.id + elif i == 1: + for course in courses_to_save: + course.save() + course_obs[course.full_code, course.semester] = course.id + elif i == 2: + Section.objects.bulk_create(sections_to_save) + + section_obs = dict() + for section in Section.objects.all(): + section_obs[section.full_code, section.course.semester] = section.id + cls.section_obs = section_obs + + schedules = dict() + with open(course_data_path) as course_data_file: + course_data_reader = csv.reader(course_data_file) + for person_id, course_code, semester in course_data_reader: + if person_id not in schedules: + schedules[person_id] = dict() + if semester not in schedules[person_id]: + schedules[person_id][semester] = set() + schedules[person_id][semester].add(course_code) + + User.objects.bulk_create( + [ + User( + username=person_id, + email=person_id + "@example.com", + password=make_password(person_id + "_password"), + is_active=True, + ) + for person_id in schedules.keys() + ] + + [ + User( + username=person_id, + email=person_id + "@example.com", + password=make_password(person_id + "_password"), + is_active=True, + ) + for person_id in ["freshman", "gapsem", "noshow", "repeat"] + ] + ) + + user_obs = dict() + for user in User.objects.all(): + user_obs[user.username] = user.id + + # Create past schedules + schedules_list = [] + for username in schedules.keys(): + for semester in schedules[username].keys(): + schedule = Schedule( + semester=semester, + name=username + " main schedule", + ) + schedule.person_id = user_obs[username] + schedules_list.append(schedule) + Schedule.objects.bulk_create(schedules_list) + schedule_obs = dict() + for schedule in Schedule.objects.all(): + schedule_obs[schedule.person_id, schedule.semester] = schedule + for username in schedules.keys(): + for semester in schedules[username].keys(): + schedule = schedule_obs[user_obs[username], semester] + for course_code in schedules[username][semester]: + if course_code in ["AFRC-437", "GRMN-180", "CIS-262"]: + continue + schedule.sections.add(section_obs[course_code + "-001", semester]) + + schedule = Schedule( + person=get_user_model().objects.get(username="hash1"), + semester=TEST_SEMESTER, + name="My Test Schedule", + ) + schedule.save() + for course_code in ["AFRC-437", "GRMN-180", "CIS-262"]: + schedule.sections.add(section_obs[course_code + "-001", TEST_SEMESTER]) + + cls.course_clusters = train_recommender( + course_data_path=course_data_path, output_path=os.devnull + ) + + cls.course_clusters_with_schedules = train_recommender( + course_data_path=None, output_path=os.devnull + ) + + def setUp(self): + self.client = APIClient() + self.client.login(username="hash1", password="hash1_password") + set_semester() + response = self.client.get(reverse("courses-list", args=[TEST_SEMESTER])) + self.assertEqual(response.status_code, 200, response.content) + self.course_objects 
= dict() + for course_ob in response.data: + self.course_objects[course_ob["id"]] = course_ob + + def subtest_with_user(self): + response = self.client.post(reverse("recommend-courses")) + self.assertEqual(response.status_code, 200, response.content) + self.assertEqual(len(response.data), 5) + + def test_with_user(self, mock): + mock.return_value = self.course_clusters + self.subtest_with_user() + + def test_with_user_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_with_user() + + def subtest_with_edge_case_users(self): + freshman = User.objects.get(username="freshman") + freshman_client = APIClient() + freshman_client.login(username="freshman", password="freshman_password") + freshman_schedule = Schedule( + person=freshman, + semester=TEST_SEMESTER, + name="Current schedule", + ) + freshman_schedule.save() + for course_code in ["GRMN-502", "GEOL-545", "MUSC-275"]: + freshman_schedule.sections.add(self.section_obs[course_code + "-001", TEST_SEMESTER]) + response = freshman_client.post(reverse("recommend-courses")) + self.assertEqual(response.status_code, 200, response.content) + self.assertEqual(len(response.data), 5) + + gapsem = User.objects.get(username="gapsem") + gapsem_client = APIClient() + gapsem_client.login(username="gapsem", password="gapsem_password") + gapsem_schedule = Schedule( + person=gapsem, + semester="2017A", + name="Previous schedule", + ) + gapsem_schedule.save() + for course_code in ["LGIC-320", "ANTH-395", "NELC-337"]: + gapsem_schedule.sections.add(self.section_obs[course_code + "-001", "2017A"]) + response = gapsem_client.post(reverse("recommend-courses")) + self.assertEqual(response.status_code, 200, response.content) + self.assertEqual(len(response.data), 5) + + noshow = User.objects.get(username="noshow") + noshow_client = APIClient() + noshow_client.login(username="noshow", password="noshow_password") + noshow_schedule = Schedule( + person=noshow, + semester=TEST_SEMESTER, + name="Empty schedule", + ) + noshow_schedule.save() + noshow_previous_schedule = Schedule( + person=noshow, + semester="2017C", + name="Empty previous schedule", + ) + noshow_previous_schedule.save() + response = noshow_client.post(reverse("recommend-courses")) + self.assertEqual(response.status_code, 200, response.content) + self.assertEqual(len(response.data), 5) + + repeat = User.objects.get(username="repeat") + repeat_client = APIClient() + repeat_client.login(username="repeat", password="repeat_password") + repeat_schedule_old = Schedule( + person=repeat, + semester="2016C", + name="Old schedule", + ) + repeat_schedule_old.save() + for course_code in ["MUSC-275"]: + repeat_schedule_old.sections.add(self.section_obs[course_code + "-001", "2016C"]) + repeat_schedule = Schedule( + person=repeat, + semester=TEST_SEMESTER, + name="New schedule", + ) + repeat_schedule.save() + for course_code in ["GRMN-502", "GEOL-545", "MUSC-275"]: + repeat_schedule.sections.add(self.section_obs[course_code + "-001", TEST_SEMESTER]) + response = repeat_client.post(reverse("recommend-courses")) + self.assertEqual(response.status_code, 200, response.content) + self.assertEqual(len(response.data), 5) + + def test_with_edge_case_users(self, mock): + mock.return_value = self.course_clusters + self.subtest_with_edge_case_users() + + def test_with_edge_case_users_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_with_edge_case_users() + + def subtest_bad_data_courses(self): + response = self.client.post( + 
reverse("recommend-courses"), + json.dumps({"curr_courses": ["CIS1233"]}), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + + def test_bad_data_courses(self, mock): + mock.return_value = self.course_clusters + self.subtest_bad_data_courses() + + def test_bad_data_courses_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_bad_data_courses() + + def subtest_bad_data_past(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps({"past_courses": ["CIS1233"]}), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + + def test_bad_data_past(self, mock): + mock.return_value = self.course_clusters + self.subtest_bad_data_past() + + def test_bad_data_past_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_bad_data_past() + + def subtest_bad_data_past_current(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps({"past_courses": ["CIS1233"], "curr_courses": ["CIS123123"]}), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + + def test_bad_data_past_current(self, mock): + mock.return_value = self.course_clusters + self.subtest_bad_data_past_current() + + def test_bad_data_past_current_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_bad_data_past_current() + + def check_response_data(self, data): + for course_ob in data: + should_be = self.course_objects[course_ob["id"]] + should_be_str = ( + JSONRenderer().render(should_be, renderer_context={"indent": 4}).decode("UTF-8") + ) + course_ob_str = ( + JSONRenderer().render(course_ob, renderer_context={"indent": 4}).decode("UTF-8") + ) + error_msg = "\n\nresponse=" + course_ob_str + "\n\nshould be=" + should_be_str + "\n\n" + self.assertEqual(should_be, course_ob, error_msg) + + def subtest_only_past_courses(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps({"past_courses": ["BEPP-263", "GRMN-180"]}), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200, response.content) + self.check_response_data(response.data) + self.assertEqual(len(response.data), 5) + + def test_only_past_courses(self, mock): + mock.return_value = self.course_clusters + self.subtest_only_past_courses() + + def test_only_past_courses_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_only_past_courses() + + def subtest_only_current(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps({"curr_courses": ["AFRC-437", "GRMN-180"]}), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200, response.content) + self.check_response_data(response.data) + self.assertEqual(len(response.data), 5) + + def test_only_current(self, mock): + mock.return_value = self.course_clusters + self.subtest_only_current() + + def test_only_current_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_only_current() + + def subtest_past_and_current(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180", "CIS-262"], + "past_courses": ["ARTH-775", "EDUC-715"], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200, 
response.content) + self.check_response_data(response.data) + self.assertEqual(len(response.data), 5) + + def test_past_and_current(self, mock): + mock.return_value = self.course_clusters + self.subtest_past_and_current() + + def test_past_and_current_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_past_and_current() + + def subtest_custom_num_recommendations(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180", "CIS-121"], + "past_courses": ["ARTH-775", "EDUC-715"], + "n_recommendations": 20, + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200, response.content) + self.check_response_data(response.data) + self.assertEqual(len(response.data), 20) + + def test_custom_num_recommendations(self, mock): + mock.return_value = self.course_clusters + self.subtest_custom_num_recommendations() + + def test_custom_num_recommendations_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_custom_num_recommendations() + + def subtest_invalid_num_recommendations(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180", "CIS-121"], + "past_courses": ["ARTH-775", "EDUC-715"], + "n_recommendations": 0, + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + response = self.client.post( + reverse("recommend-courses"), + json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180", "CIS-121"], + "past_courses": ["ARTH-775", "EDUC-715"], + "n_recommendations": -1, + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + response = self.client.post( + reverse("recommend-courses"), + json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180", "CIS-121"], + "past_courses": ["ARTH-775", "EDUC-715"], + "n_recommendations": "test", + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + + def test_invalid_num_recommendations(self, mock): + mock.return_value = self.course_clusters + self.subtest_invalid_num_recommendations() + + def test_invalid_num_recommendations_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_invalid_num_recommendations() + + def subtest_non_current_course_in_curr_courses(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180", "HIST-650"], + "past_courses": ["ARTH-775", "CIS-262"], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + + def test_non_current_course_in_curr_courses(self, mock): + mock.return_value = self.course_clusters + self.subtest_non_current_course_in_curr_courses() + + def test_non_current_course_in_curr_courses_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_non_current_course_in_curr_courses() + + def subtest_repeated_courses(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180", "AFRC-437"], + "past_courses": ["ARTH-775", "CIS-262"], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + response = self.client.post( + reverse("recommend-courses"), + 
json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180"], + "past_courses": ["ARTH-775", "CIS-262", "CIS-262"], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + + def test_repeated_courses(self, mock): + mock.return_value = self.course_clusters + self.subtest_repeated_courses() + + def test_repeated_courses_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_repeated_courses() + + def subtest_overlapping_courses(self): + response = self.client.post( + reverse("recommend-courses"), + json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180", "CIS-262"], + "past_courses": ["ARTH-775", "CIS-262"], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400, response.content) + + def test_overlapping_courses(self, mock): + mock.return_value = self.course_clusters + self.subtest_overlapping_courses() + + def test_overlapping_courses_from_schedules(self, mock): + mock.return_value = self.course_clusters_with_schedules + self.subtest_overlapping_courses() + + def test_generate_course_vectors_dict_one_class_one_user_no_desc(self, mock): + expected = ( + {"CIS-120": np.array([0.4472136, 0.89442719])}, + {"CIS-120": np.array([0.4472136, 0.89442719])}, + ) + actual = generate_course_vectors_dict([(0, "CIS-120", "2020A")], False) + # self.assertEqual does not work with np arrays + self.assertTrue( + actual is not None and isinstance(actual[0], dict) and "CIS-120" in actual[0] + ) + self.assertTrue(np.linalg.norm(actual[0]["CIS-120"] - expected[0]["CIS-120"]) < 1e-8) + self.assertTrue(np.linalg.norm(actual[1]["CIS-120"] - expected[1]["CIS-120"]) < 1e-8) + + def test_group_courses_course_multiple_times_one_semester(self, mock): + actual = group_courses([(0, "CIS-120", "2020A"), (0, "CIS-120", "2020A")]) + expected = {0: {"2020A": {"CIS-120": 2}}} + self.assertEqual(expected, actual) + + def subtest_recommend_courses_command_user(self): + call_command("recommendcourses", username="hash1", stdout=os.devnull) + + @patch("plan.management.commands.recommendcourses.retrieve_course_clusters") + def test_recommend_courses_command_user(self, mock1, mock2): + mock1.return_value = self.course_clusters + mock2.return_value = self.course_clusters + self.subtest_recommend_courses_command_user() + + @patch("plan.management.commands.recommendcourses.retrieve_course_clusters") + def test_recommend_courses_command_user_from_schedules(self, mock1, mock2): + mock1.return_value = self.course_clusters_with_schedules + mock2.return_value = self.course_clusters_with_schedules + self.subtest_recommend_courses_command_user() + + def subtest_recommend_courses_command_lists(self): + call_command( + "recommendcourses", + curr_courses="AFRC-437,GRMN-180,CIS-262", + past_courses="ARTH-775,EDUC-715", + stdout=os.devnull, + ) + + @patch("plan.management.commands.recommendcourses.retrieve_course_clusters") + def test_recommend_courses_command_lists(self, mock1, mock2): + mock1.return_value = self.course_clusters + mock2.return_value = self.course_clusters + self.subtest_recommend_courses_command_lists() + + @patch("plan.management.commands.recommendcourses.retrieve_course_clusters") + def test_recommend_courses_command_lists_from_schedules(self, mock1, mock2): + mock1.return_value = self.course_clusters_with_schedules + mock2.return_value = self.course_clusters_with_schedules + self.subtest_recommend_courses_command_lists() + + def test_retrieve_course_clusters_dev(self, mock): + with 
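[Editor's note] The expected vectors in test_generate_course_vectors_dict_one_class_one_user_no_desc above are numerically the unit-normalized form of [1, 2]: 1/sqrt(5) ≈ 0.4472136 and 2/sqrt(5) ≈ 0.89442719. A minimal sketch of that arithmetic (numpy only; illustrative, not part of the patch):

    import numpy as np

    v = np.array([1.0, 2.0])
    unit = v / np.linalg.norm(v)  # L2-normalize: [0.4472136..., 0.8944271...]
    assert np.allclose(unit, [0.4472136, 0.89442719])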
patch.dict( + "plan.management.commands.recommendcourses.os.environ", + {"DJANGO_SETTINGS_MODULE": "PennCourses.settings.development"}, + ): + clusters = retrieve_course_clusters() + mock.return_value = clusters + response = self.client.post( + reverse("recommend-courses"), + json.dumps( + { + "curr_courses": ["AFRC-437", "GRMN-180", "CIS-262"], + "past_courses": ["ARTH-775", "EDUC-715"], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200, response.content) + self.check_response_data(response.data) + self.assertEqual(len(response.data), 5) diff --git a/backend/tests/plan/test_schedule.py b/backend/tests/plan/test_schedule.py index 5d816fe4a..2d3d624dd 100644 --- a/backend/tests/plan/test_schedule.py +++ b/backend/tests/plan/test_schedule.py @@ -1,686 +1,686 @@ -import json - -from django.contrib.auth import get_user_model -from django.db.models.signals import post_save -from django.test import TestCase -from options.models import Option -from rest_framework.test import APIClient - -from alert.models import AddDropPeriod -from courses.util import get_average_reviews, invalidate_current_semester_cache -from plan.models import Schedule -from tests.courses.util import create_mock_data_with_reviews - - -User = get_user_model() - -TEST_SEMESTER = "2019C" - - -def set_semester(): - post_save.disconnect( - receiver=invalidate_current_semester_cache, - sender=Option, - dispatch_uid="invalidate_current_semester_cache", - ) - Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() - AddDropPeriod(semester=TEST_SEMESTER).save() - - -class ScheduleTest(TestCase): - def setUp(self): - set_semester() - _, self.cis120, self.cis120_reviews = create_mock_data_with_reviews( - "CIS-120-001", TEST_SEMESTER, 2 - ) - self.s = Schedule( - person=User.objects.create_user( - username="jacob", email="jacob@example.com", password="top_secret" - ), - semester=TEST_SEMESTER, - name="My Test Schedule", - ) - self.s.save() - self.s.sections.set([self.cis120]) - self.client = APIClient() - self.client.login(username="jacob", password="top_secret") - - def check_serialized_section(self, serialized_section, section, reviews, consider_review_data): - self.assertEqual(section.full_code, serialized_section.get("id")) - self.assertEqual(section.status, serialized_section.get("status")) - self.assertEqual(section.activity, serialized_section.get("activity")) - self.assertEqual(section.credits, serialized_section.get("credits")) - self.assertEqual(section.semester, serialized_section.get("semester")) - - if consider_review_data: - fields = ["course_quality", "instructor_quality", "difficulty", "work_required"] - for field in fields: - expected = get_average_reviews(reviews, field) - actual = serialized_section.get(field) - self.assertAlmostEqual(expected, actual, 3) - - def test_semester_not_set(self): - Option.objects.filter(key="SEMESTER").delete() - response = self.client.get("/api/plan/schedules/") - self.assertEqual(500, response.status_code) - self.assertTrue("SEMESTER" in response.data["detail"]) - - def test_get_schedule(self): - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEqual(response.data[0]["name"], "My Test Schedule") - self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data[0]["sections"]), 1) - self.check_serialized_section( - response.data[0]["sections"][0], self.cis120, self.cis120_reviews, True - ) - - def 
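[Editor's note] For context on test_retrieve_course_clusters_dev above: unittest.mock's patch.dict temporarily overlays entries on a mapping and restores its original contents on exit, which is how the test forces the development settings module while the clusters are retrieved. A minimal standard-library illustration (a sketch, not part of the patch):

    import os
    from unittest.mock import patch

    with patch.dict(os.environ, {"DJANGO_SETTINGS_MODULE": "PennCourses.settings.development"}):
        # Inside the block the override is visible to any code reading os.environ...
        assert os.environ["DJANGO_SETTINGS_MODULE"] == "PennCourses.settings.development"
    # ...and the previous environment contents are restored when the block exits.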
test_create_schedule(self): - _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) - _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 201) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(2, len(response.data)) - self.assertEquals(sum([d["name"] == "New Test Schedule" for d in response.data]), 1) - for d in response.data: - if d["name"] == "New Test Schedule": - sched = d - break - self.assertEqual(sched["semester"], TEST_SEMESTER) - self.assertEqual(len(sched["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) - for s in sched["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - - def test_create_schedule_no_semester(self): - _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) - _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(201, response.status_code) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(2, len(response.data)) - self.assertEquals(sum([d["name"] == "New Test Schedule" for d in response.data]), 1) - for d in response.data: - if d["name"] == "New Test Schedule": - sched = d - break - self.assertEqual(sched["semester"], TEST_SEMESTER) - self.assertEqual(len(sched["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) - for s in sched["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - response = self.client.get("/api/plan/schedules/" + str(self.s.id + 1) + "/") - self.assertEqual(200, response.status_code) - self.assertEqual(response.data["name"], "New Test Schedule") - self.assertEqual(response.data["semester"], TEST_SEMESTER) - self.check_serialized_section(response.data["sections"][0], cis121, cis121_reviews, True) - self.check_serialized_section(response.data["sections"][1], cis160, cis160_reviews, True) - - def test_update_schedule_no_semester(self): - _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) - _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = 
self.client.put( - "/api/plan/schedules/" + str(self.s.id) + "/", - json.dumps( - { - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEqual(response.data[0]["name"], "New Test Schedule") - self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data[0]["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data[0]["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data[0]["sections"]])) - for s in response.data[0]["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - response = self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") - self.assertEqual(200, response.status_code) - self.assertEqual(response.data["name"], "New Test Schedule") - self.assertEqual(response.data["semester"], TEST_SEMESTER) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data["sections"]])) - for s in response.data["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - - def test_create_schedule_meetings(self): - _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) - _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "meetings": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 201) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(2, len(response.data)) - self.assertEquals(sum([d["name"] == "New Test Schedule" for d in response.data]), 1) - for d in response.data: - if d["name"] == "New Test Schedule": - sched = d - break - self.assertEqual(sched["semester"], TEST_SEMESTER) - self.assertEqual(len(sched["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) - for s in sched["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - - def test_update_schedule_specific(self): - _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) - _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", 
TEST_SEMESTER, 2) - response = self.client.put( - "/api/plan/schedules/" + str(self.s.id) + "/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEquals(sum([d["name"] == "New Test Schedule" for d in response.data]), 1) - for d in response.data: - if d["name"] == "New Test Schedule": - sched = d - break - self.assertEqual(sched["semester"], TEST_SEMESTER) - self.assertEqual(len(sched["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) - for s in sched["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - response = self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") - self.assertEqual(200, response.status_code) - self.assertEqual(response.data["name"], "New Test Schedule") - self.assertEqual(response.data["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data["sections"]])) - for s in response.data["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - - def test_update_schedule_specific_meetings(self): - _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) - _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.put( - "/api/plan/schedules/" + str(self.s.id) + "/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "meetings": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEquals(sum([d["name"] == "New Test Schedule" for d in response.data]), 1) - for d in response.data: - if d["name"] == "New Test Schedule": - sched = d - break - self.assertEqual(sched["semester"], TEST_SEMESTER) - self.assertEqual(len(sched["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) - for s in sched["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - response = 
self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") - self.assertEqual(200, response.status_code) - self.assertEqual(response.data["name"], "New Test Schedule") - self.assertEqual(response.data["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data["sections"]])) - for s in response.data["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - - def test_update_schedule_specific_same_name(self): - _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) - _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.put( - "/api/plan/schedules/" + str(self.s.id) + "/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "My Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEqual(response.data[0]["name"], "My Test Schedule") - self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data[0]["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data[0]["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data[0]["sections"]])) - for s in response.data[0]["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - response = self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") - self.assertEqual(200, response.status_code) - self.assertEqual(response.data["name"], "My Test Schedule") - self.assertEqual(response.data["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data["sections"]])) - for s in response.data["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - - def test_update_schedule_general(self): - _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) - _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "id": str(self.s.id), - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - 
self.assertEqual(response.status_code, 200) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEqual(response.data[0]["name"], "New Test Schedule") - self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data[0]["sections"]), 2) - self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data[0]["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data[0]["sections"]])) - for s in response.data[0]["sections"]: - if s["id"] == "CIS-121-001": - section_cis121 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - - def test_update_schedule_general_same_name(self): - _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "id": str(self.s.id), - "semester": TEST_SEMESTER, - "name": "My Test Schedule", - "sections": [ - {"id": "CIS-120-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEqual(response.data[0]["name"], "My Test Schedule") - self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) - self.assertEqual(2, len(response.data[0]["sections"])) - self.assertEquals(1, sum([s["id"] == "CIS-120-001" for s in response.data[0]["sections"]])) - self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data[0]["sections"]])) - for s in response.data[0]["sections"]: - if s["id"] == "CIS-120-001": - section_cis120 = s - if s["id"] == "CIS-160-001": - section_cis160 = s - self.check_serialized_section(section_cis120, self.cis120, self.cis120_reviews, True) - self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) - - def test_delete(self): - response = self.client.delete("/api/plan/schedules/" + str(self.s.id) + "/") - self.assertEqual(response.status_code, 204) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(0, len(response.data)) - - def test_semesters_not_uniform(self): - create_mock_data_with_reviews("CIS-121-001", "1739C", 2) - create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": "1739C"}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400) - self.assertEqual(response.data["detail"], "Semester uniformity invariant violated.") - - def test_semesters_not_uniform_update(self): - create_mock_data_with_reviews("CIS-121-001", "1739C", 2) - create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "id": str(self.s.id), - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": "1739C"}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - 
], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400) - self.assertEqual(response.data["detail"], "Semester uniformity invariant violated.") - - def test_schedule_dne(self): - response = self.client.get("/api/plan/schedules/1000/") - self.assertEqual(response.status_code, 404) - self.assertEqual(response.data["detail"], "Not found.") - - def test_name_already_exists(self): - create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) - create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "My Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(400, response.status_code) - - def test_section_dne_one(self): - create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400) - self.assertEqual(response.data["detail"], "One or more sections not found in database.") - - def test_section_dne_all(self): - response = self.client.post( - "/api/plan/schedules/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(response.status_code, 400) - self.assertEqual(response.data["detail"], "One or more sections not found in database.") - - def test_user_not_logged_in(self): - client2 = APIClient() - response = client2.post( - "/api/plan/schedules/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(403, response.status_code) - response = client2.get("/api/plan/schedules/") - self.assertEqual(403, response.status_code) - response = client2.put( - "/api/plan/schedules/" + str(self.s.id) + "/", - json.dumps( - { - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "meetings": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(403, response.status_code) - - def test_user_cant_access_other_users_schedule(self): - User.objects.create_user( - username="charley", email="charley@example.com", password="top_secret" - ) - client2 = APIClient() - client2.login(username="charley", password="top_secret") - response = client2.post( - "/api/plan/schedules/", - json.dumps( - { - "id": str(self.s.id), - "semester": TEST_SEMESTER, - "name": "New Test Schedule", - "sections": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(403, response.status_code) - response = client2.put( - "/api/plan/schedules/" + str(self.s.id) + "/", - json.dumps( - { - "semester": TEST_SEMESTER, - 
"name": "New Test Schedule", - "meetings": [ - {"id": "CIS-121-001", "semester": TEST_SEMESTER}, - {"id": "CIS-160-001", "semester": TEST_SEMESTER}, - ], - } - ), - content_type="application/json", - ) - self.assertEqual(403, response.status_code) - response = client2.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(0, len(response.data)) - - def test_create_schedule_no_semester_no_courses(self): - response = self.client.post( - "/api/plan/schedules/", - json.dumps({"name": "New Test Schedule", "sections": []}), - content_type="application/json", - ) - self.assertEqual(201, response.status_code) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(2, len(response.data)) - self.assertEqual(response.data[1]["name"], "New Test Schedule") - self.assertEqual(response.data[1]["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data[1]["sections"]), 0) - response = self.client.get("/api/plan/schedules/" + str(self.s.id + 1) + "/") - self.assertEqual(200, response.status_code) - self.assertEqual(response.data["name"], "New Test Schedule") - self.assertEqual(response.data["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data["sections"]), 0) - - def test_update_schedule_no_semester_no_courses(self): - response = self.client.put( - "/api/plan/schedules/" + str(self.s.id) + "/", - json.dumps({"name": "New Test Schedule", "sections": []}), - content_type="application/json", - ) - self.assertEqual(response.status_code, 200) - response = self.client.get("/api/plan/schedules/") - self.assertEqual(200, response.status_code) - self.assertEqual(1, len(response.data)) - self.assertEqual(response.data[0]["name"], "New Test Schedule") - self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data[0]["sections"]), 0) - response = self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") - self.assertEqual(200, response.status_code) - self.assertEqual(response.data["name"], "New Test Schedule") - self.assertEqual(response.data["semester"], TEST_SEMESTER) - self.assertEqual(len(response.data["sections"]), 0) +import json + +from django.contrib.auth import get_user_model +from django.db.models.signals import post_save +from django.test import TestCase +from options.models import Option +from rest_framework.test import APIClient + +from alert.models import AddDropPeriod +from courses.util import get_average_reviews, invalidate_current_semester_cache +from plan.models import Schedule +from tests.courses.util import create_mock_data_with_reviews + + +User = get_user_model() + +TEST_SEMESTER = "2019C" + + +def set_semester(): + post_save.disconnect( + receiver=invalidate_current_semester_cache, + sender=Option, + dispatch_uid="invalidate_current_semester_cache", + ) + Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() + AddDropPeriod(semester=TEST_SEMESTER).save() + + +class ScheduleTest(TestCase): + def setUp(self): + set_semester() + _, self.cis120, self.cis120_reviews = create_mock_data_with_reviews( + "CIS-120-001", TEST_SEMESTER, 2 + ) + self.s = Schedule( + person=User.objects.create_user( + username="jacob", email="jacob@example.com", password="top_secret" + ), + semester=TEST_SEMESTER, + name="My Test Schedule", + ) + self.s.save() + self.s.sections.set([self.cis120]) + self.client = APIClient() + self.client.login(username="jacob", password="top_secret") + + def check_serialized_section(self, serialized_section, section, 
reviews, consider_review_data): + self.assertEqual(section.full_code, serialized_section.get("id")) + self.assertEqual(section.status, serialized_section.get("status")) + self.assertEqual(section.activity, serialized_section.get("activity")) + self.assertEqual(section.credits, serialized_section.get("credits")) + self.assertEqual(section.semester, serialized_section.get("semester")) + + if consider_review_data: + fields = ["course_quality", "instructor_quality", "difficulty", "work_required"] + for field in fields: + expected = get_average_reviews(reviews, field) + actual = serialized_section.get(field) + self.assertAlmostEqual(expected, actual, 3) + + def test_semester_not_set(self): + Option.objects.filter(key="SEMESTER").delete() + response = self.client.get("/api/plan/schedules/") + self.assertEqual(500, response.status_code) + self.assertTrue("SEMESTER" in response.data["detail"]) + + def test_get_schedule(self): + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEqual(response.data[0]["name"], "My Test Schedule") + self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data[0]["sections"]), 1) + self.check_serialized_section( + response.data[0]["sections"][0], self.cis120, self.cis120_reviews, True + ) + + def test_create_schedule(self): + _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) + _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 201) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(2, len(response.data)) + self.assertEquals(sum([d["name"] == "New Test Schedule" for d in response.data]), 1) + for d in response.data: + if d["name"] == "New Test Schedule": + sched = d + break + self.assertEqual(sched["semester"], TEST_SEMESTER) + self.assertEqual(len(sched["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) + for s in sched["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + + def test_create_schedule_no_semester(self): + _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) + _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(201, response.status_code) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(2, len(response.data)) + self.assertEquals(sum([d["name"] == 
"New Test Schedule" for d in response.data]), 1) + for d in response.data: + if d["name"] == "New Test Schedule": + sched = d + break + self.assertEqual(sched["semester"], TEST_SEMESTER) + self.assertEqual(len(sched["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) + for s in sched["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + response = self.client.get("/api/plan/schedules/" + str(self.s.id + 1) + "/") + self.assertEqual(200, response.status_code) + self.assertEqual(response.data["name"], "New Test Schedule") + self.assertEqual(response.data["semester"], TEST_SEMESTER) + self.check_serialized_section(response.data["sections"][0], cis121, cis121_reviews, True) + self.check_serialized_section(response.data["sections"][1], cis160, cis160_reviews, True) + + def test_update_schedule_no_semester(self): + _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) + _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.put( + "/api/plan/schedules/" + str(self.s.id) + "/", + json.dumps( + { + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEqual(response.data[0]["name"], "New Test Schedule") + self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data[0]["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data[0]["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data[0]["sections"]])) + for s in response.data[0]["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + response = self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") + self.assertEqual(200, response.status_code) + self.assertEqual(response.data["name"], "New Test Schedule") + self.assertEqual(response.data["semester"], TEST_SEMESTER) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data["sections"]])) + for s in response.data["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + + def test_create_schedule_meetings(self): + _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) + _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + 
"semester": TEST_SEMESTER, + "name": "New Test Schedule", + "meetings": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 201) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(2, len(response.data)) + self.assertEquals(sum([d["name"] == "New Test Schedule" for d in response.data]), 1) + for d in response.data: + if d["name"] == "New Test Schedule": + sched = d + break + self.assertEqual(sched["semester"], TEST_SEMESTER) + self.assertEqual(len(sched["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) + for s in sched["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + + def test_update_schedule_specific(self): + _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) + _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.put( + "/api/plan/schedules/" + str(self.s.id) + "/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEquals(sum([d["name"] == "New Test Schedule" for d in response.data]), 1) + for d in response.data: + if d["name"] == "New Test Schedule": + sched = d + break + self.assertEqual(sched["semester"], TEST_SEMESTER) + self.assertEqual(len(sched["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) + for s in sched["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + response = self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") + self.assertEqual(200, response.status_code) + self.assertEqual(response.data["name"], "New Test Schedule") + self.assertEqual(response.data["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data["sections"]])) + for s in response.data["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + + def test_update_schedule_specific_meetings(self): + _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) + _, 
cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.put( + "/api/plan/schedules/" + str(self.s.id) + "/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "meetings": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEquals(sum([d["name"] == "New Test Schedule" for d in response.data]), 1) + for d in response.data: + if d["name"] == "New Test Schedule": + sched = d + break + self.assertEqual(sched["semester"], TEST_SEMESTER) + self.assertEqual(len(sched["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in sched["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in sched["sections"]])) + for s in sched["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + response = self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") + self.assertEqual(200, response.status_code) + self.assertEqual(response.data["name"], "New Test Schedule") + self.assertEqual(response.data["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data["sections"]])) + for s in response.data["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + + def test_update_schedule_specific_same_name(self): + _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) + _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.put( + "/api/plan/schedules/" + str(self.s.id) + "/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "My Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEqual(response.data[0]["name"], "My Test Schedule") + self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data[0]["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data[0]["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data[0]["sections"]])) + for s in response.data[0]["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + response 
= self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") + self.assertEqual(200, response.status_code) + self.assertEqual(response.data["name"], "My Test Schedule") + self.assertEqual(response.data["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data["sections"]])) + for s in response.data["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + + def test_update_schedule_general(self): + _, cis121, cis121_reviews = create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) + _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + "id": str(self.s.id), + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEqual(response.data[0]["name"], "New Test Schedule") + self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data[0]["sections"]), 2) + self.assertEquals(1, sum([s["id"] == "CIS-121-001" for s in response.data[0]["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data[0]["sections"]])) + for s in response.data[0]["sections"]: + if s["id"] == "CIS-121-001": + section_cis121 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis121, cis121, cis121_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + + def test_update_schedule_general_same_name(self): + _, cis160, cis160_reviews = create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + "id": str(self.s.id), + "semester": TEST_SEMESTER, + "name": "My Test Schedule", + "sections": [ + {"id": "CIS-120-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEqual(response.data[0]["name"], "My Test Schedule") + self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) + self.assertEqual(2, len(response.data[0]["sections"])) + self.assertEquals(1, sum([s["id"] == "CIS-120-001" for s in response.data[0]["sections"]])) + self.assertEquals(1, sum([s["id"] == "CIS-160-001" for s in response.data[0]["sections"]])) + for s in response.data[0]["sections"]: + if s["id"] == "CIS-120-001": + section_cis120 = s + if s["id"] == "CIS-160-001": + section_cis160 = s + self.check_serialized_section(section_cis120, self.cis120, self.cis120_reviews, True) + self.check_serialized_section(section_cis160, cis160, cis160_reviews, True) + + def 
test_delete(self): + response = self.client.delete("/api/plan/schedules/" + str(self.s.id) + "/") + self.assertEqual(response.status_code, 204) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(0, len(response.data)) + + def test_semesters_not_uniform(self): + create_mock_data_with_reviews("CIS-121-001", "1739C", 2) + create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": "1739C"}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400) + self.assertEqual(response.data["detail"], "Semester uniformity invariant violated.") + + def test_semesters_not_uniform_update(self): + create_mock_data_with_reviews("CIS-121-001", "1739C", 2) + create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + "id": str(self.s.id), + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": "1739C"}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400) + self.assertEqual(response.data["detail"], "Semester uniformity invariant violated.") + + def test_schedule_dne(self): + response = self.client.get("/api/plan/schedules/1000/") + self.assertEqual(response.status_code, 404) + self.assertEqual(response.data["detail"], "Not found.") + + def test_name_already_exists(self): + create_mock_data_with_reviews("CIS-121-001", TEST_SEMESTER, 2) + create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "My Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(400, response.status_code) + + def test_section_dne_one(self): + create_mock_data_with_reviews("CIS-160-001", TEST_SEMESTER, 2) + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400) + self.assertEqual(response.data["detail"], "One or more sections not found in database.") + + def test_section_dne_all(self): + response = self.client.post( + "/api/plan/schedules/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(response.status_code, 400) + self.assertEqual(response.data["detail"], "One or more sections not found in database.") + + def test_user_not_logged_in(self): + client2 = APIClient() + response = client2.post( + "/api/plan/schedules/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", 
"semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(403, response.status_code) + response = client2.get("/api/plan/schedules/") + self.assertEqual(403, response.status_code) + response = client2.put( + "/api/plan/schedules/" + str(self.s.id) + "/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "meetings": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(403, response.status_code) + + def test_user_cant_access_other_users_schedule(self): + User.objects.create_user( + username="charley", email="charley@example.com", password="top_secret" + ) + client2 = APIClient() + client2.login(username="charley", password="top_secret") + response = client2.post( + "/api/plan/schedules/", + json.dumps( + { + "id": str(self.s.id), + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "sections": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(403, response.status_code) + response = client2.put( + "/api/plan/schedules/" + str(self.s.id) + "/", + json.dumps( + { + "semester": TEST_SEMESTER, + "name": "New Test Schedule", + "meetings": [ + {"id": "CIS-121-001", "semester": TEST_SEMESTER}, + {"id": "CIS-160-001", "semester": TEST_SEMESTER}, + ], + } + ), + content_type="application/json", + ) + self.assertEqual(403, response.status_code) + response = client2.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(0, len(response.data)) + + def test_create_schedule_no_semester_no_courses(self): + response = self.client.post( + "/api/plan/schedules/", + json.dumps({"name": "New Test Schedule", "sections": []}), + content_type="application/json", + ) + self.assertEqual(201, response.status_code) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(2, len(response.data)) + self.assertEqual(response.data[1]["name"], "New Test Schedule") + self.assertEqual(response.data[1]["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data[1]["sections"]), 0) + response = self.client.get("/api/plan/schedules/" + str(self.s.id + 1) + "/") + self.assertEqual(200, response.status_code) + self.assertEqual(response.data["name"], "New Test Schedule") + self.assertEqual(response.data["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data["sections"]), 0) + + def test_update_schedule_no_semester_no_courses(self): + response = self.client.put( + "/api/plan/schedules/" + str(self.s.id) + "/", + json.dumps({"name": "New Test Schedule", "sections": []}), + content_type="application/json", + ) + self.assertEqual(response.status_code, 200) + response = self.client.get("/api/plan/schedules/") + self.assertEqual(200, response.status_code) + self.assertEqual(1, len(response.data)) + self.assertEqual(response.data[0]["name"], "New Test Schedule") + self.assertEqual(response.data[0]["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data[0]["sections"]), 0) + response = self.client.get("/api/plan/schedules/" + str(self.s.id) + "/") + self.assertEqual(200, response.status_code) + self.assertEqual(response.data["name"], "New Test Schedule") + self.assertEqual(response.data["semester"], TEST_SEMESTER) + self.assertEqual(len(response.data["sections"]), 0) diff --git 
a/backend/tests/review/test_api.py b/backend/tests/review/test_api.py index aaa211b9b..2ad5ebcd7 100644 --- a/backend/tests/review/test_api.py +++ b/backend/tests/review/test_api.py @@ -1,958 +1,958 @@ -from django.contrib.auth.models import User -from django.db.models.signals import post_save -from django.test import TestCase -from django.urls import reverse -from django.utils.http import urlencode -from options.models import Option -from rest_framework.test import APIClient - -from alert.management.commands.recomputestats import recompute_precomputed_fields -from alert.models import AddDropPeriod -from courses.models import Instructor, PreNGSSRestriction, Section, StatusUpdate -from courses.util import get_or_create_course_and_section, invalidate_current_semester_cache -from review.import_utils.import_to_db import import_review -from review.models import Review -from tests.courses.util import create_mock_data - - -TEST_SEMESTER = "2022C" -assert TEST_SEMESTER > "2012A" - - -def set_semester(): - post_save.disconnect( - receiver=invalidate_current_semester_cache, - sender=Option, - dispatch_uid="invalidate_current_semester_cache", - ) - Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() - AddDropPeriod(semester=TEST_SEMESTER).save() - - -def create_review(section_code, semester, instructor_name, bits, responses=100): - _, section, _, _ = get_or_create_course_and_section(section_code, semester) - instructor, _ = Instructor.objects.get_or_create(name=instructor_name) - section.instructors.add(instructor) - import_review(section, instructor, None, responses, None, bits, lambda x, y=None: None) - recompute_precomputed_fields() - - -class PCRTestMixin(object): - """ - This mixin class contains a utility function for quickly writing new tests. - """ - - def assertDictContains(self, entire, subdict, path=list()): - """ - Assert that one dictionary is the subset of another. - """ - if isinstance(entire, dict) and isinstance(subdict, dict): - sublist = subdict.items() - elif isinstance(entire, list) and isinstance(subdict, list): - sublist = enumerate(subdict) - else: - return self.assertEqual(entire, subdict, "/".join(path)) - - for k, v in sublist: - self.assertDictContains(entire[k], subdict[k], path + [str(k)]) - - def assertRequestContains(self, url, args, expected): - """ - Do the equivalent of a "subset" check on the response from an API endpoint. - :param url: `name` of django view - :param args: single or multiple arguments for view. - :param expected: expected values from view. - """ - if not isinstance(args, list): - args = [args] - res = self.client.get(reverse(url, args=args)) - self.assertEqual(200, res.status_code) - self.assertDictContains(res.data, expected) - return res.data - - def assertRequestContainsAppx(self, url, args, expected, query_params={}): - """ - Do the equivalent of a "subset" check on the response from an API endpoint. - :param url: `name` of django view - :param args: single or multiple arguments for view. - :param expected: expected values from view. - :param query_params: query parameters to be included in request, defaults to empty dict. 
- """ - if not isinstance(args, list): - args = [args] - res = self.client.get(f"{reverse(url, args=args)}?{urlencode(query_params)}") - self.assertEqual(200, res.status_code) - self.assertDictContainsAppx( - res.data, - expected, - extra_error_str="\nresponse:" + str(res.json()) + "\n\nexpected:" + str(expected), - ) - return res.data - - def assertDictAlmostEquals(self, actual, expected, path=None, extra_error_str=""): - """ - Assert that one dictionary almost equals another (allowing small deviations for floats) - """ - path = path if path is not None else [] - if isinstance(actual, dict) and isinstance(expected, dict): - self.assertEquals( - actual.keys(), - expected.keys(), - "Dict path" + "/".join(path) + "\n" + extra_error_str, - ) - for key in actual: - self.assertDictAlmostEquals(actual[key], expected[key], path + [str(key)]) - try: - actual_float = float(actual) - expected_float = float(expected) - self.assertAlmostEquals( - actual_float, - expected_float, - msg="Dict path: " + "/".join(path) + "\n" + extra_error_str, - ) - except (TypeError, ValueError): - self.assertEquals( - actual, expected, "Dict path: " + "/".join(path) + "\n" + extra_error_str - ) - - def assertDictContainsAppx(self, entire, subdict, path=None, extra_error_str=""): - """ - Assert that one dictionary is the subset of another. - """ - path = path if path is not None else [] - if (isinstance(entire, list) and isinstance(subdict, list)) or ( - isinstance(entire, tuple) and isinstance(subdict, tuple) - ): - entire = dict(enumerate(entire)) - subdict = dict(enumerate(subdict)) - elif not (isinstance(entire, dict) and isinstance(subdict, dict)): - return self.assertDictAlmostEquals( - entire, subdict, path, extra_error_str=extra_error_str - ) - for k, v in subdict.items(): - self.assertTrue( - k in entire.keys(), - f"{k} not in keys of {entire}, but should be. " - + "\nDict path: " - + "/".join(path) - + "\n" - + extra_error_str, - ) - self.assertDictContainsAppx(entire[k], subdict[k], path + [str(k)], extra_error_str) - - -""" -Below are some utility functions that make writing out the response.data dictionaries -a bit easier to do. All of the tests use instructor_quality as the reviewbit to test. -these helper functions cut down on a lot of the repeated characters in the responses. 
-""" - - -def ratings_dict(label, n): - return {label: {"rInstructorQuality": n}} - - -def average(n): - return ratings_dict("average_reviews", n) - - -def recent(n): - return ratings_dict("recent_reviews", n) - - -def rating(n): - return ratings_dict("ratings", n) - - -def average_and_recent(a, r): - return {**average(a), **recent(r)} - - -def no_reviews_avg_recent(num_semesters, recent_semester): - return { - "average_reviews": {"rSemesterCount": num_semesters, "rSemesterCalc": recent_semester}, - "recent_reviews": {"rSemesterCount": 0}, - } - - -class TestHasReview(TestCase): - def test_has_none(self): - _, section, _, _ = get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) - instructor, _ = Instructor.objects.get_or_create(name="Rajiv Gandhi") - section.instructors.add(instructor) - recompute_precomputed_fields() - self.assertFalse(Section.objects.get(id=section.id).has_reviews) - - def test_has_no_responses(self): - _, section, _, _ = get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) - instructor, _ = Instructor.objects.get_or_create(name="Rajiv Gandhi") - section.instructors.add(instructor) - import_review( - section, instructor, None, 0, None, {"instructor_quality": 4}, lambda x, y=None: None - ) - recompute_precomputed_fields() - self.assertTrue(Section.objects.get(id=section.id).has_reviews) - - def test_has_one(self): - _, section, _, _ = get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) - instructor, _ = Instructor.objects.get_or_create(name="Rajiv Gandhi") - section.instructors.add(instructor) - import_review( - section, instructor, None, 10, None, {"instructor_quality": 4}, lambda x, y=None: None - ) - recompute_precomputed_fields() - self.assertTrue(Section.objects.get(id=section.id).has_reviews) - - def test_has_multiple(self): - _, section, _, _ = get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) - instructor, _ = Instructor.objects.get_or_create(name="Rajiv Gandhi") - section.instructors.add(instructor) - import_review( - section, instructor, None, 10, None, {"instructor_quality": 4}, lambda x, y: None - ) - import_review( - section, instructor, None, 10, None, {"course_quality": 4}, lambda x, y=None: None - ) - recompute_precomputed_fields() - self.assertTrue(Section.objects.get(id=section.id).has_reviews) - - -class OneReviewTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - self.instructor_pk = Instructor.objects.get(name=self.instructor_name).pk - create_review( - "CIS-120-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-120-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - Review.objects.all().update(enrollment=100) - self.instructor_nores_pk = Instructor.objects.get(name="No Responses Instructor").pk - - def test_course(self): - res = self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - **average_and_recent(4, 4), - "instructors": { - self.instructor_pk: {**average_and_recent(4, 4)}, - self.instructor_nores_pk: {}, - }, - }, - ) - self.assertEqual(len(res["instructors"]), 2) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - self.instructor_pk, - {**average_and_recent(4, 4), "courses": {"CIS-120": 
{**average_and_recent(4, 4)}}}, - ) - self.assertRequestContainsAppx( - "instructor-reviews", - self.instructor_nores_pk, - { - "courses": {"CIS-120": {**no_reviews_avg_recent(1, "2007C")}}, - }, - ) - - def test_department(self): - self.assertRequestContainsAppx( - "department-reviews", "CIS", {"courses": {"CIS-120": average_and_recent(4, 4)}} - ) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["CIS-120", self.instructor_pk], - {"sections": [rating(4)]}, - ) - self.assertRequestContainsAppx( - "course-history", - ["CIS-120", self.instructor_nores_pk], - { - "sections": [ - { - "course_code": "CIS-120", - "semester": "2007C", - "forms_returned": 0, - "forms_produced": 100, - }, - ] - }, - ) - - def test_autocomplete(self): - self.assertRequestContainsAppx( - "review-autocomplete", - [], - { - "instructors": [ - { - "title": self.instructor_name, - "desc": "CIS", - "url": f"/instructor/{self.instructor_pk}", - }, - { - "title": "No Responses Instructor", - "desc": "CIS", - "url": f"/instructor/{self.instructor_nores_pk}", - }, - ], - "courses": [ - { - "title": "CIS-120", - "desc": [""], - "url": "/course/CIS-120", - } - ], - "departments": [{"title": "CIS", "desc": "", "url": "/department/CIS"}], - }, - ) - - -class TwoSemestersOneInstructorTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - AddDropPeriod(semester="2012A").save() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - create_review("CIS-120-001", "2012A", self.instructor_name, {"instructor_quality": 2}) - create_review( - "CIS-120-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-120-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - "num_semesters": 3, - **average_and_recent(3, 4), - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(3, 4), - "latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - Instructor.objects.get(name=self.instructor_name).pk, - {**average_and_recent(3, 4), "courses": {"CIS-120": average_and_recent(3, 4)}}, - ) - - def test_department(self): - self.assertRequestContainsAppx( - "department-reviews", "CIS", {"courses": {"CIS-120": average_and_recent(3, 4)}} - ) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["CIS-120", Instructor.objects.get(name=self.instructor_name).pk], - {"sections": [rating(4), rating(2)]}, - ) - - -class TwoSectionsOneSemesterTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - create_review("CIS-120-002", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 2}) - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - "num_semesters": 1, - **average_and_recent(3, 3), - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(3, 3), - 
"latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - Instructor.objects.get(name=self.instructor_name).pk, - { - **average_and_recent(3, 3), - "num_semesters": 1, - "courses": {"CIS-120": average_and_recent(3, 3)}, - }, - ) - - def test_department(self): - self.assertRequestContainsAppx( - "department-reviews", "CIS", {"courses": {"CIS-120": average_and_recent(3, 3)}} - ) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["CIS-120", Instructor.objects.get(name=self.instructor_name).pk], - {"sections": [rating(4), rating(2)]}, - ) - - -class SemesterWithFutureCourseTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - AddDropPeriod(semester="2012A").save() - AddDropPeriod(semester="3008C").save() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - create_review("CIS-120-001", "2012A", self.instructor_name, {"instructor_quality": 2}) - create_review( - "CIS-120-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-120-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - create_review("CIS-160-001", "3008C", self.instructor_name, {"instructor_quality": 2}) - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - "num_semesters": 3, - **average_and_recent(3, 4), - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(3, 4), - "latest_semester": TEST_SEMESTER, - } - }, - }, - ) - - def test_department(self): - self.assertRequestContainsAppx( - "department-reviews", - "CIS", - { - "courses": { - "CIS-120": average_and_recent(3, 4), - "CIS-160": average_and_recent(2, 2), - } - }, - ) - - -class TwoInstructorsOneSectionTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - create_review("CIS-120-001", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 2}) - create_review( - "CIS-120-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-120-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - self.instructor1 = Instructor.objects.get(name=self.instructor_name) - self.instructor2 = Instructor.objects.get(name="Instructor Two") - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - **average_and_recent(3, 3), - "instructors": { - self.instructor1.pk: average_and_recent(4, 4), - self.instructor2.pk: average_and_recent(2, 2), - }, - "num_sections": 3, - "num_sections_recent": 1, - }, - ) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - self.instructor1.pk, - {**average_and_recent(4, 4), "courses": {"CIS-120": average_and_recent(4, 4)}}, - ) - - self.assertRequestContainsAppx( - "instructor-reviews", - self.instructor2.pk, - {**average_and_recent(2, 2), "courses": {"CIS-120": average_and_recent(2, 2)}}, - ) - - -class TwoSectionTestCase(TestCase, PCRTestMixin): 
- def setUp(self): - set_semester() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - create_review("CIS-120-002", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 2}) - create_review( - "CIS-120-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-120-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - self.instructor1 = Instructor.objects.get(name=self.instructor_name) - self.instructor2 = Instructor.objects.get(name="Instructor Two") - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - **average_and_recent(3, 3), - "instructors": { - self.instructor1.pk: average_and_recent(4, 4), - self.instructor2.pk: average_and_recent(2, 2), - }, - }, - ) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - self.instructor1.pk, - {**average_and_recent(4, 4), "courses": {"CIS-120": average_and_recent(4, 4)}}, - ) - - self.assertRequestContainsAppx( - "instructor-reviews", - self.instructor2.pk, - {**average_and_recent(2, 2), "courses": {"CIS-120": average_and_recent(2, 2)}}, - ) - - -class TwoInstructorsMultipleSemestersTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - AddDropPeriod(semester="2017A").save() - AddDropPeriod(semester="2012A").save() - AddDropPeriod(semester="2012C").save() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - create_review("CIS-120-001", "2017A", "Instructor Two", {"instructor_quality": 2}) - create_review( - "CIS-120-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-120-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - - create_review("CIS-120-900", "2012A", self.instructor_name, {"instructor_quality": 2}) - create_review("CIS-120-003", "2012C", "Instructor Two", {"instructor_quality": 1}) - self.instructor1 = Instructor.objects.get(name=self.instructor_name) - self.instructor2 = Instructor.objects.get(name="Instructor Two") - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - **average_and_recent(2.25, 4), - "instructors": { - self.instructor1.pk: { - **average_and_recent(3, 4), - "latest_semester": TEST_SEMESTER, - }, - self.instructor2.pk: { - **average_and_recent(1.5, 2), - "latest_semester": "2017A", - }, - }, - }, - ) - - def test_course_with_cotaught_section(self): - create_review("CIS-120-001", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 1}) - self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - **average_and_recent(2, 2.5), - "instructors": { - self.instructor1.pk: { - **average_and_recent(3, 4), - "latest_semester": TEST_SEMESTER, - }, - self.instructor2.pk: { - **average_and_recent(4 / 3, 1), - "latest_semester": TEST_SEMESTER, - }, - }, - "num_sections": 6, - "num_sections_recent": 1, - }, - ) - - -class TwoDepartmentTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - create_review("CIS-120-001", TEST_SEMESTER, "Instructor One", {"instructor_quality": 4}) - create_review( - 
"CIS-120-002", - "2007C", - "Instructor One", - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-120-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - create_review("MATH-114-002", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 2}) - create_review("ENM-211-003", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 3}) - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - self.instructor1 = Instructor.objects.get(name="Instructor One") - self.instructor2 = Instructor.objects.get(name="Instructor Two") - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "MATH-114", - { - **average_and_recent(2, 2), - "instructors": {self.instructor2.pk: average_and_recent(2, 2)}, - }, - ) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - self.instructor2.pk, - { - **average_and_recent(2.5, 2.5), - "courses": { - "MATH-114": average_and_recent(2, 2), - "ENM-211": average_and_recent(3, 3), - }, - }, - ) - - def test_autocomplete(self): - no_responses_instructor = Instructor.objects.get(name="No Responses Instructor") - self.assertRequestContainsAppx( - "review-autocomplete", - [], - { - "instructors": [ - { - "title": "Instructor One", - "desc": "CIS", - "url": f"/instructor/{self.instructor1.pk}", - }, - { - "title": "Instructor Two", - "desc": "ENM,MATH", - "url": f"/instructor/{self.instructor2.pk}", - }, - { - "title": "No Responses Instructor", - "desc": "CIS", - "url": f"/instructor/{no_responses_instructor.pk}", - }, - ], - }, - ) - - -class NoReviewForSectionTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - create_review("CIS-120-001", TEST_SEMESTER, "Instructor One", {"instructor_quality": 4}) - create_review( - "CIS-120-002", - "2007C", - "Instructor One", - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-120-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - _, recitation, _, _ = get_or_create_course_and_section("CIS-120-201", TEST_SEMESTER) - recitation.activity = "REC" - recitation.instructors.add(Instructor.objects.create(name="Instructor Two")) - recitation.save() - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - self.instructor1 = Instructor.objects.get(name="Instructor One") - self.instructor2 = Instructor.objects.get(name="Instructor Two") - self.instructor_nores = Instructor.objects.get(name="No Responses Instructor") - - def test_course(self): - res = self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - **average_and_recent(4, 4), - "instructors": { - self.instructor1.pk: average_and_recent(4, 4), - self.instructor_nores.pk: {}, - }, - }, - ) - self.assertEqual(2, len(res["instructors"])) - - -class RegistrationMetricsFlagTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - create_review("CIS-120-001", "2020A", "Instructor One", {"instructor_quality": 4}) - pdp_restriction = PreNGSSRestriction( - code="PDP", description="Permission required from dept." 
- ) - pdp_restriction.save() - cis_120_001 = Section.objects.get(full_code="CIS-120-001") - cis_120_001.pre_ngss_restrictions.add(pdp_restriction) - cis_120_001.capacity = 100 - cis_120_001.save() - StatusUpdate( - section=Section.objects.get(full_code="CIS-120-001"), - old_status="", - new_status="O", - alert_sent=False, - request_body="", - ).save() - - create_review("CIS-105-001", "2020A", "Instructor One", {"instructor_quality": 4}) - cis_105_001 = Section.objects.get(full_code="CIS-105-001") - cis_105_001.capacity = 20 - cis_105_001.save() - StatusUpdate( - section=Section.objects.get(full_code="CIS-105-001"), - old_status="", - new_status="O", - alert_sent=False, - request_body="", - ).save() - - create_review("OIDD-101-001", "2020A", "Instructor One", {"instructor_quality": 4}) - - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - - def test_registration_metrics_pdp(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - {"registration_metrics": False}, - ) - - def test_registration_metrics_true(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-105", - {"registration_metrics": True}, - ) - - def test_registration_metrics_no_status_updates(self): - self.assertRequestContainsAppx( - "course-reviews", - "OIDD-101", - {"registration_metrics": False}, - ) - - -class NotFoundTestCase(TestCase): - def setUp(self): - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - - def test_course(self): - self.assertEqual(404, self.client.get(reverse("course-reviews", args=["BLAH"])).status_code) - - def test_instructor(self): - self.assertEqual(404, self.client.get(reverse("instructor-reviews", args=[0])).status_code) - - def test_department(self): - self.assertEqual( - 404, self.client.get(reverse("department-reviews", args=["BLAH"])).status_code - ) - - def test_history(self): - self.assertEqual( - 404, self.client.get(reverse("course-history", args=["BLAH", 123])).status_code - ) - - def test_no_reviews(self): - get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) - self.assertEqual( - 404, self.client.get(reverse("course-reviews", args=["CIS-120"])).status_code - ) - - -class NoAuthTestCase(TestCase): - def setUp(self): - self.client = APIClient() - - def test_course(self): - self.assertEqual(403, self.client.get(reverse("course-reviews", args=["BLAH"])).status_code) - - def test_instructor(self): - self.assertEqual(403, self.client.get(reverse("instructor-reviews", args=[0])).status_code) - - def test_department(self): - self.assertEqual( - 403, self.client.get(reverse("department-reviews", args=["BLAH"])).status_code - ) - - def test_history(self): - self.assertEqual( - 403, self.client.get(reverse("course-history", args=["BLAH", 0])).status_code - ) - - -class RecitationInstructorTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - self.instructor_pk = Instructor.objects.get(name=self.instructor_name).pk - - rec_instructor = Instructor(name="Recitation Instructor") - rec_instructor.save() - self.rec_instructor_pk = rec_instructor.pk - _, rec_section = create_mock_data("CIS-120-201", TEST_SEMESTER) - rec_section.activity = "REC" - rec_section.save() - rec_section.instructors.add(rec_instructor) - - 
create_review( - "CIS-120-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - Review.objects.all().update(enrollment=100) - - def test_course(self): - res = self.assertRequestContainsAppx( - "course-reviews", - "CIS-120", - { - **average_and_recent(4, 4), - "instructors": { - self.instructor_pk: {**average_and_recent(4, 4)}, - }, - }, - ) - self.assertEqual(len(res["instructors"]), 1) - - def test_autocomplete(self): - res = self.assertRequestContainsAppx( - "review-autocomplete", - [], - { - "instructors": [ - { - "title": self.instructor_name, - "desc": "CIS", - "url": f"/instructor/{self.instructor_pk}", - }, - ], - "courses": [ - { - "title": "CIS-120", - "desc": [""], - "url": "/course/CIS-120", - } - ], - "departments": [{"title": "CIS", "desc": "", "url": "/department/CIS"}], - }, - ) - self.assertEqual(len(res["instructors"]), 1) +from django.contrib.auth.models import User +from django.db.models.signals import post_save +from django.test import TestCase +from django.urls import reverse +from django.utils.http import urlencode +from options.models import Option +from rest_framework.test import APIClient + +from alert.management.commands.recomputestats import recompute_precomputed_fields +from alert.models import AddDropPeriod +from courses.models import Instructor, PreNGSSRestriction, Section, StatusUpdate +from courses.util import get_or_create_course_and_section, invalidate_current_semester_cache +from review.import_utils.import_to_db import import_review +from review.models import Review +from tests.courses.util import create_mock_data + + +TEST_SEMESTER = "2022C" +assert TEST_SEMESTER > "2012A" + + +def set_semester(): + post_save.disconnect( + receiver=invalidate_current_semester_cache, + sender=Option, + dispatch_uid="invalidate_current_semester_cache", + ) + Option(key="SEMESTER", value=TEST_SEMESTER, value_type="TXT").save() + AddDropPeriod(semester=TEST_SEMESTER).save() + + +def create_review(section_code, semester, instructor_name, bits, responses=100): + _, section, _, _ = get_or_create_course_and_section(section_code, semester) + instructor, _ = Instructor.objects.get_or_create(name=instructor_name) + section.instructors.add(instructor) + import_review(section, instructor, None, responses, None, bits, lambda x, y=None: None) + recompute_precomputed_fields() + + +class PCRTestMixin(object): + """ + This mixin class contains a utility function for quickly writing new tests. + """ + + def assertDictContains(self, entire, subdict, path=list()): + """ + Assert that one dictionary is the subset of another. + """ + if isinstance(entire, dict) and isinstance(subdict, dict): + sublist = subdict.items() + elif isinstance(entire, list) and isinstance(subdict, list): + sublist = enumerate(subdict) + else: + return self.assertEqual(entire, subdict, "/".join(path)) + + for k, v in sublist: + self.assertDictContains(entire[k], subdict[k], path + [str(k)]) + + def assertRequestContains(self, url, args, expected): + """ + Do the equivalent of a "subset" check on the response from an API endpoint. + :param url: `name` of django view + :param args: single or multiple arguments for view. + :param expected: expected values from view. 
+ """ + if not isinstance(args, list): + args = [args] + res = self.client.get(reverse(url, args=args)) + self.assertEqual(200, res.status_code) + self.assertDictContains(res.data, expected) + return res.data + + def assertRequestContainsAppx(self, url, args, expected, query_params={}): + """ + Do the equivalent of a "subset" check on the response from an API endpoint. + :param url: `name` of django view + :param args: single or multiple arguments for view. + :param expected: expected values from view. + :param query_params: query parameters to be included in request, defaults to empty dict. + """ + if not isinstance(args, list): + args = [args] + res = self.client.get(f"{reverse(url, args=args)}?{urlencode(query_params)}") + self.assertEqual(200, res.status_code) + self.assertDictContainsAppx( + res.data, + expected, + extra_error_str="\nresponse:" + str(res.json()) + "\n\nexpected:" + str(expected), + ) + return res.data + + def assertDictAlmostEquals(self, actual, expected, path=None, extra_error_str=""): + """ + Assert that one dictionary almost equals another (allowing small deviations for floats) + """ + path = path if path is not None else [] + if isinstance(actual, dict) and isinstance(expected, dict): + self.assertEquals( + actual.keys(), + expected.keys(), + "Dict path" + "/".join(path) + "\n" + extra_error_str, + ) + for key in actual: + self.assertDictAlmostEquals(actual[key], expected[key], path + [str(key)]) + try: + actual_float = float(actual) + expected_float = float(expected) + self.assertAlmostEquals( + actual_float, + expected_float, + msg="Dict path: " + "/".join(path) + "\n" + extra_error_str, + ) + except (TypeError, ValueError): + self.assertEquals( + actual, expected, "Dict path: " + "/".join(path) + "\n" + extra_error_str + ) + + def assertDictContainsAppx(self, entire, subdict, path=None, extra_error_str=""): + """ + Assert that one dictionary is the subset of another. + """ + path = path if path is not None else [] + if (isinstance(entire, list) and isinstance(subdict, list)) or ( + isinstance(entire, tuple) and isinstance(subdict, tuple) + ): + entire = dict(enumerate(entire)) + subdict = dict(enumerate(subdict)) + elif not (isinstance(entire, dict) and isinstance(subdict, dict)): + return self.assertDictAlmostEquals( + entire, subdict, path, extra_error_str=extra_error_str + ) + for k, v in subdict.items(): + self.assertTrue( + k in entire.keys(), + f"{k} not in keys of {entire}, but should be. " + + "\nDict path: " + + "/".join(path) + + "\n" + + extra_error_str, + ) + self.assertDictContainsAppx(entire[k], subdict[k], path + [str(k)], extra_error_str) + + +""" +Below are some utility functions that make writing out the response.data dictionaries +a bit easier to do. All of the tests use instructor_quality as the reviewbit to test. +these helper functions cut down on a lot of the repeated characters in the responses. 
+""" + + +def ratings_dict(label, n): + return {label: {"rInstructorQuality": n}} + + +def average(n): + return ratings_dict("average_reviews", n) + + +def recent(n): + return ratings_dict("recent_reviews", n) + + +def rating(n): + return ratings_dict("ratings", n) + + +def average_and_recent(a, r): + return {**average(a), **recent(r)} + + +def no_reviews_avg_recent(num_semesters, recent_semester): + return { + "average_reviews": {"rSemesterCount": num_semesters, "rSemesterCalc": recent_semester}, + "recent_reviews": {"rSemesterCount": 0}, + } + + +class TestHasReview(TestCase): + def test_has_none(self): + _, section, _, _ = get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) + instructor, _ = Instructor.objects.get_or_create(name="Rajiv Gandhi") + section.instructors.add(instructor) + recompute_precomputed_fields() + self.assertFalse(Section.objects.get(id=section.id).has_reviews) + + def test_has_no_responses(self): + _, section, _, _ = get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) + instructor, _ = Instructor.objects.get_or_create(name="Rajiv Gandhi") + section.instructors.add(instructor) + import_review( + section, instructor, None, 0, None, {"instructor_quality": 4}, lambda x, y=None: None + ) + recompute_precomputed_fields() + self.assertTrue(Section.objects.get(id=section.id).has_reviews) + + def test_has_one(self): + _, section, _, _ = get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) + instructor, _ = Instructor.objects.get_or_create(name="Rajiv Gandhi") + section.instructors.add(instructor) + import_review( + section, instructor, None, 10, None, {"instructor_quality": 4}, lambda x, y=None: None + ) + recompute_precomputed_fields() + self.assertTrue(Section.objects.get(id=section.id).has_reviews) + + def test_has_multiple(self): + _, section, _, _ = get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) + instructor, _ = Instructor.objects.get_or_create(name="Rajiv Gandhi") + section.instructors.add(instructor) + import_review( + section, instructor, None, 10, None, {"instructor_quality": 4}, lambda x, y: None + ) + import_review( + section, instructor, None, 10, None, {"course_quality": 4}, lambda x, y=None: None + ) + recompute_precomputed_fields() + self.assertTrue(Section.objects.get(id=section.id).has_reviews) + + +class OneReviewTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + self.instructor_pk = Instructor.objects.get(name=self.instructor_name).pk + create_review( + "CIS-120-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-120-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + Review.objects.all().update(enrollment=100) + self.instructor_nores_pk = Instructor.objects.get(name="No Responses Instructor").pk + + def test_course(self): + res = self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + **average_and_recent(4, 4), + "instructors": { + self.instructor_pk: {**average_and_recent(4, 4)}, + self.instructor_nores_pk: {}, + }, + }, + ) + self.assertEqual(len(res["instructors"]), 2) + + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + self.instructor_pk, + {**average_and_recent(4, 4), "courses": {"CIS-120": 
{**average_and_recent(4, 4)}}}, + ) + self.assertRequestContainsAppx( + "instructor-reviews", + self.instructor_nores_pk, + { + "courses": {"CIS-120": {**no_reviews_avg_recent(1, "2007C")}}, + }, + ) + + def test_department(self): + self.assertRequestContainsAppx( + "department-reviews", "CIS", {"courses": {"CIS-120": average_and_recent(4, 4)}} + ) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["CIS-120", self.instructor_pk], + {"sections": [rating(4)]}, + ) + self.assertRequestContainsAppx( + "course-history", + ["CIS-120", self.instructor_nores_pk], + { + "sections": [ + { + "course_code": "CIS-120", + "semester": "2007C", + "forms_returned": 0, + "forms_produced": 100, + }, + ] + }, + ) + + def test_autocomplete(self): + self.assertRequestContainsAppx( + "review-autocomplete", + [], + { + "instructors": [ + { + "title": self.instructor_name, + "desc": "CIS", + "url": f"/instructor/{self.instructor_pk}", + }, + { + "title": "No Responses Instructor", + "desc": "CIS", + "url": f"/instructor/{self.instructor_nores_pk}", + }, + ], + "courses": [ + { + "title": "CIS-120", + "desc": [""], + "url": "/course/CIS-120", + } + ], + "departments": [{"title": "CIS", "desc": "", "url": "/department/CIS"}], + }, + ) + + +class TwoSemestersOneInstructorTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + AddDropPeriod(semester="2012A").save() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + create_review("CIS-120-001", "2012A", self.instructor_name, {"instructor_quality": 2}) + create_review( + "CIS-120-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-120-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + "num_semesters": 3, + **average_and_recent(3, 4), + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(3, 4), + "latest_semester": TEST_SEMESTER, + }, + }, + }, + ) + + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + Instructor.objects.get(name=self.instructor_name).pk, + {**average_and_recent(3, 4), "courses": {"CIS-120": average_and_recent(3, 4)}}, + ) + + def test_department(self): + self.assertRequestContainsAppx( + "department-reviews", "CIS", {"courses": {"CIS-120": average_and_recent(3, 4)}} + ) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["CIS-120", Instructor.objects.get(name=self.instructor_name).pk], + {"sections": [rating(4), rating(2)]}, + ) + + +class TwoSectionsOneSemesterTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + create_review("CIS-120-002", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 2}) + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + "num_semesters": 1, + **average_and_recent(3, 3), + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(3, 3), + 
"latest_semester": TEST_SEMESTER, + }, + }, + }, + ) + + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + Instructor.objects.get(name=self.instructor_name).pk, + { + **average_and_recent(3, 3), + "num_semesters": 1, + "courses": {"CIS-120": average_and_recent(3, 3)}, + }, + ) + + def test_department(self): + self.assertRequestContainsAppx( + "department-reviews", "CIS", {"courses": {"CIS-120": average_and_recent(3, 3)}} + ) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["CIS-120", Instructor.objects.get(name=self.instructor_name).pk], + {"sections": [rating(4), rating(2)]}, + ) + + +class SemesterWithFutureCourseTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + AddDropPeriod(semester="2012A").save() + AddDropPeriod(semester="3008C").save() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + create_review("CIS-120-001", "2012A", self.instructor_name, {"instructor_quality": 2}) + create_review( + "CIS-120-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-120-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + create_review("CIS-160-001", "3008C", self.instructor_name, {"instructor_quality": 2}) + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + "num_semesters": 3, + **average_and_recent(3, 4), + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(3, 4), + "latest_semester": TEST_SEMESTER, + } + }, + }, + ) + + def test_department(self): + self.assertRequestContainsAppx( + "department-reviews", + "CIS", + { + "courses": { + "CIS-120": average_and_recent(3, 4), + "CIS-160": average_and_recent(2, 2), + } + }, + ) + + +class TwoInstructorsOneSectionTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + create_review("CIS-120-001", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 2}) + create_review( + "CIS-120-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-120-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + self.instructor1 = Instructor.objects.get(name=self.instructor_name) + self.instructor2 = Instructor.objects.get(name="Instructor Two") + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + **average_and_recent(3, 3), + "instructors": { + self.instructor1.pk: average_and_recent(4, 4), + self.instructor2.pk: average_and_recent(2, 2), + }, + "num_sections": 3, + "num_sections_recent": 1, + }, + ) + + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + self.instructor1.pk, + {**average_and_recent(4, 4), "courses": {"CIS-120": average_and_recent(4, 4)}}, + ) + + self.assertRequestContainsAppx( + "instructor-reviews", + self.instructor2.pk, + {**average_and_recent(2, 2), "courses": {"CIS-120": average_and_recent(2, 2)}}, + ) + + +class TwoSectionTestCase(TestCase, PCRTestMixin): 
+ def setUp(self): + set_semester() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + create_review("CIS-120-002", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 2}) + create_review( + "CIS-120-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-120-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + self.instructor1 = Instructor.objects.get(name=self.instructor_name) + self.instructor2 = Instructor.objects.get(name="Instructor Two") + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + **average_and_recent(3, 3), + "instructors": { + self.instructor1.pk: average_and_recent(4, 4), + self.instructor2.pk: average_and_recent(2, 2), + }, + }, + ) + + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + self.instructor1.pk, + {**average_and_recent(4, 4), "courses": {"CIS-120": average_and_recent(4, 4)}}, + ) + + self.assertRequestContainsAppx( + "instructor-reviews", + self.instructor2.pk, + {**average_and_recent(2, 2), "courses": {"CIS-120": average_and_recent(2, 2)}}, + ) + + +class TwoInstructorsMultipleSemestersTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + AddDropPeriod(semester="2017A").save() + AddDropPeriod(semester="2012A").save() + AddDropPeriod(semester="2012C").save() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + create_review("CIS-120-001", "2017A", "Instructor Two", {"instructor_quality": 2}) + create_review( + "CIS-120-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-120-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + + create_review("CIS-120-900", "2012A", self.instructor_name, {"instructor_quality": 2}) + create_review("CIS-120-003", "2012C", "Instructor Two", {"instructor_quality": 1}) + self.instructor1 = Instructor.objects.get(name=self.instructor_name) + self.instructor2 = Instructor.objects.get(name="Instructor Two") + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + **average_and_recent(2.25, 4), + "instructors": { + self.instructor1.pk: { + **average_and_recent(3, 4), + "latest_semester": TEST_SEMESTER, + }, + self.instructor2.pk: { + **average_and_recent(1.5, 2), + "latest_semester": "2017A", + }, + }, + }, + ) + + def test_course_with_cotaught_section(self): + create_review("CIS-120-001", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 1}) + self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + **average_and_recent(2, 2.5), + "instructors": { + self.instructor1.pk: { + **average_and_recent(3, 4), + "latest_semester": TEST_SEMESTER, + }, + self.instructor2.pk: { + **average_and_recent(4 / 3, 1), + "latest_semester": TEST_SEMESTER, + }, + }, + "num_sections": 6, + "num_sections_recent": 1, + }, + ) + + +class TwoDepartmentTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + create_review("CIS-120-001", TEST_SEMESTER, "Instructor One", {"instructor_quality": 4}) + create_review( + 
"CIS-120-002", + "2007C", + "Instructor One", + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-120-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + create_review("MATH-114-002", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 2}) + create_review("ENM-211-003", TEST_SEMESTER, "Instructor Two", {"instructor_quality": 3}) + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + self.instructor1 = Instructor.objects.get(name="Instructor One") + self.instructor2 = Instructor.objects.get(name="Instructor Two") + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "MATH-114", + { + **average_and_recent(2, 2), + "instructors": {self.instructor2.pk: average_and_recent(2, 2)}, + }, + ) + + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + self.instructor2.pk, + { + **average_and_recent(2.5, 2.5), + "courses": { + "MATH-114": average_and_recent(2, 2), + "ENM-211": average_and_recent(3, 3), + }, + }, + ) + + def test_autocomplete(self): + no_responses_instructor = Instructor.objects.get(name="No Responses Instructor") + self.assertRequestContainsAppx( + "review-autocomplete", + [], + { + "instructors": [ + { + "title": "Instructor One", + "desc": "CIS", + "url": f"/instructor/{self.instructor1.pk}", + }, + { + "title": "Instructor Two", + "desc": "ENM,MATH", + "url": f"/instructor/{self.instructor2.pk}", + }, + { + "title": "No Responses Instructor", + "desc": "CIS", + "url": f"/instructor/{no_responses_instructor.pk}", + }, + ], + }, + ) + + +class NoReviewForSectionTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + create_review("CIS-120-001", TEST_SEMESTER, "Instructor One", {"instructor_quality": 4}) + create_review( + "CIS-120-002", + "2007C", + "Instructor One", + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-120-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + _, recitation, _, _ = get_or_create_course_and_section("CIS-120-201", TEST_SEMESTER) + recitation.activity = "REC" + recitation.instructors.add(Instructor.objects.create(name="Instructor Two")) + recitation.save() + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + self.instructor1 = Instructor.objects.get(name="Instructor One") + self.instructor2 = Instructor.objects.get(name="Instructor Two") + self.instructor_nores = Instructor.objects.get(name="No Responses Instructor") + + def test_course(self): + res = self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + **average_and_recent(4, 4), + "instructors": { + self.instructor1.pk: average_and_recent(4, 4), + self.instructor_nores.pk: {}, + }, + }, + ) + self.assertEqual(2, len(res["instructors"])) + + +class RegistrationMetricsFlagTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + create_review("CIS-120-001", "2020A", "Instructor One", {"instructor_quality": 4}) + pdp_restriction = PreNGSSRestriction( + code="PDP", description="Permission required from dept." 
+ ) + pdp_restriction.save() + cis_120_001 = Section.objects.get(full_code="CIS-120-001") + cis_120_001.pre_ngss_restrictions.add(pdp_restriction) + cis_120_001.capacity = 100 + cis_120_001.save() + StatusUpdate( + section=Section.objects.get(full_code="CIS-120-001"), + old_status="", + new_status="O", + alert_sent=False, + request_body="", + ).save() + + create_review("CIS-105-001", "2020A", "Instructor One", {"instructor_quality": 4}) + cis_105_001 = Section.objects.get(full_code="CIS-105-001") + cis_105_001.capacity = 20 + cis_105_001.save() + StatusUpdate( + section=Section.objects.get(full_code="CIS-105-001"), + old_status="", + new_status="O", + alert_sent=False, + request_body="", + ).save() + + create_review("OIDD-101-001", "2020A", "Instructor One", {"instructor_quality": 4}) + + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + + def test_registration_metrics_pdp(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + {"registration_metrics": False}, + ) + + def test_registration_metrics_true(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-105", + {"registration_metrics": True}, + ) + + def test_registration_metrics_no_status_updates(self): + self.assertRequestContainsAppx( + "course-reviews", + "OIDD-101", + {"registration_metrics": False}, + ) + + +class NotFoundTestCase(TestCase): + def setUp(self): + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + + def test_course(self): + self.assertEqual(404, self.client.get(reverse("course-reviews", args=["BLAH"])).status_code) + + def test_instructor(self): + self.assertEqual(404, self.client.get(reverse("instructor-reviews", args=[0])).status_code) + + def test_department(self): + self.assertEqual( + 404, self.client.get(reverse("department-reviews", args=["BLAH"])).status_code + ) + + def test_history(self): + self.assertEqual( + 404, self.client.get(reverse("course-history", args=["BLAH", 123])).status_code + ) + + def test_no_reviews(self): + get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER) + self.assertEqual( + 404, self.client.get(reverse("course-reviews", args=["CIS-120"])).status_code + ) + + +class NoAuthTestCase(TestCase): + def setUp(self): + self.client = APIClient() + + def test_course(self): + self.assertEqual(403, self.client.get(reverse("course-reviews", args=["BLAH"])).status_code) + + def test_instructor(self): + self.assertEqual(403, self.client.get(reverse("instructor-reviews", args=[0])).status_code) + + def test_department(self): + self.assertEqual( + 403, self.client.get(reverse("department-reviews", args=["BLAH"])).status_code + ) + + def test_history(self): + self.assertEqual( + 403, self.client.get(reverse("course-history", args=["BLAH", 0])).status_code + ) + + +class RecitationInstructorTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-120-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + self.instructor_pk = Instructor.objects.get(name=self.instructor_name).pk + + rec_instructor = Instructor(name="Recitation Instructor") + rec_instructor.save() + self.rec_instructor_pk = rec_instructor.pk + _, rec_section = create_mock_data("CIS-120-201", TEST_SEMESTER) + rec_section.activity = "REC" + rec_section.save() + rec_section.instructors.add(rec_instructor) + + 
create_review( + "CIS-120-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + Review.objects.all().update(enrollment=100) + + def test_course(self): + res = self.assertRequestContainsAppx( + "course-reviews", + "CIS-120", + { + **average_and_recent(4, 4), + "instructors": { + self.instructor_pk: {**average_and_recent(4, 4)}, + }, + }, + ) + self.assertEqual(len(res["instructors"]), 1) + + def test_autocomplete(self): + res = self.assertRequestContainsAppx( + "review-autocomplete", + [], + { + "instructors": [ + { + "title": self.instructor_name, + "desc": "CIS", + "url": f"/instructor/{self.instructor_pk}", + }, + ], + "courses": [ + { + "title": "CIS-120", + "desc": [""], + "url": "/course/CIS-120", + } + ], + "departments": [{"title": "CIS", "desc": "", "url": "/department/CIS"}], + }, + ) + self.assertEqual(len(res["instructors"]), 1) diff --git a/backend/tests/review/test_mergeinstructors.py b/backend/tests/review/test_mergeinstructors.py index f2baadba5..cd9ef732c 100644 --- a/backend/tests/review/test_mergeinstructors.py +++ b/backend/tests/review/test_mergeinstructors.py @@ -1,257 +1,257 @@ -from io import StringIO - -from django.contrib.auth.models import User -from django.core import management -from django.db.models.functions import Lower -from django.test import TestCase - -from courses.models import Instructor, Section -from courses.util import get_or_create_course_and_section -from review.management.commands.mergeinstructors import ( - INSTRUCTORS_UNMERGED, - batch_duplicates, - resolve_duplicates, - strategies, -) -from review.models import Review - - -TEST_SEMESTER = "2022C" - - -class BatchDuplicateTestCase(TestCase): - def setUp(self): - Instructor.objects.create(name="A") - Instructor.objects.create(name="a") - Instructor.objects.create(name="b") - - def test_batch_duplicates(self): - dupes = batch_duplicates( - Instructor.objects.all().annotate(name_lower=Lower("name")), lambda x: x.name_lower - ) - self.assertEqual(1, len(dupes)) - self.assertEqual("a", dupes[0].pop().name.lower()) - - def test_batch_duplicates_none_ignored(self): - Instructor.objects.create(name="B") - dupes = batch_duplicates( - Instructor.objects.all().annotate(name_lower=Lower("name")), - lambda x: x.name_lower if x.name_lower == "b" else None, - ) - self.assertEqual(1, len(dupes)) - self.assertEqual("b", dupes[0].pop().name.lower()) - - -class ResolveDuplicatesTestCase(TestCase): - def setUp(self): - self.user1 = User.objects.create_user(username="user1") - self.user2 = User.objects.create_user(username="user2") - - self.inst_A = Instructor.objects.create(name="A") - self.inst_a = Instructor.objects.create(name="a") - self.inst_b = Instructor.objects.create(name="b") - - self.course1, self.section1, _, _ = get_or_create_course_and_section("CIS-120-001", "2020C") - self.course2, self.section2, _, _ = get_or_create_course_and_section("CIS-120-001", "2019C") - - self.review1 = Review.objects.create(section=self.section1, instructor=self.inst_A) - self.section1.instructors.add(self.inst_A) - - self.review2 = Review.objects.create(section=self.section2, instructor=self.inst_a) - self.section2.instructors.add(self.inst_a) - - self.stats = dict() - - def stat(key, amt=1, element=None): - """ - Helper function to keep track of how many rows we are changing - """ - value = self.stats.get(key, 0) - if element is None: - self.stats[key] = value + amt - else: - self.stats.setdefault(key, []).append(element) - - self.stat = stat - - def test_basic_merge(self): - 
resolve_duplicates([{self.inst_A, self.inst_a}], False, self.stat) - self.assertEqual(2, Instructor.objects.count()) - self.assertFalse(Instructor.objects.filter(name="A").exists()) - self.assertEqual(2, Review.objects.filter(instructor=self.inst_a).count()) - self.assertEqual(2, Section.objects.filter(instructors=self.inst_a).count()) - - def test_basic_merge_dryrun_doesnt_modify(self): - resolve_duplicates([{self.inst_A, self.inst_a}], True, self.stat) - self.assertEqual(3, Instructor.objects.count()) - self.assertEqual(1, Review.objects.filter(instructor=self.inst_A).count()) - self.assertEqual(1, Section.objects.filter(instructors=self.inst_A).count()) - self.assertEqual(1, Review.objects.filter(instructor=self.inst_a).count()) - self.assertEqual(1, Section.objects.filter(instructors=self.inst_a).count()) - - def test_merge_with_user(self): - self.inst_A.user = self.user1 - self.inst_A.save() - resolve_duplicates([{self.inst_A, self.inst_a}], False, self.stat) - self.assertEqual(2, Instructor.objects.count()) - self.assertFalse(Instructor.objects.filter(name="a").exists()) - self.assertEqual(2, Review.objects.filter(instructor=self.inst_A).count()) - self.assertEqual(2, Section.objects.filter(instructors=self.inst_A).count()) - - def test_merge_with_both_having_same_user(self): - self.inst_a.user = self.user1 - self.inst_a.save() - self.inst_A.user = self.user1 - self.inst_A.save() - resolve_duplicates([{self.inst_A, self.inst_a}], False, self.stat) - self.assertEqual(2, Instructor.objects.count()) - self.assertFalse(Instructor.objects.filter(name="a").exists()) - self.assertEqual(2, Review.objects.filter(instructor=self.inst_A).count()) - self.assertEqual(2, Section.objects.filter(instructors=self.inst_A).count()) - - def test_merge_aborts_with_different_users(self): - self.inst_a.user = self.user1 - self.inst_a.save() - self.inst_A.user = self.user2 - self.inst_A.save() - resolve_duplicates([{self.inst_A, self.inst_a}], False, self.stat) - self.assertEqual(3, Instructor.objects.count()) - self.assertEqual(1, Review.objects.filter(instructor=self.inst_A).count()) - self.assertEqual(1, Section.objects.filter(instructors=self.inst_A).count()) - self.assertEqual(1, Review.objects.filter(instructor=self.inst_a).count()) - self.assertEqual(1, Section.objects.filter(instructors=self.inst_a).count()) - self.assertSetEqual( - {self.inst_A.pk, self.inst_a.pk}, set(self.stats[INSTRUCTORS_UNMERGED][0]) - ) - - -class MergeStrategyTestCase(TestCase): - def setUp(self): - self.user1 = User.objects.create_user(username="user1") - self.user2 = User.objects.create_user(username="user2") - - self.inst_A = Instructor.objects.create(name="A") - self.inst_a = Instructor.objects.create(name="a") - self.inst_b = Instructor.objects.create(name="b") - - def test_case_insensitive(self): - self.assertListEqual([{self.inst_a, self.inst_A}], strategies["case-insensitive"]()) - - def test_case_insensitive_recent_first(self): - self.inst_A.save() - self.assertListEqual([{self.inst_A, self.inst_a}], strategies["case-insensitive"]()) - - def test_pennid(self): - self.inst_A.user = self.user1 - self.inst_A.save() - self.inst_a.user = self.user1 - self.inst_a.save() - self.assertListEqual([{self.inst_a, self.inst_A}], strategies["pennid"]()) - - def test_flns_shared(self): - _, cis_1600_001, _, _ = get_or_create_course_and_section("CIS-1600-001", TEST_SEMESTER) - - rajiv_no_middle = Instructor.objects.create(name="Rajiv Gandhi") - rajiv_no_middle.user = self.user1 - rajiv_no_middle.save() - 
cis_1600_001.instructors.add(rajiv_no_middle) - - rajiv_middle = Instructor.objects.create(name="Rajiv C. Gandhi") - cis_1600_001.instructors.add(rajiv_middle) - - self.assertEqual( - [{rajiv_no_middle, rajiv_middle}], strategies["first-last-name-sections"]() - ) - - def test_flns_not_shared(self): - _, cis_1600_001, _, _ = get_or_create_course_and_section("CIS-1600-001", TEST_SEMESTER) - - rajiv_no_middle = Instructor.objects.create(name="Rajiv Gandhi") - cis_1600_001.instructors.add(rajiv_no_middle) - - Instructor.objects.create(name="Rajiv C. Gandhi") - - self.assertEqual([], strategies["first-last-name-sections"]()) - - -class MergeInstructorsCommandTestCase(TestCase): - COMMAND_NAME = "mergeinstructors" - - def setUp(self): - self.out = StringIO() - self.err = StringIO() - - self.user1 = User.objects.create_user(username="user1") - self.user2 = User.objects.create_user(username="user2") - - self.inst_A = Instructor.objects.create(name="A") - self.inst_a = Instructor.objects.create(name="a") - self.inst_b = Instructor.objects.create(name="b") - - self.course1, self.section1, _, _ = get_or_create_course_and_section("CIS-120-001", "2020C") - self.course2, self.section2, _, _ = get_or_create_course_and_section("CIS-120-001", "2019C") - - self.review1 = Review.objects.create(section=self.section1, instructor=self.inst_A) - self.section1.instructors.add(self.inst_A) - - self.review2 = Review.objects.create(section=self.section2, instructor=self.inst_a) - self.section2.instructors.add(self.inst_a) - - def test_with_all_strats(self): - self.inst_a.user = self.user1 - self.inst_b.user = self.user1 - self.inst_a.save() - self.inst_b.save() - management.call_command( - self.COMMAND_NAME, - "--all", - stdout=self.out, - stderr=self.err, - ) - self.assertEqual(1, Instructor.objects.all().count()) - self.assertEqual(2, Review.objects.filter(instructor=self.inst_b).count()) - self.assertEqual(2, Section.objects.filter(instructors=self.inst_b).count()) - - def test_with_one_strat(self): - management.call_command( - self.COMMAND_NAME, - "--strategy=case-insensitive", - stdout=self.out, - stderr=self.err, - ) - self.assertEqual(2, Instructor.objects.all().count()) - self.assertEqual(2, Review.objects.filter(instructor=self.inst_a).count()) - self.assertEqual(2, Section.objects.filter(instructors=self.inst_a).count()) - - def test_with_manual_override(self): - self.inst_A.user = self.user1 - self.inst_b.user = self.user2 - self.inst_A.save() - self.inst_b.save() - management.call_command( - self.COMMAND_NAME, - f"-i {self.inst_b.pk}", - f"-i {self.inst_A.pk}", - stdout=self.out, - stderr=self.err, - ) - self.assertEqual(2, Instructor.objects.all().count()) - self.assertFalse(Instructor.objects.filter(name="A").exists()) - self.assertEqual(1, Review.objects.filter(instructor=self.inst_a).count()) - self.assertEqual(1, Section.objects.filter(instructors=self.inst_a).count()) - - def test_with_dry_run(self): - self.inst_a.user = self.user1 - self.inst_b.user = self.user1 - self.inst_a.save() - self.inst_b.save() - management.call_command( - self.COMMAND_NAME, - "--all", - "--dryrun", - stdout=self.out, - stderr=self.err, - ) - self.assertEqual(3, Instructor.objects.all().count()) - self.assertEqual(0, Review.objects.filter(instructor=self.inst_b).count()) - self.assertEqual(0, Section.objects.filter(instructors=self.inst_b).count()) +from io import StringIO + +from django.contrib.auth.models import User +from django.core import management +from django.db.models.functions import Lower +from django.test 
import TestCase + +from courses.models import Instructor, Section +from courses.util import get_or_create_course_and_section +from review.management.commands.mergeinstructors import ( + INSTRUCTORS_UNMERGED, + batch_duplicates, + resolve_duplicates, + strategies, +) +from review.models import Review + + +TEST_SEMESTER = "2022C" + + +class BatchDuplicateTestCase(TestCase): + def setUp(self): + Instructor.objects.create(name="A") + Instructor.objects.create(name="a") + Instructor.objects.create(name="b") + + def test_batch_duplicates(self): + dupes = batch_duplicates( + Instructor.objects.all().annotate(name_lower=Lower("name")), lambda x: x.name_lower + ) + self.assertEqual(1, len(dupes)) + self.assertEqual("a", dupes[0].pop().name.lower()) + + def test_batch_duplicates_none_ignored(self): + Instructor.objects.create(name="B") + dupes = batch_duplicates( + Instructor.objects.all().annotate(name_lower=Lower("name")), + lambda x: x.name_lower if x.name_lower == "b" else None, + ) + self.assertEqual(1, len(dupes)) + self.assertEqual("b", dupes[0].pop().name.lower()) + + +class ResolveDuplicatesTestCase(TestCase): + def setUp(self): + self.user1 = User.objects.create_user(username="user1") + self.user2 = User.objects.create_user(username="user2") + + self.inst_A = Instructor.objects.create(name="A") + self.inst_a = Instructor.objects.create(name="a") + self.inst_b = Instructor.objects.create(name="b") + + self.course1, self.section1, _, _ = get_or_create_course_and_section("CIS-120-001", "2020C") + self.course2, self.section2, _, _ = get_or_create_course_and_section("CIS-120-001", "2019C") + + self.review1 = Review.objects.create(section=self.section1, instructor=self.inst_A) + self.section1.instructors.add(self.inst_A) + + self.review2 = Review.objects.create(section=self.section2, instructor=self.inst_a) + self.section2.instructors.add(self.inst_a) + + self.stats = dict() + + def stat(key, amt=1, element=None): + """ + Helper function to keep track of how many rows we are changing + """ + value = self.stats.get(key, 0) + if element is None: + self.stats[key] = value + amt + else: + self.stats.setdefault(key, []).append(element) + + self.stat = stat + + def test_basic_merge(self): + resolve_duplicates([{self.inst_A, self.inst_a}], False, self.stat) + self.assertEqual(2, Instructor.objects.count()) + self.assertFalse(Instructor.objects.filter(name="A").exists()) + self.assertEqual(2, Review.objects.filter(instructor=self.inst_a).count()) + self.assertEqual(2, Section.objects.filter(instructors=self.inst_a).count()) + + def test_basic_merge_dryrun_doesnt_modify(self): + resolve_duplicates([{self.inst_A, self.inst_a}], True, self.stat) + self.assertEqual(3, Instructor.objects.count()) + self.assertEqual(1, Review.objects.filter(instructor=self.inst_A).count()) + self.assertEqual(1, Section.objects.filter(instructors=self.inst_A).count()) + self.assertEqual(1, Review.objects.filter(instructor=self.inst_a).count()) + self.assertEqual(1, Section.objects.filter(instructors=self.inst_a).count()) + + def test_merge_with_user(self): + self.inst_A.user = self.user1 + self.inst_A.save() + resolve_duplicates([{self.inst_A, self.inst_a}], False, self.stat) + self.assertEqual(2, Instructor.objects.count()) + self.assertFalse(Instructor.objects.filter(name="a").exists()) + self.assertEqual(2, Review.objects.filter(instructor=self.inst_A).count()) + self.assertEqual(2, Section.objects.filter(instructors=self.inst_A).count()) + + def test_merge_with_both_having_same_user(self): + self.inst_a.user = self.user1 + 
self.inst_a.save() + self.inst_A.user = self.user1 + self.inst_A.save() + resolve_duplicates([{self.inst_A, self.inst_a}], False, self.stat) + self.assertEqual(2, Instructor.objects.count()) + self.assertFalse(Instructor.objects.filter(name="a").exists()) + self.assertEqual(2, Review.objects.filter(instructor=self.inst_A).count()) + self.assertEqual(2, Section.objects.filter(instructors=self.inst_A).count()) + + def test_merge_aborts_with_different_users(self): + self.inst_a.user = self.user1 + self.inst_a.save() + self.inst_A.user = self.user2 + self.inst_A.save() + resolve_duplicates([{self.inst_A, self.inst_a}], False, self.stat) + self.assertEqual(3, Instructor.objects.count()) + self.assertEqual(1, Review.objects.filter(instructor=self.inst_A).count()) + self.assertEqual(1, Section.objects.filter(instructors=self.inst_A).count()) + self.assertEqual(1, Review.objects.filter(instructor=self.inst_a).count()) + self.assertEqual(1, Section.objects.filter(instructors=self.inst_a).count()) + self.assertSetEqual( + {self.inst_A.pk, self.inst_a.pk}, set(self.stats[INSTRUCTORS_UNMERGED][0]) + ) + + +class MergeStrategyTestCase(TestCase): + def setUp(self): + self.user1 = User.objects.create_user(username="user1") + self.user2 = User.objects.create_user(username="user2") + + self.inst_A = Instructor.objects.create(name="A") + self.inst_a = Instructor.objects.create(name="a") + self.inst_b = Instructor.objects.create(name="b") + + def test_case_insensitive(self): + self.assertListEqual([{self.inst_a, self.inst_A}], strategies["case-insensitive"]()) + + def test_case_insensitive_recent_first(self): + self.inst_A.save() + self.assertListEqual([{self.inst_A, self.inst_a}], strategies["case-insensitive"]()) + + def test_pennid(self): + self.inst_A.user = self.user1 + self.inst_A.save() + self.inst_a.user = self.user1 + self.inst_a.save() + self.assertListEqual([{self.inst_a, self.inst_A}], strategies["pennid"]()) + + def test_flns_shared(self): + _, cis_1600_001, _, _ = get_or_create_course_and_section("CIS-1600-001", TEST_SEMESTER) + + rajiv_no_middle = Instructor.objects.create(name="Rajiv Gandhi") + rajiv_no_middle.user = self.user1 + rajiv_no_middle.save() + cis_1600_001.instructors.add(rajiv_no_middle) + + rajiv_middle = Instructor.objects.create(name="Rajiv C. Gandhi") + cis_1600_001.instructors.add(rajiv_middle) + + self.assertEqual( + [{rajiv_no_middle, rajiv_middle}], strategies["first-last-name-sections"]() + ) + + def test_flns_not_shared(self): + _, cis_1600_001, _, _ = get_or_create_course_and_section("CIS-1600-001", TEST_SEMESTER) + + rajiv_no_middle = Instructor.objects.create(name="Rajiv Gandhi") + cis_1600_001.instructors.add(rajiv_no_middle) + + Instructor.objects.create(name="Rajiv C. 
Gandhi") + + self.assertEqual([], strategies["first-last-name-sections"]()) + + +class MergeInstructorsCommandTestCase(TestCase): + COMMAND_NAME = "mergeinstructors" + + def setUp(self): + self.out = StringIO() + self.err = StringIO() + + self.user1 = User.objects.create_user(username="user1") + self.user2 = User.objects.create_user(username="user2") + + self.inst_A = Instructor.objects.create(name="A") + self.inst_a = Instructor.objects.create(name="a") + self.inst_b = Instructor.objects.create(name="b") + + self.course1, self.section1, _, _ = get_or_create_course_and_section("CIS-120-001", "2020C") + self.course2, self.section2, _, _ = get_or_create_course_and_section("CIS-120-001", "2019C") + + self.review1 = Review.objects.create(section=self.section1, instructor=self.inst_A) + self.section1.instructors.add(self.inst_A) + + self.review2 = Review.objects.create(section=self.section2, instructor=self.inst_a) + self.section2.instructors.add(self.inst_a) + + def test_with_all_strats(self): + self.inst_a.user = self.user1 + self.inst_b.user = self.user1 + self.inst_a.save() + self.inst_b.save() + management.call_command( + self.COMMAND_NAME, + "--all", + stdout=self.out, + stderr=self.err, + ) + self.assertEqual(1, Instructor.objects.all().count()) + self.assertEqual(2, Review.objects.filter(instructor=self.inst_b).count()) + self.assertEqual(2, Section.objects.filter(instructors=self.inst_b).count()) + + def test_with_one_strat(self): + management.call_command( + self.COMMAND_NAME, + "--strategy=case-insensitive", + stdout=self.out, + stderr=self.err, + ) + self.assertEqual(2, Instructor.objects.all().count()) + self.assertEqual(2, Review.objects.filter(instructor=self.inst_a).count()) + self.assertEqual(2, Section.objects.filter(instructors=self.inst_a).count()) + + def test_with_manual_override(self): + self.inst_A.user = self.user1 + self.inst_b.user = self.user2 + self.inst_A.save() + self.inst_b.save() + management.call_command( + self.COMMAND_NAME, + f"-i {self.inst_b.pk}", + f"-i {self.inst_A.pk}", + stdout=self.out, + stderr=self.err, + ) + self.assertEqual(2, Instructor.objects.all().count()) + self.assertFalse(Instructor.objects.filter(name="A").exists()) + self.assertEqual(1, Review.objects.filter(instructor=self.inst_a).count()) + self.assertEqual(1, Section.objects.filter(instructors=self.inst_a).count()) + + def test_with_dry_run(self): + self.inst_a.user = self.user1 + self.inst_b.user = self.user1 + self.inst_a.save() + self.inst_b.save() + management.call_command( + self.COMMAND_NAME, + "--all", + "--dryrun", + stdout=self.out, + stderr=self.err, + ) + self.assertEqual(3, Instructor.objects.all().count()) + self.assertEqual(0, Review.objects.filter(instructor=self.inst_b).count()) + self.assertEqual(0, Section.objects.filter(instructors=self.inst_b).count()) diff --git a/backend/tests/review/test_models.py b/backend/tests/review/test_models.py index f65eb436c..3f62d6e56 100644 --- a/backend/tests/review/test_models.py +++ b/backend/tests/review/test_models.py @@ -1,44 +1,44 @@ -from django.test import TestCase - -from courses.models import Instructor -from courses.util import get_or_create_course_and_section -from review.models import Review, ReviewBit -from review.util import titleize - - -TEST_SEMESTER = "2017A" - - -class TitleizeTestCase(TestCase): - def test_regular_name(self): - names = [ - "Davis Haupt", - "Old McDonald", - "Brennan O'Leary", - "H.R. 
Pickens, III", - "Pope Leo XV", - ] - for name in names: - raw = name.upper() - self.assertEqual(name, titleize(raw)) - - -class ReviewTestCase(TestCase): - def setUp(self): - self.instructor = Instructor(name="Teacher") - self.instructor.save() - self.review = Review( - section=get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER)[1], - instructor=self.instructor, - ) - self.review.save() - - def test_set_bits(self): - self.review.set_averages( - { - "difficulty": 4, - "course_quality": 3, - } - ) - self.assertEqual(2, ReviewBit.objects.count()) - self.assertEqual(4, ReviewBit.objects.get(field="difficulty").average) +from django.test import TestCase + +from courses.models import Instructor +from courses.util import get_or_create_course_and_section +from review.models import Review, ReviewBit +from review.util import titleize + + +TEST_SEMESTER = "2017A" + + +class TitleizeTestCase(TestCase): + def test_regular_name(self): + names = [ + "Davis Haupt", + "Old McDonald", + "Brennan O'Leary", + "H.R. Pickens, III", + "Pope Leo XV", + ] + for name in names: + raw = name.upper() + self.assertEqual(name, titleize(raw)) + + +class ReviewTestCase(TestCase): + def setUp(self): + self.instructor = Instructor(name="Teacher") + self.instructor.save() + self.review = Review( + section=get_or_create_course_and_section("CIS-120-001", TEST_SEMESTER)[1], + instructor=self.instructor, + ) + self.review.save() + + def test_set_bits(self): + self.review.set_averages( + { + "difficulty": 4, + "course_quality": 3, + } + ) + self.assertEqual(2, ReviewBit.objects.count()) + self.assertEqual(4, ReviewBit.objects.get(field="difficulty").average) diff --git a/backend/tests/review/test_stats.py b/backend/tests/review/test_stats.py index 441e8f9a7..e47ec2fb7 100644 --- a/backend/tests/review/test_stats.py +++ b/backend/tests/review/test_stats.py @@ -1,994 +1,994 @@ -from dateutil.tz import gettz -from django.contrib.auth.models import User -from django.db.models.signals import post_save -from django.test import TestCase -from options.models import Option -from rest_framework.test import APIClient - -from alert.management.commands.recomputestats import ( - recompute_demand_distribution_estimates, - recompute_precomputed_fields, -) -from alert.models import AddDropPeriod, Registration -from courses.models import Instructor, Section -from courses.util import ( - get_or_create_add_drop_period, - invalidate_current_semester_cache, - record_update, -) -from PennCourses.settings.base import TIME_ZONE -from review.models import Review -from tests.courses.util import create_mock_data -from tests.review.test_api import PCRTestMixin, create_review - - -TEST_CURRENT_SEMESTER = "2021C" -TEST_SEMESTER = "2021A" # Past semester for reviews - -assert TEST_CURRENT_SEMESTER >= "2021C", "TEST_CURRENT_SEMESTER must be at least 2021C" -assert "b" not in TEST_CURRENT_SEMESTER.lower(), "TEST_CURRENT_SEMESTER cannot be a summer semester" -assert TEST_SEMESTER >= "2021A", "TEST_SEMESTER must be at least 2021A" -assert "b" not in TEST_SEMESTER.lower(), "TEST_SEMESTER cannot be a summer semester" - - -def set_semester(): - post_save.disconnect( - receiver=invalidate_current_semester_cache, - sender=Option, - dispatch_uid="invalidate_current_semester_cache", - ) - Option(key="SEMESTER", value=TEST_CURRENT_SEMESTER, value_type="TXT").save() - AddDropPeriod(semester=TEST_CURRENT_SEMESTER).save() - AddDropPeriod(semester=TEST_SEMESTER).save() - - -""" -Below are some utility functions that make writing out the response.data dictionaries 
-a bit easier to do. All of the tests use instructor_quality as the reviewbit to test. -these helper functions cut down on a lot of the repeated characters in the responses. -""" - - -def ratings_dict( - label, - rInstructorQuality, - rFinalEnrollment, - rPercentOpen, - rNumOpenings, - rFilledInAdvReg, -): - return { - label: { - "rInstructorQuality": rInstructorQuality, - "rFinalEnrollment": rFinalEnrollment, - "rPercentOpen": rPercentOpen, - "rNumOpenings": rNumOpenings, - "rFilledInAdvReg": rFilledInAdvReg, - } - } - - -def average(*fields): - return ratings_dict("average_reviews", *fields) - - -def recent(*fields): - return ratings_dict("recent_reviews", *fields) - - -def rating(*fields): - return ratings_dict("ratings", *fields) - - -def set_registrations(section_id, registration_spec_list): - for reg_spec in registration_spec_list: - reg = Registration(section_id=section_id) - reg.save() - for key, value in reg_spec.items(): - setattr(reg, key, value) - reg.save() - - -def get_sec_by_id(sec_id): - return Section.objects.get(id=sec_id) - - -def get_start_end_duration(adp): - start = adp.estimated_start - end = adp.estimated_end - duration = end - start - return start, end, duration - - -def get_to_date_func(adp): - start, end, duration = get_start_end_duration(adp) - - def to_date(percent): - return start + percent * duration - - return to_date - - -class TwoSemestersOneInstructorTestCase(TestCase, PCRTestMixin): - @classmethod - def setUpTestData(cls): - set_semester() - cls.instructor_name = "Instructor One" - create_review( - "ESE-120-001", TEST_SEMESTER, cls.instructor_name, {"instructor_quality": 3.5} - ) - create_review("ESE-120-001", "2020C", cls.instructor_name, {"instructor_quality": 2}) - cls.ESE_120_001_TEST_SEMESTER_id = Section.objects.get( - full_code="ESE-120-001", course__semester=TEST_SEMESTER - ).id - cls.ESE_120_001_2020C_id = Section.objects.get( - full_code="ESE-120-001", course__semester="2020C" - ).id - course, section = create_mock_data("ESE-120-001", TEST_CURRENT_SEMESTER) - section.capacity = 100 - section.save() - cls.current_sem_adp = get_or_create_add_drop_period(TEST_CURRENT_SEMESTER) - cls.adp = get_or_create_add_drop_period(TEST_SEMESTER) - AddDropPeriod(semester="2020C").save() - cls.old_adp = get_or_create_add_drop_period("2020C") - cls.average_instructor_quality = (2 + 3.5) / 2 - cls.recent_instructor_quality = 3.5 - cls.old_instructor_quality = 2 - old_status = "C" - new_status = "O" - start, end, duration = get_start_end_duration(cls.adp) - for date in ( - [start - 3 * duration / 5, start - 2 * duration / 5, start - duration / 5] - + [start + i * duration / 5 for i in range(1, 5)] - + [ - start + 0.81 * duration, - start + 0.82 * duration, - ] - ): - # O[.2]C[.4]O[.6]C[.8]O[.81]C[.82]O - record_update( - Section.objects.get(id=cls.ESE_120_001_TEST_SEMESTER_id), - TEST_SEMESTER, - old_status, - new_status, - False, - dict(), - created_at=date, - ) - old_status, new_status = new_status, old_status - cls.recent_percent_open = 3 / 5 - 0.01 - cls.recent_filled_in_adv_reg = 0 - old_status = "O" - new_status = "C" - start, end, duration = get_start_end_duration(cls.old_adp) - for date in [start - 3 * duration / 5, start - 2 * duration / 5, start - duration / 5] + [ - start + i * duration / 4 for i in range(1, 4) - ]: - # C[.25]O[.5]C[.75]O - record_update( - Section.objects.get(id=cls.ESE_120_001_2020C_id), - "2020C", - old_status, - new_status, - False, - dict(), - created_at=date, - ) - old_status, new_status = new_status, old_status - 
cls.average_percent_open = (1 / 2 + 3 / 5 - 0.01) / 2 - cls.old_percent_open = 1 / 2 - cls.average_filled_in_adv_reg = 0.5 - cls.old_filled_in_adv_reg = 1 - to_date = get_to_date_func(cls.adp) - # O[.2]C[.4]O[.6]C[.8]O[.81]C[.82]O - registration_list_TS = [ - {"created_at": to_date(0.1), "cancelled_at": to_date(0.19), "cancelled": True}, - { - "created_at": to_date(0.15), - "notification_sent_at": to_date(0.4), - "notification_sent": True, - }, - { - "created_at": to_date(0.45), - "notification_sent_at": to_date(0.6), - "notification_sent": True, - }, - {"created_at": to_date(0.61), "deleted_at": to_date(0.79), "deleted": True}, - ] - set_registrations(cls.ESE_120_001_TEST_SEMESTER_id, registration_list_TS) - to_date = get_to_date_func(cls.old_adp) - # C[.25]O[.5]C[.75]O - registration_list_2020C = [ - { - "created_at": to_date(0.1001), - "notification_sent_at": to_date(0.25), - "notification_sent": True, - }, - { - "created_at": to_date(0.51), - "cancelled_at": to_date(0.52), - "deleted_at": to_date(0.53), - "deleted": True, - }, - {"created_at": to_date(0.76), "deleted_at": to_date(0.77), "deleted": True}, - ] - set_registrations(cls.ESE_120_001_2020C_id, registration_list_2020C) - - cls.recent_num_updates = 3 - cls.average_num_updates = (3 + 2) / 2 - cls.old_num_updates = 2 - recent_review = Review.objects.get(section_id=cls.ESE_120_001_TEST_SEMESTER_id) - recent_review.enrollment = 80 - recent_review.save() - test_sem_class = get_sec_by_id(cls.ESE_120_001_TEST_SEMESTER_id) - test_sem_class.capacity = 100 - test_sem_class.save() - average_review = Review.objects.get(section_id=cls.ESE_120_001_2020C_id) - average_review.enrollment = 99 - average_review.save() - old_sem_class = get_sec_by_id(cls.ESE_120_001_2020C_id) - old_sem_class.capacity = 100 - old_sem_class.save() - cls.recent_enrollment = 80 - cls.average_enrollment = (80 + 99) / 2 - cls.old_enrollment = 99 - - recompute_precomputed_fields() - recompute_demand_distribution_estimates( - semesters=TEST_CURRENT_SEMESTER + "," + TEST_SEMESTER + "," + "2020C" - ) - - local_tz = gettz(TIME_ZONE) - cls.course_plots_subdict = { - "code": "ESE-120", - "current_add_drop_period": { - "start": cls.current_sem_adp.estimated_start.astimezone(tz=local_tz), - "end": cls.current_sem_adp.estimated_end.astimezone(tz=local_tz), - }, - "average_plots": { - "pca_demand_plot_since_semester": "2020C", - "pca_demand_plot_num_semesters": 2, - "percent_open_plot_since_semester": "2020C", - "percent_open_plot_num_semesters": 2, - "pca_demand_plot": [ - (0, 0.0), - (0.1001, 0.25), - (0.2, 0.5), - (0.25, 0.25), - (0.4, 0.0), - (0.5, 0.25), - (0.6, 0.5), - (0.75, 0.25), - (0.8, 0.0), - (0.81, 0.25), - (0.82, 0.0), - ], - "percent_open_plot": [ - (0, 0.5), - (0.2, 0), - (0.25, 0.5), - (0.4, 1), - (0.5, 0.5), - (0.6, 0.0), - (0.75, 0.5), - (0.8, 1), - (0.81, 0.5), - (0.82, 1), - (1, 1), - ], - }, - "recent_plots": { - "pca_demand_plot_since_semester": TEST_SEMESTER, - "pca_demand_plot_num_semesters": 1, - "percent_open_plot_since_semester": TEST_SEMESTER, - "percent_open_plot_num_semesters": 1, - "pca_demand_plot": [ - (0, 0.0), - (0.2, 0.5), - (0.4, 0.0), - (0.6, 0.5), - (0.8, 0.0), - (0.81, 0.5), - (0.82, 0.0), - ], - "percent_open_plot": [ - (0, 1), - (0.2, 0.0), - (0.4, 1), - (0.6, 0.0), - (0.8, 1), - (0.81, 0), - (0.82, 1), - (1, 1), - ], - }, - } - - def setUp(self): - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - - def test_course(self): - reviews_subdict = { - **average( - self.average_instructor_quality, 
- self.average_enrollment, - self.average_percent_open, - self.average_num_updates, - self.average_filled_in_adv_reg, - ), - **recent( - self.recent_instructor_quality, - self.recent_enrollment, - self.recent_percent_open, - self.recent_num_updates, - self.recent_filled_in_adv_reg, - ), - } - self.assertRequestContainsAppx( - "course-reviews", - "ESE-120", - {**reviews_subdict, "instructors": {Instructor.objects.get().pk: reviews_subdict}}, - ) - self.assertRequestContainsAppx( - "course-plots", - "ESE-120", - self.course_plots_subdict, - ) - - instructor_ids = ",".join(str(id) for id in Instructor.objects.values_list("id", flat=True)) - self.assertRequestContainsAppx( - "course-plots", - "ESE-120", - self.course_plots_subdict, - query_params={ - "instructor_ids": instructor_ids, - }, - ) - - def test_instructor(self): - subdict = { - **average( - self.average_instructor_quality, - self.average_enrollment, - self.average_percent_open, - self.average_num_updates, - self.average_filled_in_adv_reg, - ), - **recent( - self.recent_instructor_quality, - self.recent_enrollment, - self.recent_percent_open, - self.recent_num_updates, - self.recent_filled_in_adv_reg, - ), - } - self.assertRequestContainsAppx( - "instructor-reviews", - Instructor.objects.get().pk, - {**subdict, "courses": {"ESE-120": subdict}}, - ) - - def test_department(self): - subdict = { - **average( - self.average_instructor_quality, - self.average_enrollment, - self.average_percent_open, - self.average_num_updates, - self.average_filled_in_adv_reg, - ), - **recent( - self.recent_instructor_quality, - self.recent_enrollment, - self.recent_percent_open, - self.recent_num_updates, - self.recent_filled_in_adv_reg, - ), - } - self.assertRequestContainsAppx( - "department-reviews", "ESE", {"courses": {"ESE-120": subdict}} - ) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["ESE-120", Instructor.objects.get().pk], - { - "sections": [ - rating( - self.recent_instructor_quality, - self.recent_enrollment, - self.recent_percent_open, - self.recent_num_updates, - self.recent_filled_in_adv_reg, - ), - rating( - self.old_instructor_quality, - self.old_enrollment, - self.old_percent_open, - self.old_num_updates, - self.old_filled_in_adv_reg, - ), - ] - }, - ) - - def test_autocomplete(self): - self.assertRequestContainsAppx( - "review-autocomplete", - [], - { - "instructors": [ - { - "title": self.instructor_name, - "desc": "ESE", - "url": f"/instructor/{Instructor.objects.get().pk}", - } - ], - "courses": [ - { - "title": "ESE-120", - "desc": [""], - "url": "/course/ESE-120", - } - ], - "departments": [{"title": "ESE", "desc": "", "url": "/department/ESE"}], - }, - ) - - def test_current_percent_open(self): - self.assertAlmostEquals( - self.recent_percent_open, - Section.objects.get(id=self.ESE_120_001_TEST_SEMESTER_id).current_percent_open, - ) - self.assertAlmostEquals( - self.old_percent_open, - Section.objects.get(id=self.ESE_120_001_2020C_id).current_percent_open, - ) - - -class OneReviewTestCase(TestCase, PCRTestMixin): - @classmethod - def setUpTestData(cls): - set_semester() - cls.instructor_name = "Instructor One" - create_review( - "ESE-120-001", TEST_SEMESTER, cls.instructor_name, {"instructor_quality": 3.5} - ) - cls.ESE_120_001_id = Section.objects.get(full_code="ESE-120-001").id - cls.instructor_quality = 3.5 - cls.current_sem_adp = get_or_create_add_drop_period(TEST_CURRENT_SEMESTER) - cls.adp = get_or_create_add_drop_period(TEST_SEMESTER) - start = cls.adp.estimated_start - end = 
cls.adp.estimated_end - duration = end - start - old_status = "C" - new_status = "O" - percent_open_plot = [(0, 1)] - for date in [start - 3 * duration / 7, start - 2 * duration / 7, start - duration / 7] + [ - start + i * duration / 7 for i in range(1, 7) - ]: - # O[1/7]C[2/7]O[3/7]C[4/7]O[5/7]C[6/7]O - percent_thru = cls.adp.get_percent_through_add_drop(date) - record_update( - Section.objects.get(id=cls.ESE_120_001_id), - TEST_SEMESTER, - old_status, - new_status, - False, - dict(), - created_at=date, - ) - if date >= start: - percent_open_plot.append((percent_thru, int(new_status == "O"))) - old_status, new_status = new_status, old_status - percent_open_plot.append((1, 1)) - cls.percent_open = (duration * 4 / 7).total_seconds() / duration.total_seconds() - cls.filled_in_adv_reg = 0 - to_date = get_to_date_func(cls.adp) - set_registrations( - cls.ESE_120_001_id, - [ - {"created_at": to_date(0.25), "cancelled_at": to_date(0.26), "cancelled": True}, - { - "created_at": to_date(0.5), - "notification_sent_at": to_date(4 / 7), - "notification_sent": True, - }, - {"created_at": to_date(0.75), "deleted_at": to_date(5.9 / 7), "deleted": True}, - ], - ) - - cls.num_updates = 3 - review = Review.objects.get() - review.enrollment = 80 - review.save() - sec = get_sec_by_id(cls.ESE_120_001_id) - sec.capacity = 100 - sec.save() - cls.enrollment = 80 - - recompute_precomputed_fields() - recompute_demand_distribution_estimates(semesters=TEST_SEMESTER, verbose=True) - - plots = { - "pca_demand_plot_since_semester": TEST_SEMESTER, - "pca_demand_plot_num_semesters": 1, - "percent_open_plot_since_semester": TEST_SEMESTER, - "percent_open_plot_num_semesters": 1, - "pca_demand_plot": [ - (0, 0), - (0.25, 0.5), - (2 / 7, 0), - (3 / 7, 0.5), - (4 / 7, 0), - (5 / 7, 0.5), - (6 / 7, 0), - (1, 0), - ], - "percent_open_plot": percent_open_plot, - } - local_tz = gettz(TIME_ZONE) - cls.course_plots_subdict = { - "code": "ESE-120", - "current_add_drop_period": { - "start": cls.current_sem_adp.estimated_start.astimezone(tz=local_tz), - "end": cls.current_sem_adp.estimated_end.astimezone(tz=local_tz), - }, - "average_plots": plots, - "recent_plots": plots, - } - - def setUp(self): - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - - def test_course(self): - reviews_subdict = { - **average( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - **recent( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - } - - instructor_ids = ",".join( - [str(id) for id in [Instructor.objects.get().pk]], - ) - self.assertRequestContainsAppx( - "course-plots", - ["ESE-120"], - self.course_plots_subdict, - query_params={ - "instructor_ids": instructor_ids, - }, - ) - - self.assertRequestContainsAppx( - "course-reviews", - "ESE-120", - {**reviews_subdict, "instructors": {Instructor.objects.get().pk: reviews_subdict}}, - ) - - def test_instructor(self): - subdict = { - **average( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - **recent( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - } - self.assertRequestContainsAppx( - "instructor-reviews", - Instructor.objects.get().pk, - {**subdict, "courses": {"ESE-120": subdict}}, - ) - - def test_department(self): - subdict = { - **average( - self.instructor_quality, - 
self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - **recent( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - } - self.assertRequestContainsAppx( - "department-reviews", "ESE", {"courses": {"ESE-120": subdict}} - ) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["ESE-120", Instructor.objects.get().pk], - { - "sections": [ - rating( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ) - ] - }, - ) - - def test_autocomplete(self): - self.assertRequestContainsAppx( - "review-autocomplete", - [], - { - "instructors": [ - { - "title": self.instructor_name, - "desc": "ESE", - "url": f"/instructor/{Instructor.objects.get().pk}", - } - ], - "courses": [ - { - "title": "ESE-120", - "desc": [""], - "url": "/course/ESE-120", - } - ], - "departments": [{"title": "ESE", "desc": "", "url": "/department/ESE"}], - }, - ) - - -class TwoInstructorsOneSectionTestCase(TestCase, PCRTestMixin): - @classmethod - def setUpTestData(cls): - set_semester() - cls.instructor_1_name = "Instructor One" - cls.instructor_2_name = "Instructor Two" - create_review( - "ESE-120-001", TEST_SEMESTER, cls.instructor_1_name, {"instructor_quality": 3.5} - ) - create_review( - "ESE-120-001", TEST_SEMESTER, cls.instructor_2_name, {"instructor_quality": 3.5} - ) - cls.ESE_120_001_id = Section.objects.get(full_code="ESE-120-001").id - cls.instructor_quality = 3.5 - cls.current_sem_adp = get_or_create_add_drop_period(TEST_CURRENT_SEMESTER) - cls.adp = get_or_create_add_drop_period(TEST_SEMESTER) - start = cls.adp.estimated_start - end = cls.adp.estimated_end - duration = end - start - old_status = "C" - new_status = "O" - percent_open_plot = [(0, 1)] - for date in [start - 3 * duration / 7, start - 2 * duration / 7, start - duration / 7] + [ - start + i * duration / 7 for i in range(1, 7) - ]: - # O[1/7]C[2/7]O[3/7]C[4/7]O[5/7]C[6/7]O - percent_thru = cls.adp.get_percent_through_add_drop(date) - record_update( - Section.objects.get(id=cls.ESE_120_001_id), - TEST_SEMESTER, - old_status, - new_status, - False, - dict(), - created_at=date, - ) - if date >= start: - percent_open_plot.append((percent_thru, int(new_status == "O"))) - old_status, new_status = new_status, old_status - cls.filled_in_adv_reg = 0 - percent_open_plot.append((1, 1)) - to_date = get_to_date_func(cls.adp) - set_registrations( - cls.ESE_120_001_id, - [ - {"created_at": to_date(0.25), "cancelled_at": to_date(0.26), "cancelled": True}, - { - "created_at": to_date(0.5), - "notification_sent_at": to_date(4 / 7), - "notification_sent": True, - }, - {"created_at": to_date(0.75), "deleted_at": to_date(5.9 / 7), "deleted": True}, - ], - ) - cls.percent_open = (duration * 4 / 7).total_seconds() / duration.total_seconds() - - cls.num_updates = 3 - for review in Review.objects.all(): - review.enrollment = 80 - review.save() - sec = get_sec_by_id(cls.ESE_120_001_id) - sec.capacity = 100 - sec.save() - cls.enrollment = 80 - - recompute_precomputed_fields() - recompute_demand_distribution_estimates(semesters=TEST_SEMESTER) - - plots = { - "pca_demand_plot_since_semester": TEST_SEMESTER, - "pca_demand_plot_num_semesters": 1, - "percent_open_plot_since_semester": TEST_SEMESTER, - "percent_open_plot_num_semesters": 1, - "pca_demand_plot": [ - (0, 0), - (0.25, 0.5), - (2 / 7, 0), - (3 / 7, 0.5), - (4 / 7, 0), - (5 / 7, 0.5), - (6 / 7, 0), - (1, 0), - ], - "percent_open_plot": 
percent_open_plot, - } - local_tz = gettz(TIME_ZONE) - cls.course_plots_subdict = { - "code": "ESE-120", - "current_add_drop_period": { - "start": cls.current_sem_adp.estimated_start.astimezone(tz=local_tz), - "end": cls.current_sem_adp.estimated_end.astimezone(tz=local_tz), - }, - "average_plots": plots, - "recent_plots": plots, - } - empty_plots = { - "pca_demand_plot_since_semester": None, - "pca_demand_plot_num_semesters": 0, - "percent_open_plot_since_semester": None, - "percent_open_plot_num_semesters": 0, - "pca_demand_plot": None, - "percent_open_plot": None, - } - cls.empty_course_plots_subdict = { - "code": "ESE-120", - "current_add_drop_period": { - "start": cls.current_sem_adp.estimated_start.astimezone(tz=local_tz), - "end": cls.current_sem_adp.estimated_end.astimezone(tz=local_tz), - }, - "average_plots": empty_plots, - "recent_plots": empty_plots, - } - - def setUp(self): - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - - def test_course(self): - reviews_subdict = { - **average( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - **recent( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - } - self.assertRequestContainsAppx( - "course-reviews", - "ESE-120", - { - **reviews_subdict, - "instructors": { - Instructor.objects.get(name=self.instructor_1_name).pk: reviews_subdict, - Instructor.objects.get(name=self.instructor_2_name).pk: reviews_subdict, - }, - }, - ) - self.assertRequestContainsAppx( - "course-plots", - "ESE-120", - self.course_plots_subdict, - ) - - instructor_ids = ",".join( - [ - str(id) - for id in [ - Instructor.objects.get(name=self.instructor_1_name).pk, - Instructor.objects.get(name=self.instructor_2_name).pk, - ] - ] - ) - self.assertRequestContainsAppx( - "course-plots", - "ESE-120", - self.course_plots_subdict, - query_params={ - "instructor_ids": instructor_ids, - }, - ) - - def test_instructor(self): - subdict = { - **average( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - **recent( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - } - self.assertRequestContainsAppx( - "instructor-reviews", - Instructor.objects.get(name=self.instructor_1_name).pk, - {**subdict, "courses": {"ESE-120": subdict}}, - ) - self.assertRequestContainsAppx( - "instructor-reviews", - Instructor.objects.get(name=self.instructor_2_name).pk, - {**subdict, "courses": {"ESE-120": subdict}}, - ) - - def test_plots_invalid_instructor_ids(self): - max_instructor_id = max( - Instructor.objects.filter( - name__in=[self.instructor_1_name, self.instructor_2_name] - ).values_list("pk", flat=True) - ) - instructor_ids = ",".join( - [str(id) for id in [max_instructor_id + 1, max_instructor_id + 2]] - ) - self.assertRequestContainsAppx( - "course-plots", - "ESE-120", - self.empty_course_plots_subdict, - query_params={ - "instructor_ids": instructor_ids, - }, - ) - - def test_plots_filter_to_one_instructor(self): - self.assertRequestContainsAppx( - "course-plots", - "ESE-120", - self.course_plots_subdict, - query_params={ - "instructor_ids": str(Instructor.objects.get(name=self.instructor_1_name).id), - }, - ) - self.assertRequestContainsAppx( - "course-plots", - "ESE-120", - self.course_plots_subdict, - query_params={ - "instructor_ids": 
str(Instructor.objects.get(name=self.instructor_2_name).id), - }, - ) - - def test_department(self): - subdict = { - **average( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - **recent( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ), - } - self.assertRequestContainsAppx( - "department-reviews", "ESE", {"courses": {"ESE-120": subdict}} - ) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["ESE-120", Instructor.objects.get(name=self.instructor_1_name).pk], - { - "sections": [ - rating( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ) - ] - }, - ) - self.assertRequestContainsAppx( - "course-history", - ["ESE-120", Instructor.objects.get(name=self.instructor_2_name).pk], - { - "sections": [ - rating( - self.instructor_quality, - self.enrollment, - self.percent_open, - self.num_updates, - self.filled_in_adv_reg, - ) - ] - }, - ) - - def test_autocomplete(self): - self.assertRequestContainsAppx( - "review-autocomplete", - [], - { - "instructors": [ - { - "title": self.instructor_1_name, - "desc": "ESE", - "url": ( - "/instructor/" - + str(Instructor.objects.get(name=self.instructor_1_name).pk) - ), - }, - { - "title": self.instructor_2_name, - "desc": "ESE", - "url": ( - "/instructor/" - + str(Instructor.objects.get(name=self.instructor_2_name).pk) - ), - }, - ], - "courses": [ - { - "title": "ESE-120", - "desc": [""], - "url": "/course/ESE-120", - } - ], - "departments": [{"title": "ESE", "desc": "", "url": "/department/ESE"}], - }, - ) - - -# TODO: More tests to add: -# 2 sections of same class -# 2 different classes in same semester -# classes that don't qualify -# classes with no registrations -# current / future classes? 
+from dateutil.tz import gettz +from django.contrib.auth.models import User +from django.db.models.signals import post_save +from django.test import TestCase +from options.models import Option +from rest_framework.test import APIClient + +from alert.management.commands.recomputestats import ( + recompute_demand_distribution_estimates, + recompute_precomputed_fields, +) +from alert.models import AddDropPeriod, Registration +from courses.models import Instructor, Section +from courses.util import ( + get_or_create_add_drop_period, + invalidate_current_semester_cache, + record_update, +) +from PennCourses.settings.base import TIME_ZONE +from review.models import Review +from tests.courses.util import create_mock_data +from tests.review.test_api import PCRTestMixin, create_review + + +TEST_CURRENT_SEMESTER = "2021C" +TEST_SEMESTER = "2021A" # Past semester for reviews + +assert TEST_CURRENT_SEMESTER >= "2021C", "TEST_CURRENT_SEMESTER must be at least 2021C" +assert "b" not in TEST_CURRENT_SEMESTER.lower(), "TEST_CURRENT_SEMESTER cannot be a summer semester" +assert TEST_SEMESTER >= "2021A", "TEST_SEMESTER must be at least 2021A" +assert "b" not in TEST_SEMESTER.lower(), "TEST_SEMESTER cannot be a summer semester" + + +def set_semester(): + post_save.disconnect( + receiver=invalidate_current_semester_cache, + sender=Option, + dispatch_uid="invalidate_current_semester_cache", + ) + Option(key="SEMESTER", value=TEST_CURRENT_SEMESTER, value_type="TXT").save() + AddDropPeriod(semester=TEST_CURRENT_SEMESTER).save() + AddDropPeriod(semester=TEST_SEMESTER).save() + + +""" +Below are some utility functions that make writing out the response.data dictionaries +a bit easier to do. All of the tests use instructor_quality as the reviewbit to test. +these helper functions cut down on a lot of the repeated characters in the responses. 
+""" + + +def ratings_dict( + label, + rInstructorQuality, + rFinalEnrollment, + rPercentOpen, + rNumOpenings, + rFilledInAdvReg, +): + return { + label: { + "rInstructorQuality": rInstructorQuality, + "rFinalEnrollment": rFinalEnrollment, + "rPercentOpen": rPercentOpen, + "rNumOpenings": rNumOpenings, + "rFilledInAdvReg": rFilledInAdvReg, + } + } + + +def average(*fields): + return ratings_dict("average_reviews", *fields) + + +def recent(*fields): + return ratings_dict("recent_reviews", *fields) + + +def rating(*fields): + return ratings_dict("ratings", *fields) + + +def set_registrations(section_id, registration_spec_list): + for reg_spec in registration_spec_list: + reg = Registration(section_id=section_id) + reg.save() + for key, value in reg_spec.items(): + setattr(reg, key, value) + reg.save() + + +def get_sec_by_id(sec_id): + return Section.objects.get(id=sec_id) + + +def get_start_end_duration(adp): + start = adp.estimated_start + end = adp.estimated_end + duration = end - start + return start, end, duration + + +def get_to_date_func(adp): + start, end, duration = get_start_end_duration(adp) + + def to_date(percent): + return start + percent * duration + + return to_date + + +class TwoSemestersOneInstructorTestCase(TestCase, PCRTestMixin): + @classmethod + def setUpTestData(cls): + set_semester() + cls.instructor_name = "Instructor One" + create_review( + "ESE-120-001", TEST_SEMESTER, cls.instructor_name, {"instructor_quality": 3.5} + ) + create_review("ESE-120-001", "2020C", cls.instructor_name, {"instructor_quality": 2}) + cls.ESE_120_001_TEST_SEMESTER_id = Section.objects.get( + full_code="ESE-120-001", course__semester=TEST_SEMESTER + ).id + cls.ESE_120_001_2020C_id = Section.objects.get( + full_code="ESE-120-001", course__semester="2020C" + ).id + course, section = create_mock_data("ESE-120-001", TEST_CURRENT_SEMESTER) + section.capacity = 100 + section.save() + cls.current_sem_adp = get_or_create_add_drop_period(TEST_CURRENT_SEMESTER) + cls.adp = get_or_create_add_drop_period(TEST_SEMESTER) + AddDropPeriod(semester="2020C").save() + cls.old_adp = get_or_create_add_drop_period("2020C") + cls.average_instructor_quality = (2 + 3.5) / 2 + cls.recent_instructor_quality = 3.5 + cls.old_instructor_quality = 2 + old_status = "C" + new_status = "O" + start, end, duration = get_start_end_duration(cls.adp) + for date in ( + [start - 3 * duration / 5, start - 2 * duration / 5, start - duration / 5] + + [start + i * duration / 5 for i in range(1, 5)] + + [ + start + 0.81 * duration, + start + 0.82 * duration, + ] + ): + # O[.2]C[.4]O[.6]C[.8]O[.81]C[.82]O + record_update( + Section.objects.get(id=cls.ESE_120_001_TEST_SEMESTER_id), + TEST_SEMESTER, + old_status, + new_status, + False, + dict(), + created_at=date, + ) + old_status, new_status = new_status, old_status + cls.recent_percent_open = 3 / 5 - 0.01 + cls.recent_filled_in_adv_reg = 0 + old_status = "O" + new_status = "C" + start, end, duration = get_start_end_duration(cls.old_adp) + for date in [start - 3 * duration / 5, start - 2 * duration / 5, start - duration / 5] + [ + start + i * duration / 4 for i in range(1, 4) + ]: + # C[.25]O[.5]C[.75]O + record_update( + Section.objects.get(id=cls.ESE_120_001_2020C_id), + "2020C", + old_status, + new_status, + False, + dict(), + created_at=date, + ) + old_status, new_status = new_status, old_status + cls.average_percent_open = (1 / 2 + 3 / 5 - 0.01) / 2 + cls.old_percent_open = 1 / 2 + cls.average_filled_in_adv_reg = 0.5 + cls.old_filled_in_adv_reg = 1 + to_date = 
get_to_date_func(cls.adp) + # O[.2]C[.4]O[.6]C[.8]O[.81]C[.82]O + registration_list_TS = [ + {"created_at": to_date(0.1), "cancelled_at": to_date(0.19), "cancelled": True}, + { + "created_at": to_date(0.15), + "notification_sent_at": to_date(0.4), + "notification_sent": True, + }, + { + "created_at": to_date(0.45), + "notification_sent_at": to_date(0.6), + "notification_sent": True, + }, + {"created_at": to_date(0.61), "deleted_at": to_date(0.79), "deleted": True}, + ] + set_registrations(cls.ESE_120_001_TEST_SEMESTER_id, registration_list_TS) + to_date = get_to_date_func(cls.old_adp) + # C[.25]O[.5]C[.75]O + registration_list_2020C = [ + { + "created_at": to_date(0.1001), + "notification_sent_at": to_date(0.25), + "notification_sent": True, + }, + { + "created_at": to_date(0.51), + "cancelled_at": to_date(0.52), + "deleted_at": to_date(0.53), + "deleted": True, + }, + {"created_at": to_date(0.76), "deleted_at": to_date(0.77), "deleted": True}, + ] + set_registrations(cls.ESE_120_001_2020C_id, registration_list_2020C) + + cls.recent_num_updates = 3 + cls.average_num_updates = (3 + 2) / 2 + cls.old_num_updates = 2 + recent_review = Review.objects.get(section_id=cls.ESE_120_001_TEST_SEMESTER_id) + recent_review.enrollment = 80 + recent_review.save() + test_sem_class = get_sec_by_id(cls.ESE_120_001_TEST_SEMESTER_id) + test_sem_class.capacity = 100 + test_sem_class.save() + average_review = Review.objects.get(section_id=cls.ESE_120_001_2020C_id) + average_review.enrollment = 99 + average_review.save() + old_sem_class = get_sec_by_id(cls.ESE_120_001_2020C_id) + old_sem_class.capacity = 100 + old_sem_class.save() + cls.recent_enrollment = 80 + cls.average_enrollment = (80 + 99) / 2 + cls.old_enrollment = 99 + + recompute_precomputed_fields() + recompute_demand_distribution_estimates( + semesters=TEST_CURRENT_SEMESTER + "," + TEST_SEMESTER + "," + "2020C" + ) + + local_tz = gettz(TIME_ZONE) + cls.course_plots_subdict = { + "code": "ESE-120", + "current_add_drop_period": { + "start": cls.current_sem_adp.estimated_start.astimezone(tz=local_tz), + "end": cls.current_sem_adp.estimated_end.astimezone(tz=local_tz), + }, + "average_plots": { + "pca_demand_plot_since_semester": "2020C", + "pca_demand_plot_num_semesters": 2, + "percent_open_plot_since_semester": "2020C", + "percent_open_plot_num_semesters": 2, + "pca_demand_plot": [ + (0, 0.0), + (0.1001, 0.25), + (0.2, 0.5), + (0.25, 0.25), + (0.4, 0.0), + (0.5, 0.25), + (0.6, 0.5), + (0.75, 0.25), + (0.8, 0.0), + (0.81, 0.25), + (0.82, 0.0), + ], + "percent_open_plot": [ + (0, 0.5), + (0.2, 0), + (0.25, 0.5), + (0.4, 1), + (0.5, 0.5), + (0.6, 0.0), + (0.75, 0.5), + (0.8, 1), + (0.81, 0.5), + (0.82, 1), + (1, 1), + ], + }, + "recent_plots": { + "pca_demand_plot_since_semester": TEST_SEMESTER, + "pca_demand_plot_num_semesters": 1, + "percent_open_plot_since_semester": TEST_SEMESTER, + "percent_open_plot_num_semesters": 1, + "pca_demand_plot": [ + (0, 0.0), + (0.2, 0.5), + (0.4, 0.0), + (0.6, 0.5), + (0.8, 0.0), + (0.81, 0.5), + (0.82, 0.0), + ], + "percent_open_plot": [ + (0, 1), + (0.2, 0.0), + (0.4, 1), + (0.6, 0.0), + (0.8, 1), + (0.81, 0), + (0.82, 1), + (1, 1), + ], + }, + } + + def setUp(self): + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + + def test_course(self): + reviews_subdict = { + **average( + self.average_instructor_quality, + self.average_enrollment, + self.average_percent_open, + self.average_num_updates, + self.average_filled_in_adv_reg, + ), + **recent( + 
self.recent_instructor_quality, + self.recent_enrollment, + self.recent_percent_open, + self.recent_num_updates, + self.recent_filled_in_adv_reg, + ), + } + self.assertRequestContainsAppx( + "course-reviews", + "ESE-120", + {**reviews_subdict, "instructors": {Instructor.objects.get().pk: reviews_subdict}}, + ) + self.assertRequestContainsAppx( + "course-plots", + "ESE-120", + self.course_plots_subdict, + ) + + instructor_ids = ",".join(str(id) for id in Instructor.objects.values_list("id", flat=True)) + self.assertRequestContainsAppx( + "course-plots", + "ESE-120", + self.course_plots_subdict, + query_params={ + "instructor_ids": instructor_ids, + }, + ) + + def test_instructor(self): + subdict = { + **average( + self.average_instructor_quality, + self.average_enrollment, + self.average_percent_open, + self.average_num_updates, + self.average_filled_in_adv_reg, + ), + **recent( + self.recent_instructor_quality, + self.recent_enrollment, + self.recent_percent_open, + self.recent_num_updates, + self.recent_filled_in_adv_reg, + ), + } + self.assertRequestContainsAppx( + "instructor-reviews", + Instructor.objects.get().pk, + {**subdict, "courses": {"ESE-120": subdict}}, + ) + + def test_department(self): + subdict = { + **average( + self.average_instructor_quality, + self.average_enrollment, + self.average_percent_open, + self.average_num_updates, + self.average_filled_in_adv_reg, + ), + **recent( + self.recent_instructor_quality, + self.recent_enrollment, + self.recent_percent_open, + self.recent_num_updates, + self.recent_filled_in_adv_reg, + ), + } + self.assertRequestContainsAppx( + "department-reviews", "ESE", {"courses": {"ESE-120": subdict}} + ) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["ESE-120", Instructor.objects.get().pk], + { + "sections": [ + rating( + self.recent_instructor_quality, + self.recent_enrollment, + self.recent_percent_open, + self.recent_num_updates, + self.recent_filled_in_adv_reg, + ), + rating( + self.old_instructor_quality, + self.old_enrollment, + self.old_percent_open, + self.old_num_updates, + self.old_filled_in_adv_reg, + ), + ] + }, + ) + + def test_autocomplete(self): + self.assertRequestContainsAppx( + "review-autocomplete", + [], + { + "instructors": [ + { + "title": self.instructor_name, + "desc": "ESE", + "url": f"/instructor/{Instructor.objects.get().pk}", + } + ], + "courses": [ + { + "title": "ESE-120", + "desc": [""], + "url": "/course/ESE-120", + } + ], + "departments": [{"title": "ESE", "desc": "", "url": "/department/ESE"}], + }, + ) + + def test_current_percent_open(self): + self.assertAlmostEquals( + self.recent_percent_open, + Section.objects.get(id=self.ESE_120_001_TEST_SEMESTER_id).current_percent_open, + ) + self.assertAlmostEquals( + self.old_percent_open, + Section.objects.get(id=self.ESE_120_001_2020C_id).current_percent_open, + ) + + +class OneReviewTestCase(TestCase, PCRTestMixin): + @classmethod + def setUpTestData(cls): + set_semester() + cls.instructor_name = "Instructor One" + create_review( + "ESE-120-001", TEST_SEMESTER, cls.instructor_name, {"instructor_quality": 3.5} + ) + cls.ESE_120_001_id = Section.objects.get(full_code="ESE-120-001").id + cls.instructor_quality = 3.5 + cls.current_sem_adp = get_or_create_add_drop_period(TEST_CURRENT_SEMESTER) + cls.adp = get_or_create_add_drop_period(TEST_SEMESTER) + start = cls.adp.estimated_start + end = cls.adp.estimated_end + duration = end - start + old_status = "C" + new_status = "O" + percent_open_plot = [(0, 1)] + for date in [start - 3 * 
duration / 7, start - 2 * duration / 7, start - duration / 7] + [ + start + i * duration / 7 for i in range(1, 7) + ]: + # O[1/7]C[2/7]O[3/7]C[4/7]O[5/7]C[6/7]O + percent_thru = cls.adp.get_percent_through_add_drop(date) + record_update( + Section.objects.get(id=cls.ESE_120_001_id), + TEST_SEMESTER, + old_status, + new_status, + False, + dict(), + created_at=date, + ) + if date >= start: + percent_open_plot.append((percent_thru, int(new_status == "O"))) + old_status, new_status = new_status, old_status + percent_open_plot.append((1, 1)) + cls.percent_open = (duration * 4 / 7).total_seconds() / duration.total_seconds() + cls.filled_in_adv_reg = 0 + to_date = get_to_date_func(cls.adp) + set_registrations( + cls.ESE_120_001_id, + [ + {"created_at": to_date(0.25), "cancelled_at": to_date(0.26), "cancelled": True}, + { + "created_at": to_date(0.5), + "notification_sent_at": to_date(4 / 7), + "notification_sent": True, + }, + {"created_at": to_date(0.75), "deleted_at": to_date(5.9 / 7), "deleted": True}, + ], + ) + + cls.num_updates = 3 + review = Review.objects.get() + review.enrollment = 80 + review.save() + sec = get_sec_by_id(cls.ESE_120_001_id) + sec.capacity = 100 + sec.save() + cls.enrollment = 80 + + recompute_precomputed_fields() + recompute_demand_distribution_estimates(semesters=TEST_SEMESTER, verbose=True) + + plots = { + "pca_demand_plot_since_semester": TEST_SEMESTER, + "pca_demand_plot_num_semesters": 1, + "percent_open_plot_since_semester": TEST_SEMESTER, + "percent_open_plot_num_semesters": 1, + "pca_demand_plot": [ + (0, 0), + (0.25, 0.5), + (2 / 7, 0), + (3 / 7, 0.5), + (4 / 7, 0), + (5 / 7, 0.5), + (6 / 7, 0), + (1, 0), + ], + "percent_open_plot": percent_open_plot, + } + local_tz = gettz(TIME_ZONE) + cls.course_plots_subdict = { + "code": "ESE-120", + "current_add_drop_period": { + "start": cls.current_sem_adp.estimated_start.astimezone(tz=local_tz), + "end": cls.current_sem_adp.estimated_end.astimezone(tz=local_tz), + }, + "average_plots": plots, + "recent_plots": plots, + } + + def setUp(self): + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + + def test_course(self): + reviews_subdict = { + **average( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + **recent( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + } + + instructor_ids = ",".join( + [str(id) for id in [Instructor.objects.get().pk]], + ) + self.assertRequestContainsAppx( + "course-plots", + ["ESE-120"], + self.course_plots_subdict, + query_params={ + "instructor_ids": instructor_ids, + }, + ) + + self.assertRequestContainsAppx( + "course-reviews", + "ESE-120", + {**reviews_subdict, "instructors": {Instructor.objects.get().pk: reviews_subdict}}, + ) + + def test_instructor(self): + subdict = { + **average( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + **recent( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + } + self.assertRequestContainsAppx( + "instructor-reviews", + Instructor.objects.get().pk, + {**subdict, "courses": {"ESE-120": subdict}}, + ) + + def test_department(self): + subdict = { + **average( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + **recent( + self.instructor_quality, + self.enrollment, + 
self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + } + self.assertRequestContainsAppx( + "department-reviews", "ESE", {"courses": {"ESE-120": subdict}} + ) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["ESE-120", Instructor.objects.get().pk], + { + "sections": [ + rating( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ) + ] + }, + ) + + def test_autocomplete(self): + self.assertRequestContainsAppx( + "review-autocomplete", + [], + { + "instructors": [ + { + "title": self.instructor_name, + "desc": "ESE", + "url": f"/instructor/{Instructor.objects.get().pk}", + } + ], + "courses": [ + { + "title": "ESE-120", + "desc": [""], + "url": "/course/ESE-120", + } + ], + "departments": [{"title": "ESE", "desc": "", "url": "/department/ESE"}], + }, + ) + + +class TwoInstructorsOneSectionTestCase(TestCase, PCRTestMixin): + @classmethod + def setUpTestData(cls): + set_semester() + cls.instructor_1_name = "Instructor One" + cls.instructor_2_name = "Instructor Two" + create_review( + "ESE-120-001", TEST_SEMESTER, cls.instructor_1_name, {"instructor_quality": 3.5} + ) + create_review( + "ESE-120-001", TEST_SEMESTER, cls.instructor_2_name, {"instructor_quality": 3.5} + ) + cls.ESE_120_001_id = Section.objects.get(full_code="ESE-120-001").id + cls.instructor_quality = 3.5 + cls.current_sem_adp = get_or_create_add_drop_period(TEST_CURRENT_SEMESTER) + cls.adp = get_or_create_add_drop_period(TEST_SEMESTER) + start = cls.adp.estimated_start + end = cls.adp.estimated_end + duration = end - start + old_status = "C" + new_status = "O" + percent_open_plot = [(0, 1)] + for date in [start - 3 * duration / 7, start - 2 * duration / 7, start - duration / 7] + [ + start + i * duration / 7 for i in range(1, 7) + ]: + # O[1/7]C[2/7]O[3/7]C[4/7]O[5/7]C[6/7]O + percent_thru = cls.adp.get_percent_through_add_drop(date) + record_update( + Section.objects.get(id=cls.ESE_120_001_id), + TEST_SEMESTER, + old_status, + new_status, + False, + dict(), + created_at=date, + ) + if date >= start: + percent_open_plot.append((percent_thru, int(new_status == "O"))) + old_status, new_status = new_status, old_status + cls.filled_in_adv_reg = 0 + percent_open_plot.append((1, 1)) + to_date = get_to_date_func(cls.adp) + set_registrations( + cls.ESE_120_001_id, + [ + {"created_at": to_date(0.25), "cancelled_at": to_date(0.26), "cancelled": True}, + { + "created_at": to_date(0.5), + "notification_sent_at": to_date(4 / 7), + "notification_sent": True, + }, + {"created_at": to_date(0.75), "deleted_at": to_date(5.9 / 7), "deleted": True}, + ], + ) + cls.percent_open = (duration * 4 / 7).total_seconds() / duration.total_seconds() + + cls.num_updates = 3 + for review in Review.objects.all(): + review.enrollment = 80 + review.save() + sec = get_sec_by_id(cls.ESE_120_001_id) + sec.capacity = 100 + sec.save() + cls.enrollment = 80 + + recompute_precomputed_fields() + recompute_demand_distribution_estimates(semesters=TEST_SEMESTER) + + plots = { + "pca_demand_plot_since_semester": TEST_SEMESTER, + "pca_demand_plot_num_semesters": 1, + "percent_open_plot_since_semester": TEST_SEMESTER, + "percent_open_plot_num_semesters": 1, + "pca_demand_plot": [ + (0, 0), + (0.25, 0.5), + (2 / 7, 0), + (3 / 7, 0.5), + (4 / 7, 0), + (5 / 7, 0.5), + (6 / 7, 0), + (1, 0), + ], + "percent_open_plot": percent_open_plot, + } + local_tz = gettz(TIME_ZONE) + cls.course_plots_subdict = { + "code": "ESE-120", + "current_add_drop_period": { + "start": 
cls.current_sem_adp.estimated_start.astimezone(tz=local_tz), + "end": cls.current_sem_adp.estimated_end.astimezone(tz=local_tz), + }, + "average_plots": plots, + "recent_plots": plots, + } + empty_plots = { + "pca_demand_plot_since_semester": None, + "pca_demand_plot_num_semesters": 0, + "percent_open_plot_since_semester": None, + "percent_open_plot_num_semesters": 0, + "pca_demand_plot": None, + "percent_open_plot": None, + } + cls.empty_course_plots_subdict = { + "code": "ESE-120", + "current_add_drop_period": { + "start": cls.current_sem_adp.estimated_start.astimezone(tz=local_tz), + "end": cls.current_sem_adp.estimated_end.astimezone(tz=local_tz), + }, + "average_plots": empty_plots, + "recent_plots": empty_plots, + } + + def setUp(self): + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + + def test_course(self): + reviews_subdict = { + **average( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + **recent( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + } + self.assertRequestContainsAppx( + "course-reviews", + "ESE-120", + { + **reviews_subdict, + "instructors": { + Instructor.objects.get(name=self.instructor_1_name).pk: reviews_subdict, + Instructor.objects.get(name=self.instructor_2_name).pk: reviews_subdict, + }, + }, + ) + self.assertRequestContainsAppx( + "course-plots", + "ESE-120", + self.course_plots_subdict, + ) + + instructor_ids = ",".join( + [ + str(id) + for id in [ + Instructor.objects.get(name=self.instructor_1_name).pk, + Instructor.objects.get(name=self.instructor_2_name).pk, + ] + ] + ) + self.assertRequestContainsAppx( + "course-plots", + "ESE-120", + self.course_plots_subdict, + query_params={ + "instructor_ids": instructor_ids, + }, + ) + + def test_instructor(self): + subdict = { + **average( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + **recent( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + } + self.assertRequestContainsAppx( + "instructor-reviews", + Instructor.objects.get(name=self.instructor_1_name).pk, + {**subdict, "courses": {"ESE-120": subdict}}, + ) + self.assertRequestContainsAppx( + "instructor-reviews", + Instructor.objects.get(name=self.instructor_2_name).pk, + {**subdict, "courses": {"ESE-120": subdict}}, + ) + + def test_plots_invalid_instructor_ids(self): + max_instructor_id = max( + Instructor.objects.filter( + name__in=[self.instructor_1_name, self.instructor_2_name] + ).values_list("pk", flat=True) + ) + instructor_ids = ",".join( + [str(id) for id in [max_instructor_id + 1, max_instructor_id + 2]] + ) + self.assertRequestContainsAppx( + "course-plots", + "ESE-120", + self.empty_course_plots_subdict, + query_params={ + "instructor_ids": instructor_ids, + }, + ) + + def test_plots_filter_to_one_instructor(self): + self.assertRequestContainsAppx( + "course-plots", + "ESE-120", + self.course_plots_subdict, + query_params={ + "instructor_ids": str(Instructor.objects.get(name=self.instructor_1_name).id), + }, + ) + self.assertRequestContainsAppx( + "course-plots", + "ESE-120", + self.course_plots_subdict, + query_params={ + "instructor_ids": str(Instructor.objects.get(name=self.instructor_2_name).id), + }, + ) + + def test_department(self): + subdict = { + **average( + self.instructor_quality, + self.enrollment, + 
self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + **recent( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ), + } + self.assertRequestContainsAppx( + "department-reviews", "ESE", {"courses": {"ESE-120": subdict}} + ) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["ESE-120", Instructor.objects.get(name=self.instructor_1_name).pk], + { + "sections": [ + rating( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ) + ] + }, + ) + self.assertRequestContainsAppx( + "course-history", + ["ESE-120", Instructor.objects.get(name=self.instructor_2_name).pk], + { + "sections": [ + rating( + self.instructor_quality, + self.enrollment, + self.percent_open, + self.num_updates, + self.filled_in_adv_reg, + ) + ] + }, + ) + + def test_autocomplete(self): + self.assertRequestContainsAppx( + "review-autocomplete", + [], + { + "instructors": [ + { + "title": self.instructor_1_name, + "desc": "ESE", + "url": ( + "/instructor/" + + str(Instructor.objects.get(name=self.instructor_1_name).pk) + ), + }, + { + "title": self.instructor_2_name, + "desc": "ESE", + "url": ( + "/instructor/" + + str(Instructor.objects.get(name=self.instructor_2_name).pk) + ), + }, + ], + "courses": [ + { + "title": "ESE-120", + "desc": [""], + "url": "/course/ESE-120", + } + ], + "departments": [{"title": "ESE", "desc": "", "url": "/department/ESE"}], + }, + ) + + +# TODO: More tests to add: +# 2 sections of same class +# 2 different classes in same semester +# classes that don't qualify +# classes with no registrations +# current / future classes? diff --git a/backend/tests/review/test_topics.py b/backend/tests/review/test_topics.py index bb7826c08..75a306759 100644 --- a/backend/tests/review/test_topics.py +++ b/backend/tests/review/test_topics.py @@ -1,703 +1,703 @@ -from django.contrib.auth.models import User -from django.test import TestCase -from django.urls import reverse -from rest_framework.test import APIClient - -from alert.models import AddDropPeriod -from courses.models import Instructor, Section, Topic -from tests.courses.util import create_mock_data -from tests.review.test_api import ( - PCRTestMixin, - average_and_recent, - create_review, - rating, - set_semester, -) - - -TEST_SEMESTER = "2017C" -assert TEST_SEMESTER > "2012A" - - -class CourseCodeChangedTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - AddDropPeriod(semester="2012A").save() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-471-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - create_review("CIS-371-001", "2012A", self.instructor_name, {"instructor_quality": 2}) - create_review( - "CIS-371-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-471-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - - Section.objects.all().update(activity="LEC") - - topic_371 = Topic.objects.get(most_recent__full_code="CIS-371") - topic_471 = Topic.objects.get(most_recent__full_code="CIS-471") - topic_371.merge_with(topic_471) - - self.extra_course_data = { - "code": "CIS-471", - "historical_codes": [ - {"full_code": "CIS-371", "branched_from": False, "semester": "2012A"} - ], - } - - def test_course(self): - 
self.assertRequestContainsAppx( - "course-reviews", - "CIS-471", - { - "num_semesters": 3, - **average_and_recent(3, 4), - **self.extra_course_data, - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(3, 4), - "latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - - def test_course_old_code(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-371", - { - "num_semesters": 3, - **average_and_recent(3, 4), - **self.extra_course_data, - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(3, 4), - "latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - Instructor.objects.get(name=self.instructor_name).pk, - { - **average_and_recent(3, 4), - "courses": {"CIS-471": average_and_recent(3, 4)}, - }, - ) - - def test_instructor_no_old_codes(self): - res = self.client.get( - reverse( - "instructor-reviews", args=[Instructor.objects.get(name=self.instructor_name).pk] - ) - ) - self.assertEqual(200, res.status_code) - self.assertFalse("CIS-371" in res.data["courses"]) - - def test_department(self): - self.assertRequestContainsAppx( - "department-reviews", - "CIS", - { - "courses": {"CIS-471": average_and_recent(3, 4)}, - }, - ) - - def test_department_no_old_codes(self): - res = self.client.get(reverse("department-reviews", args=["CIS"])) - self.assertEqual(200, res.status_code) - self.assertFalse("CIS-371" in res.data["courses"]) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["CIS-371", Instructor.objects.get(name=self.instructor_name).pk], - {"sections": [rating(4), rating(2)]}, - ) - self.assertRequestContainsAppx( - "course-history", - ["CIS-471", Instructor.objects.get(name=self.instructor_name).pk], - { - "sections": [ - {"course_code": "CIS-471", "activity": "Lecture", **rating(4)}, - {"course_code": "CIS-371", "activity": "Lecture", **rating(2)}, - ] - }, - ) - - -class CourseCodeChangedNoReviewTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - AddDropPeriod(semester="2012A").save() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - - _, section = create_mock_data("CIS-471-001", TEST_SEMESTER) - instructor, _ = Instructor.objects.get_or_create(name=self.instructor_name) - section.instructors.add(instructor) - - create_review("CIS-371-001", "2012A", self.instructor_name, {"instructor_quality": 2}) - create_review( - "CIS-371-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-471-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - Section.objects.all().update(activity="LEC") - topic_371 = Topic.objects.get(most_recent__full_code="CIS-371") - topic_471 = Topic.objects.get(most_recent__full_code="CIS-471") - topic_371.merge_with(topic_471) - - self.extra_course_data = { - "code": "CIS-471", - "historical_codes": [ - {"full_code": "CIS-371", "branched_from": False, "semester": "2012A"} - ], - } - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-471", - { - "num_semesters": 3, - **average_and_recent(2, 2), - **self.extra_course_data, - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(2, 2), - "latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - - def 
test_course_old_code(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-371", - { - "num_semesters": 3, - **average_and_recent(2, 2), - **self.extra_course_data, - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(2, 2), - "latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - Instructor.objects.get(name=self.instructor_name).pk, - { - **average_and_recent(2, 2), - "courses": {"CIS-471": average_and_recent(2, 2)}, - }, - ) - - def test_instructor_no_old_codes(self): - res = self.client.get( - reverse( - "instructor-reviews", args=[Instructor.objects.get(name=self.instructor_name).pk] - ) - ) - self.assertEqual(200, res.status_code) - self.assertFalse("CIS-371" in res.data["courses"]) - - def test_department(self): - self.assertRequestContainsAppx( - "department-reviews", - "CIS", - { - "courses": {"CIS-471": average_and_recent(2, 2)}, - }, - ) - - def test_department_no_old_codes(self): - res = self.client.get(reverse("department-reviews", args=["CIS"])) - self.assertEqual(200, res.status_code) - self.assertFalse("CIS-371" in res.data["courses"]) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["CIS-371", Instructor.objects.get(name=self.instructor_name).pk], - { - "sections": [ - { - "course_code": "CIS-471", - "activity": "Lecture", - "semester": TEST_SEMESTER, - "forms_returned": None, - "forms_produced": None, - }, - { - "course_code": "CIS-371", - "semester": "2012A", - "activity": "Lecture", - **rating(2), - }, - ] - }, - ) - self.assertRequestContainsAppx( - "course-history", - ["CIS-471", Instructor.objects.get(name=self.instructor_name).pk], - { - "sections": [ - { - "course_code": "CIS-471", - "activity": "Lecture", - "semester": TEST_SEMESTER, - "forms_returned": None, - "forms_produced": None, - }, - { - "course_code": "CIS-371", - "semester": "2012A", - "activity": "Lecture", - **rating(2), - }, - ] - }, - ) - - -class InstructorNoReviewTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - AddDropPeriod(semester="2012A").save() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - - _, section = create_mock_data("CIS-471-001", TEST_SEMESTER) - instructor, _ = Instructor.objects.get_or_create(name="Instructor Two") - section.instructors.add(instructor) - - create_review("CIS-371-001", "2012A", self.instructor_name, {"instructor_quality": 2}) - create_review( - "CIS-371-002", - "2007C", - self.instructor_name, - {"instructor_quality": 0}, - responses=0, - ) - create_review( - "CIS-471-001", - "2007C", - "No Responses Instructor", - {"instructor_quality": 0}, - responses=0, - ) - Section.objects.all().update(activity="LEC") - topic_371 = Topic.objects.get(most_recent__full_code="CIS-371") - topic_471 = Topic.objects.get(most_recent__full_code="CIS-471") - topic_371.merge_with(topic_471) - - self.extra_course_data = { - "code": "CIS-471", - "historical_codes": [ - {"full_code": "CIS-371", "branched_from": False, "semester": "2012A"} - ], - } - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-471", - { - "num_semesters": 3, - **average_and_recent(2, 2), - **self.extra_course_data, - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(2, 2), - "latest_semester": "2012A", - }, - 
Instructor.objects.get(name="Instructor Two").pk: { - "average_reviews": {}, - "recent_reviews": {}, - "latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - - def test_course_old_code(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-371", - { - "num_semesters": 3, - **average_and_recent(2, 2), - **self.extra_course_data, - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(2, 2), - "latest_semester": "2012A", - }, - Instructor.objects.get(name="Instructor Two").pk: { - "average_reviews": {}, - "recent_reviews": {}, - "latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - Instructor.objects.get(name=self.instructor_name).pk, - { - **average_and_recent(2, 2), - "courses": {"CIS-471": average_and_recent(2, 2)}, - }, - ) - - def test_instructor_no_old_codes(self): - res = self.client.get( - reverse( - "instructor-reviews", args=[Instructor.objects.get(name=self.instructor_name).pk] - ) - ) - self.assertEqual(200, res.status_code) - self.assertFalse("CIS-371" in res.data["courses"]) - - def test_department(self): - self.assertRequestContainsAppx( - "department-reviews", - "CIS", - { - "courses": {"CIS-471": average_and_recent(2, 2)}, - }, - ) - - def test_department_no_old_codes(self): - res = self.client.get(reverse("department-reviews", args=["CIS"])) - self.assertEqual(200, res.status_code) - self.assertFalse("CIS-371" in res.data["courses"]) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["CIS-371", Instructor.objects.get(name=self.instructor_name).pk], - { - "sections": [ - { - "course_code": "CIS-371", - "semester": "2012A", - "activity": "Lecture", - **rating(2), - }, - ] - }, - ) - self.assertRequestContainsAppx( - "course-history", - ["CIS-471", Instructor.objects.get(name=self.instructor_name).pk], - { - "sections": [ - { - "course_code": "CIS-371", - "semester": "2012A", - "activity": "Lecture", - **rating(2), - }, - ] - }, - ) - self.assertRequestContainsAppx( - "course-history", - ["CIS-371", Instructor.objects.get(name="Instructor Two").pk], - { - "sections": [ - { - "course_code": "CIS-471", - "activity": "Lecture", - "semester": TEST_SEMESTER, - "forms_returned": None, - "forms_produced": None, - }, - ] - }, - ) - self.assertRequestContainsAppx( - "course-history", - ["CIS-471", Instructor.objects.get(name="Instructor Two").pk], - { - "sections": [ - { - "course_code": "CIS-471", - "activity": "Lecture", - "semester": TEST_SEMESTER, - "forms_returned": None, - "forms_produced": None, - }, - ] - }, - ) - - -class CourseCodeChangedTwoInstructorsMultipleSemestersTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - AddDropPeriod(semester="2017A").save() - AddDropPeriod(semester="2012A").save() - AddDropPeriod(semester="2012C").save() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review("CIS-471-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) - create_review("CIS-471-001", "2017A", "Instructor Two", {"instructor_quality": 2}) - - create_review("CIS-371-900", "2012A", self.instructor_name, {"instructor_quality": 2}) - create_review("CIS-371-003", "2012C", "Instructor Two", {"instructor_quality": 1}) - - Section.objects.all().update(activity="LEC") - - self.instructor1 = Instructor.objects.get(name=self.instructor_name) - self.instructor1_pk = 
self.instructor1.pk - self.instructor2 = Instructor.objects.get(name="Instructor Two") - self.instructor2_pk = self.instructor2.pk - - topic_371 = Topic.objects.get(most_recent__full_code="CIS-371") - topic_471 = Topic.objects.get(most_recent__full_code="CIS-471") - topic_371.merge_with(topic_471) - - self.extra_course_data = { - "code": "CIS-471", - "historical_codes": [ - {"full_code": "CIS-371", "branched_from": False, "semester": "2012C"} - ], - } - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-471", - { - "num_semesters": 4, - **average_and_recent(2.25, 4), - **self.extra_course_data, - "instructors": { - self.instructor1_pk: { - **average_and_recent(3, 4), - "latest_semester": TEST_SEMESTER, - }, - self.instructor2_pk: { - **average_and_recent(1.5, 2), - "latest_semester": "2017A", - }, - }, - }, - ) - - def test_course_old_code(self): - self.assertRequestContainsAppx( - "course-reviews", - "CIS-371", - { - "num_semesters": 4, - **average_and_recent(2.25, 4), - **self.extra_course_data, - "instructors": { - self.instructor1_pk: { - **average_and_recent(3, 4), - "latest_semester": TEST_SEMESTER, - }, - self.instructor2_pk: { - **average_and_recent(1.5, 2), - "latest_semester": "2017A", - }, - }, - }, - ) - - def test_instructor(self): - self.assertRequestContainsAppx( - "instructor-reviews", - self.instructor1_pk, - { - **average_and_recent(3, 4), - "courses": { - "CIS-471": { - **average_and_recent(3, 4), - "full_code": "CIS-471", - "code": "CIS-471", - } - }, - }, - ) - self.assertRequestContainsAppx( - "instructor-reviews", - self.instructor2_pk, - { - **average_and_recent(1.5, 2), - "courses": { - "CIS-471": { - **average_and_recent(1.5, 2), - "full_code": "CIS-471", - "code": "CIS-471", - } - }, - }, - ) - - def test_instructor_no_old_codes(self): - res = self.client.get(reverse("instructor-reviews", args=[self.instructor1_pk])) - self.assertEqual(200, res.status_code) - self.assertFalse("CIS-371" in res.data["courses"]) - - def test_department(self): - self.assertRequestContainsAppx( - "department-reviews", - "CIS", - { - "courses": {"CIS-471": average_and_recent(2.25, 4)}, - }, - ) - - def test_department_no_old_codes(self): - res = self.client.get(reverse("department-reviews", args=["CIS"])) - self.assertEqual(200, res.status_code) - self.assertFalse("CIS-371" in res.data["courses"]) - - def test_history(self): - self.assertRequestContainsAppx( - "course-history", - ["CIS-471", self.instructor1_pk], - { - "sections": [ - {"course_code": "CIS-471", "activity": "Lecture", **rating(4)}, - {"course_code": "CIS-371", "activity": "Lecture", **rating(2)}, - ] - }, - ) - self.assertRequestContainsAppx( - "course-history", - ["CIS-471", self.instructor2_pk], - { - "sections": [ - {"course_code": "CIS-471", "activity": "Lecture", **rating(2)}, - {"course_code": "CIS-371", "activity": "Lecture", **rating(1)}, - ] - }, - ) - - -class BranchedFromTestCase(TestCase, PCRTestMixin): - def setUp(self): - set_semester() - AddDropPeriod(semester="2012A").save() - self.instructor_name = "Instructor One" - self.client = APIClient() - self.client.force_login(User.objects.create_user(username="test")) - create_review( - "ARTH-2220-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4} - ) - create_review( - "NELC-2055-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 3} - ) - create_review("ARTH-222-001", "2012A", self.instructor_name, {"instructor_quality": 2}) - topic_2220 = Topic.objects.get(most_recent__full_code="ARTH-2220") 
- topic_2055 = Topic.objects.get(most_recent__full_code="NELC-2055") - topic_222 = Topic.objects.get(most_recent__full_code="ARTH-222") - topic_2220.branched_from = topic_222 - topic_2220.save() - topic_2055.branched_from = topic_222 - topic_2055.save() - - self.extra_course_data_2220 = { - "code": "ARTH-2220", - "historical_codes": [ - {"full_code": "ARTH-222", "branched_from": True, "semester": "2012A"} - ], - } - self.extra_course_data_2055 = { - "code": "NELC-2055", - "historical_codes": [ - {"full_code": "ARTH-222", "branched_from": True, "semester": "2012A"} - ], - } - self.extra_course_data_222 = { - "code": "ARTH-222", - "historical_codes": [], - } - - def test_course(self): - self.assertRequestContainsAppx( - "course-reviews", - "ARTH-2220", - { - "num_semesters": 1, - **average_and_recent(4, 4), - **self.extra_course_data_2220, - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(4, 4), - "latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - self.assertRequestContainsAppx( - "course-reviews", - "NELC-2055", - { - "num_semesters": 1, - **average_and_recent(3, 3), - **self.extra_course_data_2055, - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(3, 3), - "latest_semester": TEST_SEMESTER, - }, - }, - }, - ) - self.assertRequestContainsAppx( - "course-reviews", - "ARTH-222", - { - "num_semesters": 1, - **average_and_recent(2, 2), - **self.extra_course_data_222, - "instructors": { - Instructor.objects.get(name=self.instructor_name).pk: { - **average_and_recent(2, 2), - "latest_semester": "2012A", - }, - }, - }, - ) +from django.contrib.auth.models import User +from django.test import TestCase +from django.urls import reverse +from rest_framework.test import APIClient + +from alert.models import AddDropPeriod +from courses.models import Instructor, Section, Topic +from tests.courses.util import create_mock_data +from tests.review.test_api import ( + PCRTestMixin, + average_and_recent, + create_review, + rating, + set_semester, +) + + +TEST_SEMESTER = "2017C" +assert TEST_SEMESTER > "2012A" + + +class CourseCodeChangedTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + AddDropPeriod(semester="2012A").save() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-471-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + create_review("CIS-371-001", "2012A", self.instructor_name, {"instructor_quality": 2}) + create_review( + "CIS-371-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-471-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + + Section.objects.all().update(activity="LEC") + + topic_371 = Topic.objects.get(most_recent__full_code="CIS-371") + topic_471 = Topic.objects.get(most_recent__full_code="CIS-471") + topic_371.merge_with(topic_471) + + self.extra_course_data = { + "code": "CIS-471", + "historical_codes": [ + {"full_code": "CIS-371", "branched_from": False, "semester": "2012A"} + ], + } + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-471", + { + "num_semesters": 3, + **average_and_recent(3, 4), + **self.extra_course_data, + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(3, 4), + "latest_semester": TEST_SEMESTER, + }, + }, + }, + ) + + def 
test_course_old_code(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-371", + { + "num_semesters": 3, + **average_and_recent(3, 4), + **self.extra_course_data, + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(3, 4), + "latest_semester": TEST_SEMESTER, + }, + }, + }, + ) + + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + Instructor.objects.get(name=self.instructor_name).pk, + { + **average_and_recent(3, 4), + "courses": {"CIS-471": average_and_recent(3, 4)}, + }, + ) + + def test_instructor_no_old_codes(self): + res = self.client.get( + reverse( + "instructor-reviews", args=[Instructor.objects.get(name=self.instructor_name).pk] + ) + ) + self.assertEqual(200, res.status_code) + self.assertFalse("CIS-371" in res.data["courses"]) + + def test_department(self): + self.assertRequestContainsAppx( + "department-reviews", + "CIS", + { + "courses": {"CIS-471": average_and_recent(3, 4)}, + }, + ) + + def test_department_no_old_codes(self): + res = self.client.get(reverse("department-reviews", args=["CIS"])) + self.assertEqual(200, res.status_code) + self.assertFalse("CIS-371" in res.data["courses"]) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["CIS-371", Instructor.objects.get(name=self.instructor_name).pk], + {"sections": [rating(4), rating(2)]}, + ) + self.assertRequestContainsAppx( + "course-history", + ["CIS-471", Instructor.objects.get(name=self.instructor_name).pk], + { + "sections": [ + {"course_code": "CIS-471", "activity": "Lecture", **rating(4)}, + {"course_code": "CIS-371", "activity": "Lecture", **rating(2)}, + ] + }, + ) + + +class CourseCodeChangedNoReviewTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + AddDropPeriod(semester="2012A").save() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + + _, section = create_mock_data("CIS-471-001", TEST_SEMESTER) + instructor, _ = Instructor.objects.get_or_create(name=self.instructor_name) + section.instructors.add(instructor) + + create_review("CIS-371-001", "2012A", self.instructor_name, {"instructor_quality": 2}) + create_review( + "CIS-371-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-471-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + Section.objects.all().update(activity="LEC") + topic_371 = Topic.objects.get(most_recent__full_code="CIS-371") + topic_471 = Topic.objects.get(most_recent__full_code="CIS-471") + topic_371.merge_with(topic_471) + + self.extra_course_data = { + "code": "CIS-471", + "historical_codes": [ + {"full_code": "CIS-371", "branched_from": False, "semester": "2012A"} + ], + } + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-471", + { + "num_semesters": 3, + **average_and_recent(2, 2), + **self.extra_course_data, + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(2, 2), + "latest_semester": TEST_SEMESTER, + }, + }, + }, + ) + + def test_course_old_code(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-371", + { + "num_semesters": 3, + **average_and_recent(2, 2), + **self.extra_course_data, + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(2, 2), + "latest_semester": TEST_SEMESTER, + }, + }, + }, + ) 
+ + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + Instructor.objects.get(name=self.instructor_name).pk, + { + **average_and_recent(2, 2), + "courses": {"CIS-471": average_and_recent(2, 2)}, + }, + ) + + def test_instructor_no_old_codes(self): + res = self.client.get( + reverse( + "instructor-reviews", args=[Instructor.objects.get(name=self.instructor_name).pk] + ) + ) + self.assertEqual(200, res.status_code) + self.assertFalse("CIS-371" in res.data["courses"]) + + def test_department(self): + self.assertRequestContainsAppx( + "department-reviews", + "CIS", + { + "courses": {"CIS-471": average_and_recent(2, 2)}, + }, + ) + + def test_department_no_old_codes(self): + res = self.client.get(reverse("department-reviews", args=["CIS"])) + self.assertEqual(200, res.status_code) + self.assertFalse("CIS-371" in res.data["courses"]) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["CIS-371", Instructor.objects.get(name=self.instructor_name).pk], + { + "sections": [ + { + "course_code": "CIS-471", + "activity": "Lecture", + "semester": TEST_SEMESTER, + "forms_returned": None, + "forms_produced": None, + }, + { + "course_code": "CIS-371", + "semester": "2012A", + "activity": "Lecture", + **rating(2), + }, + ] + }, + ) + self.assertRequestContainsAppx( + "course-history", + ["CIS-471", Instructor.objects.get(name=self.instructor_name).pk], + { + "sections": [ + { + "course_code": "CIS-471", + "activity": "Lecture", + "semester": TEST_SEMESTER, + "forms_returned": None, + "forms_produced": None, + }, + { + "course_code": "CIS-371", + "semester": "2012A", + "activity": "Lecture", + **rating(2), + }, + ] + }, + ) + + +class InstructorNoReviewTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + AddDropPeriod(semester="2012A").save() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + + _, section = create_mock_data("CIS-471-001", TEST_SEMESTER) + instructor, _ = Instructor.objects.get_or_create(name="Instructor Two") + section.instructors.add(instructor) + + create_review("CIS-371-001", "2012A", self.instructor_name, {"instructor_quality": 2}) + create_review( + "CIS-371-002", + "2007C", + self.instructor_name, + {"instructor_quality": 0}, + responses=0, + ) + create_review( + "CIS-471-001", + "2007C", + "No Responses Instructor", + {"instructor_quality": 0}, + responses=0, + ) + Section.objects.all().update(activity="LEC") + topic_371 = Topic.objects.get(most_recent__full_code="CIS-371") + topic_471 = Topic.objects.get(most_recent__full_code="CIS-471") + topic_371.merge_with(topic_471) + + self.extra_course_data = { + "code": "CIS-471", + "historical_codes": [ + {"full_code": "CIS-371", "branched_from": False, "semester": "2012A"} + ], + } + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-471", + { + "num_semesters": 3, + **average_and_recent(2, 2), + **self.extra_course_data, + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(2, 2), + "latest_semester": "2012A", + }, + Instructor.objects.get(name="Instructor Two").pk: { + "average_reviews": {}, + "recent_reviews": {}, + "latest_semester": TEST_SEMESTER, + }, + }, + }, + ) + + def test_course_old_code(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-371", + { + "num_semesters": 3, + **average_and_recent(2, 2), + **self.extra_course_data, + "instructors": { + 
Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(2, 2), + "latest_semester": "2012A", + }, + Instructor.objects.get(name="Instructor Two").pk: { + "average_reviews": {}, + "recent_reviews": {}, + "latest_semester": TEST_SEMESTER, + }, + }, + }, + ) + + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + Instructor.objects.get(name=self.instructor_name).pk, + { + **average_and_recent(2, 2), + "courses": {"CIS-471": average_and_recent(2, 2)}, + }, + ) + + def test_instructor_no_old_codes(self): + res = self.client.get( + reverse( + "instructor-reviews", args=[Instructor.objects.get(name=self.instructor_name).pk] + ) + ) + self.assertEqual(200, res.status_code) + self.assertFalse("CIS-371" in res.data["courses"]) + + def test_department(self): + self.assertRequestContainsAppx( + "department-reviews", + "CIS", + { + "courses": {"CIS-471": average_and_recent(2, 2)}, + }, + ) + + def test_department_no_old_codes(self): + res = self.client.get(reverse("department-reviews", args=["CIS"])) + self.assertEqual(200, res.status_code) + self.assertFalse("CIS-371" in res.data["courses"]) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["CIS-371", Instructor.objects.get(name=self.instructor_name).pk], + { + "sections": [ + { + "course_code": "CIS-371", + "semester": "2012A", + "activity": "Lecture", + **rating(2), + }, + ] + }, + ) + self.assertRequestContainsAppx( + "course-history", + ["CIS-471", Instructor.objects.get(name=self.instructor_name).pk], + { + "sections": [ + { + "course_code": "CIS-371", + "semester": "2012A", + "activity": "Lecture", + **rating(2), + }, + ] + }, + ) + self.assertRequestContainsAppx( + "course-history", + ["CIS-371", Instructor.objects.get(name="Instructor Two").pk], + { + "sections": [ + { + "course_code": "CIS-471", + "activity": "Lecture", + "semester": TEST_SEMESTER, + "forms_returned": None, + "forms_produced": None, + }, + ] + }, + ) + self.assertRequestContainsAppx( + "course-history", + ["CIS-471", Instructor.objects.get(name="Instructor Two").pk], + { + "sections": [ + { + "course_code": "CIS-471", + "activity": "Lecture", + "semester": TEST_SEMESTER, + "forms_returned": None, + "forms_produced": None, + }, + ] + }, + ) + + +class CourseCodeChangedTwoInstructorsMultipleSemestersTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + AddDropPeriod(semester="2017A").save() + AddDropPeriod(semester="2012A").save() + AddDropPeriod(semester="2012C").save() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review("CIS-471-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4}) + create_review("CIS-471-001", "2017A", "Instructor Two", {"instructor_quality": 2}) + + create_review("CIS-371-900", "2012A", self.instructor_name, {"instructor_quality": 2}) + create_review("CIS-371-003", "2012C", "Instructor Two", {"instructor_quality": 1}) + + Section.objects.all().update(activity="LEC") + + self.instructor1 = Instructor.objects.get(name=self.instructor_name) + self.instructor1_pk = self.instructor1.pk + self.instructor2 = Instructor.objects.get(name="Instructor Two") + self.instructor2_pk = self.instructor2.pk + + topic_371 = Topic.objects.get(most_recent__full_code="CIS-371") + topic_471 = Topic.objects.get(most_recent__full_code="CIS-471") + topic_371.merge_with(topic_471) + + self.extra_course_data = { + "code": "CIS-471", + "historical_codes": [ + 
{"full_code": "CIS-371", "branched_from": False, "semester": "2012C"} + ], + } + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-471", + { + "num_semesters": 4, + **average_and_recent(2.25, 4), + **self.extra_course_data, + "instructors": { + self.instructor1_pk: { + **average_and_recent(3, 4), + "latest_semester": TEST_SEMESTER, + }, + self.instructor2_pk: { + **average_and_recent(1.5, 2), + "latest_semester": "2017A", + }, + }, + }, + ) + + def test_course_old_code(self): + self.assertRequestContainsAppx( + "course-reviews", + "CIS-371", + { + "num_semesters": 4, + **average_and_recent(2.25, 4), + **self.extra_course_data, + "instructors": { + self.instructor1_pk: { + **average_and_recent(3, 4), + "latest_semester": TEST_SEMESTER, + }, + self.instructor2_pk: { + **average_and_recent(1.5, 2), + "latest_semester": "2017A", + }, + }, + }, + ) + + def test_instructor(self): + self.assertRequestContainsAppx( + "instructor-reviews", + self.instructor1_pk, + { + **average_and_recent(3, 4), + "courses": { + "CIS-471": { + **average_and_recent(3, 4), + "full_code": "CIS-471", + "code": "CIS-471", + } + }, + }, + ) + self.assertRequestContainsAppx( + "instructor-reviews", + self.instructor2_pk, + { + **average_and_recent(1.5, 2), + "courses": { + "CIS-471": { + **average_and_recent(1.5, 2), + "full_code": "CIS-471", + "code": "CIS-471", + } + }, + }, + ) + + def test_instructor_no_old_codes(self): + res = self.client.get(reverse("instructor-reviews", args=[self.instructor1_pk])) + self.assertEqual(200, res.status_code) + self.assertFalse("CIS-371" in res.data["courses"]) + + def test_department(self): + self.assertRequestContainsAppx( + "department-reviews", + "CIS", + { + "courses": {"CIS-471": average_and_recent(2.25, 4)}, + }, + ) + + def test_department_no_old_codes(self): + res = self.client.get(reverse("department-reviews", args=["CIS"])) + self.assertEqual(200, res.status_code) + self.assertFalse("CIS-371" in res.data["courses"]) + + def test_history(self): + self.assertRequestContainsAppx( + "course-history", + ["CIS-471", self.instructor1_pk], + { + "sections": [ + {"course_code": "CIS-471", "activity": "Lecture", **rating(4)}, + {"course_code": "CIS-371", "activity": "Lecture", **rating(2)}, + ] + }, + ) + self.assertRequestContainsAppx( + "course-history", + ["CIS-471", self.instructor2_pk], + { + "sections": [ + {"course_code": "CIS-471", "activity": "Lecture", **rating(2)}, + {"course_code": "CIS-371", "activity": "Lecture", **rating(1)}, + ] + }, + ) + + +class BranchedFromTestCase(TestCase, PCRTestMixin): + def setUp(self): + set_semester() + AddDropPeriod(semester="2012A").save() + self.instructor_name = "Instructor One" + self.client = APIClient() + self.client.force_login(User.objects.create_user(username="test")) + create_review( + "ARTH-2220-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 4} + ) + create_review( + "NELC-2055-001", TEST_SEMESTER, self.instructor_name, {"instructor_quality": 3} + ) + create_review("ARTH-222-001", "2012A", self.instructor_name, {"instructor_quality": 2}) + topic_2220 = Topic.objects.get(most_recent__full_code="ARTH-2220") + topic_2055 = Topic.objects.get(most_recent__full_code="NELC-2055") + topic_222 = Topic.objects.get(most_recent__full_code="ARTH-222") + topic_2220.branched_from = topic_222 + topic_2220.save() + topic_2055.branched_from = topic_222 + topic_2055.save() + + self.extra_course_data_2220 = { + "code": "ARTH-2220", + "historical_codes": [ + {"full_code": "ARTH-222", 
"branched_from": True, "semester": "2012A"} + ], + } + self.extra_course_data_2055 = { + "code": "NELC-2055", + "historical_codes": [ + {"full_code": "ARTH-222", "branched_from": True, "semester": "2012A"} + ], + } + self.extra_course_data_222 = { + "code": "ARTH-222", + "historical_codes": [], + } + + def test_course(self): + self.assertRequestContainsAppx( + "course-reviews", + "ARTH-2220", + { + "num_semesters": 1, + **average_and_recent(4, 4), + **self.extra_course_data_2220, + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(4, 4), + "latest_semester": TEST_SEMESTER, + }, + }, + }, + ) + self.assertRequestContainsAppx( + "course-reviews", + "NELC-2055", + { + "num_semesters": 1, + **average_and_recent(3, 3), + **self.extra_course_data_2055, + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(3, 3), + "latest_semester": TEST_SEMESTER, + }, + }, + }, + ) + self.assertRequestContainsAppx( + "course-reviews", + "ARTH-222", + { + "num_semesters": 1, + **average_and_recent(2, 2), + **self.extra_course_data_222, + "instructors": { + Instructor.objects.get(name=self.instructor_name).pk: { + **average_and_recent(2, 2), + "latest_semester": "2012A", + }, + }, + }, + )