Commit
public ui: better document type facet
* The behavior of the document main type / subtype facet was not clear for documents with multiple types.
  Now the facet displays only the subtypes associated with their main type.
* closes #1697
* Adds a memoized decorator; a usage sketch follows below.
  (https://flask-caching.readthedocs.io/en/latest/api.html#flask_caching.Cache.memoize)
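
A minimal usage sketch of the new memoized decorator, mirroring how this commit
applies it to get_document_types_from_schema. The expensive_lookup function is a
hypothetical placeholder, and running it requires an application context with an
invenio-cache backend configured:

    from rero_ils.modules.utils import memoized

    @memoized(timeout=3600)
    def expensive_lookup(resource='doc'):
        """Hypothetical costly function; its result is cached per argument."""
        # Imagine schema parsing or a database round trip here.
        return {'resource': resource}

    # The first call computes and caches the result; repeated calls with the
    # same argument within the 3600 s timeout are answered from the cache.
    expensive_lookup('doc')
    expensive_lookup('doc')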

Co-Authored-by: Peter Weber <peter.weber@rero.ch>
rerowep committed Mar 9, 2021
1 parent 03825cb commit 5ccf99b
Showing 7 changed files with 104 additions and 5 deletions.
9 changes: 8 additions & 1 deletion rero_ils/modules/documents/serializers.py
@@ -21,7 +21,7 @@
from invenio_records_rest.serializers.response import record_responsify, \
search_responsify

from .utils import create_contributions
from .utils import create_contributions, filter_document_type_buckets
from ..documents.api import Document
from ..documents.utils import title_format_text_head
from ..documents.views import create_title_alternate_graphic, \
@@ -133,6 +133,13 @@ def post_process_serialize_search(self, results, pid_fetcher):
results['aggregations']['library'] = lib_agg
del results['aggregations']['organisation']

# Correct document type buckets
type_buckets = results['aggregations']['document_type']['buckets']
results['aggregations']['document_type']['buckets'] = \
filter_document_type_buckets(type_buckets)

return super(
DocumentJSONSerializer, self).post_process_serialize_search(
results, pid_fetcher)
44 changes: 44 additions & 0 deletions rero_ils/modules/documents/utils.py
@@ -26,10 +26,54 @@
from elasticsearch_dsl.utils import AttrDict
from flask import current_app
from flask import request as flask_request
from invenio_jsonschemas.proxies import current_jsonschemas
from werkzeug.local import LocalProxy

from .dojson.contrib.marc21tojson.model import remove_trailing_punctuation
from ..utils import get_schema_for_resource, memoized
from ...utils import get_i18n_supported_languages

_records_state = LocalProxy(lambda: current_app.extensions['invenio-records'])


@memoized(timeout=3600)
def get_document_types_from_schema(schema='doc'):
"""Create document type definition from schema."""
path = current_jsonschemas.url_to_path(get_schema_for_resource(schema))
schema = current_jsonschemas.get_schema(path=path)
schema = _records_state.replace_refs(schema)
schema_types = schema.get(
'properties', {}).get('type', {}).get('items', {}).get('oneOf', [])
doc_types = {}
for schema_type in schema_types:
doc_types[schema_type['title']] = {}
sub_types = schema_type.get(
'properties', {}).get('subtype', {}).get('enum', [])
for sub_type in sub_types:
doc_types[schema_type['title']][sub_type] = True
return doc_types
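
For illustration only (not part of the commit): assuming a schema that defines a
hypothetical main type 'docmaintype_book' with two subtypes, the mapping returned
above would be shaped as follows, so facet filtering reduces to plain dict lookups:

    # Hypothetical return value of get_document_types_from_schema().
    doc_types = {
        'docmaintype_book': {
            'docsubtype_e-book': True,
            'docsubtype_audio_book': True,
        },
        'docmaintype_article': {},  # a main type without subtypes
    }
    assert doc_types.get('docmaintype_book', {}).get('docsubtype_e-book')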


def filter_document_type_buckets(buckets):
"""Removes unwanted sub types from buckets."""
doc_types = get_document_types_from_schema()
new_type_buckets = buckets
if doc_types:
new_type_buckets = []
for type_bucket in buckets:
new_type_bucket = type_bucket
main_type = type_bucket['key']
new_subtype_buckets = []
subtype_buckets = type_bucket['document_subtype']['buckets']
for subtype_bucket in subtype_buckets:
if doc_types.get(main_type, {}).get(subtype_bucket['key']):
new_subtype_buckets.append(subtype_bucket)
new_type_bucket[
'document_subtype'
]['buckets'] = new_subtype_buckets
new_type_buckets.append(new_type_bucket)
return new_type_buckets
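
A sketch of the filtering itself, continuing the hypothetical schema above and
using made-up aggregation buckets; the call assumes an application context in
which the document schema declares 'docsubtype_e-book' under 'docmaintype_book'
but not 'docsubtype_thesis':

    buckets = [{
        'key': 'docmaintype_book',
        'doc_count': 2,
        'document_subtype': {'buckets': [
            # Registered under this main type in the schema: kept.
            {'key': 'docsubtype_e-book', 'doc_count': 1},
            # Registered under a different main type: dropped from the facet.
            {'key': 'docsubtype_thesis', 'doc_count': 1},
        ]},
    }]
    buckets = filter_document_type_buckets(buckets)
    # buckets[0]['document_subtype']['buckets'] now holds only the e-book entry.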


def clean_text(data):
"""Delete all _text from data."""
2 changes: 1 addition & 1 deletion rero_ils/modules/documents/views.py
@@ -562,7 +562,7 @@ def get_articles(record):
for hit in search.scan():
articles.append({
'title': title_format_text_head(hit.title),
'pid':hit.pid
'pid': hit.pid
})
return articles

2 changes: 1 addition & 1 deletion rero_ils/modules/ext.py
@@ -182,7 +182,7 @@ def register_signals(self, app):

after_record_insert.connect(create_subscription_patron_transaction)
after_record_update.connect(create_subscription_patron_transaction)

after_record_insert.connect(operation_log_record_create)
after_record_update.connect(operation_log_record_update)

2 changes: 2 additions & 0 deletions rero_ils/modules/imports/serializers.py
@@ -89,6 +89,8 @@ def serialize_search(self, pid_fetcher, search_result, links=None,
),
aggregations=search_result.get('aggregations', dict()),
)
# TODO: if a document has multiple types, we have to correct
# the document type buckets here as well.
return json.dumps(results, **self._format_args())

def post_process(self, metadata):
13 changes: 12 additions & 1 deletion rero_ils/modules/items/serializers/json.py
@@ -19,7 +19,8 @@
"""Item serializers."""

from rero_ils.modules.documents.api import search_document_by_pid
from rero_ils.modules.documents.utils import title_format_text_head
from rero_ils.modules.documents.utils import filter_document_type_buckets, \
title_format_text_head
from rero_ils.modules.item_types.api import ItemType
from rero_ils.modules.items.api import Item
from rero_ils.modules.items.models import ItemStatus
@@ -110,4 +111,14 @@ def post_process_serialize_search(self, results, pid_fetcher):
vendor = Vendor.get_record_by_pid(vendor_term.get('key'))
vendor_term['name'] = vendor.get('name')

# Correct document type buckets
buckets = results['aggregations']['document_type']['buckets']
results['aggregations']['document_type']['buckets'] = \
filter_document_type_buckets(buckets)

return super().post_process_serialize_search(results, pid_fetcher)

37 changes: 36 additions & 1 deletion rero_ils/modules/utils.py
@@ -36,7 +36,24 @@


def cached(timeout=50, key_prefix='default', query_string=False):
"""Cache traffic."""
"""Cache traffic.
Decorator. Use this to cache a function. By default the cache key is
view/request.path. You are able to use this decorator with any function
by changing the key_prefix. If the token %s is located within the
key_prefix then it will replace that with request.path.
:param timeout: Default 50. If set to an integer, will cache
for that amount of time. Unit of time is in seconds.
:param key_prefix: Default 'default'. Beginning key to use for the
cache key. request.path will be the actual request path, or in cases
where the make_cache_key function is called from other views it will be the
expected URL for the view as generated by Flask’s url_for().
:param query_string: Default False. When True, the cache key used will
be the result of hashing the ordered query string parameters. This avoids
creating different caches for the same query just because the parameters
were passed in a different order.
"""
def caching(f):
@wraps(f)
def wrapper(*args, **kwargs):
@@ -50,6 +67,24 @@ def wrapper(*args, **kwargs):
return caching
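
A minimal sketch of using cached on a Flask view, as described in the docstring
above; the blueprint and view are hypothetical, and an invenio-cache backend must
be configured:

    from flask import Blueprint
    from rero_ils.modules.utils import cached

    blueprint = Blueprint('example', __name__)

    @blueprint.route('/api/example')
    @cached(timeout=300, query_string=True)
    def example_view():
        """Hypothetical view: the response is cached per query string for 5 minutes."""
        return 'expensive payload'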


def memoized(timeout=50):
"""Memoize functions.
Use this to cache the result of a function, taking its arguments into
account in the cache key.
:param timeout: Default 50. If set to an integer, will cache for that
amount of time. Unit of time is in seconds.
"""
def memoize(f):
@wraps(f)
def wrapper(*args, **kwargs):
memoize_fun = current_cache.memoize(timeout=timeout)
return memoize_fun(f)(*args, **kwargs)
return wrapper
return memoize


def strtotime(strtime):
"""String to datetime."""
splittime = strtime.split(':')
