Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 7 additions & 3 deletions src/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -277,9 +277,13 @@ def loganswer(questiontype):

isCorrect = data['correct']

misconceptions = ast.literal_eval(data['misconceptions'])
question_text = data['question_text']
question_options = json.dumps(data['question_options'])
misconceptions = ast.literal_eval(data['misconceptions'])
question_text = data['question_text']
question_frame = data.get('question_frame', '')
question_options_payload = {"options": data['question_options']}
if question_frame:
question_options_payload["question_frame"] = question_frame
question_options = json.dumps(question_options_payload)

userId = getUserName()
courseId = getUserCourse(userId)
Expand Down
22 changes: 15 additions & 7 deletions src/exercisebuilder.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import re
import math
import ltltoeng
import ltlgloss
from syntacticmutator import applyRandomMutationNotEquivalentTo


Expand Down Expand Up @@ -289,14 +290,19 @@ def formula_choice_metric(formula):
def gen_nl_question(self, formula):

as_node = ltlnode.parse_ltl_string(formula)
formula_eng = as_node.__to_english__()
if formula_eng is None or formula_eng == "":
gloss_result = ltlgloss.gloss_formula(as_node)

if gloss_result.best_gloss is None or gloss_result.best_gloss == "":
return None
formula_eng_corrected = ltltoeng.correct_grammar(formula_eng)

formula_eng_corrected = ltltoeng.correct_grammar(gloss_result.best_gloss)
### If there are multiple '.' in a row, replace with a single '.'
formula_eng_corrected = re.sub(r'\.{2,}', '.', formula_eng)
return formula_eng_corrected
formula_eng_corrected = re.sub(r'\.{2,}', '.', formula_eng_corrected)
return {
"text": formula_eng_corrected,
"frame": gloss_result.frame.value,
"candidates": {frame.value: gloss for frame, gloss in gloss_result.frame_glosses.items()},
}


def get_options_with_misconceptions_as_formula(self, answer):
Expand Down Expand Up @@ -357,7 +363,9 @@ def build_english_to_ltl_question(self, answer):
return None

return {
"question": question,
"question": question["text"] if isinstance(question, dict) else question,
"gloss_frame": question.get("frame", "") if isinstance(question, dict) else "",
"gloss_candidates": question.get("candidates", {}) if isinstance(question, dict) else {},
"type": self.ENGLISHTOLTL,
"options": options
}
Expand Down
168 changes: 168 additions & 0 deletions src/ltlgloss.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,168 @@
"""Utilities for producing cognitively-framed glosses of LTL formulae.

This module takes an LTL AST node and produces three English glosses that
highlight different cognitive frames:

1. Abstract / Logical (AL): operator-forward phrasing.
2. Deontic / Obligation (DO): emphasises guarantees the system must provide.
3. Narrative / Story (NS): event-focused narrative phrasing.

It also scores the glosses with a lightweight n-gram heuristic so callers can
pick the best-fitting gloss while keeping track of which frame was selected.
"""
from __future__ import annotations

import re
from dataclasses import dataclass
from enum import Enum
from typing import Dict, Iterable

import ltlnode
import ltltoeng


class CognitiveFrame(Enum):
    """Closed set of cognitive framings a gloss can take (see module docstring)."""

    # Abstract / Logical: operator-forward phrasing.
    ABSTRACT_LOGICAL = "AL"
    # Deontic / Obligation: emphasises guarantees the system must provide.
    DEONTIC_OBLIGATION = "DO"
    # Narrative / Story: event-focused narrative phrasing.
    NARRATIVE_STORY = "NS"


@dataclass
class GlossResult:
    """Result container for gloss generation."""

    # The winning gloss text; "" when no base gloss could be produced.
    best_gloss: str
    # The cognitive frame the winning gloss belongs to.
    frame: CognitiveFrame
    # All candidate glosses keyed by frame; empty when no gloss was produced.
    frame_glosses: Dict[CognitiveFrame, str]


# Frame-indicative n-grams consumed by _score_gloss (matched against the
# lower-cased gloss). "oblig" is a deliberate stem so it covers
# "obligation", "obligated", etc.
_FRAME_NGRAMS: Dict[CognitiveFrame, Iterable[str]] = {
    CognitiveFrame.ABSTRACT_LOGICAL: ("if", "implies", "always", "eventually", "until"),
    CognitiveFrame.DEONTIC_OBLIGATION: ("must", "should", "guarantee", "ensure", "oblig"),
    CognitiveFrame.NARRATIVE_STORY: ("over time", "story", "unfold", "event", "happen"),
}


def _normalize_sentence(text: str) -> str:
    """Tidy up a sentence and ensure it ends with terminal punctuation.

    Runs the text through the project's grammar corrector, collapses
    whitespace, removes stray spaces around punctuation and parentheses,
    capitalises the first letter, and appends a period if needed.
    """

    sentence = ltltoeng.correct_grammar(text.strip())

    # Whitespace cleanups, applied in order: collapse runs of spaces, pull
    # spaces off punctuation, and close gaps inside parentheses.
    for pattern, replacement in (
        (r"\s+", " "),
        (r"\s+([,.;:!?])", r"\1"),
        (r"\(\s+", "("),
        (r"\s+\)", ")"),
    ):
        sentence = re.sub(pattern, replacement, sentence)

    if sentence:
        if sentence[0].isalpha():
            sentence = sentence[0].upper() + sentence[1:]
        if sentence[-1] not in ".!?":
            sentence += "."

    return sentence


def _normalize_base_gloss(node: ltlnode.LTLNode) -> str:
    """Return the node's cleaned-up base English gloss, or "" if it has none."""

    english = node.__to_english__()
    return "" if english is None else _normalize_sentence(english)


def _gloss_do(node: ltlnode.LTLNode) -> str:
    """Recursively build a Deontic/Obligation gloss.

    Node types without a dedicated obligation phrasing fall back to the base
    English gloss with any trailing periods stripped.
    """

    # Ordered (node type, renderer) table; check order and isinstance
    # semantics mirror the original if/elif chain.
    renderers = (
        (ltlnode.LiteralNode,
         lambda n: f"{n.value} holds in the current state"),
        (ltlnode.NotNode,
         lambda n: f"the system must ensure that not ({_gloss_do(n.operand)})"),
        (ltlnode.AndNode,
         lambda n: f"the system must satisfy both ({_gloss_do(n.left)}) and ({_gloss_do(n.right)})"),
        (ltlnode.OrNode,
         lambda n: f"the system must satisfy at least one of ({_gloss_do(n.left)}) or ({_gloss_do(n.right)})"),
        (ltlnode.ImpliesNode,
         lambda n: f"whenever ({_gloss_do(n.left)}) occurs, the system is obligated to ensure ({_gloss_do(n.right)})"),
        (ltlnode.GloballyNode,
         lambda n: f"the system must ensure at all times that ({_gloss_do(n.operand)}) holds"),
        (ltlnode.FinallyNode,
         lambda n: f"the system must eventually bring about ({_gloss_do(n.operand)})"),
        (ltlnode.NextNode,
         lambda n: f"on the next step, the system must enforce ({_gloss_do(n.operand)})"),
        (ltlnode.UntilNode,
         lambda n: f"the system must keep ({_gloss_do(n.left)}) true until it achieves ({_gloss_do(n.right)})"),
    )

    for node_type, render in renderers:
        if isinstance(node, node_type):
            return render(node)

    fallback = _normalize_base_gloss(node)
    return fallback.rstrip(".") if fallback else ""


def _gloss_ns(node: ltlnode.LTLNode) -> str:
    """Recursively build a Narrative/Event gloss.

    Node types without a dedicated narrative phrasing fall back to the base
    English gloss with any trailing periods stripped.
    """

    # Ordered (node type, renderer) table; check order and isinstance
    # semantics mirror the original if/elif chain.
    renderers = (
        (ltlnode.LiteralNode,
         lambda n: f"{n.value} happens"),
        (ltlnode.NotNode,
         lambda n: f"it never happens that ({_gloss_ns(n.operand)})"),
        (ltlnode.AndNode,
         lambda n: f"({_gloss_ns(n.left)}) happens while ({_gloss_ns(n.right)}) also happens"),
        (ltlnode.OrNode,
         lambda n: f"either ({_gloss_ns(n.left)}) happens or ({_gloss_ns(n.right)}) happens"),
        (ltlnode.ImpliesNode,
         lambda n: f"whenever ({_gloss_ns(n.left)}) ever happens, ({_gloss_ns(n.right)}) must also happen"),
        (ltlnode.GloballyNode,
         lambda n: f"from now on, ({_gloss_ns(n.operand)}) always holds"),
        (ltlnode.FinallyNode,
         lambda n: f"sooner or later, ({_gloss_ns(n.operand)}) will happen"),
        (ltlnode.NextNode,
         lambda n: f"right after this, ({_gloss_ns(n.operand)}) happens"),
        (ltlnode.UntilNode,
         lambda n: f"({_gloss_ns(n.left)}) keeps happening until eventually ({_gloss_ns(n.right)}) happens"),
    )

    for node_type, render in renderers:
        if isinstance(node, node_type):
            return render(node)

    fallback = _normalize_base_gloss(node)
    return fallback.rstrip(".") if fallback else ""


def _build_frame_glosses(node: ltlnode.LTLNode, base_gloss: str) -> Dict[CognitiveFrame, str]:
    """Build the three frame-specific glosses for *node*.

    The Abstract/Logical gloss is the pre-normalised *base_gloss*; the other
    two frames are generated recursively and then normalised.
    """

    glosses: Dict[CognitiveFrame, str] = {}
    glosses[CognitiveFrame.ABSTRACT_LOGICAL] = base_gloss
    glosses[CognitiveFrame.DEONTIC_OBLIGATION] = _normalize_sentence(_gloss_do(node))
    glosses[CognitiveFrame.NARRATIVE_STORY] = _normalize_sentence(_gloss_ns(node))
    return glosses


def _score_gloss(frame: CognitiveFrame, gloss: str) -> int:
    """Score a gloss using a lightweight frame-specific n-gram heuristic.

    Args:
        frame: The cognitive frame whose indicator n-grams to count.
        gloss: The candidate gloss text.

    Returns:
        Total number of matches across the frame's indicator n-grams;
        a higher score means the gloss fits the frame better.
    """

    lowered = gloss.lower()
    # Anchor each n-gram at a word boundary on the left so short indicators
    # such as "if" are not counted inside unrelated words ("specific",
    # "uniform", ...). The right side is deliberately left open because
    # entries like "oblig" are stems meant to match "obligation",
    # "obligated", etc.
    return sum(
        len(re.findall(r"\b" + re.escape(ngram), lowered))
        for ngram in _FRAME_NGRAMS[frame]
    )


def gloss_formula(node: ltlnode.LTLNode) -> GlossResult:
    """Generate and score glosses for an LTL formula.

    Args:
        node: Parsed LTL AST node.

    Returns:
        GlossResult containing the winning gloss, its frame, and all
        candidates. When no base gloss can be produced, the result carries
        an empty gloss, the Abstract/Logical frame, and no candidates.
    """

    base_gloss = _normalize_base_gloss(node)
    if not base_gloss:
        return GlossResult(
            best_gloss="",
            frame=CognitiveFrame.ABSTRACT_LOGICAL,
            frame_glosses={},
        )

    candidates = _build_frame_glosses(node, base_gloss)

    def rank(frame: CognitiveFrame):
        # Primary key: heuristic n-gram score. Secondary key: the frame's
        # string value, to make ties deterministic.
        return (_score_gloss(frame, candidates[frame]), frame.value)

    winning_frame = max(candidates, key=rank)

    return GlossResult(
        best_gloss=candidates[winning_frame],
        frame=winning_frame,
        frame_glosses=candidates,
    )
53 changes: 32 additions & 21 deletions src/static/js/checkanswers.js
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,17 @@ function getQuestionText(parentNode) {
return parentNode.querySelector('.actualQuestion').innerText;
}

function getQuestionTrace(parentNode) {
return parentNode.querySelector('.actualQuestionTrace').innerText;
}
function getQuestionTrace(parentNode) {
return parentNode.querySelector('.actualQuestionTrace').innerText;
}

function getQuestionFrame(parentNode) {
let questionNode = parentNode.querySelector('.actualQuestion');
if (questionNode && questionNode.dataset) {
return questionNode.dataset.glossFrame || "";
}
return "";
}

function getQuestionOptions(parentNode) {

Expand Down Expand Up @@ -210,12 +218,13 @@ async function tracesatisfaction_mc_getfeedback(button) {
selected_option: selected_radio.value,
correct_option: correct_option,
correct: correct,
misconceptions: selected_radio.dataset.misconceptions,
question_text: question_text,
question_options: question_options,
formula_for_mp_class: get_formula_for_MP_Classification(parent_node, QUESTION_TYPE),
exercise: getExerciseName()
}
misconceptions: selected_radio.dataset.misconceptions,
question_text: question_text,
question_options: question_options,
question_frame: getQuestionFrame(parent_node),
formula_for_mp_class: get_formula_for_MP_Classification(parent_node, QUESTION_TYPE),
exercise: getExerciseName()
}
let response = await postFeedback(data, QUESTION_TYPE);
}

Expand All @@ -239,12 +248,13 @@ async function tracesatisfaction_yn_getfeedback(button) {
selected_option: selected_radio.value,
correct_option: correct_option,
correct: correct,
misconceptions: selected_radio.dataset.misconceptions,
question_text: question_text,
question_options: question_options,
formula_for_mp_class: get_formula_for_MP_Classification(parent_node, QUESTION_TYPE),
exercise: getExerciseName()
}
misconceptions: selected_radio.dataset.misconceptions,
question_text: question_text,
question_options: question_options,
question_frame: getQuestionFrame(parent_node),
formula_for_mp_class: get_formula_for_MP_Classification(parent_node, QUESTION_TYPE),
exercise: getExerciseName()
}
let response = await postFeedback(data, QUESTION_TYPE);
}

Expand All @@ -268,12 +278,13 @@ async function englishtoltl_getfeedback(button) {
selected_option: selected_radio.value,
correct_option: correct_option,
correct: correct,
misconceptions: selected_radio.dataset.misconceptions,
question_text: question_text,
question_options: question_options,
formula_for_mp_class: get_formula_for_MP_Classification(parent_node, QUESTION_TYPE),
exercise: getExerciseName()
}
misconceptions: selected_radio.dataset.misconceptions,
question_text: question_text,
question_options: question_options,
question_frame: getQuestionFrame(parent_node),
formula_for_mp_class: get_formula_for_MP_Classification(parent_node, QUESTION_TYPE),
exercise: getExerciseName()
}

let response = await postFeedback(data, QUESTION_TYPE);
displayServerResponse(response);
Expand Down
13 changes: 7 additions & 6 deletions src/templates/exercise.html
Original file line number Diff line number Diff line change
Expand Up @@ -91,8 +91,8 @@ <h6 id="exerciseName">{{ exercise_name }}</h6>


{% if q.type == "tracesatisfaction_yn" %}
<h5 id="{{ qindex }}question_text" class="card-title mb-2">
<pre><code class="actualQuestion language-ltl"> {{ q.question}} </code></pre>
<h5 id="{{ qindex }}question_text" class="card-title mb-2">
<pre><code class="actualQuestion language-ltl" data-gloss-frame="{{ q.gloss_frame | default('') }}"> {{ q.question}} </code></pre>
</h5>
<pre class="mermaid" id="q{{qindex}}_yn_trace"
aria-label="{{q.trace}}"> {{ q.mermaid | safe}} </pre>
Expand All @@ -101,12 +101,13 @@ <h5 id="{{ qindex }}question_text" class="card-title mb-2">
<pre class="actualQuestionTrace"> {{ q.trace}}</pre>
</div>
{% elif q.type == "tracesatisfaction_mc" %}
<h5 id="{{ qindex }}question_text" class="card-title mb-2">
<pre><code class="actualQuestion language-ltl">{{ q.question}}</code></pre>
<h5 id="{{ qindex }}question_text" class="card-title mb-2">
<pre><code class="actualQuestion language-ltl" data-gloss-frame="{{ q.gloss_frame | default('') }}">{{ q.question}}</code></pre>
</h5>
{% else %}
<h5 id="{{ qindex }}question_text" class="card-title mb-2 actualQuestion"
style="white-space: pre-wrap;">{{ q.question}}
<h5 id="{{ qindex }}question_text" class="card-title mb-2 actualQuestion"
data-gloss-frame="{{ q.gloss_frame | default('') }}"
style="white-space: pre-wrap;">{{ q.question}}
</h5>
<div class="row mb-2 ml-2">
{% if q.type == "englishtoltl" %}
Expand Down
9 changes: 5 additions & 4 deletions src/templates/prebuiltexercises/lightpanel.html
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,8 @@ <h5 class="card-title">


{% if q.type == "tracesatisfaction_yn" %}
<h5 id="{{ qindex }}question_text" class="card-subtitle mb-2">Formula:
<pre><code class="actualQuestion language-ltl"> {{ q.question}} </code></pre></h5>
<h5 id="{{ qindex }}question_text" class="card-subtitle mb-2">Formula:
<pre><code class="actualQuestion language-ltl" data-gloss-frame="{{ q.gloss_frame | default('') }}"> {{ q.question}} </code></pre></h5>
<pre class="mermaid" id="q{{qindex}}_yn_trace"
aria-label="{{q.trace}}"> {{ q.mermaid | safe}} </pre>

Expand All @@ -84,10 +84,11 @@ <h5 id="{{ qindex }}question_text" class="card-subtitle mb-2">Formula:
<pre class="actualQuestionTrace"> {{ q.trace}}</pre>
</div>
{% elif q.type == "tracesatisfaction_mc" %}
<h5 id="{{ qindex }}question_text" class="card-subtitle mb-2">Formula:
<pre><code class="actualQuestion language-ltl">{{ q.question}}</code></pre></h5>
<h5 id="{{ qindex }}question_text" class="card-subtitle mb-2">Formula:
<pre><code class="actualQuestion language-ltl" data-gloss-frame="{{ q.gloss_frame | default('') }}">{{ q.question}}</code></pre></h5>
{% else %}
<h5 id="{{ qindex }}question_text" class="card-subtitle mb-2 actualQuestion"
data-gloss-frame="{{ q.gloss_frame | default('') }}"
style="white-space: pre-wrap;">{{ q.question}}
</h5>
{% endif %}
Expand Down
5 changes: 3 additions & 2 deletions src/templates/prebuiltexercises/robotrain.html
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ <h6>

{% if q.type == "tracesatisfaction_yn" %}
<h5 id="{{ qindex }}question_text" class="card-title mb-2">
<pre><code class="actualQuestion language-ltl"> {{ q.question}} </code></pre>
<pre><code class="actualQuestion language-ltl" data-gloss-frame="{{ q.gloss_frame | default('') }}"> {{ q.question}} </code></pre>
</h5>
<pre class="mermaid" id="q{{qindex}}_yn_trace"
aria-label="{{q.trace}}"> {{ q.mermaid | safe}} </pre>
Expand All @@ -94,10 +94,11 @@ <h5 id="{{ qindex }}question_text" class="card-title mb-2">
</div>
{% elif q.type == "tracesatisfaction_mc" %}
<h5 id="{{ qindex }}question_text" class="card-title mb-2">
<pre><code class="actualQuestion language-ltl">{{ q.question}}</code></pre>
<pre><code class="actualQuestion language-ltl" data-gloss-frame="{{ q.gloss_frame | default('') }}">{{ q.question}}</code></pre>
</h5>
{% else %}
<h5 id="{{ qindex }}question_text" class="card-title mb-2 actualQuestion"
data-gloss-frame="{{ q.gloss_frame | default('') }}"
style="white-space: pre-wrap;">{{ q.question}}
</h5>
{% endif %}
Expand Down