Diffstat (limited to 'flashcards-project/src/flashcards/scheduler_brutal.py')
-rw-r--r--  flashcards-project/src/flashcards/scheduler_brutal.py  79
1 file changed, 79 insertions, 0 deletions
diff --git a/flashcards-project/src/flashcards/scheduler_brutal.py b/flashcards-project/src/flashcards/scheduler_brutal.py
new file mode 100644
index 0000000..ebbc0ff
--- /dev/null
+++ b/flashcards-project/src/flashcards/scheduler_brutal.py
@@ -0,0 +1,79 @@
+from random import shuffle
+
+from .scheduler import Scheduler
+from .card import Card
+
+HISTORY_DEPTH = 8
+
+class SchedulerBrutal(Scheduler):
+ """
+ The brutal scheduler tracks how well the player has consolidated each card
+ and also how often the card has been shown.
+
+ Using this information, it prioritizes cards that have been shown less
+ frequently and recently, which means the player will often see totally new
+ cards in test sessions.
+ """
+    def __init__(self, cards: dict[str, Card], state: dict):
+        self._cards = cards
+        self._state = {}
+
+        # Synchronise state with current card collection
+        for id, card in self._cards.items():
+            history = state.get(id, [None] * HISTORY_DEPTH)
+
+            # Adjust history if depth has changed
+            if len(history) > HISTORY_DEPTH:
+                history = history[-HISTORY_DEPTH:]
+            elif len(history) < HISTORY_DEPTH:
+                history = ([None] * (HISTORY_DEPTH - len(history))) + history
+
+            self._state[id] = history
+
+    def practice(self, size: int) -> list[str]:
+        return self._schedule(size)
+
+    def test(self, size: int) -> list[str]:
+        return self._schedule(size)
+
+    def update(self, results: dict[str, int]) -> None:
+        # Add card result to sliding window, or None if card was not shown
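+        # For example, [None, 1, 0, 1, 1, 0, 1, 1] with a new result of 0
+        # becomes [1, 0, 1, 1, 0, 1, 1, 0]: the oldest entry drops off.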
+        self._state = {id: history[1:] + [results.get(id, None)]
+                       for id, history in self._state.items()}
+
+    def getState(self) -> dict:
+        return self._state
+
+    @staticmethod
+    def _consolidationIndex(history: list, weights: range) -> float:
+        """
+        Consolidation index is a measure of how well the player has guessed the card recently
+        """
+        relevant_history = [(h, w) for h, w in zip(history, weights) if h is not None]
+        weighted_history = sum([h * w for h, w in relevant_history])
+        total_weights = sum([w for h, w in relevant_history])
+        return weighted_history / total_weights if total_weights > 0 else 0.0
+
+    @staticmethod
+    def _exposureIndex(history: list) -> float:
+        """
+        Exposure index is a measure of how much and how recently a card has been shown
+        """
+        return sum([i + 1 for i, h in enumerate(history) if h is not None])
+
+    def _schedule(self, size: int) -> list[str]:
+        weights = range(10, 10 + HISTORY_DEPTH)
+        cards = [id for id, card in self._cards.items()]
+
+        # First sort by consolidation index
+        cards.sort(key=lambda id: SchedulerBrutal._consolidationIndex(self._state[id], weights))
+
+        # Next sort by exposure index; Python's sort is stable, so exposure
+        # becomes the primary key and consolidation index breaks ties
+        cards.sort(key=lambda id: SchedulerBrutal._exposureIndex(self._state[id]))
+
+        # Return least exposed and least consolidated cards, shuffled
+        cards = cards[0:size]
+
+        shuffle(cards)
+
+        return cards
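
For context, a minimal usage sketch of the class above (an illustration, assuming the flashcards package in this tree is importable and that results are reported as 0 or 1; the scheduler only reads the card ids, so plain objects stand in for real Card instances):

    from flashcards.scheduler_brutal import SchedulerBrutal

    # Only the ids matter to the scheduler, so placeholders stand in for Card objects.
    cards = {f"card-{n}": object() for n in range(20)}

    # No saved state: every history starts out as [None] * HISTORY_DEPTH.
    scheduler = SchedulerBrutal(cards, state={})

    # A test session of five cards, biased towards the least shown ones.
    session = scheduler.test(5)

    # Report a result for each card that was shown (1 for correct, 0 for a miss).
    scheduler.update({card_id: 1 for card_id in session})

    # Persist the sliding-window histories and restore them on the next run.
    saved = scheduler.getState()
    scheduler = SchedulerBrutal(cards, state=saved)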