Diffstat (limited to 'src/scheduler_brutal.py')
-rw-r--r--  src/scheduler_brutal.py  41
1 file changed, 36 insertions(+), 5 deletions(-)
diff --git a/src/scheduler_brutal.py b/src/scheduler_brutal.py
index d1c98b5..f41d203 100644
--- a/src/scheduler_brutal.py
+++ b/src/scheduler_brutal.py
@@ -2,6 +2,7 @@
"""
from card import Card
+from random import shuffle
HISTORY_DEPTH = 8
@@ -14,7 +15,7 @@ class SchedulerBrutal:
for id, card in self._cards.items():
history = state.get(id, [None] * HISTORY_DEPTH)
- # adjust history if depth has changed
+ # Adjust history if depth has changed
if len(history) > HISTORY_DEPTH:
history = history[-HISTORY_DEPTH:]
elif len(history) < HISTORY_DEPTH:
@@ -22,11 +23,11 @@ class SchedulerBrutal:
self._state[id] = history
- def practice(self, size: int) -> dict:
- pass
+ def practice(self, size: int) -> list[int]:
+ return self._schedule(size)
- def test(self, size: int) -> dict:
- pass
+ def test(self, size: int) -> list[int]:
+ return self._schedule(size)
def update(self, results: dict[int, int]) -> None:
# Add card result to sliding window, or None if card was not shown
@@ -35,3 +36,33 @@ class SchedulerBrutal:
def getState(self) -> dict:
return self._state
+
+ # Consolidation index is a measure of how well the card has been memorised
+ @staticmethod
+ def _consolidationIndex(history: list, weights: range) -> float:
+ relevant_history = [(h, w) for h, w in zip(history, weights) if h is not None]
+ weighted_history = sum([h * w for h, w in relevant_history])
+ total_weights = sum([w for h, w in relevant_history])
+ return weighted_history / total_weights if total_weights > 0 else 0.0
+
+ # Exposure index is a measure of how much and how recently a card has been shown
+ @staticmethod
+ def _exposureIndex(history: list) -> float:
+ return sum([i + 1 for i, h in enumerate(history) if h is not None])
+
+ def _schedule(self, size: int) -> list[int]:
+ weights = range(10, 10 + HISTORY_DEPTH)
+ cards = [id for id, card in self._cards.items()]
+
+ # First sort by consolidation index
+ cards.sort(key=lambda id: SchedulerBrutal._consolidationIndex(self._state[id], weights))
+
+ # Next sort by exposure index
+ cards.sort(key=lambda id: SchedulerBrutal._exposureIndex(self._state[id]))
+
+ # Return least exposed and least consolidated cards, shuffled
+ cards = cards[0:size]
+
+ shuffle(cards)
+
+ return cards
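For readers skimming the patch, below is a minimal standalone sketch of the selection maths it introduces. The helpers mirror _consolidationIndex, _exposureIndex and _schedule from the diff; the sample histories, card ids and size value are illustrative assumptions, not data from the repository.

# Standalone illustration of the consolidation/exposure maths in this commit.
# HISTORY_DEPTH and the 10..17 weight range mirror scheduler_brutal.py;
# the histories below are made up for demonstration only.
from random import shuffle

HISTORY_DEPTH = 8
WEIGHTS = range(10, 10 + HISTORY_DEPTH)  # more recent slots carry larger weights

def consolidation_index(history, weights=WEIGHTS):
    # Weighted average over the results we actually have (None = card not shown)
    relevant = [(h, w) for h, w in zip(history, weights) if h is not None]
    total = sum(w for _, w in relevant)
    return sum(h * w for h, w in relevant) / total if total else 0.0

def exposure_index(history):
    # Each time the card was shown contributes; recent slots contribute more
    return sum(i + 1 for i, h in enumerate(history) if h is not None)

histories = {
    0: [None] * HISTORY_DEPTH,                          # never shown
    1: [1, 1, None, None, 1, 0, 1, 1],                  # shown often, mostly correct
    2: [None, None, 0, None, None, 0, None, None],      # shown rarely, always wrong
}

for card_id, history in histories.items():
    print(card_id, consolidation_index(history), exposure_index(history))

# Selection mirrors _schedule(): sort by consolidation index, then by exposure
# index; since Python's sort is stable, exposure becomes the primary key and
# consolidation the tie-breaker. Keep the first `size` ids and shuffle them.
size = 2
ordered = sorted(histories, key=lambda i: consolidation_index(histories[i]))
ordered.sort(key=lambda i: exposure_index(histories[i]))
picked = ordered[:size]
shuffle(picked)
print(picked)

With these toy histories the never-shown card 0 and the rarely-shown, poorly-answered card 2 are picked ahead of the well-consolidated card 1, which matches the "least exposed and least consolidated" intent stated in the diff.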