Diffstat (limited to 'src')
-rw-r--r--  src/scheduler_brutal.py           | 41
-rw-r--r--  src/scheduler_brutal_unittest.py  | 26
2 files changed, 62 insertions, 5 deletions
diff --git a/src/scheduler_brutal.py b/src/scheduler_brutal.py
index d1c98b5..f41d203 100644
--- a/src/scheduler_brutal.py
+++ b/src/scheduler_brutal.py
@@ -2,6 +2,7 @@
"""
from card import Card
+from random import shuffle
HISTORY_DEPTH = 8
@@ -14,7 +15,7 @@ class SchedulerBrutal:
        for id, card in self._cards.items():
            history = state.get(id, [None] * HISTORY_DEPTH)
-            # adjust history if depth has changed
+            # Adjust history if depth has changed
            if len(history) > HISTORY_DEPTH:
                history = history[-HISTORY_DEPTH:]
            elif len(history) < HISTORY_DEPTH:
@@ -22,11 +23,11 @@ class SchedulerBrutal:
            self._state[id] = history
-    def practice(self, size: int) -> dict:
-        pass
+    def practice(self, size: int) -> list[int]:
+        return self._schedule(size)
-    def test(self, size: int) -> dict:
-        pass
+    def test(self, size: int) -> list[int]:
+        return self._schedule(size)
    def update(self, results: dict[int, int]) -> None:
        # Add card result to sliding window, or None if card was not shown
@@ -35,3 +36,33 @@ class SchedulerBrutal:
    def getState(self) -> dict:
        return self._state
+
+    # Consolidation index is a measure of how well the card has been memorised
+    @staticmethod
+    def _consolidationIndex(history: list, weights: range) -> float:
+        relevant_history = [(h, w) for h, w in zip(history, weights) if h is not None]
+        weighted_history = sum([h * w for h, w in relevant_history])
+        total_weights = sum([w for h, w in relevant_history])
+        return weighted_history / total_weights if total_weights > 0 else 0.0
+
+    # Exposure index is a measure of how much and how recently a card has been shown
+    @staticmethod
+    def _exposureIndex(history: list) -> float:
+        return sum([i + 1 for i, h in enumerate(history) if h is not None])
+
+    def _schedule(self, size: int) -> list[int]:
+        weights = range(10, 10 + HISTORY_DEPTH)
+        cards = [id for id, card in self._cards.items()]
+
+        # First sort by consolidation index
+        cards.sort(key=lambda id: SchedulerBrutal._consolidationIndex(self._state[id], weights))
+
+        # Next sort by exposure index
+        cards.sort(key=lambda id: SchedulerBrutal._exposureIndex(self._state[id]))
+
+        # Return least exposed and least consolidated cards, shuffled
+        cards = cards[0:size]
+
+        shuffle(cards)
+
+        return cards
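As a quick illustration of the ranking that _schedule() performs, here is a minimal standalone sketch. It re-implements the two index formulas from the hunk above rather than importing the module, uses HISTORY_DEPTH = 3 to match the unit test below, and the helper names and sample histories are made up for illustration only:

# Standalone sketch, not part of the commit: mirrors _consolidationIndex,
# _exposureIndex and the double sort in _schedule, with HISTORY_DEPTH = 3.
HISTORY_DEPTH = 3
WEIGHTS = range(10, 10 + HISTORY_DEPTH)

def consolidation_index(history):
    # Weighted average of recorded results; later (more recent) slots weigh more.
    relevant = [(h, w) for h, w in zip(history, WEIGHTS) if h is not None]
    total = sum(w for _, w in relevant)
    return sum(h * w for h, w in relevant) / total if total else 0.0

def exposure_index(history):
    # Sum of 1-based slot positions holding a result: higher means the card
    # has been shown more often and/or more recently.
    return sum(i + 1 for i, h in enumerate(history) if h is not None)

state = {
    2: [0, 0, 1],           # shown three times, mostly failed
    4: [None, None, 1],     # shown once, recently, passed
    9: [None, None, None],  # never shown
}

# Two stable sorts, as in _schedule(): consolidation first, then exposure,
# so exposure ends up as the primary key and consolidation breaks ties.
ranked = sorted(state, key=lambda id: consolidation_index(state[id]))
ranked.sort(key=lambda id: exposure_index(state[id]))
print(ranked)  # [9, 4, 2] -> least exposed, least consolidated first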
diff --git a/src/scheduler_brutal_unittest.py b/src/scheduler_brutal_unittest.py
index adfb417..779d2c3 100644
--- a/src/scheduler_brutal_unittest.py
+++ b/src/scheduler_brutal_unittest.py
@@ -7,6 +7,32 @@ from card import Card
scheduler_brutal.HISTORY_DEPTH = 3
#--------------------------------------------------------------------------
+# Scheduling behaviour
+#--------------------------------------------------------------------------
+def test_scheduling():
+    cards = {id: Card("", "") for id in range(0, 10)}
+    state = {
+        0: [1, 1, 1],
+        1: [0, 0, 0],
+        2: [0, 0, 1],
+        3: [1, 0, 0],
+
+        4: [None, None, 1   ],
+        5: [None, 1,    None],
+        6: [1,    None, None],
+        7: [None, None, 0   ],
+        8: [0,    0,    None],
+        9: [None, None, None],
+    }
+
+    expected_priority = [9, 6, 5, 7, 8, 4, 1, 3, 2, 0]
+
+    uut = UUT(cards, state)
+
+    for i in range(0, len(expected_priority)):
+        assert set(uut.practice(i + 1)) == set(expected_priority[0:i + 1])
+
+#--------------------------------------------------------------------------
# State update
#--------------------------------------------------------------------------
def test_stateUpdate():