add more tweaks for stability for more users at the same time working in it
All checks were successful
Build & Push / Build & Push image (push) Successful in 31s

This commit is contained in:
2026-02-28 15:24:15 +01:00
parent e745cc78ab
commit f991cef71d
4 changed files with 103 additions and 3 deletions

View File

@@ -28,4 +28,4 @@ USER appuser
EXPOSE 5000
ENTRYPOINT ["/app/entrypoint.sh"]
CMD ["gunicorn", "--bind", "0.0.0.0:5000", "--workers", "4", "--timeout", "120", "app:app"]
CMD ["gunicorn", "--config", "/app/gunicorn.conf.py", "app:app"]

45
backend/gunicorn.conf.py Normal file
View File

@@ -0,0 +1,45 @@
"""
Gunicorn configuratie voor de Leerdoelen Tracker.
Belasting voor deze app:
~264 gebruikers totaal (12 scholen × ~22 per school)
Piekgebruik: 50-80 gelijktijdig tijdens studiedagen
~2-3 requests/seconde op piekmomenten
Worker formule: (2 × CPU-cores) + 1
Voor een typische VPS met 2 vCPU: 5 workers
Elke worker kan 1 request tegelijk afhandelen (sync worker).
Met 4 workers + 2 threads = effectief 8 gelijktijdige requests —
ruim voldoende voor deze schaal.
"""
import multiprocessing
import os
# ── Workers ───────────────────────────────────────────────────────────────────
# Instelbaar via env voor flexibiliteit op grotere of kleinere servers
workers = int(os.environ.get('GUNICORN_WORKERS', multiprocessing.cpu_count() * 2 + 1))
threads = int(os.environ.get('GUNICORN_THREADS', 2))
worker_class = 'gthread' # threads-based: beter voor I/O-bound Flask apps
# ── Binding ───────────────────────────────────────────────────────────────────
bind = '0.0.0.0:5000'
# ── Timeouts ──────────────────────────────────────────────────────────────────
timeout = 60 # worker killed na 60s — voorkomt hangende processen
graceful_timeout = 30 # tijd om lopende requests af te werken bij restart
keepalive = 5 # HTTP keep-alive seconden (nginx hergebruikt connecties)
# ── Logging ───────────────────────────────────────────────────────────────────
accesslog = '-' # stdout → docker logs
errorlog = '-' # stderr → docker logs
loglevel = os.environ.get('LOG_LEVEL', 'info')
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s %(D)sµs'
# ── Worker lifecycle ──────────────────────────────────────────────────────────
# Herstart workers na N requests — voorkomt memory leaks over tijd
max_requests = 1000
max_requests_jitter = 100 # willekeurige offset voorkomt gelijktijdige restarts
# ── Process naam ──────────────────────────────────────────────────────────────
proc_name = 'leerdoelen-tracker'

View File

@@ -6,6 +6,13 @@ Eén bron van waarheid. Geen andere plek in de app definieert vaknamen.
import os
import json
import re
import threading
# In-memory cache: { vak_id: {'data': {...}, 'mtime': float} }
# Geïnvalideerd bij upload/delete — voorkomt schijf-I/O bij elke request
_cache: dict = {}
_cache_lock = threading.Lock()
_INDEX_KEY = "__index__"
DOELEN_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'doelen')
@@ -49,7 +56,45 @@ def list_installed_vakken():
])
def _cache_get(key):
path = os.path.join(DOELEN_DIR, 'index.json' if key == _INDEX_KEY else f'{key}.json')
if not os.path.exists(path):
return None
try:
mtime = os.path.getmtime(path)
except OSError:
return None
with _cache_lock:
entry = _cache.get(key)
if entry and entry['mtime'] == mtime:
return entry['data']
return None
def _cache_set(key, data):
path = os.path.join(DOELEN_DIR, 'index.json' if key == _INDEX_KEY else f'{key}.json')
try:
mtime = os.path.getmtime(path)
except OSError:
return
with _cache_lock:
_cache[key] = {'data': data, 'mtime': mtime}
def cache_invalidate(vak_id=None):
"""Verwijder één of alle entries uit de cache (aanroepen na upload/delete)."""
with _cache_lock:
if vak_id:
_cache.pop(vak_id, None)
_cache.pop(_INDEX_KEY, None)
else:
_cache.clear()
def load_index():
cached = _cache_get(_INDEX_KEY)
if cached:
return cached
path = os.path.join(DOELEN_DIR, 'index.json')
if not os.path.exists(path):
rebuild_index()
@@ -58,18 +103,23 @@ def load_index():
for vak in data.get('vakken', []):
vak['naam'] = vak_naam(vak['id'])
data['vakken'].sort(key=lambda v: v['naam'])
_cache_set(_INDEX_KEY, data)
return data
def load_vak(vak_id):
if not is_valid_vak_id(vak_id):
return None
cached = _cache_get(vak_id)
if cached:
return cached
path = get_doelen_path(vak_id)
if not os.path.exists(path):
return None
with open(path, encoding='utf-8') as f:
data = json.load(f)
data['vakNaam'] = vak_naam(vak_id)
_cache_set(vak_id, data)
return data
@@ -102,6 +152,7 @@ def save_vak(vak_id, data):
path = get_doelen_path(vak_id)
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=2)
cache_invalidate(vak_id)
rebuild_index()
@@ -110,6 +161,7 @@ def delete_vak(vak_id):
if not os.path.exists(path):
return False
os.remove(path)
cache_invalidate(vak_id)
rebuild_index()
return True

View File

@@ -1,5 +1,3 @@
version: '3.9'
services:
db:
image: postgres:16-alpine
@@ -52,6 +50,11 @@ services:
BASE_URL: ${BASE_URL:-http://localhost}
ORG_NAME: ${ORG_NAME:-GO! Scholengroep}
REDIS_URL: redis://:${REDIS_PASSWORD:-changeme_redis}@redis:6379/0
# Gunicorn tuning — standaard: (2 × CPU) + 1 workers, 2 threads per worker
# Pas aan op basis van je server: 2 vCPU → 5 workers, 4 vCPU → 9 workers
GUNICORN_WORKERS: ${GUNICORN_WORKERS:-5}
GUNICORN_THREADS: ${GUNICORN_THREADS:-2}
LOG_LEVEL: ${LOG_LEVEL:-info}
volumes:
- ./doelen:/app/doelen:ro # JSON doelen bestanden (read-only)
ports: