Update Apache SSL configuration and enhance security features across multiple files. Change X-Frame-Options to SAMEORIGIN for better security, add optional Content Security Policy headers for testing, and improve password handling with HaveIBeenPwned checks during user registration and password reset. Implement passkey login in the authentication flow, including UI updates for a better user experience. Enhance image upload processing with size limits and validation, and add rate limiting to various API endpoints to prevent abuse.

This commit is contained in:
Torsten Schulz (local)
2026-01-05 11:50:57 +01:00
parent 8bd7ed76cd
commit 673c34ac9d
47 changed files with 1738 additions and 83 deletions
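
The commit message above mentions HaveIBeenPwned checks during registration and password reset. Those checks live in other files of this commit; purely as an illustration, a k-anonymity range lookup against the public pwnedpasswords.com API could be sketched like this (hypothetical helper name isPasswordPwned, not taken from this diff):

// Sketch only: query the HIBP range API with the first 5 hex chars of the
// SHA-1 hash, so the full password hash never leaves the server.
import { createHash } from 'node:crypto'

export async function isPasswordPwned(password) {
  const sha1 = createHash('sha1').update(password).digest('hex').toUpperCase()
  const prefix = sha1.slice(0, 5)
  const suffix = sha1.slice(5)

  const res = await fetch(`https://api.pwnedpasswords.com/range/${prefix}`)
  if (!res.ok) return false // fail open if the service is unreachable

  // Each response line has the form "SUFFIX:COUNT"
  const text = await res.text()
  return text.split('\n').some(line => line.split(':')[0].trim() === suffix)
}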

131 server/utils/rate-limit.js Normal file

@@ -0,0 +1,131 @@
/**
 * Very simple in-memory rate limits for Nitro/h3.
 *
 * Note: in-memory state is per process/instance. For horizontal scaling
 * this should be moved to Redis or similar (see docs).
 */
// h3 helpers (Nitro usually auto-imports these in server/ code;
// the explicit import keeps the module self-contained)
import { getHeader, setHeader, createError } from 'h3'

const buckets = globalThis.__HTC_RATE_LIMIT_BUCKETS__ || new Map()
// Persist across hot reloads
globalThis.__HTC_RATE_LIMIT_BUCKETS__ = buckets

function nowMs() {
  return Date.now()
}

function sleep(ms) {
  return new Promise(resolve => setTimeout(resolve, ms))
}

export function getClientIp(event) {
  const xff = getHeader(event, 'x-forwarded-for')
  if (xff) {
    // First IP in the list is the original client
    const first = xff.split(',')[0]?.trim()
    if (first) return first
  }
  const realIp = getHeader(event, 'x-real-ip')
  if (realIp) return realIp.trim()
  return event?.node?.req?.socket?.remoteAddress || 'unknown'
}

function getBucket(key) {
  let b = buckets.get(key)
  if (!b) {
    b = {
      windowStart: nowMs(),
      count: 0,
      consecutiveFails: 0,
      lockedUntil: 0
    }
    buckets.set(key, b)
  }
  return b
}

function normalizeKeyPart(part) {
  return String(part || '')
    .trim()
    .toLowerCase()
    .replace(/\s+/g, ' ')
    .slice(0, 200)
}

function buildKey(name, keyParts) {
  const parts = (Array.isArray(keyParts) ? keyParts : [keyParts]).map(normalizeKeyPart)
  return `${name}:${parts.join(':')}`
}

function resetWindowIfNeeded(bucket, windowMs, now) {
  if (now - bucket.windowStart >= windowMs) {
    bucket.windowStart = now
    bucket.count = 0
    // consecutiveFails is deliberately kept (backoff for misbehaving clients)
  }
}
export function assertRateLimit(event, options) {
  const {
    name,
    keyParts,
    windowMs = 10 * 60 * 1000,
    maxAttempts = 10,
    lockoutMs = 15 * 60 * 1000,
    statusCode = 429,
    message = 'Zu viele Versuche. Bitte später erneut versuchen.'
  } = options || {}
  const key = buildKey(name, keyParts)
  const bucket = getBucket(key)
  const now = nowMs()
  if (bucket.lockedUntil && bucket.lockedUntil > now) {
    const retryAfterSec = Math.ceil((bucket.lockedUntil - now) / 1000)
    setHeader(event, 'Retry-After', String(retryAfterSec))
    throw createError({ statusCode, statusMessage: message })
  }
  resetWindowIfNeeded(bucket, windowMs, now)
  if (bucket.count >= maxAttempts) {
    bucket.lockedUntil = now + lockoutMs
    const retryAfterSec = Math.ceil(lockoutMs / 1000)
    setHeader(event, 'Retry-After', String(retryAfterSec))
    throw createError({ statusCode, statusMessage: message })
  }
  // Count the attempt
  bucket.count += 1
}

export async function registerRateLimitFailure(event, options) {
  const {
    name,
    keyParts,
    delayBaseMs = 300,
    delayMaxMs = 5000
  } = options || {}
  const key = buildKey(name, keyParts)
  const bucket = getBucket(key)
  bucket.consecutiveFails = Math.min((bucket.consecutiveFails || 0) + 1, 30)
  // Exponential backoff: base * 2^(n-1)
  const delay = Math.min(delayBaseMs * Math.pow(2, bucket.consecutiveFails - 1), delayMaxMs)
  await sleep(delay)
}
export function registerRateLimitSuccess(_event, options) {
  const { name, keyParts } = options || {}
  const key = buildKey(name, keyParts)
  const bucket = getBucket(key)
  bucket.consecutiveFails = 0
  // After a success the counters start fresh
  bucket.count = 0
  bucket.windowStart = nowMs()
  bucket.lockedUntil = 0
}
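
A usage sketch for wiring these helpers into a Nitro API handler (hypothetical login endpoint and checkCredentials helper, shown only for illustration; the real call sites are in the other files changed by this commit):

// e.g. server/api/auth/login.post.js (illustrative only)
import { defineEventHandler, readBody, createError } from 'h3'
import { assertRateLimit, registerRateLimitFailure, registerRateLimitSuccess, getClientIp } from '../../utils/rate-limit'

export default defineEventHandler(async (event) => {
  const { email, password } = await readBody(event)
  const limit = { name: 'login', keyParts: [getClientIp(event), email] }

  // Throws 429 with a Retry-After header once the window or lockout is exceeded
  assertRateLimit(event, { ...limit, maxAttempts: 10 })

  const user = await checkCredentials(email, password) // hypothetical credential check
  if (!user) {
    // Counts the failure and delays the response with exponential backoff
    await registerRateLimitFailure(event, limit)
    throw createError({ statusCode: 401, statusMessage: 'Invalid credentials' })
  }

  // Resets the counters so a legitimate user is not locked out later
  registerRateLimitSuccess(event, limit)
  return { ok: true }
})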