Refactor file handling to prioritize internal data directories for backups and uploads; enhance error handling and logging for metadata and CSV operations.
Some checks failed
Code Analysis (JS/Vue) / analyze (push) Failing after 47s

This commit is contained in:
Torsten Schulz (local)
2026-02-11 11:42:24 +01:00
parent 0fcf6ced0e
commit 0d533710cd
15 changed files with 225 additions and 127 deletions

View File

@@ -100,6 +100,18 @@ if ls public/data/*.csv >/dev/null 2>&1; then
else else
echo " No public CSVs to backup (public/data/*.csv not found)" echo " No public CSVs to backup (public/data/*.csv not found)"
fi fi
# Prefer internal public-data under server/data/public-data for backups; fallback to legacy public/data
if ls server/data/public-data/*.csv >/dev/null 2>&1; then
mkdir -p "$BACKUP_DIR/public-data"
cp -a server/data/public-data/*.csv "$BACKUP_DIR/public-data/"
echo " Backed up server/data/public-data/*.csv -> $BACKUP_DIR/public-data/"
elif ls public/data/*.csv >/dev/null 2>&1; then
mkdir -p "$BACKUP_DIR/public-data"
cp -a public/data/*.csv "$BACKUP_DIR/public-data/"
echo " Backed up public/data/*.csv -> $BACKUP_DIR/public-data/"
else
echo " No public CSVs to backup (server/data/public-data or public/data not found)"
fi
# 2. Handle local changes and Git Pull # 2. Handle local changes and Git Pull
echo "2. Handling local changes and pulling latest from git..." echo "2. Handling local changes and pulling latest from git..."
@@ -158,6 +170,38 @@ if [ -d ".output" ]; then
if [ -d ".output" ]; then if [ -d ".output" ]; then
echo "ERROR: .output konnte auch nach erneutem Versuch nicht gelöscht werden!" echo "ERROR: .output konnte auch nach erneutem Versuch nicht gelöscht werden!"
echo "Bitte manuell prüfen und löschen: rm -rf .output" echo "Bitte manuell prüfen und löschen: rm -rf .output"
if ls "$BACKUP_DIR/public-data"/*.csv >/dev/null 2>&1; then
# Restore into internal storage (server/data/public-data)
mkdir -p server/data/public-data
for csv_file in "$BACKUP_DIR/public-data"/*.csv; do
filename=$(basename "$csv_file")
cp -f "$csv_file" "server/data/public-data/$filename"
if [ -f "server/data/public-data/$filename" ]; then
backup_size=$(stat -f%z "$csv_file" 2>/dev/null || stat -c%s "$csv_file" 2>/dev/null || echo "0")
restored_size=$(stat -f%z "server/data/public-data/$filename" 2>/dev/null || stat -c%s "server/data/public-data/$filename" 2>/dev/null || echo "0")
if [ "$backup_size" = "$restored_size" ] && [ "$backup_size" != "0" ]; then
echo "   ✓ Restored server/data/public-data/$filename from backup ($backup_size bytes)"
else
echo "   ⚠ WARNING: server/data/public-data/$filename size mismatch (Backup: $backup_size, Restored: $restored_size)"
fi
else
echo "   ❌ ERROR: Konnte server/data/public-data/$filename nicht wiederherstellen!"
fi
done
echo "   ✓ All public-data files restored into server/data/public-data ($BACKUP_DIR/public-data)."
# Optional: synchronize internal public-data into public/data for legacy builds
# This uses the project's sync script and forces overwrite in public/data.
if command -v node >/dev/null 2>&1 && [ -f scripts/sync-public-data.js ]; then
echo " Synchronizing server/data/public-data -> public/data (using scripts/sync-public-data.js --force)"
node scripts/sync-public-data.js --force || echo " WARNING: sync script failed"
else
echo " Note: To publish CSVs to public/data run: node scripts/sync-public-data.js --force"
fi
else
echo " No public CSVs to restore"
fi
exit 1 exit 1
fi fi
fi fi

View File

@@ -92,12 +92,17 @@ else
exit 1 exit 1
fi fi
if ls public/data/*.csv >/dev/null 2>&1; then # Prefer internal public-data under server/data/public-data for backups; fallback to legacy public/data
if ls server/data/public-data/*.csv >/dev/null 2>&1; then
mkdir -p "$BACKUP_DIR/public-data"
cp -a server/data/public-data/*.csv "$BACKUP_DIR/public-data/"
echo " Backed up server/data/public-data/*.csv -> $BACKUP_DIR/public-data/"
elif ls public/data/*.csv >/dev/null 2>&1; then
mkdir -p "$BACKUP_DIR/public-data" mkdir -p "$BACKUP_DIR/public-data"
cp -a public/data/*.csv "$BACKUP_DIR/public-data/" cp -a public/data/*.csv "$BACKUP_DIR/public-data/"
echo " Backed up public/data/*.csv -> $BACKUP_DIR/public-data/" echo " Backed up public/data/*.csv -> $BACKUP_DIR/public-data/"
else else
echo " No public CSVs to backup (public/data/*.csv not found)" echo " No public CSVs to backup (server/data/public-data or public/data not found)"
fi fi
# 2. Handle local changes and Git Pull # 2. Handle local changes and Git Pull
@@ -310,38 +315,33 @@ echo " Restored server/data from backup ($BACKUP_DIR/server-data)."
# Stelle alle CSVs wieder her # Stelle alle CSVs wieder her
if ls "$BACKUP_DIR/public-data"/*.csv >/dev/null 2>&1; then if ls "$BACKUP_DIR/public-data"/*.csv >/dev/null 2>&1; then
mkdir -p public/data # Restore into internal storage (server/data/public-data)
mkdir -p server/data/public-data
# WICHTIG: Überschreibe auch Dateien, die aus dem Git-Repository kommen
# Verwende cp mit -f (force) um sicherzustellen, dass Backup-Dateien Vorrang haben
for csv_file in "$BACKUP_DIR/public-data"/*.csv; do for csv_file in "$BACKUP_DIR/public-data"/*.csv; do
filename=$(basename "$csv_file") filename=$(basename "$csv_file")
# Überschreibe explizit, auch wenn Datei bereits existiert cp -f "$csv_file" "server/data/public-data/$filename"
cp -f "$csv_file" "public/data/$filename" if [ -f "server/data/public-data/$filename" ]; then
# Stelle sicher, dass die Datei wirklich überschrieben wurde
if [ -f "public/data/$filename" ]; then
# Prüfe, ob die Datei wirklich vom Backup kommt (Größenvergleich)
backup_size=$(stat -f%z "$csv_file" 2>/dev/null || stat -c%s "$csv_file" 2>/dev/null || echo "0") backup_size=$(stat -f%z "$csv_file" 2>/dev/null || stat -c%s "$csv_file" 2>/dev/null || echo "0")
restored_size=$(stat -f%z "public/data/$filename" 2>/dev/null || stat -c%s "public/data/$filename" 2>/dev/null || echo "0") restored_size=$(stat -f%z "server/data/public-data/$filename" 2>/dev/null || stat -c%s "server/data/public-data/$filename" 2>/dev/null || echo "0")
if [ "$backup_size" = "$restored_size" ] && [ "$backup_size" != "0" ]; then if [ "$backup_size" = "$restored_size" ] && [ "$backup_size" != "0" ]; then
echo "   Restored public/data/$filename from backup ($backup_size bytes)" echo "   ✓ Restored server/data/public-data/$filename from backup ($backup_size bytes)"
else else
echo "   WARNING: public/data/$filename Größe stimmt nicht überein (Backup: $backup_size, Restored: $restored_size)" echo "   ⚠ WARNING: server/data/public-data/$filename size mismatch (Backup: $backup_size, Restored: $restored_size)"
fi fi
else else
echo "   ERROR: Konnte public/data/$filename nicht wiederherstellen!" echo "   ❌ ERROR: Konnte server/data/public-data/$filename nicht wiederherstellen!"
fi fi
done done
echo "   All public/data/*.csv files restored from backup ($BACKUP_DIR/public-data)." echo "   ✓ All public-data files restored into server/data/public-data ($BACKUP_DIR/public-data)."
# Zusätzliche Sicherheit: Entferne public/data Dateien aus Git-Index, falls sie getrackt sind # Optional: synchronize internal public-data into public/data for legacy builds
# (nach dem Restore, damit sie nicht beim nächsten git reset überschrieben werden) # This uses the project's sync script and forces overwrite in public/data.
if git ls-files --error-unmatch public/data/*.csv >/dev/null 2>&1; then if command -v node >/dev/null 2>&1 && [ -f scripts/sync-public-data.js ]; then
echo " WARNING: public/data/*.csv Dateien sind noch im Git getrackt!" echo " Synchronizing server/data/public-data -> public/data (using scripts/sync-public-data.js --force)"
echo " Entferne sie aus dem Git-Index (Dateien bleiben erhalten)..." node scripts/sync-public-data.js --force || echo " WARNING: sync script failed"
git rm --cached public/data/*.csv 2>/dev/null || true else
echo " ✓ public/data/*.csv aus Git-Index entfernt" echo " Note: To publish CSVs to public/data run: node scripts/sync-public-data.js --force"
fi fi
else else
echo " No public CSVs to restore" echo " No public CSVs to restore"

View File

@@ -5,13 +5,16 @@
"private": true, "private": true,
"type": "module", "type": "module",
"scripts": { "scripts": {
"dev": "nuxt dev --port 3100", "dev": "nuxt dev --port 3100",
"build": "nuxt build", "build": "nuxt build",
"generate": "nuxt generate", "generate": "nuxt generate",
"preview": "nuxt preview --port 3100", "preview": "nuxt preview --port 3100",
"start": "nuxt start --port 3100", "start": "nuxt start --port 3100",
"postinstall": "nuxt prepare", "postinstall": "nuxt prepare",
"test": "vitest run", "test": "vitest run",
"check-security": "node scripts/verify-no-public-writes.js",
"smoke-local": "BASE_URL=http://127.0.0.1:3100 node scripts/smoke-tests.js",
"sync-public-data": "node scripts/sync-public-data.js",
"test:watch": "vitest watch", "test:watch": "vitest watch",
"lint": "eslint . --fix" "lint": "eslint . --fix"
}, },

View File

@@ -60,25 +60,34 @@ async function inspect(pdfPath) {
async function main() { async function main() {
const repoRoot = process.cwd() const repoRoot = process.cwd()
const template = path.join(repoRoot, 'server', 'templates', 'mitgliedschaft-fillable.pdf') const template = path.join(repoRoot, 'server', 'templates', 'mitgliedschaft-fillable.pdf')
// pick latest generated PDF in public/uploads that is not the sample
// nosemgrep: javascript.lang.security.audit.path-traversal.path-join-resolve-traversal.path-join-resolve-traversal // Prefer internal upload directory used by the API (server/data/uploads).
const uploads = path.join(repoRoot, 'public', 'uploads') // If legacy files exist in public/uploads, warn and inspect them as well.
const internalUploads = path.join(repoRoot, 'server', 'data', 'uploads')
const publicUploads = path.join(repoRoot, 'public', 'uploads')
let pdfFiles = [] let pdfFiles = []
if (fs.existsSync(uploads)) { if (fs.existsSync(internalUploads)) {
pdfFiles = fs.readdirSync(uploads).filter(f => f.toLowerCase().endsWith('.pdf')) pdfFiles = fs.readdirSync(internalUploads).filter(f => f.toLowerCase().endsWith('.pdf'))
.map(f => { .map(f => {
// nosemgrep: javascript.lang.security.audit.path-traversal.path-join-resolve-traversal.path-join-resolve-traversal const filePath = path.join(internalUploads, f)
const filePath = path.join(uploads, f) return { f, mtime: fs.statSync(filePath).mtimeMs, dir: internalUploads }
return { f, mtime: fs.statSync(filePath).mtimeMs }
}) })
.sort((a,b) => b.mtime - a.mtime)
.map(x => x.f)
} }
const apiPdf = pdfFiles.find(n => !n.includes('sample')) || pdfFiles[0]
// Do NOT fall back to public/uploads to avoid encouraging public exposure.
if (pdfFiles.length === 0) {
if (fs.existsSync(publicUploads)) {
console.warn('WARN: PDFs exist in public/uploads. Please migrate them to server/data/uploads using scripts/migrate-public-galerie-to-metadata.js')
}
}
pdfFiles = pdfFiles.sort((a, b) => b.mtime - a.mtime)
const apiPdfEntry = pdfFiles.find(e => !e.f.includes('sample')) || pdfFiles[0]
await inspect(template) await inspect(template)
// nosemgrep: javascript.lang.security.audit.path-traversal.path-join-resolve-traversal.path-join-resolve-traversal if (apiPdfEntry) await inspect(path.join(apiPdfEntry.dir, apiPdfEntry.f))
if (apiPdf) await inspect(path.join(uploads, apiPdf)) else console.log('No API-generated PDF found in server/data/uploads or public/uploads')
else console.log('No API-generated PDF found in public/uploads')
} }
main().catch(e => { console.error(e); process.exit(1) }) main().catch(e => { console.error(e); process.exit(1) })

View File

@@ -26,9 +26,12 @@ const getDataPath = (filename) => {
} }
// Multer-Konfiguration für PDF-Uploads // Multer-Konfiguration für PDF-Uploads
// Store uploads in internal data directory instead of public/
const DOCUMENTS_DIR = getDataPath('documents')
const storage = multer.diskStorage({ const storage = multer.diskStorage({
destination: (req, file, cb) => { destination: (req, file, cb) => {
cb(null, 'public/documents/') cb(null, DOCUMENTS_DIR)
}, },
filename: (req, file, cb) => { filename: (req, file, cb) => {
cb(null, 'satzung.pdf') cb(null, 'satzung.pdf')
@@ -74,8 +77,9 @@ export default defineEventHandler(async (event) => {
}) })
} }
try { try {
await fs.mkdir(path.join(process.cwd(), 'public', 'documents'), { recursive: true }) // Ensure internal documents dir exists
await fs.mkdir(DOCUMENTS_DIR, { recursive: true })
// Multer-Middleware für File-Upload // Multer-Middleware für File-Upload
await new Promise((resolve, reject) => { await new Promise((resolve, reject) => {
@@ -133,8 +137,9 @@ export default defineEventHandler(async (event) => {
configData.seiten = {} configData.seiten = {}
} }
// Serve the uploaded statute via internal media proxy
configData.seiten.satzung = { configData.seiten.satzung = {
pdfUrl: '/documents/satzung.pdf', pdfUrl: '/api/media/documents/satzung.pdf',
content: htmlContent content: htmlContent
} }

View File

@@ -45,15 +45,11 @@ export default defineEventHandler(async (event) => {
}) })
} }
// Wichtig: In Production werden statische Dateien aus `.output/public` ausgeliefert. // Neuer Ablauf (Option B): Schreibe CSVs ausschließlich in internes Datenverzeichnis,
// Wenn PM2 `cwd` auf das Repo-Root setzt, ist `process.cwd()` NICHT `.output` // damit keine direkten Schreibzugriffe auf `public/` stattfinden.
// daher schreiben wir robust in alle sinnvollen Zielorte: // Später kann ein kontrollierter Deploy-/Sync-Prozess die Daten aus `server/data/public-data`
// - `.output/public/data/<file>` (damit die laufende Instanz sofort die neuen Daten liefert) // in die öffentlich ausgelieferte `public/`-Location übernehmen.
// - `public/data/<file>` (damit der nächste Build die Daten wieder übernimmt) const cwd = process.cwd()
//
// nosemgrep: javascript.lang.security.audit.path-traversal.path-join-resolve-traversal.path-join-resolve-traversal
// filename is validated against allowlist above, path traversal prevented
const cwd = process.cwd()
const pathExists = async (p) => { const pathExists = async (p) => {
try { try {
@@ -97,23 +93,15 @@ export default defineEventHandler(async (event) => {
} }
} }
// Preferred: das tatsächlich ausgelieferte Verzeichnis in Production // Ziel: internes Datenverzeichnis unter `server/data/public-data` (persistente, interne Quelle)
// (Nuxt/Nitro serve static aus `.output/public`) const internalPaths = [
const preferredPaths = [] path.join(cwd, 'server/data/public-data', filename),
if (await pathExists(path.join(cwd, '.output/public'))) { path.join(cwd, '../server/data/public-data', filename)
preferredPaths.push(path.join(cwd, '.output/public/data', filename))
}
if (await pathExists(path.join(cwd, '../.output/public'))) {
preferredPaths.push(path.join(cwd, '../.output/public/data', filename))
}
// Fallbacks: Source-Public (für Persistenz bei nächstem Build) und diverse cwd-Layouts
const fallbackPaths = [
path.join(cwd, 'public/data', filename),
path.join(cwd, '../public/data', filename)
] ]
const uniquePaths = [...new Set([...preferredPaths, ...fallbackPaths])] // Behalte legacy `.output` write nur als optionalen, nicht-standardisierten Pfad
// (wird NICHT automatisch gefordert). Hauptsächlich schreiben wir intern.
const uniquePaths = [...new Set([...internalPaths])]
const writeResults = [] const writeResults = []
const writeErrors = [] const writeErrors = []
let wrotePreferred = false let wrotePreferred = false

View File

@@ -17,25 +17,32 @@ export default defineEventHandler(async (event) => {
const isVorstand = hasRole(currentUser, 'vorstand') const isVorstand = hasRole(currentUser, 'vorstand')
// Return users without Passwörter; Kontaktdaten nur für Vorstand // Nur Admin oder Vorstand duerfen vollen Benutzer-Contact und Rollen sehen.
const canSeePrivate = hasAnyRole(currentUser, 'admin', 'vorstand')
const safeUsers = users.map(u => { const safeUsers = users.map(u => {
const migrated = migrateUserRoles({ ...u }) const migrated = migrateUserRoles({ ...u })
const roles = Array.isArray(migrated.roles) ? migrated.roles : (migrated.role ? [migrated.role] : ['mitglied']) const roles = Array.isArray(migrated.roles) ? migrated.roles : (migrated.role ? [migrated.role] : ['mitglied'])
const email = isVorstand ? u.email : undefined return canSeePrivate
const phone = isVorstand ? (u.phone || '') : undefined ? {
id: u.id,
return { email: u.email,
id: u.id, name: u.name,
email, roles: roles,
name: u.name, role: roles[0] || 'mitglied',
roles: roles, phone: u.phone || '',
role: roles[0] || 'mitglied', // Rückwärtskompatibilität active: u.active,
phone, created: u.created,
active: u.active, lastLogin: u.lastLogin
created: u.created, }
lastLogin: u.lastLogin : {
} id: u.id,
name: u.name,
role: roles[0] || 'mitglied',
active: u.active,
lastLogin: u.lastLogin
}
}) })
return { return {

View File

@@ -45,35 +45,49 @@ export default defineEventHandler(async (event) => {
} }
} }
const metadata = await readGalerieMetadata() let metadata = []
try {
metadata = await readGalerieMetadata()
if (!Array.isArray(metadata)) {
console.warn('Galerie-Metadaten haben unerwartetes Format, verwende leere Liste')
metadata = []
}
} catch (e) {
console.error('Fehler beim Lesen der Galerie-Metadaten, liefere leeres Ergebnis:', e.message)
metadata = []
}
// Filtere Bilder basierend auf Sichtbarkeit // Filtere Bilder basierend auf Sichtbarkeit
const visibleImages = metadata.filter(image => { const visibleImages = metadata.filter(image => {
// Öffentliche Bilder sind für alle sichtbar // Defensive checks
if (!image || typeof image !== 'object') return false
if (image.isPublic) return true if (image.isPublic) return true
// Private Bilder nur für eingeloggte Mitglieder
return isLoggedIn return isLoggedIn
}) })
// Sortiere nach Upload-Datum (neueste zuerst) // Sortiere nach Upload-Datum (neueste zuerst) - defensive
visibleImages.sort((a, b) => new Date(b.uploadedAt) - new Date(a.uploadedAt)) visibleImages.sort((a, b) => {
const ta = new Date(a.uploadedAt || 0).getTime()
const tb = new Date(b.uploadedAt || 0).getTime()
return tb - ta
})
// Pagination // Pagination (defensive defaults)
const page = parseInt(getQuery(event).page) || 1 const page = Math.max(1, parseInt(getQuery(event).page) || 1)
const perPage = 10 const perPage = Math.max(1, parseInt(getQuery(event).perPage) || 10)
const start = (page - 1) * perPage const start = (page - 1) * perPage
const end = start + perPage const paginatedImages = visibleImages.slice(start, start + perPage)
const paginatedImages = visibleImages.slice(start, end)
// Konsistente Rückgabeform
return { return {
success: true, success: true,
images: paginatedImages.map(img => ({ images: paginatedImages.map(img => ({
id: img.id, id: img.id || img.filename || null,
title: img.title, title: img.title || '',
description: img.description, description: img.description || '',
isPublic: img.isPublic, isPublic: !!img.isPublic,
uploadedAt: img.uploadedAt, uploadedAt: img.uploadedAt || null,
previewFilename: img.previewFilename previewFilename: img.previewFilename || null
})), })),
pagination: { pagination: {
page, page,

View File

@@ -15,7 +15,10 @@ export default defineEventHandler(async (event) => {
const cwd = process.cwd() const cwd = process.cwd()
const filename = 'mannschaften.csv' const filename = 'mannschaften.csv'
// Prefer server/data, then .output/public/data, then public/data
const candidates = [ const candidates = [
path.join(cwd, '.output/server/data', filename),
path.join(cwd, 'server/data', filename),
path.join(cwd, '.output/public/data', filename), path.join(cwd, '.output/public/data', filename),
path.join(cwd, 'public/data', filename), path.join(cwd, 'public/data', filename),
path.join(cwd, '../.output/public/data', filename), path.join(cwd, '../.output/public/data', filename),

View File

@@ -143,15 +143,19 @@ export default defineEventHandler(async (event) => {
// Sort by name // Sort by name
mergedMembers.sort((a, b) => a.name.localeCompare(b.name)) mergedMembers.sort((a, b) => a.name.localeCompare(b.name))
// Serverseitiger Datenschutz: Kontaktdaten nur für Vorstand // Serverseitiger Datenschutz: nur Vorstands-Mitglieder erhalten volle Kontaktdaten/Logindaten
const isVorstand = hasRole(currentUser, 'vorstand') const isVorstand = hasRole(currentUser, 'vorstand')
// Für nicht-vorstandliche Anfragen liefern wir eine stark reduzierte, nicht-identifizierende
// Ansicht der Mitgliederliste (nur das Nötigste für öffentliche Anzeigen)
const safeMembers = isVorstand const safeMembers = isVorstand
? mergedMembers ? mergedMembers
: mergedMembers.map(m => ({ : mergedMembers.map(m => ({
...m, // Minimale, unkritische Felder
email: undefined, id: m.id,
phone: undefined, name: m.name,
address: undefined source: m.source,
isMannschaftsspieler: !!m.isMannschaftsspieler
})) }))
return { return {

View File

@@ -4,6 +4,13 @@ import { decryptObject } from '../../utils/encryption.js'
export default defineEventHandler(async (event) => { export default defineEventHandler(async (event) => {
try { try {
// Nur Vorstand oder Admin darf Mitgliedschaftsantraege lesen
const token = getCookie(event, 'auth_token')
const currentUser = token ? await getUserFromToken(token) : null
if (!currentUser || !hasAnyRole(currentUser, 'admin', 'vorstand')) {
throw createError({ statusCode: 403, statusMessage: 'Zugriff verweigert' })
}
const config = useRuntimeConfig() const config = useRuntimeConfig()
const encryptionKey = config.encryptionKey || 'local_development_encryption_key_change_in_production' const encryptionKey = config.encryptionKey || 'local_development_encryption_key_change_in_production'
@@ -73,7 +80,7 @@ export default defineEventHandler(async (event) => {
// Nach Zeitstempel sortieren (neueste zuerst) // Nach Zeitstempel sortieren (neueste zuerst)
applications.sort((a, b) => new Date(b.timestamp) - new Date(a.timestamp)) applications.sort((a, b) => new Date(b.timestamp) - new Date(a.timestamp))
return applications return applications
} catch (error) { } catch (error) {
console.error('Fehler beim Laden der Mitgliedschaftsanträge:', error) console.error('Fehler beim Laden der Mitgliedschaftsanträge:', error)

View File

@@ -13,10 +13,15 @@ export default defineEventHandler(async (event) => {
}) })
} }
// Lade Spielplandaten // Lade Spielplandaten - bevorzugt aus server/data
const csvPath = path.join(process.cwd(), 'public/data/spielplan.csv') let csvPath = path.join(process.cwd(), 'server/data/spielplan.csv')
try {
await fs.access(csvPath)
} catch {
csvPath = path.join(process.cwd(), 'public/data/spielplan.csv')
}
let csvContent let csvContent
try { try {
csvContent = await fs.readFile(csvPath, 'utf-8') csvContent = await fs.readFile(csvPath, 'utf-8')
} catch (_error) { } catch (_error) {

View File

@@ -4,13 +4,20 @@ import path from 'path'
export default defineEventHandler(async (event) => { export default defineEventHandler(async (event) => {
try { try {
const cwd = process.cwd() const cwd = process.cwd()
// In production (.output/server), working dir is .output // Prefer internal server/data, fallback to public/data
let csvPath let csvPath
if (cwd.endsWith('.output')) { if (cwd.endsWith('.output')) {
csvPath = path.join(cwd, '../public/data/termine.csv') csvPath = path.join(cwd, '../server/data/termine.csv')
// fallback
if (!(await fs.access(csvPath).then(()=>true).catch(()=>false))) {
csvPath = path.join(cwd, '../public/data/termine.csv')
}
} else { } else {
csvPath = path.join(cwd, 'public/data/termine.csv') csvPath = path.join(cwd, 'server/data/termine.csv')
if (!(await fs.access(csvPath).then(()=>true).catch(()=>false))) {
csvPath = path.join(cwd, 'public/data/termine.csv')
}
} }
const csv = await fs.readFile(csvPath, 'utf-8') const csv = await fs.readFile(csvPath, 'utf-8')

View File

@@ -4,13 +4,19 @@ import path from 'path'
export default defineEventHandler(async (event) => { export default defineEventHandler(async (event) => {
try { try {
const cwd = process.cwd() const cwd = process.cwd()
// In production (.output/server), working dir is .output // Prefer internal server/data, fallback to public/data
let csvPath let csvPath
if (cwd.endsWith('.output')) { if (cwd.endsWith('.output')) {
csvPath = path.join(cwd, '../public/data/vereinsmeisterschaften.csv') csvPath = path.join(cwd, '../server/data/vereinsmeisterschaften.csv')
if (!(await fs.access(csvPath).then(()=>true).catch(()=>false))) {
csvPath = path.join(cwd, '../public/data/vereinsmeisterschaften.csv')
}
} else { } else {
csvPath = path.join(cwd, 'public/data/vereinsmeisterschaften.csv') csvPath = path.join(cwd, 'server/data/vereinsmeisterschaften.csv')
if (!(await fs.access(csvPath).then(()=>true).catch(()=>false))) {
csvPath = path.join(cwd, 'public/data/vereinsmeisterschaften.csv')
}
} }
// CSV-Datei direkt als Text zurückgeben (keine Caching-Probleme) // CSV-Datei direkt als Text zurückgeben (keine Caching-Probleme)

View File

@@ -2,20 +2,16 @@ import { promises as fs } from 'fs'
import path from 'path' import path from 'path'
import { randomUUID } from 'crypto' import { randomUUID } from 'crypto'
// Handle both dev and production paths // Use internal server/data directory for Termine CSV to avoid writing to public/
// filename is always a hardcoded constant (e.g., 'termine.csv'), never user input
const getDataPath = (filename) => { const getDataPath = (filename) => {
const cwd = process.cwd() const cwd = process.cwd()
// In production (.output/server), working dir is .output // Prefer server/data in both production and development
// e.g. project-root/server/data/termine.csv or .output/server/data/termine.csv
if (cwd.endsWith('.output')) { if (cwd.endsWith('.output')) {
// nosemgrep: javascript.lang.security.audit.path-traversal.path-join-resolve-traversal.path-join-resolve-traversal return path.join(cwd, '../server/data', filename)
return path.join(cwd, '../public/data', filename)
} }
return path.join(cwd, 'server/data', filename)
// In development, working dir is project root
// nosemgrep: javascript.lang.security.audit.path-traversal.path-join-resolve-traversal.path-join-resolve-traversal
return path.join(cwd, 'public/data', filename)
} }
const TERMINE_FILE = getDataPath('termine.csv') const TERMINE_FILE = getDataPath('termine.csv')