Enhance CSV file saving mechanism in CMS with atomic write and verification
Some checks failed
Code Analysis (JS/Vue) / analyze (push) Failing after 47s
This commit improves CSV file handling in the CMS by introducing an atomic write helper: data is written to a temporary file in the target directory and then renamed into place, so readers never see a partially written file. A verification step compares the file size on disk with the expected byte length after writing, catching incomplete or corrupted saves. The logic for determining target paths is also refined: the served `.output/public` directory is treated as the preferred target, the source `public` directory acts as a fallback, and the handler now fails with an error instead of reporting success when the served directory cannot be written. Together these changes make data management in the application more reliable.
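For illustration, the temp-file-plus-rename pattern described above can be exercised on its own, outside the CMS handler. The following standalone Node.js sketch mirrors that pattern; the `saveAtomically` helper, the `demo-data/events.csv` path and the sample CSV content are illustrative only and are not part of this commit (the actual change is in the diff below).

// Run as an ES module (e.g. a .mjs file).
import { promises as fs } from 'node:fs'
import path from 'node:path'

// Illustrative helper following the pattern described above: write the data to a
// temporary file in the same directory, rename it over the target, then verify
// that the size on disk matches the expected byte length.
async function saveAtomically (targetPath, data) {
  await fs.mkdir(path.dirname(targetPath), { recursive: true })
  const tmpPath = `${targetPath}.tmp-${process.pid}-${Date.now()}`
  try {
    await fs.writeFile(tmpPath, data, 'utf8')
    await fs.rename(tmpPath, targetPath)
    const { size } = await fs.stat(targetPath)
    const expected = Buffer.byteLength(data, 'utf8')
    if (size !== expected) {
      throw new Error(`size mismatch: expected ${expected}, got ${size}`)
    }
  } catch (err) {
    await fs.unlink(tmpPath).catch(() => {}) // best-effort cleanup of the temp file
    throw err
  }
}

// Usage: write a small CSV and confirm the verified size.
const target = path.join('demo-data', 'events.csv')
await saveAtomically(target, 'id;title\n1;Sommerfest\n')
console.log('wrote', target, (await fs.stat(target)).size, 'bytes')

Creating the temp file next to the target matters because fs.rename cannot move files across filesystems (it fails with EXDEV), and atomic replacement is only guaranteed when the temp file and the target live on the same filesystem.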
@@ -54,28 +54,82 @@ export default defineEventHandler(async (event) => {
 // nosemgrep: javascript.lang.security.audit.path-traversal.path-join-resolve-traversal.path-join-resolve-traversal
 // filename is validated against allowlist above, path traversal prevented
 const cwd = process.cwd()
-const candidatePaths = [
-  path.join(cwd, '.output/public/data', filename),
-  path.join(cwd, 'public/data', filename),
-  path.join(cwd, '../public/data', filename), // in case cwd is e.g. `.output` or `.output/server`
-  path.join(cwd, '../.output/public/data', filename)
-]
-
-const uniquePaths = [...new Set(candidatePaths)]
+
+const pathExists = async (p) => {
+  try {
+    await fs.access(p)
+    return true
+  } catch {
+    return false
+  }
+}
+
+const writeFileAtomicAndVerify = async (targetPath, data) => {
+  const dataDir = path.dirname(targetPath)
+  await fs.mkdir(dataDir, { recursive: true })
+
+  // Atomic write: first write a temp file in the *same directory*, then rename.
+  // This avoids:
+  // - half-written files (readers seeing a partial transfer)
+  // - success responses even though the file is effectively broken
+  const tmpPath = `${targetPath}.tmp-${process.pid}-${Date.now()}`
+  try {
+    await fs.writeFile(tmpPath, data, 'utf8')
+    await fs.rename(tmpPath, targetPath)
+
+    const expectedSize = Buffer.byteLength(data, 'utf8')
+    const st = await fs.stat(targetPath)
+    if (st.size !== expectedSize) {
+      throw new Error(`Size mismatch after write. expected=${expectedSize} actual=${st.size}`)
+    }
+  } catch (e) {
+    // best-effort cleanup
+    try { await fs.unlink(tmpPath) } catch (_e2) {}
+    throw e
+  }
+}
+
+// Preferred: the directory that is actually served in production
+// (Nuxt/Nitro serves static files from `.output/public`)
+const preferredPaths = []
+if (await pathExists(path.join(cwd, '.output/public'))) {
+  preferredPaths.push(path.join(cwd, '.output/public/data', filename))
+}
+if (await pathExists(path.join(cwd, '../.output/public'))) {
+  preferredPaths.push(path.join(cwd, '../.output/public/data', filename))
+}
+
+// Fallbacks: the source public dir (so the data persists into the next build) and various cwd layouts
+const fallbackPaths = [
+  path.join(cwd, 'public/data', filename),
+  path.join(cwd, '../public/data', filename) // in case cwd is e.g. `.output` or `.output/server`
+]
+
+const uniquePaths = [...new Set([...preferredPaths, ...fallbackPaths])]
 const writeResults = []
 const writeErrors = []
+let wrotePreferred = false
 
 for (const targetPath of uniquePaths) {
   try {
-    const dataDir = path.dirname(targetPath)
-    await fs.mkdir(dataDir, { recursive: true })
-    await fs.writeFile(targetPath, content, 'utf8')
+    await writeFileAtomicAndVerify(targetPath, content)
     writeResults.push(targetPath)
+    if (preferredPaths.includes(targetPath)) wrotePreferred = true
   } catch (e) {
-    writeErrors.push({ targetPath, error: e })
+    writeErrors.push({ targetPath, error: e?.message || String(e) })
   }
 }
 
+// If an `.output/public` directory was found, the write there MUST have succeeded.
+// Otherwise we do not report success, because the running instance would keep serving stale or broken data.
+if (preferredPaths.length > 0 && !wrotePreferred) {
+  console.error('CSV wurde NICHT in .output/public geschrieben. Errors:', writeErrors)
+  throw createError({
+    statusCode: 500,
+    statusMessage: 'CSV konnte nicht in das ausgelieferte Verzeichnis geschrieben werden'
+  })
+}
+
 if (writeResults.length === 0) {
   console.error('Konnte CSV-Datei in keinen Zielpfad schreiben:', writeErrors)
   throw createError({