refactor(exercises): standardize answer language handling across exercise scripts
All checks were successful
Deploy to production / deploy (push) Successful in 2m48s

- Introduced a mechanism to infer the answer language from question phrasing in multiple exercise scripts, making exercise data more consistent (a hedged sketch follows this list).
- Updated question formats to clarify the intent of exercises, improving user understanding and engagement.
- Streamlined the code for better maintainability and clarity in exercise generation processes.
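A minimal sketch of the inference described above, in the style of the changed scripts. The helper name and its rules are assumptions for illustration; the actual implementation lives in the changed files, which are only partially shown in the hunks below:

function inferAnswerLanguage(questionData) {
  // An explicit answerLanguage field, as added in the hunks below, takes precedence.
  if (questionData.answerLanguage) {
    return questionData.answerLanguage;
  }
  const question = questionData.question || '';
  // "Was bedeutet ...?" asks for the meaning, so the answer is in the learner's
  // native language; phrasing like "... auf Bisaya?" expects a Bisaya answer.
  // (Assumed rule; the real heuristics are not shown in this excerpt.)
  if (/auf bisaya/i.test(question)) {
    return 'bisaya';
  }
  return 'native';
}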
Torsten Schulz (local)
2026-04-07 14:32:44 +02:00
parent 160c9dafb2
commit ebb2283646
7 changed files with 107 additions and 6 deletions


@@ -349,7 +349,8 @@ function createFeelingsAffectionExercises(nativeLanguageName) {
instruction: 'Übersetze den Bisaya-Satz ins ' + nativeLanguageName,
questionData: JSON.stringify({
type: 'multiple_choice',
- question: `Wie sagt man "${conv.bisaya}" auf ${nativeLanguageName}?`,
+ answerLanguage: 'native',
+ question: `Was bedeutet "${conv.bisaya}"?`,
options: options
}),
answerData: JSON.stringify({
@@ -379,6 +380,7 @@ function createFeelingsAffectionExercises(nativeLanguageName) {
instruction: 'Was bedeutet dieser Bisaya-Satz?',
questionData: JSON.stringify({
type: 'multiple_choice',
+ answerLanguage: 'native',
question: `Was bedeutet "${conv.bisaya}"?`,
options: options
}),
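On the consuming side, the new field can short-circuit the phrasing heuristic. The grading/rendering code is not part of the hunks above, so the function below is only an assumed sketch of how the field might be read, with a fallback for exercise rows written before this commit:

function resolveAnswerLanguage(exerciseRow, nativeLanguageName) {
  // questionData is stored as a JSON string (see JSON.stringify in the hunks above).
  const data = JSON.parse(exerciseRow.questionData);
  // Prefer the explicit answerLanguage field; fall back to phrasing-based
  // inference for older rows that lack it. (Fallback rule is an assumption.)
  const lang = data.answerLanguage
    || (/auf bisaya/i.test(data.question || '') ? 'bisaya' : 'native');
  return lang === 'native' ? nativeLanguageName : 'Bisaya';
}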