4 Commits

Changed files:
  1. debian/changelog (6 lines changed)
  2. main.py (31 lines changed)

debian/changelog (6 lines changed)

@@ -1,3 +1,9 @@
+klette (0.4) UNRELEASED; urgency=low
+
+  * Support multiple data files
+
+ -- Adrian Heine <mail@adrianheine.de>  Tue, 17 Sep 2024 20:10:00 +0200
+
 klette (0.3) UNRELEASED; urgency=low
 
   * Add --otra-vez

main.py (31 lines changed)

@@ -4,15 +4,18 @@ import pyamf
 import shutil
 import json
 import os
+import glob
 import argparse
 import unicodedata
 import random
 import re
 import sys
 import readline
+import datetime
 
 DATA_DIR = os.environ.get('XDG_DATA_HOME', os.environ['HOME'] + '/.local/share') + '/klette'
-VOKABELN_FILE = DATA_DIR + '/vokabeln.json'
+DEPRECATED_VOKABELN_FILE = DATA_DIR + '/vokabeln.json'
+VOKABELN_DIR = DATA_DIR + '/vokabeln/'
 STATUS_FILE = DATA_DIR + '/status.json'
 AUDIO_BASE = DATA_DIR + '/audio/'
@@ -49,9 +52,10 @@ def import_vokabeln(file_name, audio_base):
             'unidad': unidad,
             'paso': paso
         })
+        if audio:
             shutil.copy2(audio_base + audio + '.bin',
                          AUDIO_BASE + audio + '.aac')
-    with open(VOKABELN_FILE, 'x') as writer:
+    with open(VOKABELN_DIR + '/import-' + datetime.datetime.now().isoformat() + '.json', 'x') as writer:
         writer.write(json.dumps(palabras))
 
 def import_status(file_name):
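A minimal sketch of the new per-import file naming, assuming the VOKABELN_DIR constant from the first hunk. write_import() and the sample entry are illustrative only; in the patch this logic sits at the end of import_vokabeln(), and the directory is created in abfrage() rather than here.

import datetime
import json
import os

# Constant copied from the diff above.
VOKABELN_DIR = os.environ.get('XDG_DATA_HOME', os.environ['HOME'] + '/.local/share') + '/klette/vokabeln/'

def write_import(palabras):
    # Hypothetical helper: each import lands in its own timestamped file
    # instead of overwriting a single vokabeln.json.
    os.makedirs(VOKABELN_DIR, exist_ok=True)  # the patch does this in abfrage()
    path = os.path.join(VOKABELN_DIR, 'import-' + datetime.datetime.now().isoformat() + '.json')
    # 'x' mode refuses to overwrite, so repeated imports never clobber each other.
    with open(path, 'x') as writer:
        writer.write(json.dumps(palabras))
    return path

print(write_import([{'unidad': 1, 'paso': 'A'}]))  # sample entry, made up for illustration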
@@ -203,10 +207,10 @@ class Sesion:
     def hace_paso(self, unidad, paso, cur_palabras):
         c = [len(cur_palabras[x]) for x in cur_palabras]
         print(f"{bcolors.BOLD}{unidad}{bcolors.ENDC}: {paso} ({c[0] + c[6]}/{c[1]}/{c[2]}/{c[3]}/{c[4]}/{bcolors.OKGREEN}{c[5]}{bcolors.ENDC})")
-        for n in range(5): # 1..4, no 5
+        n = None
         if self.hace_palabras(cur_palabras[n], n) == Resultado.ADIOS:
             return Resultado.ADIOS
-        n = None
+        for n in range(5): # 1..4, no 5
         if self.hace_palabras(cur_palabras[n], n) == Resultado.ADIOS:
             return Resultado.ADIOS
@@ -237,12 +241,24 @@ class Sesion:
 def abfrage(parser, quiero_unidad, otra_vez):
     random.seed()
+    status = {}
+    palabras = []
+    os.makedirs(VOKABELN_DIR, exist_ok=True)
+    try:
+        os.rename(DEPRECATED_VOKABELN_FILE, os.path.join(VOKABELN_DIR, 'vokabeln.json'))
+    except FileNotFoundError:
+        pass
+    for filename in glob.glob(os.path.join(VOKABELN_DIR, '*.json')):
+        with open(filename, 'r') as f:
+            palabras += json.load(f)
     try:
-        with open(VOKABELN_FILE, 'r') as f:
-            palabras = json.load(f)
         with open(STATUS_FILE, 'r') as f:
             status = json.load(f)
     except FileNotFoundError:
+        pass
+    if len(palabras) == 0:
         print(f"{bcolors.FAIL}Daten können nicht geladen werden, hast du sie schon importiert?{bcolors.ENDC}")
         print()
         parser.print_help()
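For reference, the German message prints roughly "The data cannot be loaded, have you already imported it?". A minimal sketch of the new loading path, assuming the constants from the first hunk; load_palabras() is a hypothetical name, and in the patch the same code runs inline at the top of abfrage().

import glob
import json
import os

# Constants copied from the diff above.
DATA_DIR = os.environ.get('XDG_DATA_HOME', os.environ['HOME'] + '/.local/share') + '/klette'
DEPRECATED_VOKABELN_FILE = DATA_DIR + '/vokabeln.json'
VOKABELN_DIR = DATA_DIR + '/vokabeln/'

def load_palabras():
    palabras = []
    os.makedirs(VOKABELN_DIR, exist_ok=True)
    try:
        # One-time migration: the pre-0.4 single data file becomes just
        # another file inside the new directory.
        os.rename(DEPRECATED_VOKABELN_FILE, os.path.join(VOKABELN_DIR, 'vokabeln.json'))
    except FileNotFoundError:
        pass
    # Merge every import file found in the directory.
    for filename in glob.glob(os.path.join(VOKABELN_DIR, '*.json')):
        with open(filename, 'r') as f:
            palabras += json.load(f)
    return palabras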
@@ -255,7 +271,6 @@ def abfrage(parser, quiero_unidad, otra_vez):
     print(f'{bcolors.OKGREEN}+{sesion.bien}{bcolors.ENDC} / {bcolors.FAIL}-{sesion.mal}{bcolors.ENDC}')
 
 parser = argparse.ArgumentParser()
-default_data_file = os.environ['PWD'] + '/assets/amf/vokabelTrainer147.amf'
 parser.add_argument('--import-data', type=str, help="Path to assets", metavar="DIR")
 default_status_file = os.environ['HOME'] + '/klett/1266/vokabeltrainerData147'
 parser.add_argument('--import-status', type=str, help="Path to AMF File, defaults to " + default_status_file, metavar="FILE", nargs='?', const=default_status_file)
@@ -264,7 +279,7 @@ parser.add_argument('--otra-vez', action='store_true')
 args = parser.parse_args()
 
 if args.import_data:
-    import_vokabeln(args.import_data + '/amf/vokabelTrainer147.amf',
+    import_vokabeln(glob.glob(os.path.join(args.import_data, 'amf/vokabelTrainer*.amf'))[0],
                     args.import_data + '/amf/medium/')
 elif args.import_status:
     import_status(args.import_status)
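A minimal sketch of the relaxed --import-data lookup: the hard-coded vokabelTrainer147.amf becomes a vokabelTrainer*.amf glob under <DIR>/amf/. find_amf() and the sorted() tie-break are illustrative assumptions; the patch itself simply takes element [0] of the glob result.

import glob
import os

def find_amf(import_dir):
    # Hypothetical helper mirroring the new call site above.
    matches = glob.glob(os.path.join(import_dir, 'amf/vokabelTrainer*.amf'))
    if not matches:
        raise FileNotFoundError('no vokabelTrainer*.amf under ' + import_dir)
    # sorted() is an assumption to make the pick deterministic when several
    # AMF files exist; the patch just indexes [0].
    return sorted(matches)[0]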
