import os
import ast

import torch
from dotenv import load_dotenv

load_dotenv(override=True)

###################################################################################################
### EDIT THESE VARIABLES ###

### available languages: 'ch_sim', 'ch_tra', 'ja', 'ko', 'en'
INTERVAL = int(os.getenv('INTERVAL'))  # required: no default is provided here

### OCR
OCR_MODEL = os.getenv('OCR_MODEL', 'easy')  # 'easy', 'paddle', 'rapid'
### easy is the most accurate, paddle is the fastest with CUDA, and rapid is the fastest on CPU.
### rapid only covers Chinese and English unless you add more languages.
OCR_USE_GPU = ast.literal_eval(os.getenv('OCR_USE_GPU', 'True'))

### Drawing/Overlay Config
ADD_OVERLAY = ast.literal_eval(os.getenv('ADD_OVERLAY', 'True'))
FILL_COLOUR = os.getenv('FILL_COLOUR', 'white')
FONT_FILE = os.getenv('FONT_FILE')
FONT_SIZE = int(os.getenv('FONT_SIZE', 16))
LINE_SPACING = int(os.getenv('LINE_SPACING', 3))
REGION = ast.literal_eval(os.getenv('REGION', '(0,0,2560,1440)'))
FONT_COLOUR = os.getenv('FONT_COLOUR', '#ff0000')
TO_ROMANIZE = ast.literal_eval(os.getenv('TO_ROMANIZE', 'True'))

### API keys: https://github.com/cheahjs/free-llm-api-resources?tab=readme-ov-file
GEMINI_API_KEY = os.getenv('GEMINI_KEY')
GROQ_API_KEY = os.getenv('GROQ_API_KEY')
# MISTRAL_API_KEY = os.getenv('MISTRAL_API_KEY')  # https://console.mistral.ai/api-keys/ -- very slow

### Translation
MAX_TRANSLATE = int(os.getenv('MAX_TRANSLATION', 200))
SOURCE_LANG = os.getenv('SOURCE_LANG', 'ja')
TARGET_LANG = os.getenv('TARGET_LANG', 'en')

### Local Translation
TRANSLATION_MODEL = os.environ['TRANSLATION_MODEL']  # 'opus' or 'm2m'; opus is a lot more lightweight
TRANSLATION_USE_GPU = ast.literal_eval(os.getenv('TRANSLATION_USE_GPU', 'True'))
MAX_INPUT_TOKENS = int(os.getenv('MAX_INPUT_TOKENS', 512))
MAX_OUTPUT_TOKENS = int(os.getenv('MAX_OUTPUT_TOKENS', 512))
BATCH_SIZE = int(os.getenv('BATCH_SIZE', 6))
LOCAL_FILES_ONLY = ast.literal_eval(os.getenv('LOCAL_FILES_ONLY', 'False'))
###################################################################################################

LINE_HEIGHT = FONT_SIZE

if TRANSLATION_USE_GPU:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")

### Just for info
available_langs = ['ch_sim', 'ch_tra', 'ja', 'ko', 'en']  # the OCR models only support a limited set of languages
seq_llm_models = ['opus', 'm2m']
api_llm_models = ['gemini']
causal_llm_models = []
curr_models = seq_llm_models + api_llm_models + causal_llm_models
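
### Example .env -- a minimal sketch for reference only. The values below are
### illustrative assumptions, not project defaults; variable names match the
### os.getenv()/os.environ lookups above. INTERVAL and TRANSLATION_MODEL have
### no fallback in this file, so they must be set in the .env.
# INTERVAL=2
# OCR_MODEL=easy
# OCR_USE_GPU=True
# ADD_OVERLAY=True
# FILL_COLOUR=white
# FONT_SIZE=16
# LINE_SPACING=3
# REGION=(0,0,2560,1440)
# FONT_COLOUR=#ff0000
# TO_ROMANIZE=True
# GEMINI_KEY=your-gemini-api-key
# GROQ_API_KEY=your-groq-api-key
# MAX_TRANSLATION=200
# SOURCE_LANG=ja
# TARGET_LANG=en
# TRANSLATION_MODEL=opus
# TRANSLATION_USE_GPU=True
# MAX_INPUT_TOKENS=512
# MAX_OUTPUT_TOKENS=512
# BATCH_SIZE=6
# LOCAL_FILES_ONLY=False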