onscreen-translator/config.py

import ast
import os

from dotenv import load_dotenv
import torch
load_dotenv(override=True)
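### Example .env (illustrative values only; INTERVAL and TRANSLATION_MODEL have
### no defaults below and must be set):
###   INTERVAL=2
###   OCR_MODEL=easy
###   TRANSLATION_MODEL=opus
###   SOURCE_LANG=ja
###   TARGET_LANG=en
###   FONT_FILE=/path/to/font.ttf
###   GEMINI_KEY=...            # only needed when TRANSLATION_MODEL is 'gemini'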
###################################################################################################
### EDIT THESE VARIABLES ###
### available languages: 'ch_sim', 'ch_tra', 'ja', 'ko', 'en'
INTERVAL = int(os.environ['INTERVAL']) # required, no default; fails fast with a clear KeyError when unset
### OCR
### 'easy', 'paddle', or 'rapid'. easy is the most accurate; paddle is the fastest with CUDA; rapid is the fastest on CPU.
### rapid only supports Chinese and English unless you add more languages.
OCR_MODEL = os.getenv('OCR_MODEL', 'easy')
OCR_USE_GPU = ast.literal_eval(os.getenv('OCR_USE_GPU', 'True'))
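### Illustrative only -- roughly how these OCR settings might be consumed by an
### EasyOCR backend (easyocr.Reader is the real entry point; exactly which
### language codes get passed is an assumption, they follow available_langs below):
###   import easyocr
###   reader = easyocr.Reader([SOURCE_LANG, 'en'], gpu=OCR_USE_GPU)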
### Drawing/Overlay Config
FONT_FILE = os.getenv('FONT_FILE')
FONT_SIZE = int(os.getenv('FONT_SIZE', 16))
LINE_SPACING = int(os.getenv('LINE_SPACING', 3))
REGION = ast.literal_eval(os.getenv('REGION','(0,0,2560,1440)')) # screen capture region; the default covers a 2560x1440 display from the top-left corner
TEXT_COLOR = os.getenv('TEXT_COLOR', "#ff0000")
TO_ROMANIZE = ast.literal_eval(os.getenv('TO_ROMANIZE', 'True'))
### Translation
BATCH_SIZE = int(os.getenv('BATCH_SIZE', 6))
GEMINI_KEY = os.getenv('GEMINI_KEY')
LOCAL_FILES_ONLY = ast.literal_eval(os.getenv('LOCAL_FILES_ONLY', 'False'))
MAX_INPUT_TOKENS = int(os.getenv('MAX_INPUT_TOKENS', 512))
MAX_OUTPUT_TOKENS = int(os.getenv('MAX_OUTPUT_TOKENS', 512))
MAX_TRANSLATE = int(os.getenv('MAX_TRANSLATE', 200))
SOURCE_LANG = os.getenv('SOURCE_LANG', 'ja')
TARGET_LANG = os.getenv('TARGET_LANG', 'en')
TRANSLATION_MODEL = os.environ['TRANSLATION_MODEL'] # 'opus', 'm2m', or 'gemini'; opus is by far the most lightweight
TRANSLATION_USE_GPU = ast.literal_eval(os.getenv('TRANSLATION_USE_GPU', 'True'))
###################################################################################################
LINE_HEIGHT = FONT_SIZE # one rendered line is one font-size tall; extra spacing comes from LINE_SPACING
if TRANSLATION_USE_GPU:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")
### Just for info
available_langs = ['ch_sim', 'ch_tra', 'ja', 'ko', 'en'] # not every OCR model supports every language listed here
seq_llm_models = ['opus', 'm2m'] # local seq2seq translation models
api_llm_models = ['gemini'] # API-backed models (require GEMINI_KEY)
causal_llm_models = [] # none supported yet
curr_models = seq_llm_models + api_llm_models + causal_llm_models
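
### A minimal startup check -- a sketch, not part of the original config. It assumes
### the lists above are the authoritative sets of valid values; call it early in the
### app's startup rather than at import time:
def validate_config():
    if SOURCE_LANG not in available_langs or TARGET_LANG not in available_langs:
        raise ValueError(f"SOURCE_LANG/TARGET_LANG must be one of {available_langs}")
    if TRANSLATION_MODEL not in curr_models:
        raise ValueError(f"TRANSLATION_MODEL must be one of {curr_models}")
    if TRANSLATION_MODEL in api_llm_models and not GEMINI_KEY:
        raise ValueError("GEMINI_KEY must be set when using an API translation model")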