From d96333a5a76dd7cf84be980973bcc8de9483e521 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristofer=20S=C3=B6derstr=C3=B6m?= Date: Fri, 30 Jun 2023 16:11:59 +0200 Subject: [PATCH] Complete rework for GUI, experimental EXE file and other minor changes, see readme for more info --- CITATION.cff | 6 +- GUI.py | 100 -------- Mac_instructions.md | 9 + Mac_instructions.txt | 5 - README.md | 122 ++++----- app.py | 133 ++++++++++ build_setup.py | 20 ++ example.ipynb | 119 ++++----- example_no_internet.ipynb | 231 ------------------ gui_jpeg.jpg | Bin 29936 -> 0 bytes Picture1.png => images/Picture1.png | Bin gui-mac.png => images/gui-mac.png | Bin images/gui-windows.png | Bin 0 -> 12352 bytes images/icon.ico | Bin 0 -> 1822 bytes run_Windows.bat | 2 +- .../transcriptions/Armstrong_Small_Step.txt | 7 +- .../Axel_Pettersson_röstinspelning.txt | 6 +- src/_LocalTranscribe.py | 90 +++++++ transcribe.py | 56 ----- 19 files changed, 386 insertions(+), 520 deletions(-) delete mode 100644 GUI.py create mode 100644 Mac_instructions.md delete mode 100644 Mac_instructions.txt create mode 100644 app.py create mode 100644 build_setup.py delete mode 100644 example_no_internet.ipynb delete mode 100644 gui_jpeg.jpg rename Picture1.png => images/Picture1.png (100%) rename gui-mac.png => images/gui-mac.png (100%) create mode 100644 images/gui-windows.png create mode 100644 images/icon.ico create mode 100644 src/_LocalTranscribe.py delete mode 100644 transcribe.py diff --git a/CITATION.cff b/CITATION.cff index 57a3355..76153d3 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -4,8 +4,8 @@ authors: - family-names: "Söderström" given-names: "Kristofer Rolf" orcid: "https://orcid.org/0000-0002-5322-3350" -title: "transcribe" -version: 1.1.1 -doi: 10.5281/zenodo.7760511 +title: "Local Transcribe" +version: 1.2 +doi: 10.5281/zenodo.7760510 date-released: 2023-03-22 url: "https://github.com/soderstromkr/transcribe" diff --git a/GUI.py b/GUI.py deleted file mode 100644 index 79ac5bf..0000000 --- a/GUI.py 
+++ /dev/null @@ -1,100 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from tkinter import filedialog -from tkinter import messagebox -from transcribe import transcribe -from ttkthemes import ThemedTk -import whisper -import numpy as np -import glob, os - - -class App: - def __init__(self, master): - self.master = master - master.title("Local Transcribe") - - #style options - style = ttk.Style() - style.configure('TLabel', font=('Arial', 10), padding=10) - style.configure('TEntry', font=('Arial', 10), padding=10) - style.configure('TButton', font=('Arial', 10), padding=10) - style.configure('TCheckbutton', font=('Arial', 10), padding=10) - - # Folder Path - path_frame = ttk.Frame(master, padding=10) - path_frame.pack(fill=tk.BOTH) - path_label = ttk.Label(path_frame, text="Folder Path:") - path_label.pack(side=tk.LEFT, padx=5) - self.path_entry = ttk.Entry(path_frame, width=50) - self.path_entry.insert(10, 'sample_audio/') - self.path_entry.pack(side=tk.LEFT, fill=tk.X, expand=True) - browse_button = ttk.Button(path_frame, text="Browse", command=self.browse) - browse_button.pack(side=tk.LEFT, padx=5) - - # File Type - file_type_frame = ttk.Frame(master, padding=10) - file_type_frame.pack(fill=tk.BOTH) - file_type_label = ttk.Label(file_type_frame, text="File Type:") - file_type_label.pack(side=tk.LEFT, padx=5) - self.file_type_entry = ttk.Entry(file_type_frame, width=50) - self.file_type_entry.insert(10, 'ogg') - self.file_type_entry.pack(side=tk.LEFT, fill=tk.X, expand=True) - - # Model - model_frame = ttk.Frame(master, padding=10) - model_frame.pack(fill=tk.BOTH) - model_label = ttk.Label(model_frame, text="Model:") - model_label.pack(side=tk.LEFT, padx=5) - self.model_entry = ttk.Entry(model_frame, width=50) - self.model_entry.insert(10, 'small') - self.model_entry.pack(side=tk.LEFT, fill=tk.X, expand=True) - - # Language (currently disabled) - #language_frame = ttk.Frame(master, padding=10) - #language_frame.pack(fill=tk.BOTH) - #language_label = 
ttk.Label(language_frame, text="Language:") - #language_label.pack(side=tk.LEFT, padx=5) - #self.language_entry = ttk.Entry(language_frame, width=50) - #self.language_entry.insert(10, np.nan) - #self.language_entry.pack(side=tk.LEFT, fill=tk.X, expand=True) - - # Verbose - verbose_frame = ttk.Frame(master, padding=10) - verbose_frame.pack(fill=tk.BOTH) - self.verbose_var = tk.BooleanVar() - verbose_checkbutton = ttk.Checkbutton(verbose_frame, text="Verbose", variable=self.verbose_var) - verbose_checkbutton.pack(side=tk.LEFT, padx=5) - - # Buttons - button_frame = ttk.Frame(master, padding=10) - button_frame.pack(fill=tk.BOTH) - transcribe_button = ttk.Button(button_frame, text="Transcribe Audio", command=self.transcribe) - transcribe_button.pack(side=tk.LEFT, padx=5, pady=10, fill=tk.X, expand=True) - quit_button = ttk.Button(button_frame, text="Quit", command=master.quit) - quit_button.pack(side=tk.RIGHT, padx=5, pady=10, fill=tk.X, expand=True) - - def browse(self): - folder_path = filedialog.askdirectory() - self.path_entry.delete(0, tk.END) - self.path_entry.insert(0, folder_path) - - def transcribe(self): - path = self.path_entry.get() - file_type = self.file_type_entry.get() - model = self.model_entry.get() - #language = self.language_entry.get() - language = None # set to auto-detect - verbose = self.verbose_var.get() - - # Call the transcribe function with the appropriate arguments - result = transcribe(path, file_type, model=model, language=language, verbose=verbose) - - # Show the result in a message box - tk.messagebox.showinfo("Finished!", result) - -if __name__ == "__main__": -# root = tk.Tk() - root = ThemedTk(theme="clearlooks") - app = App(root) - root.mainloop() \ No newline at end of file diff --git a/Mac_instructions.md b/Mac_instructions.md new file mode 100644 index 0000000..acb3700 --- /dev/null +++ b/Mac_instructions.md @@ -0,0 +1,9 @@ +### How to run on Mac +Unfortunately, I have not found a permament solution for this, not being a Mac user 
has limited the ways I can test this. +#### Recommended steps +1. Open a terminal and navigate to the root folder (the downloaded folder). + 1. You can also right-click (or equivalent) on the root folder to open a Terminal within the folder. +2. Run the following command: +``` +python app.py +``` \ No newline at end of file diff --git a/Mac_instructions.txt b/Mac_instructions.txt deleted file mode 100644 index f92baff..0000000 --- a/Mac_instructions.txt +++ /dev/null @@ -1,5 +0,0 @@ -### How to run on Mac -Unfortunately, I have not found a permament solution for this, not being a Mac user has limited the ways I can test this. For now, these are the recommended steps for a beginner user: -1. Open a terminal and navigate to the root folder (transcribe-main if you downloaded the folder). You can also right-click (or equivalent) on the root folder to open a Terminal within the folder. -2. Run the following command: -python GUI.py diff --git a/README.md b/README.md index a3a6e75..c64030d 100644 --- a/README.md +++ b/README.md @@ -1,71 +1,75 @@ -## Local Transcribe +## Local Transcribe with Whisper +Local Transcribe with Whisper is a user-friendly desktop application that allows you to transcribe audio and video files using the Whisper ASR system. This application provides a graphical user interface (GUI) built with Python and the Tkinter library, making it easy to use even for those not familiar with programming. -Local Transcribe uses OpenAI's Whisper to transcribe audio files from your local folders, creating text files on disk. +## New in version 1.2! +1. Simpler usage: + 1. File type: You no longer need to specify file type. The program will only transcribe eligible files. + 2. Language: Added option to specify language, which might help in some cases. Clear the default text to run automatic language recognition. + 3. Model selection: Now a dropdown option that includes most models for typical use. +2. New and improved GUI. 
+![python GUI.py](images/gui-windows.png) +3. Executable: On Windows and don't want to install python? Try the Exe file! See below for instructions (Experimental) -## Note - -This implementation and guide is mostly made for researchers not familiar with programming that want a way to transcribe their files locally, without internet connection, usually required within ethical data practices and frameworks. Two examples are shown, a normal workflow with internet connection. And one in which the model is loaded first, via openai-whisper, and then the transcription can be done without being connected to the internet. There is now also a GUI implementation, read below for more information. - -### Instructions - -#### Requirements - -1. This script was made and tested in an Anaconda environment with Python 3.10. I recommend this method if you're not familiar with Python. -See [here](https://docs.anaconda.com/anaconda/install/index.html) for instructions. You might need administrator rights. - -2. Whisper requires some additional libraries. The [setup](https://github.com/openai/whisper#setup) page states: "The codebase also depends on a few Python packages, most notably HuggingFace Transformers for their fast tokenizer implementation and ffmpeg-python for reading audio files." -Users might not need to specifically install Transfomers. However, a conda installation might be needed for ffmpeg[^1], which takes care of setting up PATH variables. From the anaconda prompt, type or copy the following: - -``` -conda install -c conda-forge ffmpeg-python -``` - -3. The main functionality comes from openai-whisper. See their [page](https://github.com/openai/whisper) for details. As of 2023-03-22 you can install via: - -``` -pip install -U openai-whisper -``` - -4. There is an option to run a batch file, which launches a GUI built on TKinter and TTKthemes. If using these options, make sure they are installed in your Python build. You can install them via pip. 
- -``` -pip install tk -``` - -and - -``` -pip install ttkthemes -``` - -#### Using the script - -This is a simple script with no installation. You can download the zip folder and extract it to your preferred working folder. +## Features +* Select the folder containing the audio or video files you want to transcribe. Tested with m4a video. +* Choose the language of the files you are transcribing. You can either select a specific language or let the application automatically detect the language. +* Select the Whisper model to use for the transcription. Available models include "base.en", "base", "small.en", "small", "medium.en", "medium", and "large". Models with .en ending are better if you're transcribing English, especially the base and small models. +* Enable the verbose mode to receive detailed information during the transcription process. +* Monitor the progress of the transcription with the progress bar and terminal. +* Confirmation dialog before starting the transcription to ensure you have selected the correct folder. +* View the transcribed text in a message box once the transcription is completed. +## Installation +### Get the files +Download the zip folder and extract it to your preferred working folder. ![](Picture1.png) - Or by cloning the repository with: - ``` git clone https://github.com/soderstromkr/transcribe.git ``` +### Executable Version **(Experimental. Windows only)** +The executable version of Local Transcribe with Whisper is a standalone program and should work out of the box. This experimental version is available if you have Windows, and do not have (or don't want to install) python and additional dependencies. However, it requires more disk space (around 1Gb), has no GPU acceleration and has only been lightly tested for bugs, etc. Let me know if you run into any issues! +1. Download the project folder. As the image above shows. +2. Navigate to build. +3. 
Unzip the folder (get a coffee or a tea, this might take a while depending on your computer) +4. Run the executable (app.exe) file. +### Python Version **(any platform including Mac users)** +This is recommended if you don't have Windows, if you have Windows and use Python, or if you want to use GPU acceleration (Pytorch and Cuda) for faster transcriptions. I would generally recommend this method anyway, but I can understand not everyone wants to go through the installation process for Python, Anaconda and the other required packages. +1. This script was made and tested in an Anaconda environment with Python 3.10. I recommend this method if you're not familiar with Python. +See [here](https://docs.anaconda.com/anaconda/install/index.html) for instructions. You might need administrator rights. +2. Whisper requires some additional libraries. The [setup](https://github.com/openai/whisper#setup) page states: "The codebase also depends on a few Python packages, most notably HuggingFace Transformers for their fast tokenizer implementation and ffmpeg-python for reading audio files." +Users might not need to specifically install Transformers. However, a conda installation might be needed for ffmpeg[^1], which takes care of setting up PATH variables. From the anaconda prompt, type or copy the following: +``` +conda install -c conda-forge ffmpeg-python +``` +3. The main functionality comes from openai-whisper. See their [page](https://github.com/openai/whisper) for details. As of 2023-03-22 you can install via: +``` +pip install -U openai-whisper +``` +4. The app is built on TKinter and CustomTkinter, so make sure they are installed in your Python build. You can install them via pip. +``` +pip install tk +``` +and +``` +pip install customtkinter +``` +5. Run the app: + 1. 
For **Windows**: In the same folder as the *app.py* file, run the app from terminal by running ```python app.py``` or with the batch file called run_Windows.bat (for Windows users), which assumes you have conda installed and in the base environment (This is for simplicity, but users are usually advised to create an environment, see [here](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-with-commands) for more info) just make sure you have the correct environment (right click on the file and press edit to make any changes). If you want to download a model first, and then go offline for transcription, I recommend running the model with the default sample folder, which will download the model locally. + 2. For **Mac**: Haven't figured out a better way to do this, see [the instructions here](Mac_instructions.md) +## Usage +1. When launched, the app will also open a terminal that shows some additional information. +2. Select the folder containing the audio or video files you want to transcribe by clicking the "Browse" button next to the "Folder" label. This will open a file dialog where you can navigate to the desired folder. Remember, you won't be choosing individual files but whole folders! +3. Enter the desired language for the transcription in the "Language" field. You can either select a language or leave it blank to enable automatic language detection. +4. Choose the Whisper model to use for the transcription from the dropdown list next to the "Model" label. +5. Enable the verbose mode by checking the "Verbose" checkbox if you want to receive detailed information during the transcription process. +6. Click the "Transcribe" button to start the transcription. The button will be disabled during the process to prevent multiple transcriptions at once. +7. Monitor the progress of the transcription with the progress bar. +8. 
Once the transcription is completed, a message box will appear displaying the transcribed text. Click "OK" to close the message box. +9. You can run the application again or quit the application at any time by clicking the "Quit" button. - -#### Example with Jupyter Notebook - -See [example](example.ipynb) for an implementation on Jupyter Notebook, also added an example for a simple [workaround](example_no_internet.ipynb) to transcribe while offline. - -#### Using the GUI - -You can also run the GUI version from your terminal running ```python GUI.py``` or with the batch file called run_Windows.bat (for Windows users), just make sure to add your conda path to it. If you want to download a model first, and then go offline for transcription, I recommend running the model with the default sample folder, which will download the model locally. - -The GUI should look like this: - -![python GUI.py](gui_jpeg.jpg?raw=true) - -or this, on a Mac, by running `python GUI.py` or `python3 GUI.py`: - -![python GUI Mac.py](gui-mac.png) +## Jupyter Notebook +Don't want fancy EXEs or GUIs? Use the function as is. See [example](example.ipynb) for an implementation on Jupyter Notebook. [^1]: Advanced users can use ```pip install ffmpeg-python``` but be ready to deal with some [PATH issues](https://stackoverflow.com/questions/65836756/python-ffmpeg-wont-accept-path-why), which I encountered in Windows 11. 
diff --git a/app.py b/app.py new file mode 100644 index 0000000..5511129 --- /dev/null +++ b/app.py @@ -0,0 +1,133 @@ +import tkinter as tk +from tkinter import ttk +from tkinter import filedialog +from tkinter import messagebox +from src._LocalTranscribe import transcribe, get_path +import customtkinter +import threading +from colorama import Back, Fore +import colorama +colorama.init(autoreset=True) + + + +customtkinter.set_appearance_mode("System") +customtkinter.set_default_color_theme("blue") # Themes: blue (default), dark-blue, green +firstclick = True + +class App: + def __init__(self, master): + print(Back.CYAN + "Welcome to Local Transcribe with Whisper!\U0001f600\nCheck back here to see some output from your transcriptions.\nDon't worry, they will also be saved on the computer!\U0001f64f") + self.master = master + # Change font + font = ('Roboto', 13, 'bold') # Change the font and size here + font_b = ('Roboto', 12) # Change the font and size here + # Folder Path + path_frame = customtkinter.CTkFrame(master) + path_frame.pack(fill=tk.BOTH, padx=10, pady=10) + customtkinter.CTkLabel(path_frame, text="Folder:", font=font).pack(side=tk.LEFT, padx=5) + self.path_entry = customtkinter.CTkEntry(path_frame, width=50, font=font_b) + self.path_entry.pack(side=tk.LEFT, fill=tk.X, expand=True) + customtkinter.CTkButton(path_frame, text="Browse", command=self.browse, font=font).pack(side=tk.LEFT, padx=5) + # Language frame + #thanks to pommicket from Stackoverflow for this fix + def on_entry_click(event): + """function that gets called whenever entry is clicked""" + global firstclick + if firstclick: # if this is the first time they clicked it + firstclick = False + self.language_entry.delete(0, "end") # delete all the text in the entry + language_frame = customtkinter.CTkFrame(master) + language_frame.pack(fill=tk.BOTH, padx=10, pady=10) + customtkinter.CTkLabel(language_frame, text="Language:", font=font).pack(side=tk.LEFT, padx=5) + self.language_entry = 
customtkinter.CTkEntry(language_frame, width=50, font=('Roboto', 12, 'italic')) + self.language_entry.insert(0, 'Select language or clear to detect automatically') + self.language_entry.bind('', on_entry_click) + self.language_entry.pack(side=tk.LEFT, fill=tk.X, expand=True) + # Model frame + models = ['base.en', 'base', 'small.en', + 'small', 'medium.en', 'medium', 'large'] + model_frame = customtkinter.CTkFrame(master) + model_frame.pack(fill=tk.BOTH, padx=10, pady=10) + customtkinter.CTkLabel(model_frame, text="Model:", font=font).pack(side=tk.LEFT, padx=5) + # ComboBox frame + self.model_combobox = customtkinter.CTkComboBox( + model_frame, width=50, state="readonly", + values=models, font=font_b) + self.model_combobox.set(models[1]) # Set the default value + self.model_combobox.pack(side=tk.LEFT, fill=tk.X, expand=True) + # Verbose frame + verbose_frame = customtkinter.CTkFrame(master) + verbose_frame.pack(fill=tk.BOTH, padx=10, pady=10) + self.verbose_var = tk.BooleanVar() + customtkinter.CTkCheckBox(verbose_frame, text="Output transcription to terminal", variable=self.verbose_var, font=font).pack(side=tk.LEFT, padx=5) + # Progress Bar + self.progress_bar = ttk.Progressbar(master, length=200, mode='indeterminate') + # Button actions frame + button_frame = customtkinter.CTkFrame(master) + button_frame.pack(fill=tk.BOTH, padx=10, pady=10) + self.transcribe_button = customtkinter.CTkButton(button_frame, text="Transcribe", command=self.start_transcription, font=font) + self.transcribe_button.pack(side=tk.LEFT, padx=5, pady=10, fill=tk.X, expand=True) + customtkinter.CTkButton(button_frame, text="Quit", command=master.quit, font=font).pack(side=tk.RIGHT, padx=5, pady=10, fill=tk.X, expand=True) + # Helper functions + # Browsing + def browse(self): + folder_path = filedialog.askdirectory() + self.path_entry.delete(0, tk.END) + self.path_entry.insert(0, folder_path) + # Start transcription + def start_transcription(self): + # Disable transcribe button + 
self.transcribe_button.configure(state=tk.DISABLED) + # Start a new thread for the transcription process + threading.Thread(target=self.transcribe_thread).start() + # Threading + def transcribe_thread(self): + path = self.path_entry.get() + model = self.model_combobox.get() + language = self.language_entry.get() or None + verbose = self.verbose_var.get() + # Show progress bar + self.progress_bar.pack(fill=tk.X, padx=5, pady=5) + self.progress_bar.start() + # Setting path and files + glob_file = get_path(path) + info_path = 'I will transcribe all eligible audio/video files in the path: {}\n\nContinue?'.format(path) + answer = messagebox.askyesno("Confirmation", info_path) + if not answer: + self.progress_bar.stop() + self.progress_bar.pack_forget() + self.transcribe_button.configure(state=tk.NORMAL) + return + # Start transcription + error_language = 'https://github.com/openai/whisper#available-models-and-languages' + try: + output_text = transcribe(path, glob_file, model, language, verbose) + except UnboundLocalError: + messagebox.showinfo("Files not found error!", 'Nothing found, choose another folder.') + pass + except ValueError: + messagebox.showinfo("Language error!", 'See {} for supported languages'.format(error_language)) + # Hide progress bar + self.progress_bar.stop() + self.progress_bar.pack_forget() + # Enable transcribe button + self.transcribe_button.configure(state=tk.NORMAL) + # Recover output text + try: + messagebox.showinfo("Finished!", output_text) + except UnboundLocalError: + pass + +if __name__ == "__main__": + # Setting custom themes + root = customtkinter.CTk() + root.title("Local Transcribe with Whisper") + # Geometry + width,height = 450,275 + root.geometry('{}x{}'.format(width,height)) + # Icon + root.iconbitmap('images/icon.ico') + # Run + app = App(root) + root.mainloop() diff --git a/build_setup.py b/build_setup.py new file mode 100644 index 0000000..7a349ba --- /dev/null +++ b/build_setup.py @@ -0,0 +1,20 @@ +from cx_Freeze import 
setup, Executable + +build_exe_options = { + "packages": ['whisper','tkinter','customtkinter'] + } +executables = ( + [ + Executable( + "app.py", + icon='images/icon.ico', + ) + ] +) +setup( + name="Local Transcribe with Whisper", + version="1.2", + author="Kristofer Rolf Söderström", + options={"build_exe":build_exe_options}, + executables=executables +) \ No newline at end of file diff --git a/example.ipynb b/example.ipynb index fe69770..11fe499 100644 --- a/example.ipynb +++ b/example.ipynb @@ -1,123 +1,125 @@ { "cells": [ { - "cell_type": "code", - "execution_count": 1, - "id": "a2cd4050", + "attachments": {}, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "from transcribe import transcribe" + "# Local Transcribe with Whisper\n", + "## Example" ] }, { "cell_type": "code", - "execution_count": 2, - "id": "24e1d24e", + "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Help on function transcribe in module transcribe:\n", + "Help on function transcribe in module src._LocalTranscribe:\n", "\n", - "transcribe(path, file_type, model=None, language=None, verbose=True)\n", - " Implementation of OpenAI's whisper model. Downloads model, transcribes audio files in a folder and returns the text files with transcriptions\n", + "transcribe(path, glob_file, model=None, language=None, verbose=False)\n", + " Transcribes audio files in a specified folder using OpenAI's Whisper model.\n", + " \n", + " Args:\n", + " path (str): Path to the folder containing the audio files.\n", + " glob_file (list): List of audio file paths to transcribe.\n", + " model (str, optional): Name of the Whisper model to use for transcription.\n", + " Defaults to None, which uses the default model.\n", + " language (str, optional): Language code for transcription. 
Defaults to None,\n", + " which enables automatic language detection.\n", + " verbose (bool, optional): If True, enables verbose mode with detailed information\n", + " during the transcription process. Defaults to False.\n", + " \n", + " Returns:\n", + " str: A message indicating the result of the transcription process.\n", + " \n", + " Raises:\n", + " RuntimeError: If an invalid file is encountered, it will be skipped.\n", + " \n", + " Notes:\n", + " - The function downloads the specified model if not available locally.\n", + " - The transcribed text files will be saved in a \"transcriptions\" folder\n", + " within the specified path.\n", "\n" ] } ], "source": [ + "# Import the modules and get the docstring\n", + "from src._LocalTranscribe import transcribe, get_path\n", "help(transcribe)" ] }, { "cell_type": "code", - "execution_count": 3, - "id": "e52477fb", + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ - "path='sample_audio/'#folder path\n", - "file_type='ogg' #check your file for file type, will only transcribe those files\n", - "model='medium' #'small', 'medium', 'large' (tradeoff between speed and accuracy)\n", - "language= None #tries to auto-detect, other options include 'English', 'Spanish', etc...\n", - "verbose = True # prints output while transcribing, False to deactivate" + "# Set the variables\n", + "path='sample_audio/'# Folder path\n", + "model='small' # Model size\n", + "language= None # Preset language, None for automatic detection\n", + "verbose = True # Output transcription in realtime\n", + "\n", + "# Get glob file, additional step for app version.\n", + "\n", + "glob_file = get_path(path)" ] }, { "cell_type": "code", - "execution_count": 4, - "id": "d66866af", + "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Using medium model, you can change this by specifying model=\"medium\" for example\n", - "Only looking for file type ogg, you can change this by specifying 
file_type=\"mp3\"\n", - "Expecting None language, you can change this by specifying language=\"English\". None will try to auto-detect\n", - "Verbosity is True. If TRUE it will print out the text as it is transcribed, you can turn this off by setting verbose=False\n", - "\n", - "There are 2 ogg files in path: sample_audio/\n", - "\n", - "\n", - "Loading model...\n", - "Transcribing file number number 1: Armstrong_Small_Step\n", - "Model and file loaded...\n", - "Starting transcription...\n", "\n", + "Trying to transcribe file named: Armstrong_Small_Step🕐\n", "Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n", "Detected language: English\n", - "[00:00.000 --> 00:24.000] That's one small step for man, one giant leap for mankind.\n", - "\n", - "Finished file number 1.\n", - "\n", - "\n", - "\n", - "Transcribing file number number 2: Axel_Pettersson_röstinspelning\n", - "Model and file loaded...\n", - "Starting transcription...\n", + "[00:00.000 --> 00:07.000] I'm going to step off the limb now.\n", + "[00:07.000 --> 00:18.000] That's one small step for man.\n", + "[00:18.000 --> 00:24.000] One giant leap for mankind.\n", "\n", + "Trying to transcribe file named: Axel_Pettersson_röstinspelning🕐\n", "Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n", "Detected language: Swedish\n", - "[00:00.000 --> 00:16.000] Hej, jag heter Axel Pettersson, jag föddes i Örebro 1976. Jag har varit Wikipedia sen 2008 och jag har översatt röstintroduktionsprojektet till svenska.\n", + "[00:00.000 --> 00:06.140] Hej, jag heter Axel Pettersson. 
Jag följer bror 1976.\n", + "[00:06.400 --> 00:15.100] Jag har varit vikerpedjan sen 2008 och jag har översatt röstintroduktionsprojektet till svenska.\n", "\n", - "Finished file number 2.\n", + "Trying to transcribe file named: readme🕐\n", + "Not a valid file, skipping.\n", "\n", - "\n", - "\n" + "Trying to transcribe file named: transcriptions🕐\n", + "Not a valid file, skipping.\n" ] }, { "data": { "text/plain": [ - "'Finished transcription, files can be found in sample_audio/transcriptions'" + "'Finished transcription, 2 files can be found in sample_audio//transcriptions'" ] }, - "execution_count": 4, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "transcribe(path, file_type, model, language, verbose)" + "# Run the script\n", + "transcribe(path, glob_file, model, language, verbose)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0bc67265", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "venv", "language": "python", "name": "python3" }, @@ -132,8 +134,9 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.4" - } + }, + "orig_nbformat": 4 }, "nbformat": 4, - "nbformat_minor": 5 + "nbformat_minor": 2 } diff --git a/example_no_internet.ipynb b/example_no_internet.ipynb deleted file mode 100644 index 14510b3..0000000 --- a/example_no_internet.ipynb +++ /dev/null @@ -1,231 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "eba9e610", - "metadata": {}, - "source": [ - "A simple way to avoid being connected while transcribing is to first load the model version you want to use. See [here](https://github.com/openai/whisper/blob/main/README.md#available-models-and-languages) for more info." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "85cd2d12", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Whisper(\n", - " (encoder): AudioEncoder(\n", - " (conv1): Conv1d(80, 1024, kernel_size=(3,), stride=(1,), padding=(1,))\n", - " (conv2): Conv1d(1024, 1024, kernel_size=(3,), stride=(2,), padding=(1,))\n", - " (blocks): ModuleList(\n", - " (0-23): 24 x ResidualAttentionBlock(\n", - " (attn): MultiHeadAttention(\n", - " (query): Linear(in_features=1024, out_features=1024, bias=True)\n", - " (key): Linear(in_features=1024, out_features=1024, bias=False)\n", - " (value): Linear(in_features=1024, out_features=1024, bias=True)\n", - " (out): Linear(in_features=1024, out_features=1024, bias=True)\n", - " )\n", - " (attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", - " (mlp): Sequential(\n", - " (0): Linear(in_features=1024, out_features=4096, bias=True)\n", - " (1): GELU(approximate='none')\n", - " (2): Linear(in_features=4096, out_features=1024, bias=True)\n", - " )\n", - " (mlp_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", - " )\n", - " )\n", - " (ln_post): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", - " )\n", - " (decoder): TextDecoder(\n", - " (token_embedding): Embedding(51865, 1024)\n", - " (blocks): ModuleList(\n", - " (0-23): 24 x ResidualAttentionBlock(\n", - " (attn): MultiHeadAttention(\n", - " (query): Linear(in_features=1024, out_features=1024, bias=True)\n", - " (key): Linear(in_features=1024, out_features=1024, bias=False)\n", - " (value): Linear(in_features=1024, out_features=1024, bias=True)\n", - " (out): Linear(in_features=1024, out_features=1024, bias=True)\n", - " )\n", - " (attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", - " (cross_attn): MultiHeadAttention(\n", - " (query): Linear(in_features=1024, out_features=1024, bias=True)\n", - " (key): Linear(in_features=1024, out_features=1024, bias=False)\n", - " (value): 
Linear(in_features=1024, out_features=1024, bias=True)\n", - " (out): Linear(in_features=1024, out_features=1024, bias=True)\n", - " )\n", - " (cross_attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", - " (mlp): Sequential(\n", - " (0): Linear(in_features=1024, out_features=4096, bias=True)\n", - " (1): GELU(approximate='none')\n", - " (2): Linear(in_features=4096, out_features=1024, bias=True)\n", - " )\n", - " (mlp_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", - " )\n", - " )\n", - " (ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n", - " )\n", - ")" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import whisper\n", - "#change to model size, bigger is more accurate but slower\n", - "whisper.load_model(\"medium\") #base, small, medium, large" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "0d2acd54", - "metadata": {}, - "outputs": [], - "source": [ - "#after it loads, you can disconnect from the internet and run the rest" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "a2cd4050", - "metadata": {}, - "outputs": [], - "source": [ - "from transcribe import transcribe" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "24e1d24e", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Help on function transcribe in module transcribe:\n", - "\n", - "transcribe(path, file_type, model=None, language=None, verbose=True)\n", - " Implementation of OpenAI's whisper model. 
Downloads model, transcribes audio files in a folder and returns the text files with transcriptions\n", - "\n" - ] - } - ], - "source": [ - "help(transcribe)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "e52477fb", - "metadata": {}, - "outputs": [], - "source": [ - "path='sample_audio/'#folder path\n", - "file_type='ogg' #check your file for file type, will only transcribe those files\n", - "model='medium' #'small', 'medium', 'large' (tradeoff between speed and accuracy)\n", - "language= None #tries to auto-detect, other options include 'English', 'Spanish', etc...\n", - "verbose = True # prints output while transcribing, False to deactivate" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "d66866af", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Using medium model, you can change this by specifying model=\"medium\" for example\n", - "Only looking for file type ogg, you can change this by specifying file_type=\"mp3\"\n", - "Expecting None language, you can change this by specifying language=\"English\". None will try to auto-detect\n", - "Verbosity is True. If TRUE it will print out the text as it is transcribed, you can turn this off by setting verbose=False\n", - "\n", - "There are 2 ogg files in path: sample_audio/\n", - "\n", - "\n", - "Loading model...\n", - "Transcribing file number number 1: Armstrong_Small_Step\n", - "Model and file loaded...\n", - "Starting transcription...\n", - "\n", - "Detecting language using up to the first 30 seconds. 
Use `--language` to specify the language\n", - "Detected language: English\n", - "[00:00.000 --> 00:24.000] That's one small step for man, one giant leap for mankind.\n", - "\n", - "Finished file number 1.\n", - "\n", - "\n", - "\n", - "Transcribing file number number 2: Axel_Pettersson_röstinspelning\n", - "Model and file loaded...\n", - "Starting transcription...\n", - "\n", - "Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n", - "Detected language: Swedish\n", - "[00:00.000 --> 00:16.000] Hej, jag heter Axel Pettersson, jag föddes i Örebro 1976. Jag har varit Wikipedia sen 2008 och jag har översatt röstintroduktionsprojektet till svenska.\n", - "\n", - "Finished file number 2.\n", - "\n", - "\n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "'Finished transcription, files can be found in sample_audio/transcriptions'" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "transcribe(path, file_type, model, language, verbose)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0bc67265", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.4" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/gui_jpeg.jpg b/gui_jpeg.jpg deleted file mode 100644 index 1fa0afcf7e093b06f71650433586f03ab9ec7b84..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 29936 zcmeEu2{=@H{P##DRI(Q_Wi4x2lVz?7$(3Y_!PK=AlVmBwl#(qeLJ`W6-DDZcHf2qc z?7J~!-)1c1VCKwwbhqyEf8YE6KkxE?pXW}U=ixZ>JKufz{J!TfdKn|Iedl!bbzw|Q 
zOt8z)KNw>Gb{e*giD~QqFCXR|%v+zG+qX0CVA;vSvUTiYWoO;Bi)|MR3)>zxHul}n zhlO=7$DZALw|?GQWb5l&--7<{-o>(O>x%!}hw%=^xr^!Qwgb#eBCu_oOw622j5-(` z24mU*QM*O(FCV6D%-eVDgh*mzhc3YHgUDxQ-Ubo7WBYdK>Hz5Pu{!C_{8MY^bBQbd1aNl2CQ#v@x=sV z{+X@cIQtV{oDg5zARTOH+2V_7n=f=Qb8g>p`1npPZDW=zp4=h|cXsiddi=8RJ*%kV zMKbSC*W1|m#FR+llr7f2arVy`3;Hi{_8Vir^VJXA&CCQTkC_vOfYDzfy~Y9p7y4bq zY*f!5iPjU0RpolkabGm)F~5OGo0&;)QDD4^*vU{`b) zuyzC24od5T6$UIsn6+EubZ#rvT~fO+(YPwn1ueD9&PD9%-t)Zr?^X()irXc-55A*l zM%A|NK)Lwzf706B?%~~>QPh!IW#A0I@ORif;Qo+-5`l~+2i z&H62!X}>qLF>TX3$X$ExSYlUt|9oK7xYP29*Kcqh8As-l;*-kh8<+?3fVr>$1NPGY zGz0c-7Xe5>kS_VTfx+_}3>Zv=0ULE|3EUglRl1S+E=tgEvC;PyB4j0n#V@z6U#Gs) zb$Yk?Ek~TM9s6sUx%QH4dXET!x`%n}WhG)5upK)9iyT1Fx;ybw&k7OH^O(<*zma57 zbF2Cp?%6`fhLlc*wN)cl?Rm&0%+rHEww=p~ z28^w?7PTVpj0KqZ88B{Je;hbi%|wn3x$m*+Tz%L@MN8&*e#X#RZR)l;=>1r_g_m_tJ&8j|DFL0ua*W#WQZ+}UJnh;RJRRyxN^$gX@0NiVq41^StsWg zaidJ0+@Jh*bV)?`{`ACIAZZ$~Awk?IECe%Pw>hyCr4({5Mv(zKH36Ln0w9sC`;KtT zzaLFuner8AY=j>Gq-OS5GzhHSp{b_>ilom)V<`|Oj8@lgdD?MK;5!qKI3)AZMT64wF&$sT(`J` z9aZ;ftb^44W8%&N$&rv;%4!iSa?U6~K7s)YO9Oe)_pwvfUkGH>H#uFggA~CD7-j=@ zwq2LISNnb<&$lGyQ6or(s7+l8?m%42Tprj=O`OVH`g+{Z?O@@D!gC+%?XD{)Qaj0< zl&BDlCO6P9YfF)>N*cW7jSIr(MkI*{mO0O-ca9mH>c4olNlt+F1H$w8G*7T0v&mZq zEEq{j+2B`_r}5YSqPsuPh}%t(r19|AfQsZ3H%X886c2bH_a#OI5LUT$pR%5;Y?~i$Ayl3 zkNk-XuO`p?hnjf{qSe1z$_#r;oVD;&XsrCAtQqqs$>`Cfh`SlEFK=IBN2=vPiK0MO z8kXEMzatfe^1E*6jwqI^ZuED`n^N82d+8)s|A^IH{^flVvuK*M(q)C$QfCX=lY=Ub zVM*qI6ByjW4vCop83(o|kK9OSPv8LAN4LD^R=7-rE(cAjbvO3Q+ZT{aZ`~^jFiYMK zIh~g_1NI&R`8eA6eC^x2kTQ#F;gB*BzbVt{J_1rE_Ge`xJI(rbe1iY^%W!h8#_=tC zgckT_kHe`EhyKZG#`gSM^!wRC^t1fO^aoA+1NwCySwrp>Y)OiOG!Oq>^M7C{f96OL z`%}c9=|%jJ2IVRpK5Z`+koOPFL;CuI49l`oKPdkL@=`bQ#*ee{nmi#YTT?kRKo zpUHyxpD?-mpDFxrEHR(l3Rxom@0R$Dtp9~`ibClu3;cvw@npc52;>nL1TC-%O^T{F z+KstN){GCM9T?SQsUH^Xq_q~#^vk{ErAHFuMbCiNZHl7vkA?QK)`~KJP;!QRwdM~5 zIR7~?bg%MQGHn_CAELvbBN(!4git0KfkE2kWBAD__b^v-{J2nIS{eb&#@#n6qy=+l z-OjEEIb9UttqSsERw~CxS&pI~Ki|!@Uw~=$FR>a0n1Kf%*YiE~OTJr=A1k|~bu_T~ 
zB+Gpb8~%Atih6@F{2*X*sew^E8wpFQtb3l18w9OmW@weirlYl#u$ABK!u% z*zM_R)V+S$koYkn{-#KIC1>8g&xI}$%%AMc>$*Rk2gb}pxJ32T>{;^uv`#xD_aZX! z_uSbN6WD~8t5iAL_ak zi{@SP&*%H$x~(XAMQdlFyTv(Fk&a79IbOGpt3Ksv@KoN5ue=SOcN-0{^YXXuj)pbX zA~>IA`0#nn0?e@Fxnu7b!hk1@_6h#3Edo#f#+1#8>Q|xjZV)3OJ zU)~k5^{7Ws9F4}l_Q}|k1dJM!#VK`-=0NNiYCmw_tzxa?Md6u_&u_7^gua)dMYVfA zU`yS1pWQrn_{9BVgv+K_*C_CoBz@(`uZvopON*&Yf|fjxg*=-!UPlkA^!FNB6!Mds z9rX6Y!o!7oM+`;C-4d=yk#Hdcv#E0O1hcpVjq z@hTVuChJgK7&S!wtlN;p*qQDdtt|R3FI`?wr_xTNC6dHx9iNN#SX|(GT^0G%fUlM_ zR53u6n#F)krecYwqVQ0CsmTp)@T!IdQYdBfvJBYvHx#{jmoZIgTMF(Wc+fQ=vN|c# zs#S>r+cP}B;vbj7RLa_yR0@BkpVMsy z?58N;6vlugbQljjs!nY-+Hx8Yh5hvE!F`55b-X(zzt38`U_?(PKdWlCDO=w@^l=@m?9}Zn zQ$Zhk&tJvCgX7$6ydBsEFO*$uKnVkjbt(dcAh2!Bh{MlI8;RiYGk5oBHae+8#rc&v zzb0B4OvZQd)QcDhBCB#fc&`Rq0bU7;DcYyMXx`N0ZR^JzyjEOL#xInO(69! zV68Q{ZBQY=MO^2~#}8m30XZ;Wjg!=*{H|)Bn?%W2YEo}Ax-TkT?cn(e=o@9-;4zv) zIhk0`5!w%r=r~8dOtuKNFCtrAs?S8yUp34aM4mcOP+)Q)&axz5_0tcsBMw12oJXF{ zZ!*stL-#wwfK>;>f3@Yx)598AsyzJ(iT*@`BiAJ&jV9P$plq;+R|CAsfS*ExDQ6BY z2~Dj?lX($BpM0uiZq~07J=|nW|3%Mfzpu)FTAwPGn)Wvgt35WifAUukGEc7vekRTf z|6MX#U2xip(dLxNgiXz-=`o*+VlP09rP5~70yW^{^Evd&vyL1gpEH1{YL`UcGTC=`|XCm#T6c} zL;n*dEMfw?Qi><4C@mP~Y`~;mz~~;$ggjQqfRSTde60$?B#N&nMZO|Vx9ez@_^K2s zJoRP`l|H#|D&!GcirL@U%p=Q`6Mh}k%K(v*<$Mwqr6q%rsU(-ikJ|9p#rssl0T*lr zaab!pd%*R=nRvz7c;kkA&2;UiJ58tFU4B$Mee&QI`Y z5wcSI`Y4o=v$rtY^e%lqTYfq>MXq?}6czpd>EbGDW_LSqaG>*()gWh+} zaIkHG&*>>mlK$)^AV|U1og@U~!<2|Sfo%J#YI1zvUCRO2R*_<2zZQw=d1P5*>eQ2H zT}!<<&?T`m=Wq0wpjq>&9>`$8#G30+RCcIcYBRJH40gpBzkNVcr0{=ES=#*;l;5!L zih5(A>6rK>U1vldV8AFG^n39Pn7;dLYJt}~NmWpU0eco2pv}6!DD>k zg0T({>@UX!qD??F?7gpI`I+0#w|#zZA^E!LzzGc>Wq*(p&WUY(2xY%JEeu%0boy}3 zTiftL25eVUC}FbkJXF$5FkoXhur;Ei^6Qii0$Gg!3~5>P%7w3(MFD3pIt-ZBNnemf zl>rk>`_pPu;{icaIGsyS%F(MsEstelN4&6TnFecm<5gZ?fXR=z$)6>z>-$L?Tok8)8@D zhRdx!V>vy0kEB*MQ`>;noWRf6BKf*jxbvgwHG#&v7)5MepHARIYLVfNiVs0%y1GYW z53qgoe9qZj98>f*|D;EHr)Y(brb_raii)BaiUPqoGaGTan}$%s##yOJY7wwOSr(IR z%G6_YXC?t3{4Rd+jFu_#?TLea<`IOie9Ib|4&z2pJ!7Ux_CB4bN%J 
zqL(aa$pa}rwxN_TP}v$3>31F8FAvpLwFDL$A*6AShNo(k|5ux+) zpIn@jKPe8>9t=BJ^mJLITzZ>DK2(>EyoADzvKyRlC^I}WsQJB>SwlsZjm=a-aDLV2 zBxVIee+QY@Qm%X-n(rrkYS}*oZ`rIHV!yPUU$!omM3-6XZ8vtcjtbB+Y^s->ywO#r znG;SyL@{9aDB4!6#70%i10uu7Fbz#ol7UrwTF(k{NYg{CG+g(zY{~47#wRVp+UveA zcRw(TjS}^u_>ZZ+!-b>R=V-p8=wMrwwm@+=z1q=MZF16Dh_|fPX$Ma~xyP?M7vn~i zkQQ8e;|-^(8L(qlkF>^SUvDrch{R5<;4Jc|Igh1QGGNy9S2Xb#7_TDW+Ty5mEAQ|?y-%CV@PW=%c1^9Lm^N-( z+xH&SQh!>Q5B=4pwW}vudFy9TwWLDi7syjT{vYu5scz9OZZUeAzw1 zNGfVqWdOwzW| zQWqh8TN2(%Fkm~YSBKc$h?z7vs6%fp)+`NQM^ho$2PFa~b=cy@8d>z{p0QrP=27^4 zrNw=&eB~@5-kPUsbq!KEJ>=4Z)OyaBxSR=(v28x+^f7)?%QQGJ0=PStD}O(5cc=2a z?jFa9#vda-C&)^kt?0tpT;C~zFL`J=;So6JS?qn{owE1k>9UqKd|(0^_h{WJUeb2B zB4DD0cbv#hxY`qemtZsyX^6|CX@*19`rv#F4m)u-G6I!}&&{bl8 zNU!^=1ca>B^gOM>l2h<{sf6uJ1z5&_eRWvV`c!V3aZz6n+)3@)NHK4--N7>7LxM9+ z+lp6FLLy{6zP7I>sh^2B`=#<|_MmIrTG=fJ{LCf2?Fi?@i%Ds*1xzKO+wVPUerg|G zqJiAJ;*zWY945_UNe^GXaudKk^zN-xe9@R=HHchSt@g_AAMDp3w@#0H^!}zy>4NB! zfVea6A!5k87srdAhXoI6R+-m(eu?IVud=SS?$WTZF z!{h4^o0L>92JFeqta8LsLj8wU{pW$NDl?(%vX}#YM*W;!9=rUUU9#L%j8SP-c`@1^ zOQ|2Trs`RZALV5V?V2@~+@D))WgM%YrRV#yi{lReS8UToAglv1qhthHKETzc_6^P+bT2U(ESEC z+&n7>wc$n44>-0T+MbSy7Z}Gjp=7JB6G65RGWG)CpXQg`VFd-ibRcY8;Duy*8OUO^ow(M_F zon;=;0kL1RF{h^)FpXachvWp|eZd(Nj#t=D=M-8&0slqF1@F)#IyA%s3J8&T4s9#c zc0RXe|C(_Wu;7l{redI=9MIII;+WtE9N z_pbgq`ldMgW{yC`Go4pH6uUNd1_IBHcK%uL7JDe{R61ZuE=TNb&H=LUDPeP0y4=w> z=nBghi^^U<>k_G|cvBjFHZNc3x%-=P=TvnR$C4TfX>;}i)1F7V+AK=f2fT+c-LqYq zb(sOl8?@xc=d>&2>SD49DX?O6)<_t@Str)|ptF+HP0Kp8nlXNf7@F6I_y0?PiuGtf~ zd@JA?)ZGy4;M6@ocQ>n5vJWH-$s-esP{+d!-!MSiD0{*8JoKo91aRdz1D0O8+R^O= zL_B$K^F5$HlVbIND7V8WkVOy*KD8m3yPZ)=-_e63`YMZW_yr&tK|TeSm5%_62&h>G#1S z&eg}rlJ~3IMpOZ(5suLEvPjw0)7ir+RapbB`)29}Gz-g)HHsX3W&r}4qPsI zI4PDDId=9`(}tvu)b(ovApE>dpdn6uXeiNN|7do8SN^z*S|EDGTrW1T2fht29|bRj z)(R02D|dL6JBnkool?1IQ>^E-yBMi-#5z@R*LzX*3(s$`WGIsO=T4L3shFw&Bx$XN zcwcKfMI&y{Pu>A6HKpdN>X(j6+22?4IrTBm^tn))@+{BAnClbE!~I%SfsV({9lOW( zWdPc2{c5~UP^EEIj732_|AZCvORSBa-N4nNIRS+=EC{WhnC 
z3gVgdnTA{(_Fd|V=Qv`jLq&xX3RUY6k%9ccfLAuQhBMtSWVEctQ2ibC3)0nB)u%i# zjfGnF+{yke$nbWkX2~*NmnM$Ih&d3lKIEu^TifVz*7(;*b}4TDJksn?kb$ z9w>P(veUFXXvi0U-uW-2v*?FWFNTnQiI$OkWA5o238*}--jJtfWea2+**<=H@bG;{ zO&|oMzUL7nWrkF0qrJwWc9o`$GLumFnUj*Y8)7|k^FE`Y^OJ!R>+Kj~`j69-BmjLXOHoli>iY2o&>x7?2PB5TiV1 zw7z3KrH|0dfc3$rh_fNjCb16KymghE445|+GiLzgCYWmJC({5 zY&|H~UMVjhJW<>wx8TsFd}7|opZaz{JVMW#dQS(xd(IB6nYPX`uwvSGeMvJPO+PF- z^F-9CEjQm=%KKpUrX+q5YM?&K-cpvtz5(RmfQVe;i(#5#r~4wf52!UG%LkPZcG#BH z83lX#rLsR=S+|d4uhKMJ;d1HO?er9}=%_1f)cmw)IFpi4+mKS$S~KrOYIsSm80CLI z>lWi4KrbBIMbpfmvML%F)|Q?w^Re>4i`Tc{KeX1(oh~n?XU$Nh4GU> zCHdEG@t#B0R#wxmQMFW*)Y(J(2uUJ24X5=a<{lu4fEYTWM80njqef0?a1#KD>)#R%l zU*d&(<1Bs^ZrBu6WjY!}I%6_NjR1*>T=;`kd49ldbVuCdu@jX}nLz8Qx1Aa%do}_d zl!8K0KO_uK^m)AWe>z^q{pNTJvuG1sMpWc{85K)^h(`xwxo*t6)oC5VQ>3R0U2o;Z zAA3i2^DmewGn8HZ*zeH3>!)DRpdn0OI%mrF3a4=J0hX0s z>s1V}f!AM6;YJnLX@aXIm+eNG|lhH`c`kXvt2p7d!6 zfZK2xN~^Q`53q`Ck3Qevf6ozh>chQqh^Ua!DE4Kfe64?z7Ea-%?>#RlgBP5qJmHyK+UFYbteu{ym0l(?Yw@6XzjfN zZ^eA~NW+hEJMMp7LpGWVp&+OMWR`bsi#zqmYjwiM02?wD#pJr-fy*75PO$lsps6{$ zFgzoz|MjEMWi8z@l(K|ai31EBsC9^<244~bAb4=w4I*gp0tl)5%7A@%{41T=o#oM` zn~Md}5u0NYDY5xEZ}(;HPcIjD2y7aXkLj_^Y>tPM&)JwMrGzBIg(W%`Fq)|tO*AWz zL~F{o?~)%Wsw#c`CWv=;VMAmNO0Dirhr=720l#8sibikrB-lF4Qpjh^M1j`{8YJ17 zQTQ@h9;~+RBx+~Rcq!K{Uz$C6)&08r-O@&U^bY-QbsyOScZ{o_2_K@IKzv7ahVWsE z7WV5|iafY|nm~&`GowaK%xA#(CFc8SLnn{)59V}Ni>oa3tcwHUPy=hQo&=f!5Y(`} zjnY~dU}R8L-IG^+*fnA8wh|O}tJh2@gisApca1oLGnXreLt1%3Rt&x9I@On}X6HLT zZ?3oSYcAIs(_MV1kKHf1r=B&|K4E4C^Ok%qq`%R#52HPL9r0EFS8C4v>31`!rD&H` z_xYi+>8^p!mWvPL{l%)k#*go(_Y76^)6*rF88D|s!2)gBm&1Fju-Ch$yuaGYbW|Eq z_-h3o-i45k>ik!7#qQz}L&Mg6H~Df!VqJEHlZHxz&R!rKhjG1#6Rt28vT2kSi#`TI zy=0Rrg@u0C1G{2k?nAOda1*{J`h|e+;}IK2bEU5p=qA@!n^N(hr4$Xrqpu+FF@#Yxz&*h-bX3lpV1n~#eVTMr9rUH*o_inAHGuo zW#$ani-qiUsGM$?+Vp_5t37106vBW-wzui@TC!4ykl@|&%yDS=OfYS}yo}D`ZS)Zi zsv79!Y`i6Lnm$HKpBzDe(*l51KacA&&Oo+jWkNL=- zX*d|Y*WR(s3iU@)pt+h-Jmhb(^3Y%>lpV&NTtBP!FKyrH?q9k6Yj1z_(_(K;h2`3W zI+b`Yq;=v`TLIRThfZ?Tw2Fe12TGhrM 
zDIE7P;@PgZ+(&}cTJUu$?HcIu4|$RIFI@Ym-FT?`x{rlQhGAzyrqDpy@U3xPicxT& zrrrqg9&w+BOBTk7p1V5BvCB_)?NZg5uVAP4G4RCb(GsLKZ(q^p(Z}& za6l;j9@af~kp*ZSZFyCO#`(1`^^D5m?$LNjLn6Z;HO~S1q{yS!H$|$0?9Yh!n4YqitKz(q1a?s764-@c7VFkf z4H$*9wq(l=KU37KDw+WW3HSA77Y_T4Th4=RXbq1{X(gQ)(NCw?F|Fi^#$LZZ796UXa6&$axKQe!{ohVUdRGNk30A@D>0qGI3@}7aKl?FH+5}oBdWzSp! zZ#3#^S*Lt(Y=Q{u_Gkm?FdJi@=to)#1H6Uq*pakyX{4)wJF%7+cG9WNK=Y@b(;@FH zE==D=b~?Sq`+u-yHD4{Eh7O$eI{au@DznegeBjV7vy|1@=JhGVp4EZabKvuQ^?AFg z9O>3Tw!k(OgfQ*!%h_AP!J|CIov$j3JIyJwq{Lc_xM(HCzF}R*p@}?76aT{Yrg8<} zIjQ+6Q>Bh#_Ce_a;0lxk7a{lzcY*<1;k1tQChkUjO`0OCPQt+;>2t^!&Bz()&j=TI zzljn3xsnZ2`WKAe=)8Y@f06SomXH;m(v$ZKeXb;s9t%u3%Z{0rl_i_sd=U{)U z*hLEl;6@{1z+U+>KUqnm6;IU^Vq`MTD~r2T}X+zv6KSgl{&^IBq_n?*n0VO z3EQm=NSiHybi(b`pN2oI%aS!`T&}7?DA=ygSkl&&IIw@9=Zdv&4fA6tim)+Y^Z9rm z<>#Qt*lUX&n-?)$4A}ixdLqV`ytZky$q#b7?_55!iS4WIpvsq=YP(r&`;}Pc_Y<*9p|(jvN!hz7z&WZ9+d$X@ZH!`}K@ZX~@@n^8dKwj75)jjRCv_Q< zKTGhKn;(ALpm4G$6ypLR-@aNXzOjny>p=wguoikG?%^%a1GF+muOtl1d zxNR!XINLFBR}GCF5v>|&+T?j$$V!_`hsNhps`;4x@uSys4y4Dp-W4l)>mn#!gJppx z9&h)+4^$fE+Ss^lprTe|Mv*OA13C#yZq6p>iVFShgWxL2QimcLUeUo}k9YSTl;s(p z8HkDTZ&IQQp6^~YSa^M^tq5t$fTgS#FE{JbV=EnAK`@&vMhDGX7H;%F!=}KQ*tf4a zLw%i6Cl9Tspgg$@oxS~)$xMKF4-0AuJ@@{ zMlwrEIWqb4_LuTS^sa9!A3Qt7kI9#qMs;SThUtH_ZXeVa%L^)ByboVGzk=FWj)Dq5 z-VZ%CDRzmfQN*sn|GGFREU5 zD)|@g?w*?}JJyO8sj8}@C>ZVzHNWU{pRIYhab9}!@C<4vhLZ$^@LLv*M1H^JvnM-@ z3IHQpa#s>qv=Pzh=~rGn)5;@k)>Y@JGJ9lu9C&u$u6eA#Gb>br*m)sfer63mG>bVJ@&6&4(CT? 
zSf`dHSsFGGN(Qhh#KA^07pPjjQP~7 z9Y%BaAm@qHW5Adp+D50q4@T=iCzh;*1?BSFe&9tEaf5-%a2f{`1S8Tm4{0;$brR<2U}WTJDVt|K9#z;r}-=y%_^{Ep->)`^kC@UpjtV ziWPb%b6)YzbMJR6*;DB|IqFYG-4_=9HSdqP6#~$1kr<&ah00#OHS}rijW)15nn>6g zwLm)!?Sn#2k{;L9*TYY?2e&p#dEp!S?lu{Ji&8K<#9j_d>J>R8^Qn>PhZ+4GEIDSH zu(Gd^0VB+UO7!n$ag4!MXoo6!AGC45t>D;%_{YXrXn5ZU%6VT-HG)t93A8a_tXfb+ z(q@Nmf`l>KYyfrqq$A&I*Ef$KJp!dQT7KX|7+|S z&)t2#MEK)95G2VC?9^d91`OLnbL;w!r{5XXeB_mE@kasb(Zn{;^zAa(#SIA9yp{PI zPv7xbwAbq+RC|55zVn|%i!=S6uiu!ZYJa|o`oj(2->Y7ECgnGc8Golo;SXy6D(@GA z{8xFuIR3w$_sz5aKk+!RNI_|lKMY$gaygE+c48IE!Z&E@Zr3))`oRWI6 z$ur4zoaSlzk7+}=um`nch4>Txtx5hB}KdN1!Scu_0X=2`V;4fdx4*7 z+)Yd>mc&x#y2&qZ!tGr@_nJwvr9Z#Mn!iCt15!q`H1rsO9-7byK5o9S=)U)Xvhu|e zmuAQI&Au;w1JrS7$Z3{pUJCT(0ok%z2Zk;CiKlZTwPtkv3M{$cEB2v-9x^j*H~l*% z2Kn9Wx?UuCT-M(=E_UIgXQ!}Eph;jaWzZGDuBAfbivGN+1ix#`M?utG67%cTRuV`~ z82j-7y!=B9(d}IB^Oqezoq!+3=>ScX=(V%-3=Jc+d>w*!8ns_(Q3$YrX4gh4u=|FP z18%klpDDGLeoQ#^;Sk}@y>dc|uNW7e@aU^Pysrm+=IGInQV5snsX;p@`nbe;ynVlw z+sAbJ16QqfLXNS^x=w@stIhPefcY_OcmhXlB&yc@WXoFbLgkel>`&9IU-c#X7r<7{ z*4QT(x!25>RVJkf5{l7G5MB`04}Xuo-@g`7Y3NT+BrWdlHO!u|2>dZ7AYqtCUUV+f za)2$JWE%633D+{qA!zA`qoxDKDrI9hE?4+LSy0LC^=$I)a*Fs+eR70@(EaiRPa9d{ zhmw2!8M&vgJ#O&ebMg|acp7!Qy;>gg0}#1+l;#FypSukLLJQemWT!e+_`XVwlzpqv zlQRN7rJO#3kthB4&tueNJk}ly9?4`Ec_4N@!cJuJlyJLrg~d{tR9w==s~!s>nSqmC z;`3vMbAu*}dE*>$VM-~8{z2%GgyE25fcR9crE0t8DSxC(zDQ9PYn#a(AFV&Wz%j z+F%Lp(wS|^ZuWI0^xnF2%}%$ZT3X?zXNkwBCU(EfxyDUN+F6QYEO}>Z19+VNN+U&x zrre1R6v6jAz&EqHdq>#4!a6my>xrQ=e@ykq_pzVq(b87kMvCoikGbYm+C$^X$P5tm z7;Mp9yI3^7_7)Sf$$&L4x*;ud!RVgnfIPJrz*0CG`LqPvpgN}1%__E`%*`E56?{r- zQgVAUw<^?AI9!WrPE74|Mxu!RbK_< z!#xNjiDVmHrLLFN1_oJ5iIQbyVaZD3Ia>OWV#8Tm z#RmWIX$0vifeu@RZ*HOcs^uN05hF6`MWpA6cFHJTcs=kHOpu}}~tNDiZ{)!*~n7I?XI0vWM9Hi_u z_ffJCg2n|MAb5c#3#yiT5diea)%UdNX`GHtisWs?*7RXFm}U@bR1Q!s3P8#;U_WT; zf#7@SEhKHN`~D^Cn@VU=sevjZ}Xc*^8@=A=e+vF@s*Ea zpS4BA*&m8bU+MRH?k(hh=4-;2tXI!I)1ZVFjT}X7?y$j5#o;uGw!xIkV>RH* z1D0x4xe$(IbFw5OK+SKXJZL(;v|#iN`c9+D@YYVjx7g+Nx3TMg+djuX+&^JzrCCDU 
zSp33GJWh>h8%c2j^-;faV_P{~mQncRbH&J&mQ1M1l!fwNT-T5VJ6Rso{5>?!6sOc# z`>-9!DQ_rqA`Ph+w47S#Z)iVwGiLByq3*LoyNmbj)MuX2(bmb4eg*}=u93wJK@CL) z?Cx0vkkbnd9EU3r`7H4bv00a9%%>();@Z8R@!sB^Ep_(ut`loKPh$EWws_d4h93?6 zGb;LjZE0O!;R!gqes$AGQNDx8n(!BgR_|jF6brE4Cy&@lfW{TvW;V(8w*MGJ^X%*U#nHf zx!B75TVod=G`l{y(I3+y!kLIl5y|Bl0}WuK+A* z4SGop399z?BDL*C2%(StLiEa`kOU;Vr^HL4)TbUIiJz7)A5-Dc{jkKUWsJRw3nMUr zCoosLX@(=4ns)C%o=%Mu!=ftSvE&iJ+IqUw;N-MVtD{!W!Ru$2&sq1yoqlWfRaRir z0XHYD|RXk>D2}DmsFP+3L*nEuOLky2|B)wWV*v1 zD*2!}x`_XWtSXMa;5cLo^+ z8|cRi?){7!A<%el zbu`*?0AZtOE>1 zuWYjHSJ1n$?-3e`By2=?wU`k%Fg$st9Y24%-b(MDV>w-r-&!PBF32?HcS+0MM;;eL&Z^k`P| zvjQ4Njw@8&0a59>pTTW3rV2nJ;qDA#A9#p%y0ZEq`;9jg<)Li83#az$@m#PR7(iw> z7rOkY>!OgVe^Bg702e-IWPHJ$jx`GK`BHPRc@(Um;rzYvxGZBG-p5z|vk$WU7p`Z~ zq_~0l7GZ=r?ycNqTiISEKiWf!#&KhNCAKQ@s+(~e!*LBJ>WDimD&GY%Yxp}XK!ILx z5KN$U_g8%UWfmqp2->-NvF-oSvDg1kwEJwV!Xr>i=PC`M*q8{l8mr^OHd_#WQrSC` z#4VHr^$jJdEMOmzq}~07>0`IjHT9Q#mipqw|EWglAFRuq43v73GwX;{)-A+G-qvQq zB8!HcBpLm#u%d72nK%ML|6$lr>>Sh(Krqm7&kiy-sPQd9|957{(l(Rg1?n4xF<`%1 z;I9lx()|3OT~Y>Y)6Lyx{u+u2F;@FqYF%_wqvxr$JXE$a&lh7+))i_!|0_2hJH_MHSLJCu7WHu{%#}uCcSfLqT+ROlgY+f| z%39Q;xMFk**`2UncAf!Q)~$|#Ev(VQ%Au#JZylnqW$csIH+>%P+!2Is%({dI37p)L!uB3Pxe5wxasaw>&?&#wka^EBBO#0zWoO6;A>^cQDDA*Ve&9f@mPofF z<(#GKHJhg0+@GnXvL0(+Bu&j z)J3|zza_Mq87X&fx#iV>n}Mg7w_{vez1Z%n#}69G*GKvB?UR zH@Yi?0mXV@DKZ%yGG74Lwl|wZ8}0BLhV#29SPiY`3g8E?y&oSEdvvbohc=SllaC9y zrA`Y)S7`@qY(HH%alqnLYK*4^+F)QZa~)SDyo7~F|HWNaA-j0QWYvydEhw-QT2?4GrizFUuW%F(;2m6>7axn0F&r#ODeBQ40n?P=eG zqO9OHeUHrdh%%#4Bb!h9d=9^p3@>6dD^O;~9ff*w?RwdWJRMS|WU@Gw3JU&qe zdL7`M2MWRBm^!iC%a3Cb2YsotE&Mh?b045#x~qUW zWbP48ej^9y_riiXdMQo`C(#$u%xd`toBbj$Z-Tysdc)#|beD+(N z|DkFUH>=Zg#npc6nhS`fA3-5HmL#y1RBpigihnuvYv`V}wpo4v3N7pXoThAi*o#HM z1I46ElAtUk?JLk^;?P!RO9Ky-k&NM6tNr@bT|rN zfL%u>;_X?7q1Z#W-B92G9O+Uk4GAe7+lk*&ZLa(n*Ue<%Ha)tbEgTV7Z10w*>W{7K zu(hAH+y4)p;uF>*B|P~sfF+}hO39_U^g9K9wRPo);KSYK`zPIb(Tk~N(bHT%*eRrY zfVQjl%e(z!^hX5ML zYE*G=*n?0igSz)<$ljpjy=8)s={VKAHrf(FU@gd50i8k}p$CU%oAS=dA{Asa 
z=+WOwPcqU1NKwv@3U}&QxZ2$isyc(LEq#griDg2|E z4Y;UVP7(PlC!o+w+fj^HhYq)2!FM&Y!0ZZ$aR5UG5Tx#=>6H&Uf3@8 ztmb)Cu$vs((%CO8Lv6;cZ$;RPzskXC^k*;B`sP}Yt=c^Mo5T;DBCv&iFF<9hiDgmT ztae4C+E@>s-{!8#!GCBa)>|~b_}RB0Z=4FzjAdeYRLY0ck7CuD;*q7bSu_CXZ~3GxJ6WZ zrqotD{~UrN3f}wJV{26~(%Yd;fMURR4nY$97B!%gG%3;Fi>5BO#4XZxE*_kWvhB3- z+4G~va;^KpOo!lrBjtOlukMN_<&d>Z((e$oKJb2`3}n` z8}8Sz_=CgPxq-2I5WIU!i}3c=qLfy5aKIBUB0u8m6CeeAuZr3bpKX@z1m&Rr*$hVhvqW`Kl~p zXmHwWl+-LjADao4VwhDX!i*i?B}5o($e^T{8DXYz=6IiT25EM$Yj@v&-aq`~I?w%e z?)&%L=YH2Ujb*mGPm9QARzX#wXsykmt@8&@C| z&j~&dZ0yqS49b8d^Sd=k70p&h1T<5TJyJim#hL8;xRTN$dV z>neuz-hi5O8?MxLG5+H<*nH9vzQ5AIEN~I}j-2LaDn1p3`F@1rUYten ziqwn5+U|XgCyRU;KZv6o^BZ+`+{sUNkEN58Zf#vS-}+0R%gfHqSk+2;1&S2#f+)@s z+F4&qvvjmRM~=$XbSYgQZ03;jV$E3SoW?RP5$z3+k$43c)Ts zk4q-2Z4gxgzVGz5$8;~^cTGhb?{qlpUc29a`qU%Fte)hf4{Pa;wH@ZmE+4s2b~rkO zR&=|O;7(Ba!%N~K_Na9GI&?mxk1LVBT!K)ol0_Ao-X9qZdh5w7-8@fa#6^23v~g(J zAKz}MmgI}Qx@U|ltS3Yf1T@e(9S^Wg-rk05h`YQPytuK)c9$OYih`bB9V&M=%PS?@!#ZRO!*JDtU)2uOMV6yQ@Kbg}w33aj=v@0^^mj>_w{1Hh21 z5#7g|F_o?uX#LLkNOMO;)?uZBM!G znN?Gyzi{g_zuQM&o>6|VcGdeG0ag1l7>=C$+wbve-0%x&;1H|s77fm9Z4zcwQj1pl zs)_wV9a5Po$u5T$9X#ote_o#w(v_eay=k%9@*`W08?E#^J4Xj?cmT%!{9U$7R5b#1 z#pQy|Qb;RYqd!?yxF@B!-sF_Sb(5Q~5>0ImElV^f9gWi`#m;Zoa`eo?UnOC_-f-sOgY5hSmOOsdIW%SNz)S$ z_Mb=5l({DJoNtocMU*N9KK-d)`Cklgh)^}l_BRuc!0JktK5Hq zb9;y0Yw-jAHNvH0%QBH>>R6|p*Itw>Q>I@eq32WHrYPqhcP1hV!(p4z2e|HWg?CP$ zU}Gv_0n-B5M&Ktav7{yx@u z&fh~$tf}`f>9!j%IDI&QGWV9ykB5tLUds^tGpYW>9|Phl$dfWsApWX*d+AY{brIg} zCG+&25SEB@lTwyw_f}GOKKK6cjWVMSYmc-izQ#hM(h42QwJ##o?SfR#1|=Lf8jkj3 z4L*r?d|=MMNE_OT&U%kRPjvyTLWc>j2fv?{ZJj2LaBT4KTUhXOHRVM98C_}Hwtn{Q5O2S} zj!LZrq8cv?G!s5LpfYo|IoVpdJ5O=}8Fkip* zpd>u{<58xoEg6nNkFp{%ixKnP@s3SjKvk5oPO(KHIvau&k2fFLqc%hxt<3~CL%=U? 
zL9hDxU{-~_i%S#Or+)z_M8$%@Ky?1Z=x*~m`U*z;5pJM6ZOAAX0Ggtc8|zu8CQI+^_oz`Yd;$Qx#3{ ziw}O-NDTG{MKT2YU+vItwuY+8c~n@_6%5JP7_SoWuHT^hqoob~XF!5lj+aqL_y*Pi zV>Xn-!3PDWy%U@oq_rBou_{kxTL8&!I0MPhXE$I9n_U?BjnG2^Rb!RxVsWq5lPL4i z)oQ|Gc)3OA1c@e)0WZCq7d`SjB=?|$nlMF3%!gjf)&muK^}xb6Qsrd*&v-veWv>Uk z)4-ULl%#Su@div&D(~ixxR2_gpa>ID$N+Ob9vrR>yRkL&FvyyTKssx~=wbmGvo%=E zaQ$pCYNpm3iOkT4R}#d#P52V^B8)D0%NX6vttg+lkt{&h45dq! z%@Fyy6jXLnZlFS820`hz<${wU!BwBm;q}454KVY^BMYPyB+^$6dNl~DEtmJdzZyXE z+3yDGS|x4_(AikSW5Am2*z^)ZV6nbqLMKFJaMGqsFrWZlE&+otl{;3Vwn2>wt`SM0 ze?-$un8?P#G1Hc!ue2qVy~1|!JCHh6O>rl~x=TcpOr~CbQz{#9mDd$2In0121P6B> zF_#)}HsCSj47gWoHP;VpHQ4*W?1uV&ZXe@fQu+0_grRll6)((6NyvgMG7{;AS_uss z%u1g7!52u0Q#LSC^`wyw2ptP-;TTk^30t5F)&8ZQKqdCs(DNre8nExAnD3t&bZ_Qm z{Rln#^nXH^0hiE+`QjkziqVP91j#=>nlLSvbM7%NgQj9Iu;vp9LvZEj zvpB#)sVtRkb+N4A#T#TLVIcqr)F14NyS{6NWJ@O_X+)|vqzCIPsgx>PO$B4R4R0O^ ziNvddlO7COFdO|)@1_HlY^FocOL>qiz~jdtC(!gNk2rmdcqi8;DqIO;CakZ`8 zI-EFJoWzjMo>XSgO(Rfj3`VUH-}W|k`0U>p^YvLXw!qU0*5;cK!vh2e7J|E5@L<8+JwR~j;MPcR2@nY0K;tfHoS=cmHMqN51C8tS z``&Nvy*2ZV)HhZ0$EmJ!YImKz_o=P5cAU-ps&&hfaD)U7fY=%>cDhBE8aB z{QMScBM_^(QM7R>z?byM{8k?P1)*+tl9Oam>~N9KE-jl-IvL*m{QIj1dyaTR-gua9 zlJ4olaC&uhHNxtWg~gPp6=Fs4-eqerm7CkjKb^b!!$jYo>d07(wWeBz6C=0Z{-%&B z%9SDoA+*Ufn8?pLr%XCv&@y( zS=r79j}R?SIl9~%^CWH3`;&Oqf?|_yu?SMr3LW8NHRJ8`W*!liI8EHIo=@ANUz3sh z$Lh8ggcEY2j`D_>S^Qj$zjS_x@30vi=oUsMkM>!@FS1uNVi0$kQa%Z#;t|Wg8dPJ?eC~0e2OCrT@tGn5%*|5vWvEQp& z4!L6U{kD4fP}3gUzo0ok{tDB#HikMUJBPvx<%j0sPoYs_=EI2-UlsMN>K2<+V0WIb2Vt?I z^NW{9S621PRcrEm3p^z3VTsiqQ1oh@)!$(Ba8P~&Y9CkQSS=A$4bgRCe1 zIrQ!ip~ z)|L3xwlv(F^u?roI5kP-be;s_8?RP@6n5Ppzs%GIH$#am#+cN0a;_w9mg^}z)mU~^ zw}`ltBA(eSh0l(QEu5(rLca8=eHClHB4QdO+#%Uz8z+Zc-yNT+RgkX4r6S0<68OIA@LMJbA>Ug%F zv)|nhD0fBu!_9*b1O)oH?3A0c1@hcTY-^(2pRB);&WzhIFlMI4ySePCNzpAtR3~n% z_P)m0+t>lqc&TdIn}vzlfJPoXM3s~ zx?IqPyokC7bEiXZ+mxc3F(HnKCn$e}eb?`CM(q*>Jqjv8F8!;|7?cp+nZ-)SwPrN} ztb6pgHL~UEq}>H~H4%Pml@1I^lY9s=Urt2VxHE&Pmkld7y-E`vs7<3LIm%2s4AqDG zpk{Pcw^c$8Bkk80o0*J@Q<=w&?GAeX`(EFh76tS59JW*iXgt*x?H|Yu_}1KesPXer 
zdL!qbWcF}_hG3@iTe{akb@UJSGPlYNBcuDo+wiLl@qT*;M}bvQh03%-KWPD;uB{(O z{)-w)c6!h7GJ#izIB)0XF1jI%N+w=d30r@@H-lDa`)vfSAUHtZaKhrPXt9~Ws6RZA zNQ>oD=~h{GrVp$Sa8d<^duq4-O7ayP{-lY=)%paLp>^EU5@#CP)AbzbRv9sRDTB>I z%#&)qV~eva`734y{F$d6Nvm>EtF-9SB<1F$#zYj-iYI=}H{*!y?(RRMr(-i-$$qwP zYpw3^`Du6D}tsF6SD@T zr;TJHI$pGP?E~!`vrDepuc0#jr(65Lt48Ngt(qeL6OEDK*8ZPUgO76oHRK^!Vlug6 zobMts`b;ln{EV9o-#5zY=z+i4jk`dpWar*+?^W~DF{-Kk(E$m%|E_hy(AY90>D*o? z%*he;(TFcpv8!zN+&O9ZB94_j@MHY?!w6Z&XVq7Cmvbg3Bn;mo4e=kY!k=fp)~$6a zvTqU@;~Mi?3kiDc>q$Awk?rRm+F-6{*%f2CF|q6Rz@0cTHk{zO#|Z55k@|U0^+fUE z``IO%+a(W9_~h^7;eJFlFw>|4G z4g*gLc#JiC!<*9^?HJ93JipI_DCoyan1sbrOviGv{%QZQ#)ZXk{r!jY6JyMBtys}J z*`r01d>GTBlit|UiN>NA*7@!7q9(tk^DC;&*^JU-!7yYBzg`K#5r#j1FjH>6Wh-CX zjq1EBvf>gSpFh*=;q_t$MxK&k-ipYUnot%#@$y)xfk9PSFwXow1N%m33K6@u_xNoE zzjaRIU$p)=6*3V%)21bIvmhG15JS=PZPb^y8tcWdRhf6*Z;gxZ6VuvXa0=!AQA_FX(CA zZ9$w7)jfWdhIX&C4&UBx5UIfZA42$h9xwRo%C@TakO9)rxo>fn-~STq0h1azclZja zZr4^i4UmlDS1ynX{b9QVt77V}DO*q_rPmm%DAwm}dXjEI-5IeN0hR2*)x1uHfqr*B zYNi-i??kAFaXf4BSSw`P3!XIb*(bfJT&{oETv`470}d%jx2s6=ABc|d@ZE)L;8pNR z+aXr3UcWqU3n){=()g03DXvvc9kC_Wc^*lMLM8feP^_?Ys|U z5;NB|MjVR~B(Y9-bhn3b@ERdsr%b_#Rcolqt(f|Cmp>u}?ds{Mllk*P*AmV-(=UI``{SOrgjx@gi6&X52&l zsApRG4%cIfD@*wh*1>a?YP7JtfK~e^yat&>t+{9u?oeIhQ4U9TMXB~n`!_AcL2IO( z^lq%tcIFPJbNU660{|?kN9Ioe0a#Ls0DxY2 z3II^`iW3F!nbPbDpjQI+6!1MH91ZXe0~a6gfsl$05Wq6Od`(P2A?9(EuKQAfBIEWd zgvf*t12E3Y?I0%~FUd0F>GApC#Rz8PvlPdD$CkYc3k+Cb4h#m9lG%UU&V9BYE+8oA z{cz_6eUffbQ#y0Q@b1~A_bfpFh}7E+v0eSPm$=}ji?_BJo2&^yM=Sw+P*?t;b5 zVVH>dX9uKe zgtP*!9tvf>&)aBvQi{QQ7j|_P5eq-Zw2i-~rixbvo@#T-yVNET{Y)PWP)41}BuG zy=bkSYe$6M&cHihmYWM%oA#?}ytHww6-KgRv(?TsB@`MDfgxg+7I=F0x<9`}X^+go zSWbj^Pqb6xthlOAvoHG3?H78>D$D9Y!6KS)M)fJ0Dos&vUA0mREn{hG277sZ#bjA| zS)P@Ro0X1LA$MhAUZ`}&@3}EN3uIwfJ&@{=SE7j_6s>F_iE5$E?!eM#H$2;gu6pAH zM3Ej~Im!6eRMb!Hr3_6D)JIe{%^psa)d~Z3^m&=}5+#~=KG{i&fD=oEmn{Lo{ef3L z@#Z_X$6-q&} z^KJDUrC)&Lvj!>ePEq%@Z;W1d;6;)Ca<9nQgsm}c4{JpvIDg*G_n;tGRbZ6=Xk3@p z$V6|_H?sOHK4}NtlS{{eK}l?^GRmuh(8|LvLeNf`Whs4maeKC7z_F0dVJLdnfd4qY 
zYXZ;YrnZbYdgx{9D>F_z^du3ztmjoI(=NVL$(H-%_tdtPv29iY&onMur&Su1qlINw zRn(H`HLgg#qTTkd?W_&771iR`;0l(VDn>KW*>6h7Kt4u8*=|GBuaj-qBb{%qgL2ta z+FOiSiJWMtE#XI0ZpSiGhW3@_P!2Z@HB$)*?LP&Y{kDRiUZzU+%vfY`>1kT#6LN(a z+h!{zF1(m6uyu!LZR@}^s(}M|;#^;k6N0q}hHbm_(t5TBsQR&*Rom(7yqbP_DUbd- zQ*+9yRIp(&B^WEsQ`;r}`9`82&NBpuI2>0fDJtVL28`lfa$aIywZ#hD3^QH~b>*%m zLMK?4uT{alE{N$-)r1gzChnU-h#@((BguDBv-E(GJ8_zz!_AWkob@%5egWj-|uFnZ-Q$R%o1;LonR!gD?3ANRaJenxxK8_j@kO_rgN2Fpc_*Y&^qs1TRefm9 z&jZ4=<#B|`yP8VI)4l9l)3-HCN+aZH|=)d*H@$ojd3 zsN^}n*7>slnM{qlZ0G=b+*Bp10z-@b2b(ZdR9HGJ{n60;#O&+e^hJ zXAE{J*gJ4H0#cv;R-4(M#;29b*?SqHtV--wQo)i_EU1gTjbYrbKE0YHVTxV*A`YxwUrx*fw?yl zxXlY+@`h}K^>^%?{_tO$71btL@ol@xe`H`X%%OO@rGp4l=J1)E5>Yb2@;^RY5;+*@ z`SAY7T@Vn3+HwZ1FdXeua5!-`4fPfc+ z!3?`wG1xa;Za%(+$DQ#=i`298FVpjXr9e>LL)7D-E2OaEv%-Z$T;R{MLv~|UbtW;> z(Knp;Pp3JY7OAGLjSoGm{W&DWP%*g2XJ)RgAh*6Z>gW8Sz80*F5_#|L79OulrQ`|7 z9q!><0#T1TqN1XH%J*u-bqhyOvAR|ULO@4%JdMo9IXvjf{+WrG-s9&s-GHAbwH7*r zi#ImVtk8j!Hy5{A?krSv{Cs>{?xYVAa-O@SU)(Op2#_dps8H{}dfG4m3yfZ-5Io;t zhDRY`M68AVWX=;F&c(*&IGhfMb+@Oc0|4Hv=m{XH=KmUh{tE-`A4gQK9%cIaZg&=X z*qg7abO8ag|G=f7!tEh&5B~EhTo+f@v%71AnQ7?W-mZw?7ey8Tps2O^<{~9!=N(nS z{NYyp1z2f5!SX; zyq++XV>0f4V9k(WSX1oi^uYSs6%_#BVcv~(c786I0s1rehSTcqP+<&Cg2xC5__Q)E z{VyKW|5BSs@Y(Wnm5#UwgJ;L z{Pnq|N!WbVgB3sYN0o;MkNh;~{>Gc2vPuYV{li>8hCJA%M6gLvs3|>Pfs~^DNUMUd zF#eME#AZ6ckX?1xetGQo?NmP-{! zM@C+o<)9XM9aJphehg4p4NrJ413QuAV&I&3W@&?dZ5m@Xp|*l|{TPX9#kjLe>4lwP zd4atC0OJssufpwHuEWLUYp69?y0#l(`R5J!V)o?B!JKAxv1zM}smp5k(D`YBUNqjJ zl>xy{MOEpPYnRgPX}PvXS9EfKZq2xhk#IyE7kpJ?&NJ^5&uX~9gmGhLv}o#iVkRX6 zMVxIjb3(Rvc|%E6#^&j%t^VtkYxnc{$xhob0$|}<&dSq|Jv?Rh1irmcZ>tmA7fNFa zv0IvdimC-oX0;?URBkDO#+ySTF$7NDB5Wax?6~TD7g)LnWLf>)Cqnxk3q-PDL40-&Om{ z}n;xF|Sz^In1yp z;S6?rTI&3hlMelV!4v!#O|clvw8!MEF&im}OC05X$t2H~SlO!;b!DZ~rX=#3T%?P! 
zslNDIB>n57a%vw8d?`xCn6;9<^2lnMSij+9M1!2juB@?E7t>Tryn4d71R95r=EnIW zax6=`_6*83dl@$ZfA+Anu)0cTQmnHonKNv4rro!7;F{cWzx1ohEtA!MMI}#gO~z-| zmn;jG^VPrS!69i*U;s?_>?q2(TID$i4HfO0;{$S*=_y&tyU%ysY z!5(MGAls|FOHJ4K{q4TRFZ8c-m5y3#E&cgTp8BOz&L%h7dKp$K>ke~$^IHyiG6r6? z>g2Ekzp1oOFhvHCZg6h!Je~$)u7~O+^IY+4y#?*(az)&z*MDNpp}Sh{DNcJPXu zBuWc)DEgXq>34F+O=+XY9-U0huiAoy>F;V|DXXcRZkyZ3kc-Ut19$NU%M0aE3;iUT zt-p3akjG8L6sX|#sz3QAmcd5uY&fqIp}8jhXV|mXZ@>P%aj9+byHaTkk*#jeqA&&&mci^gdY2>gsh{GV z=$3+;P~=RF4*A)$FZSOP<&+EuiSl(P;cb}(T-vQ=5&5?<&qRoI=4=OSYi)|n-IPUd zmBk>4Qk$yW7A9>kzh1BoBab#-E-F_MwwhAQ59`jSY06bSihEl;LWt!>XZRkxZ_$d5GOuEgTFGTfpP*w`%kEn$Y@lcKq{;n;PIfHbwM*}&p^6yc zB!K@d#om2kVt{V>cApQ*ndKif>16iGe(%x%XQ=PeE-?KYchLRtN2|7mpj}&L<`Ya1 z33yV`eLGWHa}mdDohfwrs~HYeUEQdlFZ^uOR&`lHz@AHblcLZgl71YaZKh$9DTn$a z69rnS9@n>)rT@)hkDq_qrm@lDmRJ8eaPMu}a4B4IB&9bQPI)UJhFnd>)R!g2=o3m8 z>sA8gtB%xppclcDIurlIb1Oxx6c&dTcB9&x^1I{O>55t^Xu?=Yxahs&fD$_&9)kr zW@}b~-Y%cYNi#l=jFVhzRn($Uxs7{d{2t&dlMeUoYZbXD!E%27dK1Y_g~`*@{7GN1 zNAYFX%)+ejYaa5dHiY>k-@YyJh*!1kH=^=MY$YMIsj#i$(dH@F*P@%eN!wGWOdJn$ zm2Le?+*>YPte4VKwW8azqn=>Du7Q1zo{pA|qm(Gxm%abOy8M)FTWgm3de!2}1@YVu zanjDr7s#3=e-aN(cnC4uNxY0De0`X>CrKLJ%TI`-iStbUTrZA^w|;HaA<9I5gzoGw z%&P+RfJgXiN2k|vfTvxu96Qd8*e~{v9BK_H;LJ8J9kF@)hD<}veq82)Njcij)C?|p zd|Muk&nwd9+QW&dcrJw8?N7`ijd)h40!ws>gH(XYzZ1sQ72 z(eQ_*TK%khR~Z?ayoM&^86pRpz`>g2%rae}XMBAw^$GSgAc{tq%JRK9Quq;iKyoAG z4geUW`ll=e%6o4QiFre9X>VU9J7mb5WHC0~X#_+dG2tlJWqZfY^4AHF$od)eT@*rVHf0N?`XIwE$v03T2GexwRb?RsRUKq^Uv&*z8-k?N-Tev}ys z;6p7BHmV;<%j9go4 z*-mC#xe=sX)jB40eD+Nm-jG`+_qiApSx?$sY({;3!aQ#Hm*fD5S1c(mhyL6`z?ui6otj;pWVE5~l4Ob(zz2s=_Sy?+)kyZdt8Ov9?7w zT^3JO?9Hf@=98ggXVmvBf^>vB0v;9W+|`J&rdaHsW|3p%XH5(& zbcFn_hlcqL`ss_;v|R@}aQrC#ZI8=saxbv(ktpFrQ9&;;|K2S<0lL})QTq^x=@dz^M{sU71PNISE>)L=+e zTkCx(ft5`rA2Y?yzB-A*m9R+lUqZ;S;xfvo(5s_M1+O(CvC=Ud*4n$Q-j9lJseHiJ zqC?NaaZ0|b$w-Na{?uIGUXwD{oXi%u_ES>DKMvqhkKeJg^NQfEj4YV{TgL!lkp^q? 
z@PrF^+8lmTm;dO^2yTqh2@+g*8bG{J)zC2J@JK@_%n)|kCBcD|d~7c|-S&dP`8GL2 z8cd@2@I@mXF&>(?=$jRJPe2WF*N2jl=S#()4u@IW6LL*ZQA<3h$a*&K?`T0%@T9@_ zin_Ftqx`;B(^#bzubjGf)$NE^4AH*XXie%`oz;d87Ml(Yv1lCJYT<^6+DeVU)-=>2 z;cGoFiPfOJq27suc~Dp|S_qELMgfyjQ&gmNQbt-+u_Ks7t-7vQyso&c$h!V$sgPkw zp{p3A0jV0RZ_U0ruPNv~!(4=b@`21lWeEA04IP~_Gl5e61U9#=Kdwz#ANBfsQexJO zCT)g=`Cg2;FimQ8MI>lRBV4svsNqhuGA(d40QVRcG5=|@Zsx1?T5MFnt_{wG3vyG&%~1@&5YI58q`I! z0oQo)-v@sM|j+t-QbU)J4Y@1f_? zPypbSxE&ZT>wnbFO@D>0|6V!+vXArU+rD@$SO}B5B9j0BRhaq3|Lg($ z7mDEjrK^y1_;}5EC)2lzPw#g%>JEKZxC6ed=(`6ZL;btvuD^T(nW$%^|EiG1)%GCw zvD+C+W|Xo`{@T2ck<73sfDfKgQBh#-9`{oRXvd-Ze&;6Q_5mK;NjrOXuO9aV@Y3g| zu{O2q4dP(7nkt_Ov?3&oJ7IltmddSPb{7Ws+m3=flBu8qf*WUMW*S|#BcGXcbJ7j< zNXu*KJhG_VAs!b9CpOa1$LWOc+x$;}!YF`O516K^BX2m*5l&U`*dgJ#Uo@%o8%m$> z0Rdk^|Bl6Jt>FTiZ4cO0t$wnf84eReF&{lo`rV<(4>?)DZbr9|lp(+D{UY|nBL5!X zLj_6mHF#q39PpBR{5>-fW%VwAf|lforBA-#Q;R8`x@yzHHwh)psDBE$RFXVu1U9On z+O9=Qn)ICTmw@&;*QFk0oxqXYdh&f2pM&vz5;D3Xy@7v3*DCU*V(-0z$Y1w97`0!E z7+~@zDafe>Ii&B;dV0jAsmwNky)|5zFQy&jE%Nsnrp;u^kX>cW}$nfF^2eD{&3Js5>k&s z0qCYIh3}%Jwk@0}5OU~ks8{N;u#&OG1_XrI#H3JtzPKonY)RC!&HSKY!S{N@b^HsnWDjPXvXpMz$hwupR9(wI zR8hrc4;f(`K)d}f>VlW4-}pbBT1m7tF&1E9)#`B7Q3%-H&=Rb1I~k)wA})_A5JS`c zDRQLW^$%sso&Y#fFT#sM6z^aD7|CKxARr(}Mk$E{5~N`5zfVRzsxN2Iv4k zK2r6~eneV;9PNcsfd8HS_y2zMEpGk&x_f#?GKA=n@3#Dvf>gM6tf8Me1wm)tiPqC z#G(Rbpe?`A0D$NJ#MOVem|T8=_kSJw&HcqVD1|ULa>aC3i{(0twf0|izlSQnq}*u3 zAO6c91j$Jc78-k1o{WZ>p}dp2s8Gm!ELP}}c()VGL2w|LZ3M#ZDu>NyAkZIBaKMTM|E`76o_ctK;HL8*udJIX~k4o7Jv=#Zb|XwgL~ zuR~IMxGGwX>X6sj`L>$g&}@||ss-pUm*$zDyE$s^o8u)cg3B18VmYFdFz#dfd|o=~ z^`f8e96qjJwNtU|?8|!P_lIQlG=9MAJ86*V##yTvmZ(c4yYA^BUUMu~!xV39`1z+v zH2R0#oS$P$AZsJ67DICTzLrXHqx8BjavvdHNwt(TtBlO49ki*Bv#-x@rGl$OUWJm? 
zFC4!N8HgMAA;ES43Y5b}OfTB)`mmNowv51J17K4pVK-XF@90UKB@e_gHSy zI4VZ23Xqcr#gc#aM+|ewlfQj`jj-fhVbg8c9JWhBkQ50$*!;|xBx|!MmmJXaPa$>Z zDO#{0Gq(+WVJL-jf0Xs4husv_Z+(yrjuh2Y-TV9U*y$@pBIR0U1B{=t{1)4zWoVu>6S z?i9zF@(oAnx#s_p;9U?K^RwOHT`$x&d%fp1QEFYFVlJry2R*D5mBL}MMRlS0ne$Og znd{D&h+I{d8QIAe8EC$6sgz9=QPzFgb{cf z9tve1*US*9USnT(U5X>C6ur^RW%4n6`^8MO1Uua#e1~NYvC6PzhI=p}`=aS+$tq~t zr0+-p+dR(;I=I1EU^~GLLfHN8pr}S!!8$N5oVpfnK^z${XtUipiO*3#!?+hsc%4+K zyQAA~1>+fbi+c_;L5OIp+$E*MSOr3FQ=s*9r6C4VvWvOk6a=Xj54ga=?FH8jc2oYV zYRhMv?003?_&S)72o2=ceRxXhn{RT6!#u;&8y8-AOSdPtK&pE>aHY|j@R z-O%tejy3mEFEeX+Q38&4nYc1dOJTP{nR7dSiqRm4-TAeKztr4{*F@Nq--(=TfqO+e z-qU9RX=L4@FiWvb|E{n1NPk;k^9o*iK$+Lj;yA4ZIx7}y`$48sQR{v8@~hNNVbrHB zWcZ-z_5l zo+C^Jeb4DYj;{*zR5Lg}z!u_LRPz7@?e%S*wX(C4^udg@<7m7nRF+fm%zs7CqsKcf zs!bw6N=5QMDE#pwYpi8VrH^9X YFcb|mb^>i=*FQi`T3M<>;#1&%0|>zdWB>pF literal 0 HcmV?d00001 diff --git a/images/icon.ico b/images/icon.ico new file mode 100644 index 0000000000000000000000000000000000000000..8b2a6044e495bd15ddc70447f52ef0dfb751df16 GIT binary patch literal 1822 zcmV+(2jTbt0096203aX$0000W00;*F02TlM0EtjeM-2)Z3IG5A4M|8uQUCw|AOHXW zAP5Ek0047(dh`GQ2G2=EK~#90t(RM)Ku!4|atQRmu@IoUg1Vr#b@%4N(rfsTFqfjUoTeP()NlZ#1dYYy^ zxtvSB%l`IWYkb)Iv?s|)Y7^&Wvsr8AKWk>q%zp_t7NI?%bRG!+AK&%EyMp737!P4= z23rL*s=a-Xa|o{?9CLv`oIL!KvjDUw9GyoZV2gt+Cj0g&`}aqHdmjGY{gx!(Hf)=! zUkYF`U>3nDn<9uPq7Hxd4Rkeyz=$J5YKY);{!&R?@ojr>J~WeIhX* zVll)@OF*vCHN(nNiaOOKB^J36?N|BCyFYmI*so{iZ7vrzGH(9dcMNMmu zS_~9fKbS=;k12XR_`)!Vnnf$Jey|wOwDxGSWW|f?cJ98H&Lg4So^Xptpxt(KI${08 zdp;}4$Wc+(3VD``NOC>c|1M^hyQG#_3^SH@x1Bz6@P&D-_am@xpVH}swL2c$B)<79 zNGs%7xLt4n5lKRx1(25b=Cf;eJhq8WC$MjyRt}f|Y}mec%zO8wI6odk9z?|5-1LWX z!o`pWaemx;_vD7{d&kNZ6K%IU%KrT^cs<~1&2=Fk6#q}~g-(QgP`FxiT~Iy1{{6At z?x+B=ZpU}-)wJ=C>Xs`vD$m;&!-$%s8U|NBb>^vG{8Ir#)4l3bv9hpl3_f4?X565! ze2@q#7P6j*rh5UCyLWzfji^5%qGMRRsRQa$VlhOVFUvj_H}&@_-($cx$GQ0W^VYj? 
zRzAArD|7ku)J9cghx6$eW%w6ZTX;&tEHZWcFJMOZ+!?Z&cM+GoAC;S;8=BM^T$tqI zsTXAVCq5rh9kWJrhnnct5=j?eg|=$w6&a~J44cJRq4pD z%YcZMb3z0`h#@Cwws`xcr$CgY8$Ltcn^Edlhx4PnrAA39h_-n*PN} zrd~USg&bcSp~$-B(5VK`e3UQ)5yG&Cs1gQUhTY2~&2eV`{W^1#C-}$%J1Tt@ixGiU zib$<;9Z|>N(p%*HS$ty@QBU5RAst)B@XBR;V~jW)fX+weJj1z*h&seMh>`C3H_3am z^e;|=8F_z>OaJ;SEARgjzA=i$P+c_xk(vh~7DFuLs87lJbD$2FHVK0+Mb^WQv?%(s zxcUeoACRg;eL~TnMO;G6Gs3V(7!Gi?CRoHb#<3XaPn~7u179W?SxU(IL2yn2X=VA1+2aIVU`)t0QlH^$2gSmibh`}q1OMgI!EK0+w6 z_vnCy0+%*ON0*kpK?Ji{_6HGM+APUS^}nz#MHIo#d4yL*-C7GFiii_Y($S^3T9ew6 z6=+f~FICpX)kleWc0C;sxuzdT1rx9Oe;;3>s8fr@t7>M?l{-wR%4T%+27GgzdTS-E zk9`s|BkxX=G{z}%{A9a5Q4$i`ZC%5fv=Qomb7_537!C@N#Zz%x;A!25 z;ox}R4L&`|^#wD<3?ki zkOJ?~^1HY1xzE)`{wU6`2*W{MMdDWL&Z?6q7K(R5*8Rd8Pak~cdRh+9=>*yn?v1Ap zzLNI_Tdc@m_Vtkj5W|WOmwdBQ2l4fh#ESgoyf@fVf=|2$&Y~x)yu0gjU#-{fSo0%M z*{!}-F9%i;OX6Zuv5|Rw!_4N_#Ca|GLN>5yPh=M+e|+}&-whU?{KgKzRS(JayY}3J zYko^$C*oF1)$fUM{wY|_2Gxp;7+84A;P-^yFHavncy%8PEyWxykeVzro3Zd)?0LAOHXW M07*qoM6N<$f>rx|4gdfE literal 0 HcmV?d00001 diff --git a/run_Windows.bat b/run_Windows.bat index 57748ed..0123d1d 100644 --- a/run_Windows.bat +++ b/run_Windows.bat @@ -2,4 +2,4 @@ echo Starting... call conda activate base REM OPTION 2 : (KEEP TEXT WITHIN QUOTES AND CHANGE USERNAME) "C:/Users/user/Anaconda3/condabin/activate.bat" -call python GUI.py +call python app.py \ No newline at end of file diff --git a/sample_audio/transcriptions/Armstrong_Small_Step.txt b/sample_audio/transcriptions/Armstrong_Small_Step.txt index 8bdc6b8..cbc4bed 100644 --- a/sample_audio/transcriptions/Armstrong_Small_Step.txt +++ b/sample_audio/transcriptions/Armstrong_Small_Step.txt @@ -1,5 +1,4 @@ Armstrong_Small_Step -In seconds: -[0.00 --> 7.00]: I'm going to step off the limb now. -[7.00 --> 18.00]: That's one small step for man. -[18.00 --> 24.00]: One giant leap for mankind. \ No newline at end of file +[0:00:00 --> 0:00:07]: And they're still brought to land now. +[0:00:07 --> 0:00:18]: It's one small step for man. 
import os
import datetime
from glob import glob

import whisper
from torch import cuda, Generator
import colorama
from colorama import Back, Fore

colorama.init(autoreset=True)


def get_path(path):
    """Return a list of all entries (files and folders) directly inside *path*."""
    return glob(path + '/*')


def transcribe(path, glob_file, model=None, language=None, verbose=False):
    """
    Transcribes audio files in a specified folder using OpenAI's Whisper model.

    Args:
        path (str): Path to the folder containing the audio files.
        glob_file (list): List of audio file paths to transcribe.
        model (str, optional): Name of the Whisper model to use for transcription.
            Defaults to None, which uses the default model.
        language (str, optional): Language code for transcription. Defaults to None,
            which enables automatic language detection.
        verbose (bool, optional): If True, enables verbose mode with detailed
            information during the transcription process. Defaults to False.

    Returns:
        str: A message indicating the result of the transcription process.

    Notes:
        - Files whisper cannot decode raise RuntimeError and are skipped.
        - The function downloads the specified model if not available locally.
        - The transcribed text files are saved in a "transcriptions" folder
          within the specified path.
    """
    # Seed the RNG for reproducible transcriptions; use the GPU generator
    # when CUDA is available, otherwise the CPU one.
    if cuda.is_available():
        Generator('cuda').manual_seed(42)
    else:
        Generator().manual_seed(42)

    # Load model (downloaded on first use if not cached locally).
    model = whisper.load_model(model)

    files_transcripted = []
    for audio_file in glob_file:
        # splitext (not split('.')) keeps titles with dots intact,
        # e.g. "interview.v2.mp3" -> "interview.v2".
        title = os.path.splitext(os.path.basename(audio_file))[0]
        print(Back.CYAN + '\nTrying to transcribe file named: {}\U0001f550'.format(title))
        try:
            result = model.transcribe(
                audio_file,
                language=language,
                verbose=verbose
            )
            files_transcripted.append(result)
            # exist_ok=True already tolerates a pre-existing folder;
            # no try/except needed.
            os.makedirs('{}/transcriptions'.format(path), exist_ok=True)
            # Collect per-segment timestamps (as H:MM:SS) and text.
            start = []
            end = []
            text = []
            for segment in result['segments']:
                start.append(str(datetime.timedelta(seconds=segment['start'])))
                end.append(str(datetime.timedelta(seconds=segment['end'])))
                text.append(segment['text'])
            # Save one .txt per audio file in the transcriptions folder.
            # (out_f, not `file`, so the loop variable is not shadowed.)
            with open("{}/transcriptions/{}.txt".format(path, title), 'w', encoding='utf-8') as out_f:
                out_f.write(title)
                for i in range(len(result['segments'])):
                    out_f.write('\n[{} --> {}]:{}'.format(start[i], end[i], text[i]))
        # Whisper raises RuntimeError on undecodable input; skip and continue.
        except RuntimeError:
            print(Fore.RED + 'Not a valid file, skipping.')

    # Report how many files were processed.
    if len(files_transcripted) > 0:
        output_text = 'Finished transcription, {} files can be found in {}/transcriptions'.format(len(files_transcripted), path)
    else:
        output_text = 'No files eligible for transcription, try adding audio or video files to this folder or choose another folder!'
    return output_text
end.append(str(datetime.timedelta(seconds=(result['segments'][i]['end'])))) - text.append(result['segments'][i]['text']) - - with open("{}/transcriptions/{}.txt".format(path,title), 'w', encoding='utf-8') as file: - file.write(title) - file.write('\nIn seconds:') - for i in range(len(result['segments'])): - file.writelines('\n[{} --> {}]:{}'.format(start[i], end[i], text[i])) - - print('\nFinished file number {}.\n\n\n'.format(idx+1)) - - return 'Finished transcription, files can be found in {}/transcriptions'.format(path)