Compare commits

7 Commits

Author SHA1 Message Date
Kristofer Rolf Söderström 4e1c709f43 Update transcribe.py
better time keeping
2023-04-20 20:13:54 +02:00
Kristofer Rolf Söderström dfe967bd58 Update run_Windows.bat 2023-04-20 19:35:51 +02:00
Kristofer Rolf Söderström 586289efe5 Update Mac_instructions.txt 2023-04-19 16:51:36 +02:00
Kristofer Rolf Söderström c5a5597eee Update README.md 2023-04-19 16:46:49 +02:00
Kristofer Rolf Söderström ce8c365fc4 Update and rename Mac_2_instructions.txt to Mac_instructions.txt 2023-04-17 20:28:52 +02:00
Kristofer Rolf Söderström e2afd34170 Delete run_Mac_2.command 2023-04-17 20:25:18 +02:00
Kristofer Rolf Söderström 6fa49e41d9 Delete run_Mac_1.sh 2023-04-17 20:24:50 +02:00
7 changed files with 18 additions and 19 deletions
-6
View File
@@ -1,6 +0,0 @@
### Steps to make command file executable
To make a file executable on a Mac, you need to open a terminal window in the directory where the file is located. Then run the following command:
chmod +x run_MAC_2.command
After running this command, the file should be marked as executable and you should be able to run it by double-clicking on it.
+5
View File
@@ -0,0 +1,5 @@
### How to run on Mac
Unfortunately, I have not found a permanent solution for this; not being a Mac user has limited the ways I can test this. For now, these are the recommended steps for a beginner user:
1. Open a terminal and navigate to the root folder (transcribe-main if you downloaded the folder). You can also right-click (or equivalent) on the root folder to open a Terminal within the folder.
2. Run the following command:
python GUI.py
+1 -1
View File
@@ -34,7 +34,7 @@ and use the example.ipynb template to use the script.
#### Example with jupyter notebook #### Example with jupyter notebook
See [example](example.ipynb) for an implementation on jupyter notebook, also added an example for a simple [workaround](example_no_internet.ipynb) to transcribe while offline. See [example](example.ipynb) for an implementation on jupyter notebook, also added an example for a simple [workaround](example_no_internet.ipynb) to transcribe while offline.
#### Using the GUI #### Using the GUI
You can also run the GUI version from your terminal running ```python GUI.py``` or with the batch file called run_gui.bat, just make sure to add your conda path to it. If you want to download a model first, and then go offline for transcription, I recommend running the model with the default sample folder, which will download the model locally. The GUI should look like this: You can also run the GUI version from your terminal running ```python GUI.py``` or with the batch file called run_Windows.bat (for Windows users, Mac users should read the text file for instructions), just make sure to add your conda path to it. If you want to download a model first, and then go offline for transcription, I recommend running the model with the default sample folder, which will download the model locally. The GUI should look like this:
![python GUI.py](gui_jpeg.jpg?raw=true) ![python GUI.py](gui_jpeg.jpg?raw=true)
-4
View File
@@ -1,4 +0,0 @@
#!/bin/bash
echo Starting...
conda activate venv
python -u GUI.py
-3
View File
@@ -1,3 +0,0 @@
#!/bin/bash
echo Running Script
python -u GUI.py
+2 -2
View File
@@ -1,5 +1,5 @@
@echo off @echo off
echo Starting... echo Starting...
call conda activate venv call conda activate base
REM OPTION 2 : (KEEP TEXT WITHIN QUOTES AND CHANGE USERNAME) "C:/Users/user/Anaconda3/condabin/activate.bat" REM OPTION 2 : (KEEP TEXT WITHIN QUOTES AND CHANGE USERNAME) "C:/Users/user/Anaconda3/condabin/activate.bat"
call python GUI.py call python GUI.py
+10 -3
View File
@@ -1,5 +1,7 @@
import whisper import whisper
import glob, os import glob, os
#import torch #uncomment if using torch with cuda, below too
import datetime
def transcribe(path, file_type, model=None, language=None, verbose=False): def transcribe(path, file_type, model=None, language=None, verbose=False):
'''Implementation of OpenAI's whisper model. Downloads model, transcribes audio files in a folder and returns the text files with transcriptions''' '''Implementation of OpenAI's whisper model. Downloads model, transcribes audio files in a folder and returns the text files with transcriptions'''
@@ -10,6 +12,11 @@ def transcribe(path, file_type, model=None, language=None, verbose=False):
pass pass
glob_file = glob.glob(path+'/*{}'.format(file_type)) glob_file = glob.glob(path+'/*{}'.format(file_type))
#if torch.cuda.is_available():
# generator = torch.Generator('cuda').manual_seed(42)
#else:
# generator = torch.Generator().manual_seed(42)
print('Using {} model'.format(model)) print('Using {} model'.format(model))
print('File type is {}'.format(file_type)) print('File type is {}'.format(file_type))
@@ -34,15 +41,15 @@ def transcribe(path, file_type, model=None, language=None, verbose=False):
end=[] end=[]
text=[] text=[]
for i in range(len(result['segments'])): for i in range(len(result['segments'])):
start.append(result['segments'][i]['start']) start.append(str(datetime.timedelta(seconds=(result['segments'][i]['start']))))
end.append(result['segments'][i]['end']) end.append(str(datetime.timedelta(seconds=(result['segments'][i]['end']))))
text.append(result['segments'][i]['text']) text.append(result['segments'][i]['text'])
with open("{}/transcriptions/{}.txt".format(path,title), 'w', encoding='utf-8') as file: with open("{}/transcriptions/{}.txt".format(path,title), 'w', encoding='utf-8') as file:
file.write(title) file.write(title)
file.write('\nIn seconds:') file.write('\nIn seconds:')
for i in range(len(result['segments'])): for i in range(len(result['segments'])):
file.writelines('\n[{:.2f} --> {:.2f}]:{}'.format(start[i], end[i], text[i])) file.writelines('\n[{} --> {}]:{}'.format(start[i], end[i], text[i]))
print('\nFinished file number {}.\n\n\n'.format(idx+1)) print('\nFinished file number {}.\n\n\n'.format(idx+1))