Compare commits
4 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 4e1c709f43 | |||
| dfe967bd58 | |||
| 586289efe5 | |||
| c5a5597eee |
@@ -2,4 +2,4 @@
|
|||||||
Unfortunately, I have not found a permanent solution for this; not being a Mac user has limited the ways I can test this. For now, these are the recommended steps for a beginner user:
|
Unfortunately, I have not found a permanent solution for this; not being a Mac user has limited the ways I can test this. For now, these are the recommended steps for a beginner user:
|
||||||
1. Open a terminal and navigate to the root folder (transcribe-main if you downloaded the folder). You can also right-click (or equivalent) on the root folder to open a Terminal within the folder.
|
1. Open a terminal and navigate to the root folder (transcribe-main if you downloaded the folder). You can also right-click (or equivalent) on the root folder to open a Terminal within the folder.
|
||||||
2. Run the following command:
|
2. Run the following command:
|
||||||
```python GUI.py```
|
python GUI.py
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ and use the example.ipynb template to use the script.
|
|||||||
#### Example with jupyter notebook
|
#### Example with jupyter notebook
|
||||||
See [example](example.ipynb) for an implementation on jupyter notebook, also added an example for a simple [workaround](example_no_internet.ipynb) to transcribe while offline.
|
See [example](example.ipynb) for an implementation on jupyter notebook, also added an example for a simple [workaround](example_no_internet.ipynb) to transcribe while offline.
|
||||||
#### Using the GUI
|
#### Using the GUI
|
||||||
You can also run the GUI version from your terminal running ```python GUI.py``` or with the batch file called run_gui.bat, just make sure to add your conda path to it. If you want to download a model first, and then go offline for transcription, I recommend running the model with the default sample folder, which will download the model locally. The GUI should look like this:
|
You can also run the GUI version from your terminal running ```python GUI.py``` or with the batch file called run_Windows.bat (for Windows users; Mac users should read the text file for instructions), just make sure to add your conda path to it. If you want to download a model first, and then go offline for transcription, I recommend running the model with the default sample folder, which will download the model locally. The GUI should look like this:
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
|
|||||||
+2
-2
@@ -1,5 +1,5 @@
|
|||||||
@echo off
|
@echo off
|
||||||
echo Starting...
|
echo Starting...
|
||||||
call conda activate venv
|
call conda activate base
|
||||||
REM OPTION 2 : (KEEP TEXT WITHIN QUOTES AND CHANGE USERNAME) "C:/Users/user/Anaconda3/condabin/activate.bat"
|
REM OPTION 2 : (KEEP TEXT WITHIN QUOTES AND CHANGE USERNAME) "C:/Users/user/Anaconda3/condabin/activate.bat"
|
||||||
call python GUI.py
|
call python GUI.py
|
||||||
|
|||||||
+10
-3
@@ -1,5 +1,7 @@
|
|||||||
import whisper
|
import whisper
|
||||||
import glob, os
|
import glob, os
|
||||||
|
#import torch #uncomment if using torch with cuda, below too
|
||||||
|
import datetime
|
||||||
|
|
||||||
def transcribe(path, file_type, model=None, language=None, verbose=False):
|
def transcribe(path, file_type, model=None, language=None, verbose=False):
|
||||||
'''Implementation of OpenAI's whisper model. Downloads model, transcribes audio files in a folder and returns the text files with transcriptions'''
|
'''Implementation of OpenAI's whisper model. Downloads model, transcribes audio files in a folder and returns the text files with transcriptions'''
|
||||||
@@ -10,6 +12,11 @@ def transcribe(path, file_type, model=None, language=None, verbose=False):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
glob_file = glob.glob(path+'/*{}'.format(file_type))
|
glob_file = glob.glob(path+'/*{}'.format(file_type))
|
||||||
|
|
||||||
|
#if torch.cuda.is_available():
|
||||||
|
# generator = torch.Generator('cuda').manual_seed(42)
|
||||||
|
#else:
|
||||||
|
# generator = torch.Generator().manual_seed(42)
|
||||||
|
|
||||||
print('Using {} model'.format(model))
|
print('Using {} model'.format(model))
|
||||||
print('File type is {}'.format(file_type))
|
print('File type is {}'.format(file_type))
|
||||||
@@ -34,15 +41,15 @@ def transcribe(path, file_type, model=None, language=None, verbose=False):
|
|||||||
end=[]
|
end=[]
|
||||||
text=[]
|
text=[]
|
||||||
for i in range(len(result['segments'])):
|
for i in range(len(result['segments'])):
|
||||||
start.append(result['segments'][i]['start'])
|
start.append(str(datetime.timedelta(seconds=(result['segments'][i]['start']))))
|
||||||
end.append(result['segments'][i]['end'])
|
end.append(str(datetime.timedelta(seconds=(result['segments'][i]['end']))))
|
||||||
text.append(result['segments'][i]['text'])
|
text.append(result['segments'][i]['text'])
|
||||||
|
|
||||||
with open("{}/transcriptions/{}.txt".format(path,title), 'w', encoding='utf-8') as file:
|
with open("{}/transcriptions/{}.txt".format(path,title), 'w', encoding='utf-8') as file:
|
||||||
file.write(title)
|
file.write(title)
|
||||||
file.write('\nIn seconds:')
|
file.write('\nIn seconds:')
|
||||||
for i in range(len(result['segments'])):
|
for i in range(len(result['segments'])):
|
||||||
file.writelines('\n[{:.2f} --> {:.2f}]:{}'.format(start[i], end[i], text[i]))
|
file.writelines('\n[{} --> {}]:{}'.format(start[i], end[i], text[i]))
|
||||||
|
|
||||||
print('\nFinished file number {}.\n\n\n'.format(idx+1))
|
print('\nFinished file number {}.\n\n\n'.format(idx+1))
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user