Compare commits

4 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Kristofer Söderström | acadd17007 | some corrections | 2023-03-23 15:14:03 +01:00 |
| Kristofer Söderström | 918c2e489e | added example | 2023-03-23 15:06:20 +01:00 |
| Kristofer Rolf Söderström | 4710c61e22 | Update README.md | 2023-03-23 14:58:23 +01:00 |
| Kristofer Söderström | eea7441e43 | fixed some spacing | 2023-03-22 22:02:41 +01:00 |
4 changed files with 237 additions and 5 deletions
README.md (+3 -1)
@@ -1,5 +1,7 @@
 ## transcribe
 Simple script that uses OpenAI's Whisper to transcribe audio files from your local folders.
+## Note
+This implementation and guide are mostly intended for researchers not familiar with programming who want a way to transcribe their files locally, without an internet connection, as is often required within ethical data practices and frameworks. Two examples are shown: a normal workflow with an internet connection, and one in which the model is loaded first, via openai-whisper, so the transcription can then be done without being connected to the internet.
 ### Instructions
 #### Requirements
@@ -22,7 +24,7 @@ git clone https://github.com/soderstromkr/transcribe.git
 and use the example.ipynb template to use the script **OR (for beginners)** download the ```transcribe.py``` file into your work folder. Then you can either import it to another script or notebook for use. I recommend jupyter notebook for new users, see the example below. (Remember to have transcribe.py and example.ipynb in the same working folder).
 ### Example
-See the [example](example.ipynb) implementation on jupyter notebook.
+See [example](example.ipynb) for an implementation in a jupyter notebook. An example of a simple [workaround](example_no_internet.ipynb) to transcribe while offline has also been added.
 [^1]: Advanced users can use ```pip install ffmpeg-python``` but be ready to deal with some [PATH issues](https://stackoverflow.com/questions/65836756/python-ffmpeg-wont-accept-path-why), which I encountered in Windows 11.
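In code, the two-step workflow the new Note describes looks roughly like this; a minimal sketch, assuming transcribe.py from this repo sits in the working folder and that sample_audio/ holds ogg files (both names are illustrative):

```python
import whisper
from transcribe import transcribe  # the repo's transcribe.py

# Step 1 (online): download and cache the model weights once.
whisper.load_model("medium")

# Step 2 (offline): the cached weights are reused, so no connection is needed.
transcribe(path="sample_audio/", file_type="ogg",
           model="medium", language=None, verbose=True)
```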
example.ipynb (+1 -1)
@@ -40,7 +40,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"path='sample_audio/'#folder path\n", "path='sample_audio/'#folder path\n",
"file_type='ogg' #check your file for file type, will only transcribe files with the file type, 'ogg', 'WAV'\n", "file_type='ogg' #check your file for file type, will only transcribe those files\n",
"model='medium' #'small', 'medium', 'large' (tradeoff between speed and accuracy)\n", "model='medium' #'small', 'medium', 'large' (tradeoff between speed and accuracy)\n",
"language= None #tries to auto-detect, other options include 'English', 'Spanish', etc...\n", "language= None #tries to auto-detect, other options include 'English', 'Spanish', etc...\n",
"verbose = True # prints output while transcribing, False to deactivate" "verbose = True # prints output while transcribing, False to deactivate"
example_no_internet.ipynb (+231, new file)
@@ -0,0 +1,231 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "eba9e610",
"metadata": {},
"source": [
"A simple way to avoid being connected while transcribing is to first load the model version you want to use. See [here](https://github.com/openai/whisper/blob/main/README.md#available-models-and-languages) for more info."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "85cd2d12",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Whisper(\n",
" (encoder): AudioEncoder(\n",
" (conv1): Conv1d(80, 1024, kernel_size=(3,), stride=(1,), padding=(1,))\n",
" (conv2): Conv1d(1024, 1024, kernel_size=(3,), stride=(2,), padding=(1,))\n",
" (blocks): ModuleList(\n",
" (0-23): 24 x ResidualAttentionBlock(\n",
" (attn): MultiHeadAttention(\n",
" (query): Linear(in_features=1024, out_features=1024, bias=True)\n",
" (key): Linear(in_features=1024, out_features=1024, bias=False)\n",
" (value): Linear(in_features=1024, out_features=1024, bias=True)\n",
" (out): Linear(in_features=1024, out_features=1024, bias=True)\n",
" )\n",
" (attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
" (mlp): Sequential(\n",
" (0): Linear(in_features=1024, out_features=4096, bias=True)\n",
" (1): GELU(approximate='none')\n",
" (2): Linear(in_features=4096, out_features=1024, bias=True)\n",
" )\n",
" (mlp_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
" )\n",
" )\n",
" (ln_post): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
" )\n",
" (decoder): TextDecoder(\n",
" (token_embedding): Embedding(51865, 1024)\n",
" (blocks): ModuleList(\n",
" (0-23): 24 x ResidualAttentionBlock(\n",
" (attn): MultiHeadAttention(\n",
" (query): Linear(in_features=1024, out_features=1024, bias=True)\n",
" (key): Linear(in_features=1024, out_features=1024, bias=False)\n",
" (value): Linear(in_features=1024, out_features=1024, bias=True)\n",
" (out): Linear(in_features=1024, out_features=1024, bias=True)\n",
" )\n",
" (attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
" (cross_attn): MultiHeadAttention(\n",
" (query): Linear(in_features=1024, out_features=1024, bias=True)\n",
" (key): Linear(in_features=1024, out_features=1024, bias=False)\n",
" (value): Linear(in_features=1024, out_features=1024, bias=True)\n",
" (out): Linear(in_features=1024, out_features=1024, bias=True)\n",
" )\n",
" (cross_attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
" (mlp): Sequential(\n",
" (0): Linear(in_features=1024, out_features=4096, bias=True)\n",
" (1): GELU(approximate='none')\n",
" (2): Linear(in_features=4096, out_features=1024, bias=True)\n",
" )\n",
" (mlp_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
" )\n",
" )\n",
" (ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
" )\n",
")"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import whisper\n",
"#change to model size, bigger is more accurate but slower\n",
"whisper.load_model(\"medium\") #base, small, medium, large"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "0d2acd54",
"metadata": {},
"outputs": [],
"source": [
"#after it loads, you can disconnect from the internet and run the rest"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "a2cd4050",
"metadata": {},
"outputs": [],
"source": [
"from transcribe import transcribe"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "24e1d24e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Help on function transcribe in module transcribe:\n",
"\n",
"transcribe(path, file_type, model=None, language=None, verbose=True)\n",
" Implementation of OpenAI's whisper model. Downloads model, transcribes audio files in a folder and returns the text files with transcriptions\n",
"\n"
]
}
],
"source": [
"help(transcribe)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "e52477fb",
"metadata": {},
"outputs": [],
"source": [
"path='sample_audio/'#folder path\n",
"file_type='ogg' #check your file for file type, will only transcribe those files\n",
"model='medium' #'small', 'medium', 'large' (tradeoff between speed and accuracy)\n",
"language= None #tries to auto-detect, other options include 'English', 'Spanish', etc...\n",
"verbose = True # prints output while transcribing, False to deactivate"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "d66866af",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using medium model, you can change this by specifying model=\"medium\" for example\n",
"Only looking for file type ogg, you can change this by specifying file_type=\"mp3\"\n",
"Expecting None language, you can change this by specifying language=\"English\". None will try to auto-detect\n",
"Verbosity is True. If TRUE it will print out the text as it is transcribed, you can turn this off by setting verbose=False\n",
"\n",
"There are 2 ogg files in path: sample_audio/\n",
"\n",
"\n",
"Loading model...\n",
"Transcribing file number number 1: Armstrong_Small_Step\n",
"Model and file loaded...\n",
"Starting transcription...\n",
"\n",
"Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n",
"Detected language: English\n",
"[00:00.000 --> 00:24.000] That's one small step for man, one giant leap for mankind.\n",
"\n",
"Finished file number 1.\n",
"\n",
"\n",
"\n",
"Transcribing file number number 2: Axel_Pettersson_röstinspelning\n",
"Model and file loaded...\n",
"Starting transcription...\n",
"\n",
"Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n",
"Detected language: Swedish\n",
"[00:00.000 --> 00:16.000] Hej, jag heter Axel Pettersson, jag föddes i Örebro 1976. Jag har varit Wikipedia sen 2008 och jag har översatt röstintroduktionsprojektet till svenska.\n",
"\n",
"Finished file number 2.\n",
"\n",
"\n",
"\n"
]
},
{
"data": {
"text/plain": [
"'Finished transcription, files can be found in sample_audio/transcriptions'"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"transcribe(path, file_type, model, language, verbose)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0bc67265",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
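The notebook's trick works because openai-whisper caches downloaded weights on disk (by default under ~/.cache/whisper), so the second `load_model` call finds them locally. One way to confirm the weights are cached before disconnecting, assuming the default cache location:

```python
import os

# Default download location used by openai-whisper; pass
# whisper.load_model("medium", download_root=...) to cache elsewhere.
cache_dir = os.path.join(
    os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache")), "whisper")
print(os.listdir(cache_dir))  # expect e.g. 'medium.pt' after the online step
```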
transcribe.py (+1 -2)
@@ -17,11 +17,10 @@ def transcribe(path, file_type, model=None, language=None, verbose=True):
     print('Expecting {} language, you can change this by specifying language="English". None will try to auto-detect'.format(language))
     print('Verbosity is {}. If TRUE it will print out the text as it is transcribed, you can turn this off by setting verbose=False'.format(verbose))
     print('\nThere are {} {} files in path: {}\n\n'.format(len(glob_file), file_type, path))
     print('Loading model...')
     model = whisper.load_model(model)
     for idx,file in enumerate(glob_file):
         title = os.path.basename(file).split('.')[0]
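For context, the hunk cuts off before the loop body; a hedged reconstruction of the function's overall shape, based only on the names visible here and the notebook output above (everything past `title = ...` is a guess, not the repo's exact code):

```python
import glob
import os
import whisper

def transcribe(path, file_type, model=None, language=None, verbose=True):
    # Hypothetical condensed version of the repo's transcribe().
    glob_file = glob.glob(os.path.join(path, '*.' + file_type))
    model = whisper.load_model(model)
    out_dir = os.path.join(path, 'transcriptions')
    os.makedirs(out_dir, exist_ok=True)
    for idx, file in enumerate(glob_file):
        title = os.path.basename(file).split('.')[0]
        # whisper prints timestamps and text as it goes when verbose=True
        result = model.transcribe(file, language=language, verbose=verbose)
        with open(os.path.join(out_dir, title + '.txt'), 'w', encoding='utf-8') as f:
            f.write(result['text'])
    return 'Finished transcription, files can be found in {}'.format(out_dir)
```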