From 918c2e489e6957d43af8eaf811bad59089fe3ed1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kristofer=20S=C3=B6derstr=C3=B6m?=
Date: Thu, 23 Mar 2023 15:06:20 +0100
Subject: [PATCH] added example

---
 example_no_internet.ipynb | 231 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 231 insertions(+)
 create mode 100644 example_no_internet.ipynb

diff --git a/example_no_internet.ipynb b/example_no_internet.ipynb
new file mode 100644
index 0000000..b85ed60
--- /dev/null
+++ b/example_no_internet.ipynb
@@ -0,0 +1,231 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "eba9e610",
+   "metadata": {},
+   "source": [
+    "A simple way to transcribe without an internet connection is to first download and cache the model version you want to use while you are still online. See [here](https://github.com/openai/whisper/blob/main/README.md#available-models-and-languages) for more info on the available models."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "85cd2d12",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "Whisper(\n",
+       "  (encoder): AudioEncoder(\n",
+       "    (conv1): Conv1d(80, 1024, kernel_size=(3,), stride=(1,), padding=(1,))\n",
+       "    (conv2): Conv1d(1024, 1024, kernel_size=(3,), stride=(2,), padding=(1,))\n",
+       "    (blocks): ModuleList(\n",
+       "      (0-23): 24 x ResidualAttentionBlock(\n",
+       "        (attn): MultiHeadAttention(\n",
+       "          (query): Linear(in_features=1024, out_features=1024, bias=True)\n",
+       "          (key): Linear(in_features=1024, out_features=1024, bias=False)\n",
+       "          (value): Linear(in_features=1024, out_features=1024, bias=True)\n",
+       "          (out): Linear(in_features=1024, out_features=1024, bias=True)\n",
+       "        )\n",
+       "        (attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
+       "        (mlp): Sequential(\n",
+       "          (0): Linear(in_features=1024, out_features=4096, bias=True)\n",
+       "          (1): GELU(approximate='none')\n",
+       "          (2): Linear(in_features=4096, out_features=1024, bias=True)\n",
+       "        )\n",
+       "        (mlp_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
+       "      )\n",
+       "    )\n",
+       "    (ln_post): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
+       "  )\n",
+       "  (decoder): TextDecoder(\n",
+       "    (token_embedding): Embedding(51865, 1024)\n",
+       "    (blocks): ModuleList(\n",
+       "      (0-23): 24 x ResidualAttentionBlock(\n",
+       "        (attn): MultiHeadAttention(\n",
+       "          (query): Linear(in_features=1024, out_features=1024, bias=True)\n",
+       "          (key): Linear(in_features=1024, out_features=1024, bias=False)\n",
+       "          (value): Linear(in_features=1024, out_features=1024, bias=True)\n",
+       "          (out): Linear(in_features=1024, out_features=1024, bias=True)\n",
+       "        )\n",
+       "        (attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
+       "        (cross_attn): MultiHeadAttention(\n",
+       "          (query): Linear(in_features=1024, out_features=1024, bias=True)\n",
+       "          (key): Linear(in_features=1024, out_features=1024, bias=False)\n",
+       "          (value): Linear(in_features=1024, out_features=1024, bias=True)\n",
+       "          (out): Linear(in_features=1024, out_features=1024, bias=True)\n",
+       "        )\n",
+       "        (cross_attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
+       "        (mlp): Sequential(\n",
+       "          (0): Linear(in_features=1024, out_features=4096, bias=True)\n",
+       "          (1): GELU(approximate='none')\n",
+       "          (2): Linear(in_features=4096, out_features=1024, bias=True)\n",
+       "        )\n",
+       "        (mlp_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
+       "      )\n",
+       "    )\n",
+       "    (ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
+       "  )\n",
+       ")"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import whisper\n",
+    "# choose the model size; bigger is more accurate but slower\n",
+    "whisper.load_model(\"medium\")  # tiny, base, small, medium, large"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "0d2acd54",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# after the model has downloaded, you can disconnect from the internet and run the rest of the notebook"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "a2cd4050",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transcribe import transcribe"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "24e1d24e",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Help on function transcribe in module transcribe:\n",
+      "\n",
+      "transcribe(path, file_type, model=None, language=None, verbose=True)\n",
+      "    Implementation of OpenAI's whisper model. Downloads model, transcribes audio files in a folder and returns the text files with transcriptions\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "help(transcribe)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "e52477fb",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "path = 'sample_audio/'  # path to the folder containing the audio files\n",
+    "file_type = 'ogg'  # check your files' type; only files with this extension are transcribed, e.g. 'ogg', 'WAV'\n",
+    "model = 'medium'  # 'small', 'medium', 'large' (tradeoff between speed and accuracy)\n",
+    "language = None  # None tries to auto-detect; other options include 'English', 'Spanish', etc...\n",
+    "verbose = True  # print the text while transcribing; set to False to deactivate"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "id": "d66866af",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Using medium model, you can change this by specifying model=\"medium\" for example\n",
+      "Only looking for file type ogg, you can change this by specifying file_type=\"mp3\"\n",
+      "Expecting None language, you can change this by specifying language=\"English\". None will try to auto-detect\n",
+      "Verbosity is True. If TRUE it will print out the text as it is transcribed, you can turn this off by setting verbose=False\n",
+      "\n",
+      "There are 2 ogg files in path: sample_audio/\n",
+      "\n",
+      "\n",
+      "Loading model...\n",
+      "Transcribing file number number 1: Armstrong_Small_Step\n",
+      "Model and file loaded...\n",
+      "Starting transcription...\n",
+      "\n",
+      "Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n",
+      "Detected language: English\n",
+      "[00:00.000 --> 00:24.000] That's one small step for man, one giant leap for mankind.\n",
+      "\n",
+      "Finished file number 1.\n",
+      "\n",
+      "\n",
+      "\n",
+      "Transcribing file number number 2: Axel_Pettersson_röstinspelning\n",
+      "Model and file loaded...\n",
+      "Starting transcription...\n",
+      "\n",
+      "Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n",
+      "Detected language: Swedish\n",
+      "[00:00.000 --> 00:16.000] Hej, jag heter Axel Pettersson, jag föddes i Örebro 1976. Jag har varit Wikipedia sen 2008 och jag har översatt röstintroduktionsprojektet till svenska.\n",
+      "\n",
+      "Finished file number 2.\n",
+      "\n",
+      "\n",
+      "\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "'Finished transcription, files can be found in sample_audio/transcriptions'"
+      ]
+     },
+     "execution_count": 12,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "transcribe(path, file_type, model, language, verbose)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0bc67265",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
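
For quick reference, the offline workflow the notebook demonstrates condenses to the short script below. This is a minimal sketch, not part of the patch: it assumes the transcribe() helper from this repo's transcribe.py with the signature shown by help() above, and it relies on whisper.load_model() caching the downloaded weights locally (by default under ~/.cache/whisper) during the online step.

    import whisper
    from transcribe import transcribe

    # Online step: download and cache the model weights so later runs
    # need no network connection.
    whisper.load_model("medium")

    # Offline step: the cached weights are reused, so you can disconnect
    # before running this. Arguments mirror the parameter cell above.
    transcribe(
        path="sample_audio/",  # folder containing the audio files
        file_type="ogg",       # only files with this extension are picked up
        model="medium",        # should match the size cached above
        language=None,         # None lets the model auto-detect the language
        verbose=True,          # print segments as they are transcribed
    )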