{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Local Transcribe with Whisper\n",
"## Example"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Help on function transcribe in module src._LocalTranscribe:\n",
"\n",
"transcribe(path, glob_file, model=None, language=None, verbose=False)\n",
" Transcribes audio files in a specified folder using OpenAI's Whisper model.\n",
" \n",
" Args:\n",
" path (str): Path to the folder containing the audio files.\n",
" glob_file (list): List of audio file paths to transcribe.\n",
" model (str, optional): Name of the Whisper model to use for transcription.\n",
" Defaults to None, which uses the default model.\n",
" language (str, optional): Language code for transcription. Defaults to None,\n",
" which enables automatic language detection.\n",
" verbose (bool, optional): If True, enables verbose mode with detailed information\n",
" during the transcription process. Defaults to False.\n",
" \n",
" Returns:\n",
" str: A message indicating the result of the transcription process.\n",
" \n",
" Raises:\n",
" RuntimeError: If an invalid file is encountered, it will be skipped.\n",
" \n",
" Notes:\n",
" - The function downloads the specified model if not available locally.\n",
" - The transcribed text files will be saved in a \"transcriptions\" folder\n",
" within the specified path.\n",
"\n"
]
}
],
"source": [
"# Import the modules and get the docstring\n",
"from src._LocalTranscribe import transcribe, get_path\n",
"help(transcribe)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Set the variables\n",
"path = 'sample_audio/'  # Folder path\n",
"model = 'small'  # Model size\n",
"language = None  # Preset language, None for automatic detection\n",
"verbose = True  # Output transcription in realtime\n",
"\n",
"# Get the list of files to transcribe (an additional step used by the app version).\n",
"\n",
"glob_file = get_path(path)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Trying to transcribe file named: Armstrong_Small_Step🕐\n",
"Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n",
"Detected language: English\n",
"[00:00.000 --> 00:07.000] I'm going to step off the limb now.\n",
"[00:07.000 --> 00:18.000] That's one small step for man.\n",
"[00:18.000 --> 00:24.000] One giant leap for mankind.\n",
"\n",
"Trying to transcribe file named: Axel_Pettersson_röstinspelning🕐\n",
"Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n",
"Detected language: Swedish\n",
"[00:00.000 --> 00:06.140] Hej, jag heter Axel Pettersson. Jag följer bror 1976.\n",
"[00:06.400 --> 00:15.100] Jag har varit vikerpedjan sen 2008 och jag har översatt röstintroduktionsprojektet till svenska.\n",
"\n",
"Trying to transcribe file named: readme🕐\n",
"Not a valid file, skipping.\n",
"\n",
"Trying to transcribe file named: transcriptions🕐\n",
"Not a valid file, skipping.\n"
]
},
{
"data": {
"text/plain": [
"'Finished transcription, 2 files can be found in sample_audio//transcriptions'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Run the script\n",
"transcribe(path, glob_file, model, language, verbose)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}