eustlb

  • Hugging Face
  • Paris, France
# Point Python at a local fork of openai/whisper so it is imported
# instead of any installed copy.
OPENAI_SRC_PATH = "/admin/home/eustache_lebihan/dev/benchmark-whisper/whisper-myfork"
import sys
sys.path.insert(0, OPENAI_SRC_PATH)

import wandb           # experiment logging
from tqdm import tqdm  # progress bar over the eval set
import evaluate        # Hugging Face metric library (WER)
import os
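The evaluate import above is what produces the reported WER; below is a minimal sketch of that metric call, with hypothetical transcripts standing in for the benchmark's real references and predictions.

import evaluate

wer_metric = evaluate.load("wer")
# Hypothetical ground-truth transcripts and model outputs.
references = ["the cat sat on the mat"]
predictions = ["the cat sat on a mat"]
# WER = (substitutions + insertions + deletions) / reference word count.
wer = wer_metric.compute(references=references, predictions=predictions)
print(f"WER: {wer:.3f}")  # 1 substitution over 6 words -> ~0.167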
eustlb / benchmark_openai_whisper.py
Last active November 21, 2024 22:40
Benchmark WER and RTFx for openai whisper.
# Same sys.path trick, here pointing at the upstream openai/whisper checkout.
OPENAI_SRC_PATH = "/admin/home/eustache_lebihan/dev/benchmark-whisper/whisper"
import sys
sys.path.insert(0, OPENAI_SRC_PATH)

import wandb
from tqdm import tqdm
import evaluate
import os
import torch
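RTFx here is the inverse real-time factor: seconds of audio transcribed per second of compute. A hedged sketch of how it could be measured with openai/whisper; the model size and the audio file name are assumptions, not the gist's actual setup.

import time
import whisper

model = whisper.load_model("large-v3")
audio = whisper.load_audio("sample.wav")  # hypothetical test file
audio_duration = len(audio) / 16000       # load_audio resamples to 16 kHz
start = time.perf_counter()
model.transcribe(audio)
elapsed = time.perf_counter() - start
print(f"RTFx: {audio_duration / elapsed:.1f}")  # >1 means faster than real time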
eustlb / benchmark_transformers_whisper.py
Last active November 22, 2024 10:36
Benchmark WER and RTFx for transformers whisper.
# Import transformers from a local source checkout (a fix branch) rather
# than the installed package.
TRANSFORMERS_SRC_PATH = "/admin/home/eustache_lebihan/dev/benchmark-whisper/transformers-fix/src"
import sys
sys.path.insert(0, TRANSFORMERS_SRC_PATH)

import wandb
from tqdm import tqdm
import evaluate
import os
import torch
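For the transformers side, a minimal hedged sketch of the equivalent load-and-generate path; the checkpoint, dtype, and the silent dummy input are assumptions, not the gist's actual configuration.

import numpy as np
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor

model_id = "openai/whisper-large-v3"  # assumed checkpoint
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

dummy_audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence as a stand-in
inputs = processor(dummy_audio, sampling_rate=16000, return_tensors="pt")
input_features = inputs.input_features.to("cuda", torch.float16)
generated_ids = model.generate(input_features=input_features)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])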
eustlb / benchmark_parler_streaming.py
Last active September 19, 2024 09:45
Benchmark ParlerTTS + streaming time to first audio.
import os
import torch
import time
from parler_tts import ParlerTTSForConditionalGeneration, ParlerTTSStreamer
from transformers import AutoTokenizer
from threading import Thread
# caching allows ~50% compilation time reduction
# see https://docs.google.com/document/d/1y5CRfMLdwEoF1nTk9q8qEu1mgMUuUtvhklPKJ2emLU8/edit#heading=h.o2asbxsrp1ma
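The Thread and ParlerTTSStreamer imports suggest the usual pattern of running generate in a background thread and timing the first yielded chunk. A hedged sketch of that pattern; the checkpoint, prompts, and play_steps value are assumptions.

import time
from threading import Thread
from parler_tts import ParlerTTSForConditionalGeneration, ParlerTTSStreamer
from transformers import AutoTokenizer

device = "cuda:0"
repo = "parler-tts/parler-tts-mini-v1"  # assumed checkpoint
model = ParlerTTSForConditionalGeneration.from_pretrained(repo).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo)

streamer = ParlerTTSStreamer(model, device=device, play_steps=40)
description = tokenizer("A calm female voice.", return_tensors="pt").to(device)
prompt = tokenizer("Hello there!", return_tensors="pt").to(device)

start = time.perf_counter()
thread = Thread(target=model.generate, kwargs=dict(
    input_ids=description.input_ids,
    prompt_input_ids=prompt.input_ids,
    streamer=streamer,
))
thread.start()
for audio_chunk in streamer:  # blocks until the first chunk is decoded
    print(f"time to first audio: {time.perf_counter() - start:.2f}s")
    break
thread.join()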
eustlb / test_compile_parler.py
Created September 5, 2024 16:38
Test compile on ParlerTTS + streaming
import os
import torch
import time
from parler_tts import ParlerTTSForConditionalGeneration, ParlerTTSStreamer
from transformers import AutoTokenizer
from threading import Thread
# caching allows ~50% compilation time reduction
# see https://docs.google.com/document/d/1y5CRfMLdwEoF1nTk9q8qEu1mgMUuUtvhklPKJ2emLU8/edit#heading=h.o2asbxsrp1ma
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
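The caching comment above refers to torch.compile's inductor artifact cache. A toy sketch of pointing TORCHINDUCTOR_CACHE_DIR at a persistent directory so a rerun reuses compiled artifacts; the tmp location mirrors the gist's, while the model is a placeholder.

import os
import torch

CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
# Persist inductor artifacts across runs so recompilation hits the cache.
os.environ["TORCHINDUCTOR_CACHE_DIR"] = os.path.join(CURRENT_DIR, "tmp")

toy = torch.nn.Linear(8, 8)
compiled = torch.compile(toy, mode="reduce-overhead")
out = compiled(torch.randn(2, 8))  # first call compiles; later runs reuse the cache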
eustlb / reproduce_bug_description.py
Created September 5, 2024 14:00
Reproduces a bug when changing the description while using a compiled model
import os
import torch
import soundfile as sf
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer
# caching allows ~50% compilation time reduction
# see https://docs.google.com/document/d/1y5CRfMLdwEoF1nTk9q8qEu1mgMUuUtvhklPKJ2emLU8/edit#heading=h.o2asbxsrp1ma
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
os.environ["TORCHINDUCTOR_CACHE_DIR"] = os.path.join(CURRENT_DIR, "tmp")
eustlb / reproducer_bug_jenny.py
Created July 29, 2024 09:45
Reproduce generation bug for a long prompt with the Jenny model (Parler-TTS).
import torch
import soundfile as sf
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer
model_name = "ylacombe/parler-tts-mini-jenny-30H"
torch_device = "cuda:0"
torch_dtype = torch.bfloat16
attn_implementation = "eager"
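A hedged sketch of how the preview's settings would be exercised; the long prompt text and the description below are placeholders for whatever actually triggers the bug.

import torch
import soundfile as sf
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer

model_name = "ylacombe/parler-tts-mini-jenny-30H"
torch_device = "cuda:0"
model = ParlerTTSForConditionalGeneration.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, attn_implementation="eager"
).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_name)

long_prompt = " ".join(["This sentence pads the prompt well past typical lengths."] * 20)
description = tokenizer("Jenny speaks at a moderate pace.", return_tensors="pt").to(torch_device)
prompt = tokenizer(long_prompt, return_tensors="pt").to(torch_device)
audio = model.generate(input_ids=description.input_ids, prompt_input_ids=prompt.input_ids)
sf.write("out.wav", audio.to(torch.float32).cpu().numpy().squeeze(), model.config.sampling_rate)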
eustlb / reproduce_bug_compile.py
Created July 23, 2024 16:30
Reproduce a bug that occurs with torch 2.3.1 and torch.compile.
import soundfile as sf
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer
import torch
torch.manual_seed(0)
CUDA_DEVICE = 0
torch_device = f"cuda:{CUDA_DEVICE}"
eustlb / reproduce_bug_generation.py
Last active July 23, 2024 16:56
Reproduce generation error on dev branch
import soundfile as sf
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer
import torch
# Surface graph breaks and recompilations triggered by torch.compile.
torch._logging.set_logs(graph_breaks=True, recompiles=True)
torch.manual_seed(0)
CUDA_DEVICE = 0
torch_device = f"cuda:{CUDA_DEVICE}"
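The set_logs call above surfaces graph breaks and recompilations from torch.compile; a toy sketch of the kind of event it flags, where a changing input rank forces a recompile (placeholder function, not the gist's model).

import torch

torch._logging.set_logs(graph_breaks=True, recompiles=True)

@torch.compile
def f(x):
    return x * 2 + 1

f(torch.randn(8))      # first call: compiles and installs shape guards
f(torch.randn(4, 4))   # new input rank: logged as a recompile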