Multi-Modal Models with Hugging Face
James Chapman
Curriculum Manager, DataCamp
E.g., a large model pretrained on general English speech ⇏ realistic Italian speech
VoxPopuli dataset: transcribed speech data for 18 languages from European Parliament recordings
from datasets import load_dataset
dataset = load_dataset("facebook/voxpopuli", "it", split="train",
                       trust_remote_code=True)
print(dataset.column_names)
['audio', 'raw_text', 'normalized_text', 'gender', 'speaker_id', ... ]
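Each audio entry bundles the raw waveform with its sampling rate; inspecting one example (VoxPopuli audio is sampled at 16 kHz):
example = dataset[0]
print(example["audio"]["sampling_rate"])  # 16000
print(example["normalized_text"])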
from speechbrain.pretrained import EncoderClassifier

speaker_model = EncoderClassifier.from_hparams(
    source="speechbrain/spkrec-xvect-voxceleb",
    savedir="pretrained_models/spkrec-xvect-voxceleb",
)
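As a quick check, the speaker model maps a whole waveform to one fixed-size x-vector; per the model card, spkrec-xvect-voxceleb produces 512-dimensional embeddings:
import torch

with torch.no_grad():
    embedding = speaker_model.encode_batch(torch.tensor(dataset[0]["audio"]["array"]))
print(embedding.shape)  # torch.Size([1, 1, 512])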
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
def prepare_dataset(example):
    audio = example["audio"]
    # Tokenize the text and convert the waveform into a log-mel spectrogram target
    example = processor(
        text=example["normalized_text"],
        audio_target=audio["array"],
        sampling_rate=audio["sampling_rate"],
        return_attention_mask=False,
    )
    # Strip the batch dimension added by the processor
    example["labels"] = example["labels"][0]
    # Encode the waveform into a fixed-size, L2-normalized x-vector speaker embedding
    with torch.no_grad():
        speaker_embeddings = speaker_model.encode_batch(torch.tensor(audio["array"]))
        speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2)
    example["speaker_embeddings"] = speaker_embeddings.squeeze().cpu().numpy()
    return example
dataset = dataset.map(prepare_dataset)
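The trainer below expects separate train and test splits, but the dataset was loaded with split="train" only. One way to create them (the 10% test fraction is an assumption):
dataset = dataset.train_test_split(test_size=0.1)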
from transformers import Seq2SeqTrainingArguments
training_args = Seq2SeqTrainingArguments(
    output_dir="speecht5_finetuned_voxpopuli_it",  # output path is illustrative
    per_device_train_batch_size=4,
    gradient_accumulation_steps=8,
    learning_rate=1e-5,
    warmup_steps=500,
    label_names=["labels"],
)
# Note: the data collator is not a training argument; it is passed to the trainer below
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan

model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
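The data_collator passed to the trainer below is not defined on these slides. A minimal sketch, modeled on the padding collator in Hugging Face's SpeechT5 fine-tuning tutorial (the class name is illustrative):
from dataclasses import dataclass
from typing import Any

@dataclass
class TTSDataCollatorWithPadding:
    processor: Any

    def __call__(self, features):
        # Pad tokenized text and spectrogram targets to a shared length per batch
        input_ids = [{"input_ids": f["input_ids"]} for f in features]
        label_features = [{"input_values": f["labels"]} for f in features]
        speaker_features = [f["speaker_embeddings"] for f in features]
        batch = self.processor.pad(input_ids=input_ids, labels=label_features,
                                   return_tensors="pt")
        # Mask padded target frames with -100 so the loss ignores them
        batch["labels"] = batch["labels"].masked_fill(
            batch.decoder_attention_mask.unsqueeze(-1).ne(1), -100)
        del batch["decoder_attention_mask"]
        # SpeechT5 predicts frames in multiples of its reduction factor,
        # so trim each target down to the nearest multiple
        reduction = model.config.reduction_factor
        if reduction > 1:
            lengths = [len(f["input_values"]) // reduction * reduction
                       for f in label_features]
            batch["labels"] = batch["labels"][:, : max(lengths)]
        batch["speaker_embeddings"] = torch.tensor(speaker_features)
        return batch

data_collator = TTSDataCollatorWithPadding(processor=processor)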
Trainer:
from transformers import Seq2SeqTrainer

trainer = Seq2SeqTrainer(
    args=training_args,
    model=model,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    data_collator=data_collator,
    tokenizer=processor,
)
Run the training:
trainer.train()
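After training, the fine-tuned weights can be saved for later inference (the output path is an assumption):
trainer.save_model("speecht5_finetuned_voxpopuli_it")
processor.save_pretrained("speecht5_finetuned_voxpopuli_it")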
text = "se sono italiano posso cantare l'opera lirica"  # "if I'm Italian, I can sing opera"
speaker_embedding = torch.tensor(dataset["train"][5]["speaker_embeddings"]).unsqueeze(0)
inputs = processor(text=text, return_tensors="pt")
speech = model.generate_speech(inputs["input_ids"], speaker_embedding, vocoder=vocoder)
make_spectrogram(speech)  # helper (defined elsewhere in the course) to visualize the generated audio
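Because generate_speech() returns a waveform when a vocoder is supplied, the audio can also be written to disk; a sketch using the soundfile library (SpeechT5 and its HiFi-GAN vocoder work at 16 kHz):
import soundfile as sf

sf.write("speech.wav", speech.numpy(), samplerate=16000)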