Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
TensorSpeech
GitHub Repository: TensorSpeech/TensorFlowTTS
Path: blob/master/tensorflow_tts/processor/libritts.py
1558 views
1
# -*- coding: utf-8 -*-
2
# Copyright 2020 TensorFlowTTS Team.
3
#
4
# Licensed under the Apache License, Version 2.0 (the "License");
5
# you may not use this file except in compliance with the License.
6
# You may obtain a copy of the License at
7
#
8
# http://www.apache.org/licenses/LICENSE-2.0
9
#
10
# Unless required by applicable law or agreed to in writing, software
11
# distributed under the License is distributed on an "AS IS" BASIS,
12
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
# See the License for the specific language governing permissions and
14
# limitations under the License.
15
"""Perform preprocessing and raw feature extraction for LibriTTS dataset."""
16
17
import os
18
import re
19
20
import numpy as np
21
import soundfile as sf
22
from dataclasses import dataclass
23
24
from g2p_en import g2p as grapheme_to_phonem
25
26
from tensorflow_tts.processor.base_processor import BaseProcessor
27
from tensorflow_tts.utils.utils import PROCESSOR_FILE_NAME
28
29
# Grapheme-to-phoneme converter shared by the processor below (used both to
# clean already-phonemized training text and to phonemize raw inference text).
g2p = grapheme_to_phonem.G2p()

# Copy the phoneme inventory before extending it: ``g2p.phonemes`` is a list
# owned by the g2p_en library, and appending to it directly would mutate that
# shared state in place (visible to every other consumer of the library, and
# duplicated on re-execution).
valid_symbols = list(g2p.phonemes)
valid_symbols.append("SIL")  # silence marker
valid_symbols.append("END")  # end-of-utterance marker

_punctuation = "!'(),.:;? "
# Phoneme symbols are namespaced with a leading "@" so they cannot collide
# with punctuation characters in the symbol table.
_arpabet = ["@" + s for s in valid_symbols]

# Full symbol table: "@"-prefixed ARPAbet phonemes followed by punctuation.
LIBRITTS_SYMBOLS = _arpabet + list(_punctuation)
39
40
41
@dataclass
class LibriTTSProcessor(BaseProcessor):
    """LibriTTS processor.

    Reads ``train.txt`` metadata lines from ``data_dir``, loads the referenced
    wav files, and converts text into integer symbol ids over
    ``LIBRITTS_SYMBOLS`` (phonemes prefixed with "@" plus punctuation).
    """

    # "train": text is already space-separated phonemes; anything else: raw
    # text that must be phonemized with g2p first.
    mode: str = "train"
    train_f_name: str = "train.txt"
    # Column positions of file / text / speaker_name after splitting a
    # metadata line on ``self.delimiter`` (delimiter is provided by
    # BaseProcessor). Deliberately unannotated so the dataclass machinery
    # treats it as a plain class attribute, not a field.
    positions = {
        "file": 0,
        "text": 1,
        "speaker_name": 2,
    }
    f_extension: str = ".wav"
    # NOTE(review): default of None contradicts the ``str`` annotation; kept
    # for backward compatibility with existing callers/serialized configs.
    cleaner_names: str = None

    def create_items(self):
        """Populate ``self.items`` with ``[text, wav_path, speaker_name]``
        entries parsed from the train metadata file under ``self.data_dir``."""
        with open(
            os.path.join(self.data_dir, self.train_f_name), mode="r", encoding="utf-8"
        ) as f:
            for line in f:
                parts = line.strip().split(self.delimiter)
                wav_path = os.path.join(self.data_dir, parts[self.positions["file"]])
                # Append the audio extension only when it is not already there.
                if not wav_path.endswith(self.f_extension):
                    wav_path += self.f_extension
                text = parts[self.positions["text"]]
                speaker_name = parts[self.positions["speaker_name"]]
                self.items.append([text, wav_path, speaker_name])

    def get_one_sample(self, item):
        """Load one item into a training sample dict.

        Args:
            item: ``[text, wav_path, speaker_name]`` as built by create_items.

        Returns:
            Dict with raw_text, text_ids (np.int32 array), audio (float32
            waveform), utt_id, speaker_name and sample rate.
        """
        text, wav_path, speaker_name = item
        audio, rate = sf.read(wav_path, dtype="float32")

        text_ids = np.asarray(self.text_to_sequence(text), np.int32)

        # Derive the utterance id portably: os.path handles both "/" and the
        # platform separator (the old wav_path.split("/") broke on Windows),
        # and splitext strips only the final extension instead of truncating
        # at the first dot in the filename.
        utt_id = os.path.splitext(os.path.basename(wav_path))[0]

        sample = {
            "raw_text": text,
            "text_ids": text_ids,
            "audio": audio,
            "utt_id": utt_id,
            "speaker_name": speaker_name,
            "rate": rate,
        }

        return sample

    def setup_eos_token(self):
        """No explicit EOS token; clean_g2p appends "@END" itself."""
        return None  # because we do not use this

    def save_pretrained(self, saved_path):
        """Save the processor mapper file into ``saved_path``."""
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(os.path.join(saved_path, PROCESSOR_FILE_NAME), {})

    def text_to_sequence(self, text):
        """Convert text to symbol ids.

        In "train" mode the text is assumed to be space-separated phonemes
        already; otherwise it is raw text that is phonemized first.
        """
        if self.mode == "train":
            return self.symbols_to_ids(self.clean_g2p(text.split(" ")))
        return self.inference_text_to_seq(text)

    def inference_text_to_seq(self, text: str):
        """Phonemize raw text and convert the phonemes to symbol ids."""
        return self.symbols_to_ids(self.text_to_ph(text))

    def symbols_to_ids(self, symbols_list: list):
        """Map each symbol to its integer id via ``self.symbol_to_id``."""
        return [self.symbol_to_id[s] for s in symbols_list]

    def text_to_ph(self, text: str):
        """Run g2p on raw text and clean the resulting phoneme list."""
        return self.clean_g2p(g2p(text))

    def clean_g2p(self, g2p_text: list):
        """Normalize a g2p phoneme list into "@"-prefixed symbols.

        Drops bare spaces, prefixes every phoneme with "@", and replaces a
        trailing space/"SIL" with "@END" (otherwise the last phoneme is kept
        and no end marker is added).
        """
        data = []
        for i, txt in enumerate(g2p_text):
            if i == len(g2p_text) - 1:
                if txt != " " and txt != "SIL":
                    data.append("@" + txt)
                else:
                    # TODO try learning without end token and compare results
                    data.append("@END")
                break
            if txt != " ":
                data.append("@" + txt)
        return data
126