# preprocess.py

from datetime import datetime
from functools import partial
from multiprocessing import Pool
from pathlib import Path
from typing import Optional, Tuple

import numpy as np
from tqdm import tqdm

from encoder import audio
from encoder.config import librispeech_datasets, anglophone_nationalites
from encoder.params_data import *

_AUDIO_EXTENSIONS = ("wav", "flac", "m4a", "mp3")
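
# Note: of the names pulled in by the wildcard import from encoder.params_data,
# this module uses partials_n_frames and sampling_rate below.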

class DatasetLog:
    """
    Registers metadata about the dataset in a text file.
    """
    def __init__(self, root, name):
        self.text_file = open(Path(root, "Log_%s.txt" % name.replace("/", "_")), "w")
        self.sample_data = dict()

        start_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M"))
        self.write_line("Creating dataset %s on %s" % (name, start_time))
        self.write_line("-----")
        self._log_params()

    def _log_params(self):
        from encoder import params_data
        self.write_line("Parameter values:")
        for param_name in (p for p in dir(params_data) if not p.startswith("__")):
            value = getattr(params_data, param_name)
            self.write_line("\t%s: %s" % (param_name, value))
        self.write_line("-----")

    def write_line(self, line):
        self.text_file.write("%s\n" % line)

    def add_sample(self, **kwargs):
        for param_name, value in kwargs.items():
            if param_name not in self.sample_data:
                self.sample_data[param_name] = []
            self.sample_data[param_name].append(value)

    def finalize(self):
        self.write_line("Statistics:")
        for param_name, values in self.sample_data.items():
            self.write_line("\t%s:" % param_name)
            self.write_line("\t\tmin %.3f, max %.3f" % (np.min(values), np.max(values)))
            self.write_line("\t\tmean %.3f, median %.3f" % (np.mean(values), np.median(values)))
        self.write_line("-----")

        end_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M"))
        self.write_line("Finished on %s" % end_time)
        self.text_file.close()
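
# Example (hypothetical values, illustrative only): DatasetLog can be exercised
# on its own, assuming the output directory already exists:
#
#   logger = DatasetLog("out", "LibriSpeech/train-other-500")
#   logger.add_sample(duration=3.2)
#   logger.add_sample(duration=5.7)
#   logger.finalize()  # writes min/max/mean/median of each tracked statistic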

def _init_preprocess_dataset(dataset_name, datasets_root, out_dir) -> Tuple[Optional[Path], Optional[DatasetLog]]:
    dataset_root = datasets_root.joinpath(dataset_name)
    if not dataset_root.exists():
        print("Couldn't find %s, skipping this dataset." % dataset_root)
        return None, None
    return dataset_root, DatasetLog(out_dir, dataset_name)

def _preprocess_speaker(speaker_dir: Path, datasets_root: Path, out_dir: Path, skip_existing: bool):
    # Give a name to the speaker that includes its dataset
    speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts)

    # Create an output directory with that name, as well as a txt file containing a
    # reference to each source file.
    speaker_out_dir = out_dir.joinpath(speaker_name)
    speaker_out_dir.mkdir(exist_ok=True)
    sources_fpath = speaker_out_dir.joinpath("_sources.txt")

    # There's a possibility that the preprocessing was interrupted earlier, check if
    # there already is a sources file.
    if sources_fpath.exists():
        try:
            with sources_fpath.open("r") as sources_file:
                existing_fnames = {line.split(",")[0] for line in sources_file}
        except Exception:
            existing_fnames = set()
    else:
        existing_fnames = set()

    # Gather all audio files for that speaker recursively
    sources_file = sources_fpath.open("a" if skip_existing else "w")
    audio_durs = []
    for extension in _AUDIO_EXTENSIONS:
        for in_fpath in speaker_dir.glob("**/*.%s" % extension):
            # Check if the target output file already exists
            out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts)
            out_fname = out_fname.replace(".%s" % extension, ".npy")
            if skip_existing and out_fname in existing_fnames:
                continue

            # Load and preprocess the waveform
            wav = audio.preprocess_wav(in_fpath)
            if len(wav) == 0:
                continue

            # Create the mel spectrogram, discard those that are too short
            frames = audio.wav_to_mel_spectrogram(wav)
            if len(frames) < partials_n_frames:
                continue

            # Save the spectrogram and record which source file it came from
            out_fpath = speaker_out_dir.joinpath(out_fname)
            np.save(out_fpath, frames)
            sources_file.write("%s,%s\n" % (out_fname, in_fpath))
            audio_durs.append(len(wav) / sampling_rate)

    sources_file.close()
    return audio_durs
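
# Per-speaker output layout produced above (illustrative; names are hypothetical):
#
#   <out_dir>/<speaker_name>/
#       _sources.txt          # one "<out_fname>,<source path>" line per utterance
#       chapter_utt001.npy    # mel spectrogram frames, one array per utterance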

def _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, skip_existing, logger):
    print("%s: Preprocessing data for %d speakers." % (dataset_name, len(speaker_dirs)))

    # Process the utterances for each speaker
    work_fn = partial(_preprocess_speaker, datasets_root=datasets_root, out_dir=out_dir,
                      skip_existing=skip_existing)
    with Pool(4) as pool:
        tasks = pool.imap(work_fn, speaker_dirs)
        for sample_durs in tqdm(tasks, dataset_name, len(speaker_dirs), unit="speakers"):
            for sample_dur in sample_durs:
                logger.add_sample(duration=sample_dur)

    logger.finalize()
    print("Done preprocessing %s.\n" % dataset_name)

def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False):
    for dataset_name in librispeech_datasets["train"]["other"]:
        # Initialize the preprocessing
        dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
        if not dataset_root:
            # Skip just this dataset (as the message printed above says), rather
            # than abandoning the remaining ones.
            continue

        # Preprocess all speakers
        speaker_dirs = list(dataset_root.glob("*"))
        _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, skip_existing, logger)

def preprocess_voxceleb1(datasets_root: Path, out_dir: Path, skip_existing=False):
    # Initialize the preprocessing
    dataset_name = "VoxCeleb1"
    dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
    if not dataset_root:
        return

    # Get the contents of the meta file
    with dataset_root.joinpath("vox1_meta.csv").open("r") as metafile:
        metadata = [line.split("\t") for line in metafile][1:]

    # Select the ID and the nationality, filter out non-anglophone speakers
    nationalities = {line[0]: line[3] for line in metadata}
    keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items() if
                        nationality.lower() in anglophone_nationalites]
    print("VoxCeleb1: using samples from %d (presumed anglophone) speakers out of %d." %
          (len(keep_speaker_ids), len(nationalities)))

    # Get the speaker directories for anglophone speakers only
    speaker_dirs = dataset_root.joinpath("wav").glob("*")
    speaker_dirs = [speaker_dir for speaker_dir in speaker_dirs if
                    speaker_dir.name in keep_speaker_ids]
    print("VoxCeleb1: found %d anglophone speakers on the disk, %d missing (this is normal)." %
          (len(speaker_dirs), len(keep_speaker_ids) - len(speaker_dirs)))

    # Preprocess all speakers
    _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, skip_existing, logger)

def preprocess_voxceleb2(datasets_root: Path, out_dir: Path, skip_existing=False):
    # Initialize the preprocessing
    dataset_name = "VoxCeleb2"
    dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
    if not dataset_root:
        return

    # Get the speaker directories
    speaker_dirs = list(dataset_root.joinpath("dev", "aac").glob("*"))

    # Preprocess all speakers
    _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, skip_existing, logger)
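
# Minimal driver sketch (hypothetical paths; assumes this module lives at
# encoder/preprocess.py and that argument handling is done elsewhere):
#
#   from pathlib import Path
#   from encoder.preprocess import preprocess_librispeech, preprocess_voxceleb1
#
#   datasets_root = Path("/datasets")           # contains LibriSpeech/, VoxCeleb1/, ...
#   out_dir = Path("/datasets/SV2TTS/encoder")  # hypothetical output location
#   out_dir.mkdir(parents=True, exist_ok=True)
#   preprocess_librispeech(datasets_root, out_dir, skip_existing=True)
#   preprocess_voxceleb1(datasets_root, out_dir, skip_existing=True)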