# audio_griffinlim.py
#!/usr/bin/env python
  2. # -*- coding: utf-8 -*-
  3. # author: kuangdd
  4. # date: 2019/11/30
  5. """
  6. ### audio_griffinlim
  7. griffinlim声码器,线性频谱转语音,梅尔频谱转语音,TensorFlow版本转语音,梅尔频谱和线性频谱相互转换。
  8. """
  9. from pathlib import Path
  10. import logging
  11. logging.basicConfig(level=logging.INFO)
  12. logger = logging.getLogger(Path(__name__).stem)
  13. import librosa
  14. import librosa.filters
  15. import numpy as np
  16. from scipy import signal
  17. from scipy.io import wavfile
  18. from .audio_spectrogram import default_hparams
  19. from .audio_io import Dict2Obj
  20. # try:
  21. # import tensorflow as tf
  22. # except ImportError as e:
  23. # logger.info("ImportError: {}".format(e))
  24. tmp = dict([('use_lws', False), ('frame_shift_ms', None), ('silence_threshold', 2), ('griffin_lim_iters', 30)])
  25. default_hparams.update(tmp)
  26. def hparams_debug_string(hparams=None):
  27. hparams = hparams or default_hparams
  28. values = hparams.values()
  29. hp = [" %s: %s" % (name, values[name]) for name in sorted(values) if name != "sentences"]
  30. return "Hyperparameters:\n" + "\n".join(hp)
  31. def inv_linear_spectrogram(linear_spectrogram, hparams=None):
  32. """Converts linear spectrogram to waveform using librosa"""
  33. hparams = hparams or default_hparams
  34. if hparams.signal_normalization:
  35. D = _denormalize(linear_spectrogram, hparams)
  36. else:
  37. D = linear_spectrogram
  38. S = _db_to_amp(D + hparams.ref_level_db) # Convert back to linear
  39. if hparams.use_lws:
  40. processor = _lws_processor(hparams)
  41. D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
  42. y = processor.istft(D).astype(np.float32)
  43. return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
  44. else:
  45. return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
  46. def inv_mel_spectrogram(mel_spectrogram, hparams=None):
  47. """Converts mel spectrogram to waveform using librosa"""
  48. hparams = hparams or default_hparams
  49. if hparams.signal_normalization:
  50. D = _denormalize(mel_spectrogram, hparams)
  51. else:
  52. D = mel_spectrogram
  53. S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db), hparams) # Convert back to linear
  54. if hparams.use_lws:
  55. processor = _lws_processor(hparams)
  56. D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
  57. y = processor.istft(D).astype(np.float32)
  58. return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
  59. else:
  60. return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
  61. def inv_linear_spectrogram_tensorflow(linear_spectrogram, hparams=None):
  62. '''Builds computational graph to convert spectrogram to waveform using TensorFlow.
  63. Unlike inv_spectrogram, this does NOT invert the preemphasis. The caller should call
  64. inv_preemphasis on the output after running the graph.
  65. linear_spectrogram.shape[1] = n_fft
  66. '''
  67. import tensorflow as tf
  68. hparams = hparams or default_hparams
  69. S = _db_to_amp_tensorflow(_denormalize_tensorflow(linear_spectrogram, hparams) + hparams.ref_level_db)
  70. return _griffin_lim_tensorflow(tf.pow(S, hparams.power), hparams)
  71. def inv_linear_spectrogram_tf(linear_spectrogram, hparams=None):
  72. """
  73. 返回wav语音信号。
  74. linear_spectrogram.shape[1] = num_freq = (n_fft / 2) + 1
  75. """
  76. import tensorflow as tf
  77. hparams = hparams or default_hparams
  78. _shape = linear_spectrogram.shape
  79. tmp = np.concatenate(
  80. (linear_spectrogram, np.zeros((_shape[0], (hparams.n_fft // 2) + 1 - _shape[1]), dtype=np.float32)), axis=1)
  81. wav_tf = inv_linear_spectrogram_tensorflow(tmp, hparams)
  82. with tf.Session() as sess:
  83. return sess.run(wav_tf)
  84. # 以下模块后续版本可能删除
  85. def load_wav(path, sr):
  86. return librosa.core.load(path, sr=sr)[0]
  87. def save_wav(wav, path, sr):
  88. out = wav * 32767 / max(0.01, np.max(np.abs(wav)))
  89. # proposed by @dsmiller
  90. wavfile.write(path, sr, out.astype(np.int16))
  91. def save_wavenet_wav(wav, path, sr):
  92. librosa.output.write_wav(path, wav, sr=sr)
  93. def preemphasis(wav, k, preemphasize=True):
  94. if preemphasize:
  95. return signal.lfilter([1, -k], [1], wav)
  96. return wav
  97. def inv_preemphasis(wav, k, inv_preemphasize=True):
  98. if inv_preemphasize:
  99. return signal.lfilter([1], [1, -k], wav)
  100. return wav
  101. # From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py
  102. def start_and_end_indices(quantized, silence_threshold=2):
  103. for start in range(quantized.size):
  104. if abs(quantized[start] - 127) > silence_threshold:
  105. break
  106. for end in range(quantized.size - 1, 1, -1):
  107. if abs(quantized[end] - 127) > silence_threshold:
  108. break
  109. assert abs(quantized[start] - 127) > silence_threshold
  110. assert abs(quantized[end] - 127) > silence_threshold
  111. return start, end
  112. def get_hop_size(hparams=None):
  113. hparams = hparams or default_hparams
  114. hop_size = hparams.hop_size
  115. if hop_size is None:
  116. assert hparams.frame_shift_ms is not None
  117. hop_size = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)
  118. return hop_size
  119. def linear_spectrogram(wav, hparams=None):
  120. hparams = hparams or default_hparams
  121. D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)
  122. S = _amp_to_db(np.abs(D), hparams) - hparams.ref_level_db
  123. if hparams.signal_normalization:
  124. return _normalize(S, hparams)
  125. return S
  126. def mel_spectrogram(wav, hparams=None):
  127. hparams = hparams or default_hparams
  128. D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)
  129. S = _amp_to_db(_linear_to_mel(np.abs(D), hparams), hparams) - hparams.ref_level_db
  130. if hparams.signal_normalization:
  131. return _normalize(S, hparams)
  132. return S
  133. def mel_spectrogram_feature(wav, hparams=None):
  134. """
  135. Derives a mel spectrogram ready to be used by the encoder from a preprocessed audio waveform.
  136. Note: this not a log-mel spectrogram.
  137. """
  138. hparams = hparams or default_hparams
  139. frames = librosa.feature.melspectrogram(
  140. wav,
  141. hparams.sample_rate,
  142. n_fft=hparams.n_fft,
  143. hop_length=hparams.hop_size,
  144. n_mels=hparams.num_mels
  145. )
  146. return _amp_to_db(frames.astype(np.float32))
  147. def linear2mel_spectrogram(linear_spectrogram, hparams=None):
  148. """Converts linear spectrogram to mel spectrogram"""
  149. hparams = hparams or default_hparams
  150. if hparams.signal_normalization:
  151. D = _denormalize(linear_spectrogram, hparams)
  152. else:
  153. D = linear_spectrogram
  154. D = _db_to_amp(D + hparams.ref_level_db) # Convert back to linear
  155. S = _amp_to_db(_linear_to_mel(np.abs(D), hparams), hparams) - hparams.ref_level_db
  156. if hparams.signal_normalization:
  157. return _normalize(S, hparams)
  158. return S
  159. def mel2linear_spectrogram(mel_spectrogram, hparams=None):
  160. """Converts mel spectrogram to linear spectrogram"""
  161. hparams = hparams or default_hparams
  162. if hparams.signal_normalization:
  163. D = _denormalize(mel_spectrogram, hparams)
  164. else:
  165. D = mel_spectrogram
  166. D = _mel_to_linear(_db_to_amp(D - hparams.ref_level_db), hparams) # Convert back to linear
  167. S = _amp_to_db(np.abs(D), hparams) - hparams.ref_level_db
  168. if hparams.signal_normalization:
  169. return _normalize(S, hparams)
  170. return S
  171. def _lws_processor(hparams=None):
  172. hparams = hparams or default_hparams
  173. import lws
  174. return lws.lws(hparams.n_fft, get_hop_size(hparams), fftsize=hparams.win_size, mode="speech")
  175. def find_endpoint(wav, threshold_db=-40, min_silence_sec=0.8, hparams=None):
  176. hparams = hparams or default_hparams
  177. window_length = int(hparams.sample_rate * min_silence_sec)
  178. hop_length = int(window_length / 4)
  179. threshold = _db_to_amp(threshold_db)
  180. for x in range(hop_length, len(wav) - window_length, hop_length):
  181. if np.max(wav[x:x + window_length]) < threshold:
  182. return x + hop_length
  183. return len(wav)
  184. def _griffin_lim(S, hparams=None):
  185. """librosa implementation of Griffin-Lim
  186. Based on https://github.com/librosa/librosa/issues/434
  187. """
  188. hparams = hparams or default_hparams
  189. angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
  190. S_complex = np.abs(S).astype(np.complex)
  191. y = _istft(S_complex * angles, hparams)
  192. for i in range(hparams.griffin_lim_iters):
  193. angles = np.exp(1j * np.angle(_stft(y, hparams)))
  194. y = _istft(S_complex * angles, hparams)
  195. return y
  196. def _griffin_lim_tensorflow(S, hparams=None):
  197. '''TensorFlow implementation of Griffin-Lim
  198. Based on https://github.com/Kyubyong/tensorflow-exercises/blob/master/Audio_Processing.ipynb
  199. '''
  200. import tensorflow as tf
  201. hparams = hparams or default_hparams
  202. with tf.variable_scope('griffinlim'):
  203. # TensorFlow's stft and istft operate on a batch of spectrograms; create batch of size 1
  204. S = tf.expand_dims(S, 0)
  205. S_complex = tf.identity(tf.cast(S, dtype=tf.complex64))
  206. y = _istft_tensorflow(S_complex, hparams)
  207. for i in range(hparams.griffin_lim_iters):
  208. est = _stft_tensorflow(y, hparams)
  209. angles = est / tf.cast(tf.maximum(1e-8, tf.abs(est)), tf.complex64)
  210. y = _istft_tensorflow(S_complex * angles, hparams)
  211. return tf.squeeze(y, 0)
  212. def _stft(y, hparams=None):
  213. hparams = hparams or default_hparams
  214. if hparams.use_lws:
  215. return _lws_processor(hparams).stft(y).T
  216. else:
  217. return librosa.stft(y=y, n_fft=hparams.n_fft, hop_length=get_hop_size(hparams), win_length=hparams.win_size,
  218. center=hparams.center)
  219. def _stft_tensorflow(signals, hparams=None):
  220. import tensorflow as tf
  221. hparams = hparams or default_hparams
  222. n_fft, hop_length, win_length = _stft_parameters(hparams)
  223. return tf.contrib.signal.stft(signals, win_length, hop_length, n_fft, pad_end=False)
  224. def _istft(y, hparams=None):
  225. hparams = hparams or default_hparams
  226. return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams.win_size, center=hparams.center)
  227. def _istft_tensorflow(stfts, hparams=None):
  228. import tensorflow as tf
  229. hparams = hparams or default_hparams
  230. n_fft, hop_length, win_length = _stft_parameters(hparams)
  231. return tf.contrib.signal.inverse_stft(stfts, win_length, hop_length, n_fft)
  232. def _stft_parameters(hparams=None):
  233. hparams = hparams or default_hparams
  234. n_fft = hparams.n_fft # (hparams.num_freq - 1) * 2
  235. hop_length = hparams.hop_size # int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)
  236. win_length = hparams.win_size # int(hparams.frame_length_ms / 1000 * hparams.sample_rate)
  237. return n_fft, hop_length, win_length
  238. ##########################################################
  239. # Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
  240. def num_frames(length, fsize, fshift):
  241. """Compute number of time frames of spectrogram
  242. """
  243. pad = (fsize - fshift)
  244. if length % fshift == 0:
  245. M = (length + pad * 2 - fsize) // fshift + 1
  246. else:
  247. M = (length + pad * 2 - fsize) // fshift + 2
  248. return M
  249. def pad_lr(x, fsize, fshift):
  250. """Compute left and right padding
  251. """
  252. M = num_frames(len(x), fsize, fshift)
  253. pad = (fsize - fshift)
  254. T = len(x) + 2 * pad
  255. r = (M - 1) * fshift + fsize - T
  256. return pad, pad + r
  257. ##########################################################
  258. # Librosa correct padding
  259. def librosa_pad_lr(x, fsize, fshift):
  260. return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
  261. def _linear_to_mel(spectogram, hparams=None):
  262. hparams = hparams or default_hparams
  263. if hparams.mel_basis is None:
  264. _mel_basis = _build_mel_basis(hparams)
  265. else:
  266. _mel_basis = hparams.mel_basis
  267. return np.dot(_mel_basis, spectogram)
  268. def _mel_to_linear(mel_spectrogram, hparams=None):
  269. hparams = hparams or default_hparams
  270. if hparams.inv_mel_basis is None:
  271. _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))
  272. else:
  273. _inv_mel_basis = hparams.inv_mel_basis
  274. return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
  275. def _build_mel_basis(hparams=None):
  276. hparams = hparams or default_hparams
  277. assert hparams.fmax <= hparams.sample_rate // 2
  278. return librosa.filters.mel(hparams.sample_rate, hparams.n_fft, n_mels=hparams.num_mels,
  279. fmin=hparams.fmin, fmax=hparams.fmax)
  280. def _amp_to_db(x, hparams=None):
  281. hparams = hparams or default_hparams
  282. min_level = np.exp(hparams.min_level_db / 20 * np.log(10))
  283. return 20 * np.log10(np.maximum(min_level, x))
  284. def _db_to_amp(x):
  285. return np.power(10.0, (x) * 0.05)
  286. def _db_to_amp_tensorflow(x):
  287. import tensorflow as tf
  288. return tf.pow(tf.ones(tf.shape(x)) * 10.0, x * 0.05)
  289. def _normalize(S, hparams=None):
  290. hparams = hparams or default_hparams
  291. ma = hparams.max_abs_value
  292. mi = hparams.min_level_db
  293. if hparams.allow_clipping_in_normalization:
  294. if hparams.symmetric_mels:
  295. return np.clip((2 * ma) * ((S - mi) / (-mi)) - ma, -ma, ma)
  296. else:
  297. return np.clip(ma * ((S - mi) / (-mi)), 0, ma)
  298. else:
  299. assert S.max() <= 0 and S.min() - mi >= 0
  300. if hparams.symmetric_mels:
  301. return (2 * ma) * ((S - mi) / (-mi)) - ma
  302. else:
  303. return ma * ((S - mi) / (-mi))
  304. def _denormalize(D, hparams=None):
  305. hparams = hparams or default_hparams
  306. ma = hparams.max_abs_value
  307. mi = hparams.min_level_db
  308. if hparams.allow_clipping_in_normalization:
  309. if hparams.symmetric_mels:
  310. return ((np.clip(D, -ma, ma) + ma) * -mi / (2 * ma)) + mi
  311. else:
  312. return (np.clip(D, 0, ma) * -mi / ma) + mi
  313. else:
  314. if hparams.symmetric_mels:
  315. return ((D + ma) * -mi / (2 * ma)) + mi
  316. else:
  317. return (D * -mi / ma) + mi
  318. def _denormalize_tensorflow(S, hparams=None):
  319. import tensorflow as tf
  320. hparams = hparams or default_hparams
  321. mi = hparams.min_level_db
  322. return (tf.clip_by_value(S, 0, 1) * -mi) + mi
# Smoke check: print this module's file path when executed directly.
if __name__ == "__main__":
    print(__file__)