#!/usr/bin/env python
# coding: utf8

"""
Module that provides a class wrapper for source separation.

Examples:

```python
>>> from spleeter.separator import Separator
>>> separator = Separator('spleeter:2stems')
>>> separator.separate(waveform, lambda instrument, data: ...)
>>> separator.separate_to_file(...)
```
"""

import atexit
import os

from multiprocessing import Pool
from os.path import basename, join, splitext, dirname
from typing import Dict, Generator, Optional

from . import SpleeterError
from .audio import STFTBackend
from .audio.adapter import get_default_audio_adapter
from .audio.convertor import to_stereo
from .model import EstimatorSpecBuilder, InputProviderFactory
from .utils.configuration import load_configuration
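# NOTE: the symbols below are referenced later in this module
# (``create_estimator`` and ``_get_session``) but were not imported in this
# excerpt; the import paths are assumed from the spleeter package layout.
from .model import model_fn
from .model.provider import ModelProvider, get_default_model_dir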

# pyright: reportMissingImports=false
# pylint: disable=import-error
import numpy as np
import tensorflow as tf

from librosa.core import stft, istft
from scipy.signal.windows import hann
# pylint: enable=import-error

__email__ = 'spleeter@deezer.com'
__author__ = 'Deezer Research'
__license__ = 'MIT License'


class DataGenerator(object):
    """
    Generator object that stores a sample and generates it once when called.

    Used to feed a tensorflow estimator without knowing the whole data at
    build time.
    """

    def __init__(self) -> None:
        """ Default constructor. """
        self._current_data = None

    def update_data(self, data) -> None:
        """ Replace internal data. """
        self._current_data = data

    def __call__(self) -> Generator:
        """ Generation process. """
        buffer = self._current_data
        while buffer:
            yield buffer
            buffer = self._current_data
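

# Assumed constant: get_backend() below validates its argument against
# SUPPORTED_BACKEND, which was not defined in this excerpt; the values are
# inferred from the branches of get_backend().
SUPPORTED_BACKEND = ('auto', 'tensorflow', 'librosa')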


def get_backend(backend: str) -> str:
    """
    Resolve the STFT backend to use from the requested backend name.
    """
    if backend not in SUPPORTED_BACKEND:
        raise ValueError(f'Unsupported backend {backend}')
    if backend == 'auto':
        if len(tf.config.list_physical_devices('GPU')):
            return 'tensorflow'
        return 'librosa'
    return backend


def create_estimator(params, MWF):
    """
    Initialize tensorflow estimator that will perform separation.

    Params:
    - params: a dictionary of parameters for building the model
    - MWF: `True` if the Multichannel Wiener Filter should be used,
      `False` otherwise.

    Returns:
        a tensorflow estimator
    """
    # Load model.
    provider: ModelProvider = ModelProvider.default()
    params['model_dir'] = provider.get(params['model_dir'])
    params['MWF'] = MWF
    # Setup config
    session_config = tf.compat.v1.ConfigProto()
    session_config.gpu_options.per_process_gpu_memory_fraction = 0.7
    config = tf.estimator.RunConfig(session_config=session_config)
    # Setup estimator
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=params['model_dir'],
        params=params,
        config=config)
    return estimator


class Separator(object):
    """ A wrapper class for performing separation. """

    def __init__(
            self,
            params_descriptor: str,
            MWF: bool = False,
            stft_backend: STFTBackend = STFTBackend.AUTO,
            multiprocess: bool = True) -> None:
        """
        Default constructor.

        Parameters:
            params_descriptor (str):
                Descriptor for TF params to be used.
            MWF (bool):
                (Optional) `True` if MWF should be used, `False` otherwise.
            stft_backend (STFTBackend):
                (Optional) STFT backend to use for separation.
            multiprocess (bool):
                (Optional) `True` to export files through a process pool,
                `False` to export synchronously in the current process.
        """
        self._params = load_configuration(params_descriptor)
        self._sample_rate = self._params['sample_rate']
        self._MWF = MWF
        self._tf_graph = tf.Graph()
        self._prediction_generator = None
        self._input_provider = None
        self._builder = None
        self._features = None
        self._session = None
        if multiprocess:
            self._pool = Pool()
            atexit.register(self._pool.close)
        else:
            self._pool = None
        self._tasks = []
        self._params['stft_backend'] = get_backend(stft_backend)
        self._data_generator = DataGenerator()

    def __del__(self) -> None:
        if self._session:
            self._session.close()

    def _get_prediction_generator(self):
        """ Lazy loading access method for internal prediction generator
        returned by the predict method of a tensorflow estimator.

        :returns: generator of prediction.
        """
        if self._prediction_generator is None:
            estimator = create_estimator(self._params, self._MWF)

            def get_dataset():
                return tf.data.Dataset.from_generator(
                    self._data_generator,
                    output_types={
                        'waveform': tf.float32,
                        'audio_id': tf.string},
                    output_shapes={
                        'waveform': (None, 2),
                        'audio_id': ()})

            self._prediction_generator = estimator.predict(
                get_dataset,
                yield_single_examples=False)
        return self._prediction_generator

    def join(self, timeout: int = 200) -> None:
        """
        Wait for all pending tasks to be finished.

        Parameters:
            timeout (int):
                (Optional) task waiting timeout.
        """
        while len(self._tasks) > 0:
            task = self._tasks.pop()
            task.get()
            task.wait(timeout=timeout)

    def _stft(self, data, inverse: bool = False, length=None):
        """ Single entrypoint for both stft and istft. This computes stft and
        istft with librosa on stereo data. The two channels are processed
        separately and are concatenated together in the result. The expected
        input formats are: (n_samples, 2) for stft and (T, F, 2) for istft.

        :param data: np.array with either the waveform or the complex
                     spectrogram depending on the parameter inverse
        :param inverse: should a stft or an istft be computed.
        :returns: Stereo data as numpy array for the transform.
                  The channels are stored in the last dimension.
        """
        assert not (inverse and length is None)
        data = np.asfortranarray(data)
        N = self._params['frame_length']
        H = self._params['frame_step']
        win = hann(N, sym=False)
        fstft = istft if inverse else stft
        win_len_arg = {
            'win_length': None,
            'length': None} if inverse else {'n_fft': N}
        n_channels = data.shape[-1]
        out = []
        for c in range(n_channels):
            d = np.concatenate(
                (np.zeros((N, )), data[:, c], np.zeros((N, )))
            ) if not inverse else data[:, :, c].T
            s = fstft(d, hop_length=H, window=win, center=False, **win_len_arg)
            if inverse:
                s = s[N:N+length]
            s = np.expand_dims(s.T, 2-inverse)
            out.append(s)
        if len(out) == 1:
            return out[0]
        return np.concatenate(out, axis=2-inverse)

    def _get_input_provider(self):
        """ Lazy loading access method for the input provider. """
        if self._input_provider is None:
            self._input_provider = InputProviderFactory.get(self._params)
        return self._input_provider

    def _get_features(self):
        """ Lazy loading access method for input feature placeholders. """
        if self._features is None:
            provider = self._get_input_provider()
            self._features = provider.get_input_dict_placeholders()
        return self._features

    def _get_builder(self):
        """ Lazy loading access method for the estimator spec builder. """
        if self._builder is None:
            self._builder = EstimatorSpecBuilder(
                self._get_features(),
                self._params)
        return self._builder

    def _get_session(self):
        """ Lazy loading access method for the tensorflow session, restored
        from the latest model checkpoint. """
        if self._session is None:
            saver = tf.compat.v1.train.Saver()
            latest_checkpoint = tf.train.latest_checkpoint(
                get_default_model_dir(self._params['model_dir']))
            self._session = tf.compat.v1.Session()
            saver.restore(self._session, latest_checkpoint)
        return self._session

    def _separate_librosa(self, waveform: np.ndarray, audio_id):
        """
        Performs separation with librosa backend for STFT.

        Parameters:
            waveform (numpy.ndarray):
                Waveform to be separated (as a numpy array)
            audio_id (str):
                String identifying the waveform (e.g. filename).
        """
        with self._tf_graph.as_default():
            out = {}
            features = self._get_features()
            # TODO: fix the logic, build sometimes return,
            # sometimes set attribute.
            outputs = self._get_builder().outputs
            stft = self._stft(waveform)
            if stft.shape[-1] == 1:
                stft = np.concatenate([stft, stft], axis=-1)
            elif stft.shape[-1] > 2:
                stft = stft[:, :2]
            sess = self._get_session()
            outputs = sess.run(
                outputs,
                feed_dict=self._get_input_provider().get_feed_dict(
                    features,
                    stft,
                    audio_id))
            for inst in self._get_builder().instruments:
                out[inst] = self._stft(
                    outputs[inst],
                    inverse=True,
                    length=waveform.shape[0])
            return out

    def _separate_tensorflow(self, waveform: np.ndarray, audio_descriptor):
        """
        Performs source separation over the given waveform with tensorflow
        backend.

        Parameters:
            waveform (numpy.ndarray):
                Waveform to be separated (as a numpy array)
            audio_descriptor (str):
                String describing the waveform (e.g. filename).

        Returns:
            Separated waveforms.
        """
        if not waveform.shape[-1] == 2:
            waveform = to_stereo(waveform)
        prediction_generator = self._get_prediction_generator()
        # NOTE: update data in generator before performing separation.
        self._data_generator.update_data({
            'waveform': waveform,
            'audio_id': np.array(audio_descriptor)})
        # NOTE: perform separation.
        prediction = next(prediction_generator)
        prediction.pop('audio_id')
        return prediction

    def separate(
            self,
            waveform: np.ndarray,
            audio_descriptor: Optional[str] = None) -> Dict:
        """
        Performs separation on a waveform.

        Parameters:
            waveform (numpy.ndarray):
                Waveform to be separated (as a numpy array)
            audio_descriptor (str):
                (Optional) string describing the waveform (e.g. filename).

        Returns:
            Dictionary mapping each instrument name to the corresponding
            separated waveform.
        """
        if self._params['stft_backend'] == 'tensorflow':
            return self._separate_tensorflow(waveform, audio_descriptor)
        else:
            return self._separate_librosa(waveform, audio_descriptor)

    def separate_to_file(
            self,
            audio_descriptor,
            destination,
            audio_adapter=get_default_audio_adapter(),
            offset=0,
            duration=600.,
            codec='wav',
            bitrate='128k',
            filename_format='{filename}/{instrument}.{codec}',
            synchronous=True):
        """ Performs source separation and export result to file using
        given audio adapter.

        Filename format should be a Python formattable string that may use
        the following parameters: {instrument}, {filename}, {foldername} and
        {codec}.

        :param audio_descriptor:    Describe song to separate, used by audio
                                    adapter to retrieve and load audio data,
                                    in case of file based audio adapter, such
                                    descriptor would be a file path.
        :param destination:         Target directory to write output to.
        :param audio_adapter:       (Optional) Audio adapter to use for I/O.
        :param offset:              (Optional) Offset of loaded song.
        :param duration:            (Optional) Duration of loaded song
                                    (default: 600s).
        :param codec:               (Optional) Export codec.
        :param bitrate:             (Optional) Export bitrate.
        :param filename_format:     (Optional) Filename format.
        :param synchronous:         (Optional) True if operation should be
                                    synchronous.
        """
        waveform, sample_rate = audio_adapter.load(
            audio_descriptor,
            offset=offset,
            duration=duration,
            sample_rate=self._sample_rate)
        sources = self.separate(waveform, audio_descriptor)
        self.save_to_file(
            sources,
            audio_descriptor,
            destination,
            filename_format,
            codec,
            audio_adapter,
            bitrate,
            synchronous)

    def save_to_file(
            self,
            sources,
            audio_descriptor,
            destination,
            filename_format='{filename}/{instrument}.{codec}',
            codec='wav',
            audio_adapter=get_default_audio_adapter(),
            bitrate='128k',
            synchronous=True):
        """ Export dictionary of sources to files.

        :param sources:             Dictionary of sources to be exported. The
                                    keys are the name of the instruments, and
                                    the values are Nx2 numpy arrays containing
                                    the corresponding instrument waveform, as
                                    returned by the separate method.
        :param audio_descriptor:    Describe song to separate, used by audio
                                    adapter to retrieve and load audio data,
                                    in case of file based audio adapter, such
                                    descriptor would be a file path.
        :param destination:         Target directory to write output to.
        :param filename_format:     (Optional) Filename format.
        :param codec:               (Optional) Export codec.
        :param audio_adapter:       (Optional) Audio adapter to use for I/O.
        :param bitrate:             (Optional) Export bitrate.
        :param synchronous:         (Optional) True if operation should be
                                    synchronous.
        """
        foldername = basename(dirname(audio_descriptor))
        filename = splitext(basename(audio_descriptor))[0]
        generated = []
        for instrument, data in sources.items():
            path = join(destination, filename_format.format(
                filename=filename,
                instrument=instrument,
                foldername=foldername,
                codec=codec,
            ))
            directory = os.path.dirname(path)
            if not os.path.exists(directory):
                os.makedirs(directory)
            if path in generated:
                raise SpleeterError((
                    f'Separated source path conflict : {path}, '
                    'please check your filename format'))
            generated.append(path)
            if self._pool:
                task = self._pool.apply_async(audio_adapter.save, (
                    path,
                    data,
                    self._sample_rate,
                    codec,
                    bitrate))
                self._tasks.append(task)
            else:
                audio_adapter.save(
                    path,
                    data,
                    self._sample_rate,
                    codec,
                    bitrate)
        if synchronous and self._pool:
            self.join()
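

# Example usage (sketch): illustrates the typical call sequence for this
# module; the audio paths below are placeholders and are not part of the
# original file.
#
#   from spleeter.separator import Separator
#
#   separator = Separator('spleeter:2stems')
#   separator.separate_to_file('/path/to/audio.wav', '/path/to/output')
#   separator.join()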