diff --git a/.circleci/config.yml b/.circleci/config.yml index 2fc1e5d..a1d8826 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -103,8 +103,8 @@ jobs: - run: docker build --build-arg BASETAG=conda --build-arg MODEL=5stems -t researchdeezer/spleeter:conda-5stems -f docker/embedded-model.dockerfile . - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:conda separate -i /runtime/audio_example.mp3 -o /tmp - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:conda-2stems separate -i /runtime/audio_example.mp3 -o /tmp - - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:conda-4stems separate -i /runtime/audio_example.mp3 -o /tmp - - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:conda-5stems separate -i /runtime/audio_example.mp3 -o /tmp + - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:conda-4stems separate -i /runtime/audio_example.mp3 -p spleeter:4stems -o /tmp + - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:conda-5stems separate -i /runtime/audio_example.mp3 -p spleeter:5stems -o /tmp - run: docker login -u $DOCKERHUB_USERNAME -p $DOCKERHUB_PASSWORD - run: docker push researchdeezer/spleeter:conda - run: docker push researchdeezer/spleeter:conda-2stems @@ -139,8 +139,8 @@ jobs: - run: docker build --build-arg BASETAG=3.6 --build-arg MODEL=5stems -t researchdeezer/spleeter:3.6-5stems -f docker/embedded-model.dockerfile . - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.6 separate -i /runtime/audio_example.mp3 -o /tmp - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.6-2stems separate -i /runtime/audio_example.mp3 -o /tmp - - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.6-4stems separate -i /runtime/audio_example.mp3 -o /tmp - - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.6-5stems separate -i /runtime/audio_example.mp3 -o /tmp + - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.6-4stems separate -i /runtime/audio_example.mp3 -p spleeter:4stems -o /tmp + - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.6-5stems separate -i /runtime/audio_example.mp3 -p spleeter:5stems -o /tmp - run: docker login -u $DOCKERHUB_USERNAME -p $DOCKERHUB_PASSWORD - run: docker push researchdeezer/spleeter:3.6 - run: docker push researchdeezer/spleeter:3.6-2stems @@ -173,8 +173,8 @@ jobs: - run: docker build --build-arg BASETAG=3.7 --build-arg MODEL=5stems -t researchdeezer/spleeter:3.7-5stems -f docker/embedded-model.dockerfile . 
- run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.7 separate -i /runtime/audio_example.mp3 -o /tmp - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.7-2stems separate -i /runtime/audio_example.mp3 -o /tmp - - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.7-4stems separate -i /runtime/audio_example.mp3 -o /tmp - - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.7-5stems separate -i /runtime/audio_example.mp3 -o /tmp + - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.7-4stems separate -i /runtime/audio_example.mp3 -p spleeter:4stems -o /tmp + - run: docker run -v $(pwd):/runtime researchdeezer/spleeter:3.7-5stems separate -i /runtime/audio_example.mp3 -p spleeter:5stems -o /tmp - run: docker login -u $DOCKERHUB_USERNAME -p $DOCKERHUB_PASSWORD - run: docker tag researchdeezer/spleeter:3.7 researchdeezer/spleeter:latest - run: docker push researchdeezer/spleeter:latest @@ -275,4 +275,4 @@ workflows: filters: branches: only: - - master \ No newline at end of file + - master diff --git a/docker/conda-gpu.dockerfile b/docker/conda-gpu.dockerfile index 10d7631..2fb92d7 100644 --- a/docker/conda-gpu.dockerfile +++ b/docker/conda-gpu.dockerfile @@ -17,7 +17,7 @@ COPY audio_example.mp3 . RUN conda install -y -c conda-forge musdb # RUN conda install -y -c conda-forge museval -RUN conda install -y -c conda-forge spleeter-gpu=1.4.5 +RUN conda install -y -c conda-forge spleeter-gpu=1.4.9 ENTRYPOINT ["spleeter"] \ No newline at end of file diff --git a/docker/conda.dockerfile b/docker/conda.dockerfile index c40d0b7..1ecf193 100644 --- a/docker/conda.dockerfile +++ b/docker/conda.dockerfile @@ -6,6 +6,6 @@ COPY audio_example.mp3 . RUN conda install -y -c conda-forge musdb # RUN conda install -y -c conda-forge museval -RUN conda install -y -c conda-forge spleeter=1.4.5 +RUN conda install -y -c conda-forge spleeter=1.4.9 ENTRYPOINT ["spleeter"] \ No newline at end of file diff --git a/docker/python-3.6-gpu.dockerfile b/docker/python-3.6-gpu.dockerfile index 3fe010b..4fcd596 100644 --- a/docker/python-3.6-gpu.dockerfile +++ b/docker/python-3.6-gpu.dockerfile @@ -52,6 +52,6 @@ COPY audio_example.mp3 . # Spleeter installation. RUN apt-get update && apt-get install -y ffmpeg libsndfile1 RUN pip install musdb museval -RUN pip install spleeter-gpu==1.4.5 +RUN pip install spleeter-gpu==1.4.9 ENTRYPOINT ["spleeter"] \ No newline at end of file diff --git a/docker/python-3.6.dockerfile b/docker/python-3.6.dockerfile index 7654ff1..0cad379 100644 --- a/docker/python-3.6.dockerfile +++ b/docker/python-3.6.dockerfile @@ -6,6 +6,6 @@ COPY audio_example.mp3 . RUN apt-get update && apt-get install -y ffmpeg libsndfile1 RUN pip install musdb museval -RUN pip install spleeter==1.4.5 +RUN pip install spleeter==1.4.9 ENTRYPOINT ["spleeter"] \ No newline at end of file diff --git a/docker/python-3.7-gpu.dockerfile b/docker/python-3.7-gpu.dockerfile index b94b84c..e4e4de6 100644 --- a/docker/python-3.7-gpu.dockerfile +++ b/docker/python-3.7-gpu.dockerfile @@ -52,6 +52,6 @@ COPY audio_example.mp3 . # Spleeter installation. RUN apt-get update && apt-get install -y ffmpeg libsndfile1 RUN pip install musdb museval -RUN pip install spleeter-gpu==1.4.5 +RUN pip install spleeter-gpu==1.4.9 ENTRYPOINT ["spleeter"] \ No newline at end of file diff --git a/docker/python-3.7.dockerfile b/docker/python-3.7.dockerfile index 223d0ed..efa9c1f 100644 --- a/docker/python-3.7.dockerfile +++ b/docker/python-3.7.dockerfile @@ -6,6 +6,6 @@ COPY audio_example.mp3 . 
RUN apt-get update && apt-get install -y ffmpeg libsndfile1 RUN pip install musdb museval -RUN pip install spleeter==1.4.5 +RUN pip install spleeter==1.4.9 ENTRYPOINT ["spleeter"] \ No newline at end of file diff --git a/paper.bib b/paper.bib new file mode 100644 index 0000000..819c309 --- /dev/null +++ b/paper.bib @@ -0,0 +1,151 @@ +% bibtex + +@article{SISEC18, + author = {{St{\"o}ter}, Fabian-Robert and {Liutkus}, Antoine and {Ito}, Nobutaka}, + title = "{The 2018 Signal Separation Evaluation Campaign}", + journal = {arXiv e-prints}, + keywords = {Electrical Engineering and Systems Science - Audio and Speech Processing, Computer Science - Sound}, + year = "2018", + month = "Apr", + eid = {arXiv:1804.06267}, + pages = {arXiv:1804.06267}, +archivePrefix = {arXiv}, + eprint = {1804.06267}, + primaryClass = {eess.AS}, + adsurl = {https://ui.adsabs.harvard.edu/abs/2018arXiv180406267S}, + adsnote = {Provided by the SAO/NASA Astrophysics Data System} +} + +@misc{spleeter2019, + title={Spleeter: A Fast And State-of-the Art Music Source Separation Tool With Pre-trained Models}, + author={Romain Hennequin and Anis Khlif and Felix Voituret and Manuel Moussallam}, + howpublished={Late-Breaking/Demo ISMIR 2019}, + month={November}, + note={Deezer Research}, + year={2019} +} + +@inproceedings{unet2017, + title={Singing voice separation with deep U-Net convolutional networks}, + author={Jansson, Andreas and Humphrey, Eric J. and Montecchio, Nicola and Bittner, Rachel and Kumar, Aparna and Weyde, Tillman}, + booktitle={Proceedings of the International Society for Music Information Retrieval Conference (ISMIR)}, + pages={323--332}, + year={2017} +} + +@inproceedings{deezerICASSP2019, +author={Laure {Pr\'etet} and Romain {Hennequin} and Jimena {Royo-Letelier} and Andrea {Vaglio}}, +booktitle={ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, +title={Singing Voice Separation: A Study on Training Data}, +year={2019}, +volume={}, +number={}, +pages={506-510}, +keywords={feature extraction;source separation;speech processing;supervised training;separation quality;data augmentation;singing voice separation systems;singing voice separation algorithms;separation diversity;source separation;supervised learning;training data;data augmentation}, +doi={10.1109/ICASSP.2019.8683555}, +ISSN={}, +month={May},} + + +@misc{Norbert, + author = {Antoine Liutkus and + Fabian-Robert St{\"o}ter}, + title = {sigsep/norbert: First official Norbert release}, + month = jul, + year = 2019, + doi = {10.5281/zenodo.3269749}, + url = {https://doi.org/10.5281/zenodo.3269749} +} + +@ARTICLE{separation_metrics, +author={Emmanuel {Vincent} and Remi {Gribonval} and Cedric {Fevotte}}, +journal={IEEE Transactions on Audio, Speech, and Language Processing}, +title={Performance measurement in blind audio source separation}, +year={2006}, +volume={14}, +number={4}, +pages={1462-1469}, +keywords={audio signal processing;blind source separation;distortion;time-varying filters;blind audio source separation;distortions;time-invariant gains;time-varying filters;source estimation;interference;additive noise;algorithmic artifacts;Source separation;Data mining;Filters;Additive noise;Microphones;Distortion measurement;Energy measurement;Independent component analysis;Interference;Image analysis;Audio source separation;evaluation;measure;performance;quality}, +doi={10.1109/TSA.2005.858005}, +ISSN={}, +month={July},} + +@misc{musdb18, + author = {Rafii, Zafar and + Liutkus, Antoine and + Fabian-Robert 
St{\"o}ter and + Mimilakis, Stylianos Ioannis and + Bittner, Rachel}, + title = {The {MUSDB18} corpus for music separation}, + month = dec, + year = 2017, + doi = {10.5281/zenodo.1117372}, + url = {https://doi.org/10.5281/zenodo.1117372} +} + + +@misc{tensorflow2015-whitepaper, +title={ {TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems}, +url={https://www.tensorflow.org/}, +note={Software available from tensorflow.org}, +author={ + Abadi, Mart{\'{\i}}n et al.}, + year={2015}, +} + +@article{2019arXiv190611139L, + author = {{Lee}, Kyungyun and {Nam}, Juhan}, + title = "{Learning a Joint Embedding Space of Monophonic and Mixed Music Signals for Singing Voice}", + journal = {arXiv e-prints}, + keywords = {Computer Science - Sound, Electrical Engineering and Systems Science - Audio and Speech Processing}, + year = "2019", + month = "Jun", + eid = {arXiv:1906.11139}, + pages = {arXiv:1906.11139}, +archivePrefix = {arXiv}, + eprint = {1906.11139}, + primaryClass = {cs.SD}, + adsurl = {https://ui.adsabs.harvard.edu/abs/2019arXiv190611139L}, + adsnote = {Provided by the SAO/NASA Astrophysics Data System} +} + +@article{Adam, + author = {{Kingma}, Diederik P. and {Ba}, Jimmy}, + title = "{Adam: A Method for Stochastic Optimization}", + journal = {arXiv e-prints}, + keywords = {Computer Science - Machine Learning}, + year = "2014", + month = "Dec", + eid = {arXiv:1412.6980}, + pages = {arXiv:1412.6980}, +archivePrefix = {arXiv}, + eprint = {1412.6980}, + primaryClass = {cs.LG}, + adsurl = {https://ui.adsabs.harvard.edu/abs/2014arXiv1412.6980K}, + adsnote = {Provided by the SAO/NASA Astrophysics Data System} +} + +@article{Open-Unmix, + author={Fabian-Robert St\"{o}ter and Stefan Uhlich and Antoine Liutkus and Yuki Mitsufuji}, + title={Open-Unmix - A Reference Implementation for Music Source Separation}, + journal={Journal of Open Source Software}, + year=2019, + doi = {10.21105/joss.01667}, + url = {https://doi.org/10.21105/joss.01667} +} + +@misc{spleeter, + author={Romain Hennequin and Anis Khlif and Felix Voituret and Manuel Moussallam}, + title={Spleeter}, + year=2019, + url = {https://www.github.com/deezer/spleeter} +} + +@misc{demucs, + title={Music Source Separation in the Waveform Domain}, + author={Alexandre Défossez and Nicolas Usunier and Léon Bottou and Francis Bach}, + year={2019}, + eprint={1911.13254}, + archivePrefix={arXiv}, + primaryClass={cs.SD} +} \ No newline at end of file diff --git a/paper.md b/paper.md new file mode 100644 index 0000000..0ffb5cd --- /dev/null +++ b/paper.md @@ -0,0 +1,96 @@ +--- +title: 'Spleeter: a fast and state-of-the art music source separation tool with pre-trained models' +tags: + - Python + - musical signal processing + - source separation + - vocal isolation +authors: + - name: Romain Hennequin + orcid: 0000-0001-8158-5562 + affiliation: 1 + - name: Anis Khlif + affiliation: 1 + - name: Felix Voituret + affiliation: 1 + - name: Manuel Moussallam + orcid: 0000-0003-0886-5423 + affiliation: 1 +affiliations: + - name: Deezer Research, Paris + index: 1 +date: 04 March 2020 +bibliography: paper.bib + +--- + +## Summary + +We present and release a new tool for music source separation with pre-trained models called Spleeter. Spleeter was designed with ease of use, separation performance and speed in mind. Spleeter is based on Tensorflow [@tensorflow2015-whitepaper] and makes it possible to: + +- split music audio files into several stems with a single command line using pre-trained models. 
A music audio file can be separated into $2$ stems (vocals and accompaniment), $4$ stems (vocals, drums, bass and other) or $5$ stems (vocals, drums, bass, piano and other). +- train source separation models or fine-tune pre-trained ones with Tensorflow (provided you have a dataset of isolated sources). + +The performance of the pre-trained models is very close to the published state of the art, and they are among the best performing publicly released $4$-stems separation models on the common musdb18 benchmark [@musdb18]. Spleeter is also very fast as it can separate a mix audio file into $4$ stems $100$ times faster than real-time (we note, though, that the model cannot be applied in real-time as it needs buffering) on a single Graphics Processing Unit (GPU) using the pre-trained $4$-stems model. + +## Purpose + +We release Spleeter with pre-trained state-of-the-art models in order to help the Music Information Retrieval (MIR) research community leverage the power of source separation in various MIR tasks, such as vocal lyrics analysis from audio (audio/lyrics alignment, lyrics transcription...), music transcription (chord transcription, drums transcription, bass transcription, chord estimation, beat tracking), singer identification, any type of multilabel classification (mood/genre...), vocal melody extraction or cover detection. +We believe that source separation has reached a level of maturity that makes it worth consideration for these tasks and that specific features computed from isolated vocals, drums or bass may help increase performance, especially in low data availability scenarios (small datasets, limited annotation availability) for which supervised learning might be difficult. +Spleeter also makes it possible to fine-tune the provided state-of-the-art models in order to adapt the system to a specific use case. +Finally, having an available source separation tool such as Spleeter will allow researchers to compare the performance of their new models to a state-of-the-art one on their own private datasets instead of musdb18, which is usually the only dataset used for reporting separation performance of unreleased models. +Note that we cannot release the training data for copyright reasons, and thus, sharing pre-trained models was the only way to make these results available to the community. + +## Implementation details + +Spleeter contains pre-trained models for: + +- vocals/accompaniment separation. +- $4$ stems separation as in SiSec [@SISEC18] (vocals, bass, drums and other). +- $5$ stems separation with an extra piano stem (vocals, bass, drums, piano and other). It is, to the authors' knowledge, the first released model to perform such a separation. + +The pre-trained models are U-nets [@unet2017] and follow specifications similar to those in [@deezerICASSP2019]. The U-net is an encoder/decoder Convolutional Neural Network (CNN) architecture with skip connections. We used $12$-layer U-nets ($6$ layers for the encoder and $6$ for the decoder). A U-net is used for estimating a soft mask for each source (stem). The training loss is an $L_1$-norm between masked input mix spectrograms and source target spectrograms. The models were trained on Deezer internal datasets (notably the Bean dataset that was used in [@deezerICASSP2019]) using Adam [@Adam]. Training took approximately a full week on a single GPU. Separation is then done from estimated source spectrograms using soft masking or multi-channel Wiener filtering.
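For illustration, the soft-masking step described above can be sketched in a few lines of NumPy. This is a simplified sketch, not the actual Tensorflow implementation shipped in Spleeter; the separation exponent and epsilon values are assumptions chosen for the example.

```python
import numpy as np

def soft_mask_separate(mix_stft, estimated_magnitudes, p=2, eps=1e-10):
    """Apply ratio masks built from per-source magnitude estimates to a mixture STFT.

    mix_stft: complex mixture STFT of shape (T, F, n_channels).
    estimated_magnitudes: dict mapping each source name to a magnitude
        estimate of the same shape (e.g. the output of the U-net).
    Returns a dict of masked complex STFTs, one per source; waveforms are
    then recovered with an inverse STFT.
    """
    # Denominator: sum of all source magnitudes raised to the separation exponent.
    total = sum(m ** p for m in estimated_magnitudes.values()) + eps
    masked = {}
    for name, magnitude in estimated_magnitudes.items():
        # Ratio mask for this source, with a small floor to avoid division issues.
        mask = (magnitude ** p + eps / len(estimated_magnitudes)) / total
        masked[name] = mask * mix_stft
    return masked
```

Multi-channel Wiener filtering replaces these simple ratio masks with filters estimated from the same magnitude estimates (Norbert [@Norbert] provides such an implementation).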
+ +Training and inference are implemented in Tensorflow, which makes it possible to run the code on a Central Processing Unit (CPU) or a GPU. + +## Speed + +As the whole separation pipeline can be run on a GPU and the model is based on a CNN, computations are efficiently parallelized and model inference is very fast. For instance, Spleeter is able to separate the whole musdb18 test dataset (about $3$ hours and $27$ minutes of audio) into $4$ stems in less than $2$ minutes, including model loading time (about $15$ seconds), and audio wav files export, using a single GeForce RTX 2080 GPU, and a double Intel Xeon Gold 6134 CPU @ 3.20GHz (CPU is used for mix files loading and stem files export only). In this setup, Spleeter is able to process $100$ seconds of stereo audio in less than $1$ second, which makes it very useful for efficiently processing large datasets. + +## Separation performances + +The models compete with the state of the art on the standard musdb18 dataset [@musdb18] while they were not trained, validated or optimized in any way on musdb18 data. Results in terms of standard source separation metrics [@separation_metrics], namely Signal to Distortion Ratio (SDR), Signal to Artifacts Ratio (SAR), Signal to Interference Ratio (SIR) and source Image to Spatial distortion Ratio (ISR), are presented in the following table, compared to Open-Unmix [@Open-Unmix] and Demucs [@demucs] (only SDR is reported for Demucs since the other metrics are not available in the paper), which are, to the authors' knowledge, the only released systems with near state-of-the-art performance. +We present results for soft masking and for multi-channel Wiener filtering (applied using Norbert [@Norbert]). As can be seen, Spleeter is competitive with Open-Unmix on most metrics, especially SDR for all instruments, and is almost on par with Demucs. + + +| |Spleeter Mask |Spleeter MWF |Open-Unmix |Demucs| +|-----------|---------------|---------------|-----------|------| +| Vocals SDR|6.55 |6.86 |6.32 |7.05  | +| Vocals SIR|15.19 |15.86 |13.33 |  | +| Vocals SAR|6.44 |6.99 |6.52 |  | +| Vocals ISR|12.01 |11.95 |11.93 |  | +| Bass SDR |5.10 |5.51 |5.23 |6.70  | +| Bass SIR |10.01 |10.30 |10.93 |  | +| Bass SAR |5.15 |5.96 |6.34 |  | +| Bass ISR |9.18 |9.61 |9.23 |  | +| Drums SDR |5.93 |6.71 |5.73 |7.08  | +| Drums SIR |12.24 |13.67 |11.12 |  | +| Drums SAR |5.78 |6.54 |6.02 |  | +| Drums ISR |10.50 |10.69 |10.51 |  | +| Other SDR |4.24 |4.55 |4.02 |4.47  | +| Other SIR |7.86 |8.16 |6.59 |  | +| Other SAR |4.63 |4.88 |4.74 |  | +| Other ISR |9.83 |9.87 |9.31 |  | + + +Spleeter [@spleeter] source code and pre-trained models are available on [github](https://www.github.com/deezer/spleeter) and distributed under an MIT license. This repository will eventually be used for releasing other models with improved performance or models separating into more than $5$ stems. + +## Distribution + +Spleeter is available as a standalone Python package, and is also provided as a [conda](https://github.com/conda-forge/spleeter-feedstock) recipe and self-contained [Docker images](https://hub.docker.com/r/researchdeezer/spleeter), which makes it usable as is on various platforms (a minimal usage sketch is given below). + +## Acknowledgements + +We acknowledge contributions from Laure Pretet, who trained the first models and wrote the first piece of code that led to Spleeter.
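As a rough illustration of the distribution channels listed above, a minimal sketch of programmatic use of the released package might look as follows; the file names and the choice of the 2-stems configuration are placeholders rather than a prescribed workflow.

```python
# Minimal sketch: programmatic use of spleeter's pre-trained models.
# 'audio_example.mp3' and 'output/' are placeholder paths.
from spleeter.separator import Separator

separator = Separator('spleeter:2stems')         # load the vocals/accompaniment model
separator.separate_to_file('audio_example.mp3',  # input mixture
                           'output/')            # one wav file per stem is written here
```

The same separation is available from the command line (for example `spleeter separate -i audio_example.mp3 -o output`), which is how the Docker images above are exercised in the CI configuration.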
+ +## References diff --git a/requirements.txt b/requirements.txt index 2efde6f..1fd76f7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,4 +4,5 @@ setuptools>=41.0.0 pandas==0.25.1 tensorflow==1.15 ffmpeg-python -norbert==0.2.1 \ No newline at end of file +norbert==0.2.1 +librosa==0.7.2 \ No newline at end of file diff --git a/setup.py b/setup.py index 3fc22e8..deb47cf 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ __license__ = 'MIT License' # Default project values. project_name = 'spleeter' -project_version = '1.4.9' +project_version = '1.5.0' tensorflow_dependency = 'tensorflow' tensorflow_version = '1.15' here = path.abspath(path.dirname(__file__)) @@ -56,6 +56,7 @@ setup( 'pandas==0.25.1', 'requests', 'setuptools>=41.0.0', + 'librosa==0.7.2', '{}=={}'.format(tensorflow_dependency, tensorflow_version), ], extras_require={ diff --git a/spleeter/audio/ffmpeg.py b/spleeter/audio/ffmpeg.py index 168ec36..08218e9 100644 --- a/spleeter/audio/ffmpeg.py +++ b/spleeter/audio/ffmpeg.py @@ -112,9 +112,9 @@ class FFMPEGProcessAudioAdapter(AudioAdapter): :param bitrate: (Optional) Bitrate of the written audio file. :raise IOError: If any error occurs while using FFMPEG to write data. """ - directory = os.path.split(path)[0] + directory = os.path.dirname(path) if not os.path.exists(directory): - os.makedirs(directory) + raise SpleeterError(f'output directory does not exists: {directory}') get_logger().debug('Writing file %s', path) input_kwargs = {'ar': sample_rate, 'ac': data.shape[1]} output_kwargs = {'ar': sample_rate, 'strict': '-2'} @@ -127,11 +127,11 @@ class FFMPEGProcessAudioAdapter(AudioAdapter): .input('pipe:', format='f32le', **input_kwargs) .output(path, **output_kwargs) .overwrite_output() - .run_async(pipe_stdin=True, quiet=True)) + .run_async(pipe_stdin=True, pipe_stderr=True, quiet=True)) try: process.stdin.write(data.astype('>> from spleeter.model import EstimatorSpecBuilder >>> builder = EstimatorSpecBuilder() - >>> builder.build_prediction_model() + >>> builder.build_predict_model() >>> builder.build_evaluation_model() - >>> builder.build_training_model() + >>> builder.build_train_model() >>> from spleeter.model import model_fn >>> estimator = tf.estimator.Estimator(model_fn=model_fn, ...) @@ -94,6 +165,7 @@ class EstimatorSpecBuilder(object): :param features: The input features for the estimator. :param params: Some hyperparameters as a dictionary. """ + self._features = features self._params = params # Get instrument name. @@ -106,7 +178,10 @@ class EstimatorSpecBuilder(object): self._frame_length = params['frame_length'] self._frame_step = params['frame_step'] - def _build_output_dict(self): + def include_stft_computations(self): + return self._params["stft_backend"] == "tensorflow" + + def _build_model_outputs(self): """ Created a batch_sizexTxFxn_channels input tensor containing mix magnitude spectrogram, then an output dict from it according to the selected model in internal parameters. @@ -114,7 +189,8 @@ class EstimatorSpecBuilder(object): :returns: Build output dict. :raise ValueError: If required model_type is not supported. 
""" - input_tensor = self._features[f'{self._mix_name}_spectrogram'] + + input_tensor = self.spectrogram_feature model = self._params.get('model', None) if model is not None: model_type = model.get('type', self.DEFAULT_MODEL) @@ -124,12 +200,12 @@ class EstimatorSpecBuilder(object): apply_model = get_model_function(model_type) except ModuleNotFoundError: raise ValueError(f'No model function {model_type} found') - return apply_model( + self._model_outputs = apply_model( input_tensor, self._instruments, self._params['model']['params']) - def _build_loss(self, output_dict, labels): + def _build_loss(self, labels): """ Construct tensorflow loss and metrics :param output_dict: dictionary of network outputs (key: instrument @@ -138,6 +214,7 @@ class EstimatorSpecBuilder(object): name, value: ground truth spectrogram of the instrument) :returns: tensorflow (loss, metrics) tuple. """ + output_dict = self.model_outputs loss_type = self._params.get('loss_type', self.L1_MASK) if loss_type == self.L1_MASK: losses = { @@ -177,51 +254,106 @@ class EstimatorSpecBuilder(object): return tf.compat.v1.train.GradientDescentOptimizer(rate) return tf.compat.v1.train.AdamOptimizer(rate) + @property + def instruments(self): + return self._instruments + + @property + def stft_name(self): + return f'{self._mix_name}_stft' + + @property + def spectrogram_name(self): + return f'{self._mix_name}_spectrogram' + def _build_stft_feature(self): """ Compute STFT of waveform and slice the STFT in segment with the right length to feed the network. """ - stft_feature = tf.transpose( - stft( - tf.transpose(self._features['waveform']), - self._frame_length, - self._frame_step, - window_fn=lambda frame_length, dtype: ( - hann_window(frame_length, periodic=True, dtype=dtype)), - pad_end=True), - perm=[1, 2, 0]) - self._features[f'{self._mix_name}_stft'] = stft_feature - self._features[f'{self._mix_name}_spectrogram'] = tf.abs( - pad_and_partition(stft_feature, self._T))[:, :, :self._F, :] - def _inverse_stft(self, stft): + stft_name = self.stft_name + spec_name = self.spectrogram_name + + if stft_name not in self._features: + stft_feature = tf.transpose( + stft( + tf.transpose(self._features['waveform']), + self._frame_length, + self._frame_step, + window_fn=lambda frame_length, dtype: ( + hann_window(frame_length, periodic=True, dtype=dtype)), + pad_end=True), + perm=[1, 2, 0]) + self._features[f'{self._mix_name}_stft'] = stft_feature + if spec_name not in self._features: + self._features[spec_name] = tf.abs( + pad_and_partition(self._features[stft_name], self._T))[:, :, :self._F, :] + + @property + def model_outputs(self): + if not hasattr(self, "_model_outputs"): + self._build_model_outputs() + return self._model_outputs + + @property + def outputs(self): + if not hasattr(self, "_outputs"): + self._build_outputs() + return self._outputs + + @property + def stft_feature(self): + if self.stft_name not in self._features: + self._build_stft_feature() + return self._features[self.stft_name] + + @property + def spectrogram_feature(self): + if self.spectrogram_name not in self._features: + self._build_stft_feature() + return self._features[self.spectrogram_name] + + @property + def masks(self): + if not hasattr(self, "_masks"): + self._build_masks() + return self._masks + + @property + def masked_stfts(self): + if not hasattr(self, "_masked_stfts"): + self._build_masked_stfts() + return self._masked_stfts + + def _inverse_stft(self, stft_t, time_crop=None): """ Inverse and reshape the given STFT - :param stft: input STFT + :param 
stft_t: input STFT :returns: inverse STFT (waveform) """ inversed = inverse_stft( - tf.transpose(stft, perm=[2, 0, 1]), + tf.transpose(stft_t, perm=[2, 0, 1]), self._frame_length, self._frame_step, window_fn=lambda frame_length, dtype: ( hann_window(frame_length, periodic=True, dtype=dtype)) ) * self.WINDOW_COMPENSATION_FACTOR reshaped = tf.transpose(inversed) - return reshaped[:tf.shape(self._features['waveform'])[0], :] + if time_crop is None: + time_crop = tf.shape(self._features['waveform'])[0] + return reshaped[:time_crop, :] - def _build_mwf_output_waveform(self, output_dict): + def _build_mwf_output_waveform(self): """ Perform separation with multichannel Wiener Filtering using Norbert. Note: multichannel Wiener Filtering is not coded in Tensorflow and thus may be quite slow. - :param output_dict: dictionary of estimated spectrogram (key: instrument - name, value: estimated spectrogram of the instrument) :returns: dictionary of separated waveforms (key: instrument name, value: estimated waveform of the instrument) """ import norbert # pylint: disable=import-error - x = self._features[f'{self._mix_name}_stft'] + output_dict = self.model_outputs + x = self.stft_feature v = tf.stack( [ pad_and_reshape( @@ -265,30 +397,28 @@ class EstimatorSpecBuilder(object): mask_shape[-1])) else: raise ValueError(f'Invalid mask_extension parameter {extension}') - n_extra_row = (self._frame_length) // 2 + 1 - self._F + n_extra_row = self._frame_length // 2 + 1 - self._F extension = tf.tile(extension_row, [1, 1, n_extra_row, 1]) return tf.concat([mask, extension], axis=2) - def _build_manual_output_waveform(self, output_dict): - """ Perform ratio mask separation - - :param output_dict: dictionary of estimated spectrogram (key: instrument - name, value: estimated spectrogram of the instrument) - :returns: dictionary of separated waveforms (key: instrument name, - value: estimated waveform of the instrument) + def _build_masks(self): """ + Compute masks from the output spectrograms of the model. + :return: + """ + output_dict = self.model_outputs + stft_feature = self.stft_feature separation_exponent = self._params['separation_exponent'] output_sum = tf.reduce_sum( [e ** separation_exponent for e in output_dict.values()], axis=0 ) + self.EPSILON - output_waveform = {} + out = {} for instrument in self._instruments: output = output_dict[f'{instrument}_spectrogram'] # Compute mask with the model. - instrument_mask = ( - output ** separation_exponent - + (self.EPSILON / len(output_dict))) / output_sum + instrument_mask = (output ** separation_exponent + + (self.EPSILON / len(output_dict))) / output_sum # Extend mask; instrument_mask = self._extend_mask(instrument_mask) # Stack back mask. @@ -298,30 +428,56 @@ class EstimatorSpecBuilder(object): axis=0) instrument_mask = tf.reshape(instrument_mask, new_shape) # Remove padded part (for mask having the same size as STFT); - stft_feature = self._features[f'{self._mix_name}_stft'] + instrument_mask = instrument_mask[ - :tf.shape(stft_feature)[0], ...] - # Compute masked STFT and normalize it. - output_waveform[instrument] = self._inverse_stft( - tf.cast(instrument_mask, dtype=tf.complex64) * stft_feature) + :tf.shape(stft_feature)[0], ...] 
+ out[instrument] = instrument_mask + self._masks = out + + def _build_masked_stfts(self): + input_stft = self.stft_feature + out = {} + for instrument, mask in self.masks.items(): + out[instrument] = tf.cast(mask, dtype=tf.complex64) * input_stft + self._masked_stfts = out + + def _build_manual_output_waveform(self, masked_stft): + """ Perform ratio mask separation + + :param output_dict: dictionary of estimated spectrogram (key: instrument + name, value: estimated spectrogram of the instrument) + :returns: dictionary of separated waveforms (key: instrument name, + value: estimated waveform of the instrument) + """ + + output_waveform = {} + for instrument, stft_data in masked_stft.items(): + output_waveform[instrument] = self._inverse_stft(stft_data) return output_waveform - def _build_output_waveform(self, output_dict): + def _build_output_waveform(self, masked_stft): """ Build output waveform from given output dict in order to be used in prediction context. Regarding of the configuration building method will be using MWF. - :param output_dict: Output dict to build output waveform from. :returns: Built output waveform. """ + if self._params.get('MWF', False): - output_waveform = self._build_mwf_output_waveform(output_dict) + output_waveform = self._build_mwf_output_waveform() else: - output_waveform = self._build_manual_output_waveform(output_dict) - if 'audio_id' in self._features: - output_waveform['audio_id'] = self._features['audio_id'] + output_waveform = self._build_manual_output_waveform(masked_stft) return output_waveform + def _build_outputs(self): + if self.include_stft_computations(): + self._outputs = self._build_output_waveform(self.masked_stfts) + else: + self._outputs = self.masked_stfts + + if 'audio_id' in self._features: + self._outputs['audio_id'] = self._features['audio_id'] + def build_predict_model(self): """ Builder interface for creating model instance that aims to perform prediction / inference over given track. The output of such estimator @@ -330,12 +486,10 @@ class EstimatorSpecBuilder(object): :returns: An estimator for performing prediction. """ - self._build_stft_feature() - output_dict = self._build_output_dict() - output_waveform = self._build_output_waveform(output_dict) + return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.PREDICT, - predictions=output_waveform) + predictions=self.outputs) def build_evaluation_model(self, labels): """ Builder interface for creating model instance that aims to perform @@ -346,8 +500,7 @@ class EstimatorSpecBuilder(object): :param labels: Model labels. :returns: An estimator for performing model evaluation. """ - output_dict = self._build_output_dict() - loss, metrics = self._build_loss(output_dict, labels) + loss, metrics = self._build_loss(labels) return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.EVAL, loss=loss, @@ -362,8 +515,7 @@ class EstimatorSpecBuilder(object): :param labels: Model labels. :returns: An estimator for performing model training. 
""" - output_dict = self._build_output_dict() - loss, metrics = self._build_loss(output_dict, labels) + loss, metrics = self._build_loss(labels) optimizer = self._build_optimizer() train_operation = optimizer.minimize( loss=loss, diff --git a/spleeter/separator.py b/spleeter/separator.py index c1c8f0e..174c73f 100644 --- a/spleeter/separator.py +++ b/spleeter/separator.py @@ -13,40 +13,57 @@ """ import os -import json +import logging -from functools import partial +from time import time from multiprocessing import Pool -from pathlib import Path from os.path import basename, join, splitext +import numpy as np +import tensorflow as tf +from librosa.core import stft, istft +from scipy.signal.windows import hann from . import SpleeterError from .audio.adapter import get_default_audio_adapter from .audio.convertor import to_stereo -from .model import model_fn from .utils.configuration import load_configuration -from .utils.estimator import create_estimator, to_predictor +from .utils.estimator import create_estimator, to_predictor, get_default_model_dir +from .model import EstimatorSpecBuilder, InputProviderFactory + __email__ = 'research@deezer.com' __author__ = 'Deezer Research' __license__ = 'MIT License' +logger = logging.getLogger("spleeter") + + + +def get_backend(backend): + assert backend in ["auto", "tensorflow", "librosa"] + if backend == "auto": + return "tensorflow" if tf.test.is_gpu_available() else "librosa" + return backend + + class Separator(object): """ A wrapper class for performing separation. """ - def __init__(self, params_descriptor, MWF=False): + def __init__(self, params_descriptor, MWF=False, stft_backend="auto", multiprocess=True): """ Default constructor. :param params_descriptor: Descriptor for TF params to be used. :param MWF: (Optional) True if MWF should be used, False otherwise. """ + self._params = load_configuration(params_descriptor) self._sample_rate = self._params['sample_rate'] self._MWF = MWF self._predictor = None - self._pool = Pool() + self._pool = Pool() if multiprocess else None self._tasks = [] + self._params["stft_backend"] = get_backend(stft_backend) def _get_predictor(self): """ Lazy loading access method for internal predictor instance. @@ -68,7 +85,7 @@ class Separator(object): task.get() task.wait(timeout=timeout) - def separate(self, waveform): + def separate_tensorflow(self, waveform, audio_descriptor): """ Performs source separation over the given waveform. The separation is performed synchronously but the result @@ -86,10 +103,59 @@ class Separator(object): predictor = self._get_predictor() prediction = predictor({ 'waveform': waveform, - 'audio_id': ''}) + 'audio_id': audio_descriptor}) prediction.pop('audio_id') return prediction + def stft(self, data, inverse=False, length=None): + """ + Single entrypoint for both stft and istft. This computes stft and istft with librosa on stereo data. The two + channels are processed separately and are concatenated together in the result. The expected input formats are: + (n_samples, 2) for stft and (T, F, 2) for istft. + :param data: np.array with either the waveform or the complex spectrogram depending on the parameter inverse + :param inverse: should a stft or an istft be computed. + :return: Stereo data as numpy array for the transform. 
The channels are stored in the last dimension + """ + assert not (inverse and length is None) + data = np.asfortranarray(data) + N = self._params["frame_length"] + H = self._params["frame_step"] + win = hann(N, sym=False) + fstft = istft if inverse else stft + win_len_arg = {"win_length": None, "length": length} if inverse else {"n_fft": N} + dl, dr = (data[:, :, 0].T, data[:, :, 1].T) if inverse else (data[:, 0], data[:, 1]) + s1 = fstft(dl, hop_length=H, window=win, center=False, **win_len_arg) + s2 = fstft(dr, hop_length=H, window=win, center=False, **win_len_arg) + s1 = np.expand_dims(s1.T, 2-inverse) + s2 = np.expand_dims(s2.T, 2-inverse) + return np.concatenate([s1, s2], axis=2-inverse) + + def separate_librosa(self, waveform, audio_id): + out = {} + input_provider = InputProviderFactory.get(self._params) + features = input_provider.get_input_dict_placeholders() + + builder = EstimatorSpecBuilder(features, self._params) + latest_checkpoint = tf.train.latest_checkpoint(get_default_model_dir(self._params['model_dir'])) + + # TODO: fix the logic, build sometimes return, sometimes set attribute + outputs = builder.outputs + + saver = tf.train.Saver() + stft = self.stft(waveform) + with tf.Session() as sess: + saver.restore(sess, latest_checkpoint) + outputs = sess.run(outputs, feed_dict=input_provider.get_feed_dict(features, stft, audio_id)) + for inst in builder.instruments: + out[inst] = self.stft(outputs[inst], inverse=True, length=waveform.shape[0]) + return out + + def separate(self, waveform, audio_descriptor): + if self._params["stft_backend"] == "tensorflow": + return self.separate_tensorflow(waveform, audio_descriptor) + else: + return self.separate_librosa(waveform, audio_descriptor) + def separate_to_file( self, audio_descriptor, destination, audio_adapter=get_default_audio_adapter(), @@ -108,6 +174,8 @@ class Separator(object): descriptor would be a file path. :param destination: Target directory to write output to. :param audio_adapter: (Optional) Audio adapter to use for I/O. + :param chunk_duration: (Optional) Maximum signal duration that is processed + in one pass. Default: all signal. :param offset: (Optional) Offset of loaded song. :param duration: (Optional) Duration of loaded song. :param codec: (Optional) Export codec. @@ -115,12 +183,17 @@ class Separator(object): :param filename_format: (Optional) Filename format. :param synchronous: (Optional) True is should by synchronous. 
""" - waveform, _ = audio_adapter.load( + waveform, sample_rate = audio_adapter.load( audio_descriptor, offset=offset, duration=duration, sample_rate=self._sample_rate) - sources = self.separate(waveform) + sources = self.separate(waveform, audio_descriptor) + self.save_to_file(sources, audio_descriptor, destination, filename_format, codec, + audio_adapter, bitrate, synchronous) + + def save_to_file(self, sources, audio_descriptor, destination, filename_format, codec, + audio_adapter, bitrate, synchronous): filename = splitext(basename(audio_descriptor))[0] generated = [] for instrument, data in sources.items(): @@ -128,17 +201,23 @@ class Separator(object): filename=filename, instrument=instrument, codec=codec)) + directory = os.path.dirname(path) + if not os.path.exists(directory): + os.makedirs(directory) if path in generated: raise SpleeterError(( f'Separated source path conflict : {path},' 'please check your filename format')) generated.append(path) - task = self._pool.apply_async(audio_adapter.save, ( - path, - data, - self._sample_rate, - codec, - bitrate)) - self._tasks.append(task) - if synchronous: + if self._pool: + task = self._pool.apply_async(audio_adapter.save, ( + path, + data, + self._sample_rate, + codec, + bitrate)) + self._tasks.append(task) + else: + audio_adapter.save(path, data, self._sample_rate, codec, bitrate) + if synchronous and self._pool: self.join() diff --git a/spleeter/utils/estimator.py b/spleeter/utils/estimator.py index 95c4219..a9aa736 100644 --- a/spleeter/utils/estimator.py +++ b/spleeter/utils/estimator.py @@ -13,27 +13,37 @@ import tensorflow as tf from tensorflow.contrib import predictor # pylint: enable=import-error -from ..model import model_fn +from ..model import model_fn, InputProviderFactory from ..model.provider import get_default_model_provider # Default exporting directory for predictor. DEFAULT_EXPORT_DIRECTORY = join(gettempdir(), 'serving') + +def get_default_model_dir(model_dir): + """ + Transforms a string like 'spleeter:2stems' into an actual path. + :param model_dir: + :return: + """ + model_provider = get_default_model_provider() + return model_provider.get(model_dir) + def create_estimator(params, MWF): """ Initialize tensorflow estimator that will perform separation Params: - - params: a dictionnary of parameters for building the model + - params: a dictionary of parameters for building the model Returns: a tensorflow estimator """ # Load model. - model_directory = params['model_dir'] - model_provider = get_default_model_provider() - params['model_dir'] = model_provider.get(model_directory) + + + params['model_dir'] = get_default_model_dir(params['model_dir']) params['MWF'] = MWF # Setup config session_config = tf.compat.v1.ConfigProto() @@ -56,11 +66,10 @@ def to_predictor(estimator, directory=DEFAULT_EXPORT_DIRECTORY): :param estimator: Estimator to export. :param directory: (Optional) path to write exported model into. 
""" + + input_provider = InputProviderFactory.get(estimator.params) def receiver(): - shape = (None, estimator.params['n_channels']) - features = { - 'waveform': tf.compat.v1.placeholder(tf.float32, shape=shape), - 'audio_id': tf.compat.v1.placeholder(tf.string)} + features = input_provider.get_input_dict_placeholders() return tf.estimator.export.ServingInputReceiver(features, features) estimator.export_saved_model(directory, receiver) diff --git a/tests/test_separator.py b/tests/test_separator.py index 9235731..271fdfb 100644 --- a/tests/test_separator.py +++ b/tests/test_separator.py @@ -13,6 +13,7 @@ from os.path import splitext, basename, exists, join from tempfile import TemporaryDirectory import pytest +import numpy as np from spleeter import SpleeterError from spleeter.audio.adapter import get_default_audio_adapter @@ -21,34 +22,38 @@ from spleeter.separator import Separator TEST_AUDIO_DESCRIPTOR = 'audio_example.mp3' TEST_AUDIO_BASENAME = splitext(basename(TEST_AUDIO_DESCRIPTOR))[0] TEST_CONFIGURATIONS = [ - ('spleeter:2stems', ('vocals', 'accompaniment')), - ('spleeter:4stems', ('vocals', 'drums', 'bass', 'other')), - ('spleeter:5stems', ('vocals', 'drums', 'bass', 'piano', 'other')) + ('spleeter:2stems', ('vocals', 'accompaniment'), 'tensorflow'), + ('spleeter:4stems', ('vocals', 'drums', 'bass', 'other'), 'tensorflow'), + ('spleeter:5stems', ('vocals', 'drums', 'bass', 'piano', 'other'), 'tensorflow'), + ('spleeter:2stems', ('vocals', 'accompaniment'), 'librosa'), + ('spleeter:4stems', ('vocals', 'drums', 'bass', 'other'), 'librosa'), + ('spleeter:5stems', ('vocals', 'drums', 'bass', 'piano', 'other'), 'librosa') ] -@pytest.mark.parametrize('configuration, instruments', TEST_CONFIGURATIONS) -def test_separate(configuration, instruments): +@pytest.mark.parametrize('configuration, instruments, backend', TEST_CONFIGURATIONS) +def test_separate(configuration, instruments, backend): """ Test separation from raw data. """ adapter = get_default_audio_adapter() waveform, _ = adapter.load(TEST_AUDIO_DESCRIPTOR) - separator = Separator(configuration) - prediction = separator.separate(waveform) + separator = Separator(configuration, stft_backend=backend) + prediction = separator.separate(waveform, TEST_AUDIO_DESCRIPTOR) assert len(prediction) == len(instruments) for instrument in instruments: assert instrument in prediction for instrument in instruments: track = prediction[instrument] - assert not (waveform == track).all() + assert waveform.shape == track.shape + assert not np.allclose(waveform, track) for compared in instruments: if instrument != compared: - assert not (track == prediction[compared]).all() + assert not np.allclose(track, prediction[compared]) -@pytest.mark.parametrize('configuration, instruments', TEST_CONFIGURATIONS) -def test_separate_to_file(configuration, instruments): +@pytest.mark.parametrize('configuration, instruments, backend', TEST_CONFIGURATIONS) +def test_separate_to_file(configuration, instruments, backend): """ Test file based separation. 
""" - separator = Separator(configuration) + separator = Separator(configuration, stft_backend=backend) with TemporaryDirectory() as directory: separator.separate_to_file( TEST_AUDIO_DESCRIPTOR, @@ -59,10 +64,10 @@ def test_separate_to_file(configuration, instruments): '{}/{}.wav'.format(TEST_AUDIO_BASENAME, instrument))) -@pytest.mark.parametrize('configuration, instruments', TEST_CONFIGURATIONS) -def test_filename_format(configuration, instruments): +@pytest.mark.parametrize('configuration, instruments, backend', TEST_CONFIGURATIONS) +def test_filename_format(configuration, instruments, backend): """ Test custom filename format. """ - separator = Separator(configuration) + separator = Separator(configuration, stft_backend=backend) with TemporaryDirectory() as directory: separator.separate_to_file( TEST_AUDIO_DESCRIPTOR, @@ -74,7 +79,7 @@ def test_filename_format(configuration, instruments): 'export/{}/{}.wav'.format(TEST_AUDIO_BASENAME, instrument))) -def test_filename_confilct(): +def test_filename_conflict(): """ Test error handling with static pattern. """ separator = Separator(TEST_CONFIGURATIONS[0][0]) with TemporaryDirectory() as directory: