Merge master
.github/workflows/docker.yml | 163
@@ -1,81 +1,124 @@
name: docker
on:
  - workflow_dispatch
  workflow_dispatch:
    inputs:
      version:
        description: "Spleeter version to build image for"
        required: true
        default: "2.1.2"
jobs:
  build-test-push:
  cuda-base:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        distribution: [3.6, 3.7, 3.8]
      fail-fast: true
    steps:
      - uses: actions/checkout@v2
      - name: Build CUDA base image
        run: |
          docker build \
            --build-arg BASE=python:${{ matrix.distribution }} \
            -t deezer/python-cuda-10-1:${{ matrix.distribution }} \
            -f docker/cuda-10-1.dockerfile .
      - name: Docker login
        run: echo ${{ secrets.DOCKERHUB_PASSWORD }} | docker login -u ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin
      - name: Push deezer/python-cuda-10-1:${{ matrix.distribution }} image
        run: docker push deezer/python-cuda-10-1:${{ matrix.distribution }}
  pip-images:
    needs: cuda-base
    runs-on: ubuntu-latest
    strategy:
      matrix:
        platform: [cpu, gpu]
        distribution: [3.6, 3.7, 3.8]
      fail-fast: true
    steps:
      - uses: actions/checkout@v2
      - if: ${{ matrix.platform == 'cpu' }}
        run: |
          echo "base=python:${{ matrix.distribution }}" >> $GITHUB_ENV
          echo "image=spleeter" >> $GITHUB_ENV
      - if: ${{ matrix.platform == 'gpu' }}
        run: |
          echo "base=deezer/python-cuda-10-1:${{ matrix.distribution }}" >> $GITHUB_ENV
          echo "image=spleeter-gpu" >> $GITHUB_ENV
      - name: Build deezer/${{ env.image }}:${{ matrix.distribution }} image
        run: |
          docker build \
            --build-arg BASE=${{ env.base }} \
            --build-arg SPLEETER_VERSION=${{ github.event.inputs.version }} \
            -t deezer/${{ env.image }}:${{ matrix.distribution }} \
            -f docker/spleeter.dockerfile .
      - name: Test deezer/${{ env.image }}:${{ matrix.distribution }} image
        run: |
          docker run \
            -v $(pwd):/runtime \
            deezer/${{ env.image }}:${{ matrix.distribution }} \
            separate -o /tmp /runtime/audio_example.mp3
      - name: Docker login
        run: echo ${{ secrets.DOCKERHUB_PASSWORD }} | docker login -u ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin
      - name: Push deezer/${{ env.image }}:${{ matrix.distribution }} image
        run: docker push deezer/${{ env.image }}:${{ matrix.distribution }}
  conda-images:
    needs: cuda-base
    runs-on: ubuntu-latest
    strategy:
      matrix:
        platform: [cpu, gpu]
      fail-fast: true
    steps:
      - uses: actions/checkout@v2
      - if: ${{ matrix.platform == 'cpu' }}
        name: Build Conda base image
        run: |
          docker build -t conda:cpu -f docker/conda.dockerfile .
          echo "image=spleeter" >> $GITHUB_ENV
      - if: ${{ matrix.platform == 'gpu' }}
        name: Build Conda base image
        run: |
          docker build --build-arg BASE=deezer/python-cuda-10-1:3.8 -t conda:gpu -f docker/conda.dockerfile .
          echo "image=spleeter-gpu" >> $GITHUB_ENV
      - name: Build deezer/${{ env.image }}:${{ env.tag }} image
        run: |
          docker build \
            --build-arg BASE=conda:${{ matrix.platform }} \
            --build-arg SPLEETER_VERSION=${{ github.event.inputs.version }} \
            -t deezer/${{ env.image }}:conda \
            -f docker/spleeter-conda.dockerfile .
      - name: Docker login
        run: echo ${{ secrets.DOCKERHUB_PASSWORD }} | docker login -u ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin
      - name: Push deezer/${{ env.image }}:conda image
        run: docker push deezer/${{ env.image }}:conda
  images-with-model:
    needs: [pip-images, conda-images]
    runs-on: ubuntu-latest
    strategy:
      matrix:
        platform: [cpu, gpu]
        distribution: [3.6, 3.7, 3.8, conda]
        model: [modelless, 2stems, 4stems, 5stems]
        model: [2stems, 4stems, 5stems]
      fail-fast: true
    steps:
      - uses: actions/checkout@v2
      # ----------------------------------------------------------------------
      # Note: base image building and env setup.
      - name: Setup Python distribution
        run: |
          echo "::set-env name=base::python:${{ matrix.distribution }}"
          echo "::set-env name=tag::${{ matrix.distribution }}"
          echo "::set-env name=file::spleeter"
          echo "::set-env name=package::spleeter"
      - if: ${{ matrix.distribution == 'conda' }}
        name: Build Conda base image
        run: |
          docker build -t python:conda -f docker/conda.dockerfile .
          echo "::set-env name=file::spleeter-conda"
      - if: ${{ matrix.platform == 'cpu' }}
        run: echo "image=spleeter" >> $GITHUB_ENV
      - if: ${{ matrix.platform == 'gpu' }}
        name: Build CUDA base image
        run: echo "image=spleeter-gpu" >> $GITHUB_ENV
      - name: Build deezer/${{ env.image }}:${{ matrix.distribution }}-${{ matrix.model }} image
        run: |
          docker build \
            --build-arg BASE=python:${{ matrix.distribution }} \
            -t cuda:${{ matrix.distribution }} \
            -f docker/cuda-10-0.dockerfile .
          echo "::set-env name=base::cuda:${{ matrix.distribution }}"
          echo "::set-env name=tag::${{ matrix.distribution }}-gpu"
          echo "::set-env name=package::spleeter-gpu"
      # ----------------------------------------------------------------------
      # Note: image building.
      - name: Build deezer/spleeter:${{ env.tag }} image
        run: |
          docker build \
            --build-arg BASE=${{ env.base }} \
            --build-arg SPLEETER_PACKAGE=${{ env.package }} \
            -t deezer/spleeter:${{ env.tag }} \
            -f docker/${{ env.file }}.dockerfile .
          echo "::set-env name=modelargs::"
      - if: ${{ matrix.model != 'modelless' }}
        name: Build deezer/spleeter:${{ env.tag }}-${{ matrix.model }} image
        run: |
          docker build \
            --build-arg BASE=deezer/spleeter:${{ env.tag }} \
            --build-arg BASE=deezer/${{ env.image }}:${{ matrix.distribution }} \
            --build-arg MODEL=${{ matrix.model }} \
            -t deezer/spleeter:${{ env.tag }}-${{ matrix.model }} \
            -t deezer/${{ env.image }}:${{ matrix.distribution }}-${{ matrix.model }} \
            -f docker/spleeter-model.dockerfile .
          echo "::set-env name=tag::${{ env.tag }}-${{ matrix.model }}"
          echo "::set-env name=modelarg::-p spleeter:${{ matrix.model }}"
      # ----------------------------------------------------------------------
      # Note: image testing.
      - name: Test deezer/spleeter:${{ env.tag }} image
      - name: Test deezer/${{ env.image }}:${{ matrix.distribution }}-${{ matrix.model }} image
        run: |
          docker run \
            -v $(pwd):/runtime \
            deezer/spleeter:${{ env.tag }} \
            separate -i /runtime/audio_example.mp3 -o /tmp \${{ env.modelarg }}
      # ----------------------------------------------------------------------
      # Note: image deploy.
            deezer/${{ env.image }}:${{ matrix.distribution }} \
            separate -o /tmp -p spleeter:${{ matrix.model }} /runtime/audio_example.mp3
      - name: Docker login
        run: echo ${{ secrets.DOCKERHUB_PASSWORD }} | docker login -u ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin
      - name: Push deezer/spleeter:${{ env.tag }} image
        run: docker push deezer/spleeter:${{ env.tag }}
      - if: ${{ env.tag == 'spleeter:3.8' }}
        name: Push deezer/spleeter:latest image
        run: |
          docker tag deezer/spleeter:3.8 deezer/spleeter:latest
          docker push deezer/spleeter:latest
      - if: ${{ env.tag == 'spleeter:3.8-gpu' }}
        name: Push deezer/spleeter:gpu image
        run: |
          docker tag deezer/spleeter:3.8-gpu deezer/spleeter:gpu
          docker push deezer/spleeter:gpu
      - name: Push deezer/${{ env.image }}:${{ matrix.distribution }}-${{ matrix.model }} image
        run: docker push deezer/${{ env.image }}:${{ matrix.distribution }}-${{ matrix.model }}

.github/workflows/pypi.yml | 6
@@ -1,8 +1,6 @@
name: pypi
on:
  push:
    branches:
      - master
      - workflow_dispatch
env:
  PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
jobs:
@@ -22,4 +20,4 @@ jobs:
      - name: Deploy to pypi
        run: |
          poetry build
          poetry publish
          poetry publish
@@ -1,6 +1,6 @@
<img src="https://github.com/deezer/spleeter/raw/master/images/spleeter_logo.png" height="80" />

[](https://github.com/deezer/spleeter/actions)  [](https://badge.fury.io/py/spleeter) [](https://anaconda.org/conda-forge/spleeter) [](https://hub.docker.com/r/researchdeezer/spleeter) [](https://colab.research.google.com/github/deezer/spleeter/blob/master/spleeter.ipynb) [](https://gitter.im/spleeter/community) [](https://joss.theoj.org/papers/259e5efe669945a343bad6eccb89018b)
[](https://github.com/deezer/spleeter/actions)  [](https://badge.fury.io/py/spleeter) [](https://anaconda.org/deezer-research/spleeter) [](https://hub.docker.com/deezer/spleeter) [](https://colab.research.google.com/github/deezer/spleeter/blob/master/spleeter.ipynb) [](https://gitter.im/spleeter/community) [](https://joss.theoj.org/papers/259e5efe669945a343bad6eccb89018b)

> :warning: [Spleeter 2.1.0](https://pypi.org/project/spleeter/) release introduces some breaking changes, including new CLI option naming for input, and the drop
> of dedicated GPU package. Please read [CHANGELOG](CHANGELOG.md) for more details.
@@ -45,16 +45,18 @@ Ready to dig into it ? In a few lines you can install **Spleeter** using [Conda]

```bash
# install using conda
conda install -c conda-forge spleeter
conda config --add channels conda-forge # only needed if you don't already have this channel set
conda install -c deezer-research spleeter
# download an example audio file (if you don't have wget, use another tool for downloading)
wget https://github.com/deezer/spleeter/raw/master/audio_example.mp3
# separate the example audio into two components
spleeter separate -p spleeter:2stems -o output audio_example.mp3
```
> :warning: for Mac Users, this will work but will install an old version of spleeter. To get the latest version, you need to install **Spleeter** using `pip`. Check the [wiki](https://github.com/deezer/spleeter/wiki/1.-Installation) for details.

You should get two separated audio files (`vocals.wav` and `accompaniment.wav`) in the `output/audio_example` folder.

For a detailed documentation, please check the [repository wiki](https://github.com/deezer/spleeter/wiki)
For a detailed documentation, please check the [repository wiki](https://github.com/deezer/spleeter/wiki/1.-Installation)

## Development and Testing
@@ -25,6 +25,7 @@ requirements:
    - python {{ python }}
    - tensorflow ==2.3.0 # [linux]
    - tensorflow ==2.3.0 # [win]
    - numpy <1.20.0
    - pandas
    - ffmpeg-python
    - norbert
@@ -2,12 +2,12 @@ ARG BASE=conda

FROM ${BASE}

ARG SPLEETER_PACKAGE=spleeter
ARG SPLEETER_VERSION=1.5.3
ENV MODEL_PATH /model

RUN mkdir -p /model
RUN conda config --add channels conda-forge
RUN conda install -y -c conda-forge musdb
RUN conda install -y -c conda-forge ${SPLEETER_PACKAGE}==${SPLEETER_VERSION}
RUN conda install -y -c deezer-research spleeter
COPY docker/conda-entrypoint.sh spleeter-entrypoint.sh
ENTRYPOINT ["/bin/bash", "spleeter-entrypoint.sh"]
@@ -2,13 +2,12 @@ ARG BASE=python:3.6

FROM ${BASE}

ARG SPLEETER_PACKAGE=spleeter
ARG SPLEETER_VERSION=1.5.3
ENV MODEL_PATH /model

RUN mkdir -p /model
RUN apt-get update && apt-get install -y ffmpeg libsndfile1
RUN pip install musdb museval
RUN pip install ${SPLEETER_PACKAGE}==${SPLEETER_VERSION}
RUN pip install spleeter==${SPLEETER_VERSION}

ENTRYPOINT ["spleeter"]
@@ -241,7 +241,8 @@ class InstrumentDatasetBuilder(object):
    def filter_shape(self, sample):
        """ Filter badly shaped sample. """
        return check_tensor_shape(
            sample[self._spectrogram_key], (self._parent._T, self._parent._F, 2)
            sample[self._spectrogram_key],
            (self._parent._T, self._parent._F, self._parent._n_channels),
        )

    def reshape_spectrogram(self, sample):
@@ -250,7 +251,8 @@ class InstrumentDatasetBuilder(object):
            sample,
            **{
                self._spectrogram_key: set_tensor_shape(
                    sample[self._spectrogram_key], (self._parent._T, self._parent._F, 2)
                    sample[self._spectrogram_key],
                    (self._parent._T, self._parent._F, self._parent._n_channels),
                )
            },
        )
@@ -299,6 +301,7 @@ class DatasetBuilder(object):
        self._frame_length = audio_params["frame_length"]
        self._frame_step = audio_params["frame_step"]
        self._mix_name = audio_params["mix_name"]
        self._n_channels = audio_params["n_channels"]
        self._instruments = [self._mix_name] + audio_params["instrument_list"]
        self._instrument_builders = None
        self._chunk_duration = chunk_duration
@@ -307,6 +310,21 @@ class DatasetBuilder(object):
        self._audio_path = audio_path
        self._random_seed = random_seed

        self.check_parameters_compatibility()

    def check_parameters_compatibility(self):
        if self._frame_length / 2 + 1 < self._F:
            raise ValueError(
                "F is too large and must be set to at most frame_length/2+1. Decrease F or increase frame_length to fix."
            )

        if (
            self._chunk_duration * self._sample_rate - self._frame_length
        ) / self._frame_step < self._T:
            raise ValueError(
                "T is too large considering STFT parameters and chunk duration. Make sure spectrogram time dimension of chunks is larger than T (for instance reducing T or frame_step or increasing chunk duration)."
            )

    def expand_path(self, sample):
        """ Expands audio paths for the given sample. """
        return dict(
@@ -368,7 +386,7 @@ class DatasetBuilder(object):
            },
            lambda x: tf.image.random_crop(
                x,
                (self._T, len(self._instruments) * self._F, 2),
                (self._T, len(self._instruments) * self._F, self._n_channels),
                seed=self._random_seed,
            ),
        ),
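The `check_parameters_compatibility` method added above encodes two arithmetic constraints between the model parameters and the STFT settings. A minimal standalone sketch of the same checks, using illustrative values (the numbers below are assumptions for the example, not values read from a Spleeter configuration):

```python
# Illustrative parameters (assumed for this example only).
frame_length = 4096     # STFT window size, in samples
frame_step = 1024       # STFT hop size, in samples
sample_rate = 44100     # audio sample rate, in Hz
chunk_duration = 20     # length of a training chunk, in seconds
F = 1024                # frequency bins kept from each spectrogram frame
T = 512                 # time frames expected per training chunk

# Constraint 1: an STFT frame only yields frame_length / 2 + 1 frequency bins,
# so F cannot exceed that number.
assert F <= frame_length / 2 + 1, "F is too large for this frame_length"

# Constraint 2: a chunk of chunk_duration seconds produces roughly
# (chunk_duration * sample_rate - frame_length) / frame_step time frames,
# which must be at least T for cropping to succeed.
frames_per_chunk = (chunk_duration * sample_rate - frame_length) / frame_step
assert frames_per_chunk >= T, "T is too large for these STFT/chunk settings"

print(f"max F = {frame_length / 2 + 1:.0f}, frames per chunk = {frames_per_chunk:.1f}")
```

With these numbers both assertions pass; shrinking frame_length below 2046 or chunk_duration below roughly 12 seconds would trip the corresponding check.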
@@ -307,7 +307,7 @@ class Separator(object):
        return prediction

    def separate(
        self, waveform: np.ndarray, audio_descriptor: Optional[str] = None
        self, waveform: np.ndarray, audio_descriptor: Optional[str] = ""
    ) -> None:
        """
        Performs separation on a waveform.
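For context, `separate` is the array-level entry point touched by the hunk above. A minimal usage sketch, assuming the published `spleeter:2stems` configuration, a `(samples, channels)` float waveform, and that `separate` returns the per-instrument prediction dictionary in practice (the annotation in the hunk still reads `None`):

```python
import numpy as np

from spleeter.separator import Separator

# Load the pretrained 2stems model (vocals / accompaniment).
separator = Separator("spleeter:2stems")

# A silent 5-second stereo waveform at 44.1 kHz stands in for real audio;
# normally this array would come from an AudioAdapter.
waveform = np.zeros((5 * 44100, 2), dtype=np.float32)

# audio_descriptor identifies the input (a file path in practice);
# the new default shown above is an empty string.
prediction = separator.separate(waveform, audio_descriptor="")
for instrument, data in prediction.items():
    print(instrument, data.shape)
```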
@@ -52,17 +52,19 @@ TRAIN_CONFIG = {
}


def generate_fake_training_dataset(path, instrument_list=['vocals', 'other']):
def generate_fake_training_dataset(path,
                                   instrument_list=['vocals', 'other'],
                                   n_channels=2,
                                   n_songs = 2,
                                   fs = 44100,
                                   duration = 6,
                                   ):
    """
        generates a fake training dataset in path:
        - generates audio files
        - generates a csv file describing the dataset
    """
    aa = AudioAdapter.default()
    n_songs = 2
    fs = 44100
    duration = 6
    n_channels = 2
    rng = np.random.RandomState(seed=0)
    dataset_df = pd.DataFrame(
        columns=['mix_path'] + [
@@ -83,26 +85,40 @@ def generate_fake_training_dataset(path, instrument_list=['vocals', 'other']):


def test_train():

    with TemporaryDirectory() as path:
        # generate training dataset
        generate_fake_training_dataset(path)
        # set training command aruments
        runner = CliRunner()
        TRAIN_CONFIG['train_csv'] = join(path, 'train', 'train.csv')
        TRAIN_CONFIG['validation_csv'] = join(path, 'train', 'train.csv')
        TRAIN_CONFIG['model_dir'] = join(path, 'model')
        TRAIN_CONFIG['training_cache'] = join(path, 'cache', 'training')
        TRAIN_CONFIG['validation_cache'] = join(path, 'cache', 'validation')
        with open('useless_config.json', 'w') as stream:
            json.dump(TRAIN_CONFIG, stream)
        # execute training
        result = runner.invoke(spleeter, [
            'train',
            '-p', 'useless_config.json',
            '-d', path
        ])
        # assert that model checkpoint was created.
        assert os.path.exists(join(path, 'model', 'model.ckpt-10.index'))
        assert os.path.exists(join(path, 'model', 'checkpoint'))
        assert os.path.exists(join(path, 'model', 'model.ckpt-0.meta'))
        assert result.exit_code == 0
        for n_channels in [1,2]:
            TRAIN_CONFIG["n_channels"] = n_channels
            generate_fake_training_dataset(path,
                                           n_channels=n_channels,
                                           fs=TRAIN_CONFIG["sample_rate"]
                                           )
            # set training command arguments
            runner = CliRunner()

            model_dir = join(path, f'model_{n_channels}')
            train_dir = join(path, f'train')
            cache_dir = join(path, f'cache_{n_channels}')

            TRAIN_CONFIG['train_csv'] = join(train_dir, 'train.csv')
            TRAIN_CONFIG['validation_csv'] = join(train_dir, 'train.csv')
            TRAIN_CONFIG['model_dir'] = model_dir
            TRAIN_CONFIG['training_cache'] = join(cache_dir, 'training')
            TRAIN_CONFIG['validation_cache'] = join(cache_dir, 'validation')
            with open('useless_config.json', 'w') as stream:
                json.dump(TRAIN_CONFIG, stream)

            # execute training
            result = runner.invoke(spleeter, [
                'train',
                '-p', 'useless_config.json',
                '-d', path,
                "--verbose"
            ])

            # assert that model checkpoint was created.
            assert os.path.exists(join(model_dir, 'model.ckpt-10.index'))
            assert os.path.exists(join(model_dir, 'checkpoint'))
            assert os.path.exists(join(model_dir, 'model.ckpt-0.meta'))
            assert result.exit_code == 0