diff --git a/.github/workflows/conda.yml b/.github/workflows/conda.yml
index e13d9b3..6e97522 100644
--- a/.github/workflows/conda.yml
+++ b/.github/workflows/conda.yml
@@ -1,8 +1,6 @@
 name: conda
 on:
-  push:
-    branches:
-      - master
+  - workflow_dispatch
 env:
   ANACONDA_USERNAME: ${{ secrets.ANACONDA_USERNAME }}
   ANACONDA_PASSWORD: ${{ secrets.ANACONDA_PASSWORD }}
@@ -11,7 +9,7 @@ jobs:
     strategy:
       matrix:
         python: [3.7, 3.8]
-        package: [spleeter]
+        package: [spleeter, spleeter-gpu]
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml
index e2cd0b2..d87f573 100644
--- a/.github/workflows/pypi.yml
+++ b/.github/workflows/pypi.yml
@@ -4,40 +4,22 @@ on:
     branches:
       - master
 env:
-  TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
-  TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
+  PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
 jobs:
   package-and-deploy:
-    strategy:
-      matrix:
-        platform: [cpu, gpu]
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
         with:
           python-version: 3.7
-      - uses: actions/cache@v2
-        with:
-          path: ~/.cache/pip
-          key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-
-      - uses: actions/cache@v2
-        with:
-          path: ${{ env.GITHUB_WORKSPACE }}/dist
-          key: sdist-${{ matrix.platform }}-${{ hashFiles('**/setup.py') }}
-          restore-keys: |
-            sdist-${{ matrix.platform }}-${{ hashFiles('**/setup.py') }}
-            sdist-${{ matrix.platform }}
-            sdist-
-      - name: Install dependencies
-        run: pip install --upgrade pip setuptools twine
-      - if: ${{ matrix.platform == 'cpu' }}
-        name: Package CPU distribution
-        run: make build
-      - if: ${{ matrix.platform == 'gpu' }}
-        name: Package GPU distribution)
-        run: make build-gpu
+      - name: Install Poetry
+        run: |
+          pip install poetry
+          poetry config virtualenvs.in-project false
+          poetry config virtualenvs.path ~/.virtualenvs
+          poetry config pypi-token.pypi $PYPI_TOKEN
       - name: Deploy to pypi
-        run: make deploy
\ No newline at end of file
+        run: |
+          poetry build
+          poetry publish
\ No newline at end of file
diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
deleted file mode 100644
index 36ed370..0000000
--- a/.github/workflows/pytest.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-name: pytest
-on:
-  pull_request:
-    branches:
-      - master
-jobs:
-  tests:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version: [3.6, 3.7, 3.8]
-    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.python-version }}
-      - uses: actions/cache@v2
-        id: spleeter-pip-cache
-        with:
-          path: ~/.cache/pip
-          key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-
-      - uses: actions/cache@v2
-        env:
-          model-release: 1
-        id: spleeter-model-cache
-        with:
-          path: ${{ env.GITHUB_WORKSPACE }}/pretrained_models
-          key: models-${{ env.model-release }}
-          restore-keys: |
-            models-${{ env.model-release }}
-      - name: Install dependencies
-        run: |
-          sudo apt-get update && sudo apt-get install -y ffmpeg
-          pip install --upgrade pip setuptools
-          pip install pytest==5.4.3 pytest-xdist==1.32.0 pytest-forked==1.1.3 musdb museval
-          python setup.py install
-      - name: Test with pytest
-        run: make test
\ No newline at end of file
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..13ca958
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,51 @@
+name: test
+on:
+  pull_request:
+    branches:
+      - master
+jobs:
+  tests:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: [3.7, 3.8]
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+      - uses: actions/cache@v2
+        env:
+          model-release: 1
+        id: spleeter-model-cache
+        with:
+          path: ${{ env.GITHUB_WORKSPACE }}/pretrained_models
+          key: models-${{ env.model-release }}
+          restore-keys: |
+            models-${{ env.model-release }}
+      - name: Install ffmpeg
+        run: |
+          sudo apt-get update && sudo apt-get install -y ffmpeg
+      - name: Install Poetry
+        run: |
+          pip install poetry
+          poetry config virtualenvs.in-project false
+          poetry config virtualenvs.path ~/.virtualenvs
+      - name: Cache Poetry virtualenv
+        uses: actions/cache@v1
+        id: cache
+        with:
+          path: ~/.virtualenvs
+          key: poetry-${{ matrix.python-version }}-${{ hashFiles('**/poetry.lock') }}
+          restore-keys: |
+            poetry-${{ matrix.python-version }}-${{ hashFiles('**/poetry.lock') }}
+      - name: Install Dependencies
+        run: poetry install
+        if: steps.cache.outputs.cache-hit != 'true'
+      - name: Code quality checks
+        run: |
+          poetry run black spleeter --check
+          poetry run isort spleeter --check
+      - name: Test with pytest
+        run: poetry run pytest tests/
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ee851ca..6142016 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,43 @@
 # Changelog History
 
+## 2.1.0
+
+This version introduces design-related changes, most notably the transition to Typer for CLI management and to
+Poetry as the library build backend.
+
+* The `-i` option is now deprecated and replaced by traditional CLI input argument listing (see the example below)
+* The project is now built using Poetry
+* The project requires code formatting using Black and isort
+* The dedicated GPU package `spleeter-gpu` is no longer supported; the `spleeter` package now supports both CPU and GPU hardware
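+
+For instance, where 2.0 used `spleeter separate -i audio_example.mp3 -p spleeter:2stems -o output`,
+2.1.0 expects `spleeter separate -p spleeter:2stems -o output audio_example.mp3`.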
+
+### API changes:
+
+* The function `get_default_audio_adapter` is now available as the `default()` class method of the `AudioAdapter` class (illustrated below)
+* The function `get_default_model_provider` is now available as the `default()` class method of the `ModelProvider` class
+* `STFTBackend` and `Codec` are now string enums
+* `GithubModelProvider` now uses `httpx` with HTTP/2 support
+* Commands are now located in the `__main__` module and wrapped as simple functions using Typer; the `options` module provides the specification for each available option and argument
+* The `types` module provides custom type specifications and will be enhanced in a future release to provide more robust typing support with MyPy
+* The `utils.logging` module has been cleaned up: the logger instance is now a module singleton, and a single function configures it with a verbose parameter
+* Added a custom logger handler (see the tiangolo/typer#203 discussion)
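+
+A minimal sketch of the new accessors (module paths are assumed from the 2.x package layout,
+not confirmed by this diff):
+
+```python
+from spleeter.audio.adapter import AudioAdapter
+from spleeter.model.provider import ModelProvider
+
+# replaces the former get_default_audio_adapter() function
+adapter = AudioAdapter.default()
+# replaces the former get_default_model_provider() function
+provider = ModelProvider.default()
+```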
+
+
 ## 2.0
 
 First release, October 9th 2020
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 900e35d..0000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,3 +0,0 @@
-include spleeter/resources/*.json
-include README.md
-include LICENSE
\ No newline at end of file
diff --git a/Makefile b/Makefile
deleted file mode 100644
index d667361..0000000
--- a/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
-# =======================================================
-# Library lifecycle management.
-#
-# @author Deezer Research
-# @licence MIT Licence
-# =======================================================
-
-FEEDSTOCK = spleeter-feedstock
-FEEDSTOCK_REPOSITORY = https://github.com/deezer/$(FEEDSTOCK)
-FEEDSTOCK_RECIPE = $(FEEDSTOCK)/recipe/spleeter/meta.yaml
-PYTEST_CMD = pytest -W ignore::FutureWarning -W ignore::DeprecationWarning -vv --forked
-
-all: clean build test deploy
-
-clean:
-	rm -Rf *.egg-info
-	rm -Rf dist
-
-build: clean
-	sed -i "s/project_name = '[^']*'/project_name = 'spleeter'/g" setup.py
-	sed -i "s/tensorflow_dependency = '[^']*'/tensorflow_dependency = 'tensorflow'/g" setup.py
-	python3 setup.py sdist
-
-build-gpu: clean
-	sed -i "s/project_name = '[^']*'/project_name = 'spleeter-gpu'/g" setup.py
-	sed -i "s/tensorflow_dependency = '[^']*'/tensorflow_dependency = 'tensorflow-gpu'/g" setup.py
-	python3 setup.py sdist
-
-test:
-	$(PYTEST_CMD) tests/
-
-deploy:
-	pip install twine
-	twine upload --skip-existing dist/*
diff --git a/README.md b/README.md
index db6915e..c77362d 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,9 @@
 [![Github actions](https://github.com/deezer/spleeter/workflows/pytest/badge.svg)](https://github.com/deezer/spleeter/actions) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/spleeter) [![PyPI version](https://badge.fury.io/py/spleeter.svg)](https://badge.fury.io/py/spleeter) [![Conda](https://img.shields.io/conda/vn/conda-forge/spleeter)](https://anaconda.org/conda-forge/spleeter) [![Docker Pulls](https://img.shields.io/docker/pulls/researchdeezer/spleeter)](https://hub.docker.com/r/researchdeezer/spleeter) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/deezer/spleeter/blob/master/spleeter.ipynb) [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/spleeter/community) [![status](https://joss.theoj.org/papers/259e5efe669945a343bad6eccb89018b/status.svg)](https://joss.theoj.org/papers/259e5efe669945a343bad6eccb89018b)
 
+> :warning: The [Spleeter 2.1.0](https://pypi.org/project/spleeter/) release introduces some breaking changes, including new CLI option naming for input
+> and the drop of the dedicated GPU package. Please read the [CHANGELOG](CHANGELOG.md) for more details.
+
 ## About
 
 **Spleeter** is [Deezer](https://www.deezer.com/) source separation library with pretrained models
@@ -46,7 +49,7 @@ conda install -c conda-forge spleeter
 # download an example audio file (if you don't have wget, use another tool for downloading)
 wget https://github.com/deezer/spleeter/raw/master/audio_example.mp3
 # separate the example audio into two components
-spleeter separate -i audio_example.mp3 -p spleeter:2stems -o output
+spleeter separate -p spleeter:2stems -o output audio_example.mp3
 ```
 
 You should get two separated audio files (`vocals.wav` and `accompaniment.wav`) in the `output/audio_example` folder.
@@ -55,13 +58,18 @@ For a detailed documentation, please check the [repository wiki](https://github.
 
 ## Development and Testing
 
-The following set of commands will clone this repository, create a virtual environment provisioned with the dependencies and run the tests (will take a few minutes):
+This project is managed using [Poetry](https://python-poetry.org/docs/basic-usage/). To run the test suite,
+execute the following commands:
 
 ```bash
+# Clone spleeter repository
 git clone https://github.com/Deezer/spleeter && cd spleeter
-python -m venv spleeterenv && source spleeterenv/bin/activate
-pip install . && pip install pytest pytest-xdist
-make test
+# Install poetry
+pip install poetry
+# Install spleeter dependencies
+poetry install
+# Run unit test suite
+poetry run pytest tests/
 ```
 
 ## Reference
diff --git a/conda/spleeter-gpu/meta.yaml b/conda/spleeter-gpu/meta.yaml
new file mode 100644
index 0000000..4740dd3
--- /dev/null
+++ b/conda/spleeter-gpu/meta.yaml
@@ -0,0 +1,52 @@
+{% set name = "spleeter-gpu" %}
+{% set version = "2.0.2" %}
+
+package:
+  name: {{ name|lower }}
+  version: {{ version }}
+
+source:
+  - url: https://pypi.io/packages/source/{{ name[0] }}/{{ name }}/{{ name }}-{{ version }}.tar.gz
+    sha256: ecd3518a98f9978b9088d1cb2ef98f766401fd9007c2bf72a34e5b5bc5a6fdc3
+
+build:
+  number: 0
+  script: {{ PYTHON }} -m pip install . -vv
+  skip: True  # [osx]
+  entry_points:
+    - spleeter = spleeter.__main__:entrypoint
+
+requirements:
+  host:
+    - python {{ python }}
+    - pip
+  run:
+    - python {{ python }}
+    - tensorflow-gpu ==2.2.0  # [linux]
+    - tensorflow-gpu ==2.3.0  # [win]
+    - pandas
+    - ffmpeg-python
+    - norbert
+    - librosa
+
+test:
+  imports:
+    - spleeter
+    - spleeter.commands
+    - spleeter.model
+    - spleeter.utils
+    - spleeter.separator
+
+about:
+  home: https://github.com/deezer/spleeter
+  license: MIT
+  license_family: MIT
+  license_file: LICENSE
+  summary: The Deezer source separation library with pretrained models based on tensorflow.
+  doc_url: https://github.com/deezer/spleeter/wiki
+  dev_url: https://github.com/deezer/spleeter
+
+extra:
+  recipe-maintainers:
+    - Faylixe
+    - romi1502
\ No newline at end of file
diff --git a/conda/spleeter/conda_build_config.yaml b/conda/spleeter/conda_build_config.yaml
deleted file mode 100644
index b441c3a..0000000
--- a/conda/spleeter/conda_build_config.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-python:
-  - 3.7
-  - 3.8
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 0000000..4765376
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,1880 @@
+[[package]]
+name = "absl-py"
+version = "0.11.0"
+description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+six = "*"
+
+[[package]]
+name = "appdirs"
+version = "1.4.4"
+description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "astunparse"
+version = "1.6.3"
+description = "An AST unparser for Python"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+six = ">=1.6.1,<2.0"
+
+[[package]]
+name = "atomicwrites"
+version = "1.4.0"
+description = "Atomic file writes."
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "attrs" +version = "20.3.0" +description = "Classes Without Boilerplate" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.extras] +dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "furo", "sphinx", "pre-commit"] +docs = ["furo", "sphinx", "zope.interface"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"] +tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six"] + +[[package]] +name = "audioread" +version = "2.1.9" +description = "multi-library, cross-platform audio decoding" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "black" +version = "20.8b1" +description = "The uncompromising code formatter." +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +appdirs = "*" +click = ">=7.1.2" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.6,<1" +regex = ">=2020.1.8" +toml = ">=0.10.1" +typed-ast = ">=1.4.0" +typing-extensions = ">=3.7.4" + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] + +[[package]] +name = "cachetools" +version = "4.2.0" +description = "Extensible memoizing collections and decorators" +category = "main" +optional = false +python-versions = "~=3.5" + +[[package]] +name = "certifi" +version = "2020.12.5" +description = "Python package for providing Mozilla's CA Bundle." +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "cffi" +version = "1.14.4" +description = "Foreign Function Interface for Python calling C code." +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "chardet" +version = "4.0.0" +description = "Universal encoding detector for Python 2 and 3" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "click" +version = "7.1.2" +description = "Composable command line interface toolkit" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "colorama" +version = "0.4.4" +description = "Cross-platform colored terminal text." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "decorator" +version = "4.4.2" +description = "Decorators for Humans" +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*" + +[[package]] +name = "ffmpeg-python" +version = "0.2.0" +description = "Python bindings for FFmpeg - with complex filtering support" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +future = "*" + +[package.extras] +dev = ["future (==0.17.1)", "numpy (==1.16.4)", "pytest-mock (==1.10.4)", "pytest (==4.6.1)", "Sphinx (==2.1.0)", "tox (==3.12.1)"] + +[[package]] +name = "future" +version = "0.18.2" +description = "Clean single-source support for Python 3 and 2" +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "gast" +version = "0.3.3" +description = "Python AST that abstracts the underlying Python version" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "google-auth" +version = "1.24.0" +description = "Google Authentication Library" +category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*" + +[package.dependencies] +cachetools = ">=2.0.0,<5.0" +pyasn1-modules = ">=0.2.1" +rsa = {version = ">=3.1.4,<5", markers = "python_version >= \"3.6\""} +six = ">=1.9.0" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)"] + +[[package]] +name = "google-auth-oauthlib" +version = "0.4.2" +description = "Google Authentication Library" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +google-auth = "*" +requests-oauthlib = ">=0.7.0" + +[package.extras] +tool = ["click"] + +[[package]] +name = "google-pasta" +version = "0.2.0" +description = "pasta is an AST-based Python refactoring library" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +six = "*" + +[[package]] +name = "grpcio" +version = "1.34.0" +description = "HTTP/2-based RPC framework" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +six = ">=1.5.2" + +[package.extras] +protobuf = ["grpcio-tools (>=1.34.0)"] + +[[package]] +name = "h11" +version = "0.12.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +category = "main" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "h2" +version = "3.2.0" +description = "HTTP/2 State-Machine based protocol implementation" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +hpack = ">=3.0,<4" +hyperframe = ">=5.2.0,<6" + +[[package]] +name = "h5py" +version = "2.10.0" +description = "Read and write HDF5 files from Python" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +numpy = ">=1.7" +six = "*" + +[[package]] +name = "hpack" +version = "3.0.0" +description = "Pure-Python HPACK header compression" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "httpcore" +version = "0.12.2" +description = "A minimal low-level HTTP client." +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +h11 = "<1.0.0" +sniffio = ">=1.0.0,<2.0.0" + +[package.extras] +http2 = ["h2 (>=3,<5)"] + +[[package]] +name = "httpx" +version = "0.16.1" +description = "The next generation HTTP client." 
+category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +certifi = "*" +h2 = {version = ">=3.0.0,<4.0.0", optional = true, markers = "extra == \"http2\""} +httpcore = ">=0.12.0,<0.13.0" +rfc3986 = {version = ">=1.3,<2", extras = ["idna2008"]} +sniffio = "*" + +[package.extras] +brotli = ["brotlipy (>=0.7.0,<0.8.0)"] +http2 = ["h2 (>=3.0.0,<4.0.0)"] + +[[package]] +name = "hyperframe" +version = "5.2.0" +description = "HTTP/2 framing layer for Python" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "idna" +version = "2.10" +description = "Internationalized Domain Names in Applications (IDNA)" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "importlib-metadata" +version = "3.3.0" +description = "Read metadata from Python packages" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} +zipp = ">=0.5" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["pytest (>=3.5,!=3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-cov", "jaraco.test (>=3.2.0)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] + +[[package]] +name = "iniconfig" +version = "1.1.1" +description = "iniconfig: brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "isort" +version = "5.7.0" +description = "A Python utility / library to sort Python imports." +category = "dev" +optional = false +python-versions = ">=3.6,<4.0" + +[package.extras] +pipfile_deprecated_finder = ["pipreqs", "requirementslib"] +requirements_deprecated_finder = ["pipreqs", "pip-api"] +colors = ["colorama (>=0.4.3,<0.5.0)"] + +[[package]] +name = "joblib" +version = "1.0.0" +description = "Lightweight pipelining with Python functions" +category = "main" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "jsonschema" +version = "3.2.0" +description = "An implementation of JSON Schema validation for Python" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +attrs = ">=17.4.0" +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} +pyrsistent = ">=0.14.0" +six = ">=1.11.0" + +[package.extras] +format = ["idna", "jsonpointer (>1.13)", "rfc3987", "strict-rfc3339", "webcolors"] +format_nongpl = ["idna", "jsonpointer (>1.13)", "webcolors", "rfc3986-validator (>0.1.0)", "rfc3339-validator"] + +[[package]] +name = "keras-preprocessing" +version = "1.1.2" +description = "Easy data preprocessing and data augmentation for deep learning models" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +numpy = ">=1.9.1" +six = ">=1.9.0" + +[package.extras] +image = ["scipy (>=0.14)", "Pillow (>=5.2.0)"] +pep8 = ["flake8"] +tests = ["pandas", "pillow", "tensorflow", "keras", "pytest", "pytest-xdist", "pytest-cov"] + +[[package]] +name = "librosa" +version = "0.8.0" +description = "Python module for audio and music processing" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +audioread = ">=2.0.0" +decorator = ">=3.0.0" +joblib = ">=0.14" +numba = ">=0.43.0" +numpy = ">=1.15.0" +pooch = ">=1.0" +resampy = ">=0.2.2" +scikit-learn = ">=0.14.0,<0.19.0 || >0.19.0" +scipy = 
">=1.0.0" +soundfile = ">=0.9.0" + +[package.extras] +display = ["matplotlib (>=1.5)"] +docs = ["numpydoc", "sphinx (!=1.3.1)", "sphinx_rtd_theme (>=0.5.0,<0.6.0)", "numba (<0.50)", "matplotlib (>=2.0.0,<3.3)", "sphinx-multiversion (==0.2.3)", "sphinx-gallery (>=0.7)", "sphinxcontrib-svg2pdfconverter", "presets"] +tests = ["matplotlib (>=2.1)", "pytest-mpl", "pytest-cov", "pytest", "contextlib2", "samplerate"] + +[[package]] +name = "llvmlite" +version = "0.35.0rc3" +description = "lightweight wrapper around basic LLVM functionality" +category = "main" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "markdown" +version = "3.3.3" +description = "Python implementation of Markdown." +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[package.extras] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "musdb" +version = "0.3.1" +description = "Python parser for the SIGSEP MUSDB18 dataset" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +numpy = ">=1.7" +pyaml = "*" +soundfile = ">=0.9.0" +stempeg = ">=0.1.7" +tqdm = "*" + +[package.extras] +dev = ["check-manifest"] +docs = ["sphinx", "sphinx-rtd-theme", "recommonmark"] +tests = ["pytest", "pytest-pep8"] + +[[package]] +name = "museval" +version = "0.3.0" +description = "Evaluation tools for the SIGSEP MUS database" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +jsonschema = "*" +musdb = ">=0.3.0" +numpy = "*" +pandas = ">=0.25.0" +scipy = "*" +simplejson = "*" +soundfile = "*" + +[package.extras] +dev = ["check-manifest"] +docs = ["sphinx", "sphinx-rtd-theme", "recommonmark", "numpydoc"] +tests = ["pytest", "pytest-pep8"] + +[[package]] +name = "mypy" +version = "0.790" +description = "Optional static typing for Python" +category = "dev" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +mypy-extensions = ">=0.4.3,<0.5.0" +typed-ast = ">=1.4.0,<1.5.0" +typing-extensions = ">=3.7.4" + +[package.extras] +dmypy = ["psutil (>=4.0)"] + +[[package]] +name = "mypy-extensions" +version = "0.4.3" +description = "Experimental type system extensions for programs checked with the mypy typechecker." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "norbert" +version = "0.2.1" +description = "Painless Wiener Filters" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +scipy = "*" + +[package.extras] +dev = ["check-manifest"] +docs = ["sphinx", "sphinx-rtd-theme", "recommonmark", "numpydoc"] +tests = ["pytest", "pytest-pep8"] + +[[package]] +name = "numba" +version = "0.51.2" +description = "compiling Python code using LLVM" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +llvmlite = ">=0.34.0.dev0,<0.35" +numpy = ">=1.15" + +[[package]] +name = "numpy" +version = "1.18.5" +description = "NumPy is the fundamental package for array computing with Python." 
+category = "main" +optional = false +python-versions = ">=3.5" + +[[package]] +name = "oauthlib" +version = "3.1.0" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.extras] +rsa = ["cryptography"] +signals = ["blinker"] +signedtoken = ["cryptography", "pyjwt (>=1.0.0)"] + +[[package]] +name = "opt-einsum" +version = "3.3.0" +description = "Optimizing numpys einsum function" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +numpy = ">=1.7" + +[package.extras] +docs = ["sphinx (==1.2.3)", "sphinxcontrib-napoleon", "sphinx-rtd-theme", "numpydoc"] +tests = ["pytest", "pytest-cov", "pytest-pep8"] + +[[package]] +name = "packaging" +version = "20.8" +description = "Core utilities for Python packages" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +pyparsing = ">=2.0.2" + +[[package]] +name = "pandas" +version = "1.1.2" +description = "Powerful data structures for data analysis, time series, and statistics" +category = "main" +optional = false +python-versions = ">=3.6.1" + +[package.dependencies] +numpy = ">=1.15.4" +python-dateutil = ">=2.7.3" +pytz = ">=2017.2" + +[package.extras] +test = ["pytest (>=4.0.2)", "pytest-xdist", "hypothesis (>=3.58)"] + +[[package]] +name = "pathspec" +version = "0.8.1" +description = "Utility library for gitignore style pattern matching of file paths." +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "pluggy" +version = "0.13.1" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} + +[package.extras] +dev = ["pre-commit", "tox"] + +[[package]] +name = "pooch" +version = "1.3.0" +description = "Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks." +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +appdirs = "*" +packaging = "*" +requests = "*" + +[[package]] +name = "protobuf" +version = "3.14.0" +description = "Protocol Buffers" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +six = ">=1.9" + +[[package]] +name = "py" +version = "1.10.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pyaml" +version = "20.4.0" +description = "PyYAML-based module to produce pretty and readable YAML-serialized data" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +PyYAML = "*" + +[[package]] +name = "pyasn1" +version = "0.4.8" +description = "ASN.1 types and codecs" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "pyasn1-modules" +version = "0.2.8" +description = "A collection of ASN.1-based protocols modules." 
+category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.5.0" + +[[package]] +name = "pycparser" +version = "2.20" +description = "C parser in Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pyparsing" +version = "2.4.7" +description = "Python parsing module" +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "pyrsistent" +version = "0.17.3" +description = "Persistent/Functional/Immutable data structures" +category = "main" +optional = false +python-versions = ">=3.5" + +[[package]] +name = "pytest" +version = "6.2.1" +description = "pytest: simple powerful testing with Python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} +attrs = ">=19.2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<1.0.0a1" +py = ">=1.8.2" +toml = "*" + +[package.extras] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] + +[[package]] +name = "pytest-forked" +version = "1.3.0" +description = "run tests in isolated forked subprocesses" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +py = "*" +pytest = ">=3.10" + +[[package]] +name = "python-dateutil" +version = "2.8.1" +description = "Extensions to the standard Python datetime module" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2020.5" +description = "World timezone definitions, modern and historical" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "pyyaml" +version = "5.3.1" +description = "YAML parser and emitter for Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "regex" +version = "2020.11.13" +description = "Alternative regular expression module, to replace re." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "requests" +version = "2.25.1" +description = "Python HTTP for Humans." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +certifi = ">=2017.4.17" +chardet = ">=3.0.2,<5" +idna = ">=2.5,<3" +urllib3 = ">=1.21.1,<1.27" + +[package.extras] +security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)"] +socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] + +[[package]] +name = "requests-oauthlib" +version = "1.3.0" +description = "OAuthlib authentication support for Requests." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "resampy" +version = "0.2.2" +description = "Efficient signal resampling" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +numba = ">=0.32" +numpy = ">=1.10" +scipy = ">=0.13" +six = ">=1.3" + +[package.extras] +docs = ["sphinx (!=1.3.1)", "numpydoc"] +tests = ["pytest (<4)", "pytest-cov"] + +[[package]] +name = "rfc3986" +version = "1.4.0" +description = "Validating URI References per RFC 3986" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +idna = {version = "*", optional = true, markers = "extra == \"idna2008\""} + +[package.extras] +idna2008 = ["idna"] + +[[package]] +name = "rsa" +version = "4.6" +description = "Pure-Python RSA implementation" +category = "main" +optional = false +python-versions = ">=3.5, <4" + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "scikit-learn" +version = "0.24.0" +description = "A set of python modules for machine learning and data mining" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +joblib = ">=0.11" +numpy = ">=1.13.3" +scipy = ">=0.19.1" +threadpoolctl = ">=2.0.0" + +[package.extras] +benchmark = ["matplotlib (>=2.1.1)", "pandas (>=0.25.0)", "memory-profiler (>=0.57.0)"] +docs = ["matplotlib (>=2.1.1)", "scikit-image (>=0.13)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)", "memory-profiler (>=0.57.0)", "sphinx (>=3.2.0)", "sphinx-gallery (>=0.7.0)", "numpydoc (>=1.0.0)", "Pillow (>=7.1.2)", "sphinx-prompt (>=1.3.0)"] +examples = ["matplotlib (>=2.1.1)", "scikit-image (>=0.13)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)"] +tests = ["matplotlib (>=2.1.1)", "scikit-image (>=0.13)", "pandas (>=0.25.0)", "pytest (>=5.0.1)", "pytest-cov (>=2.9.0)", "flake8 (>=3.8.2)", "mypy (>=0.770)", "pyamg (>=4.0.0)"] + +[[package]] +name = "scipy" +version = "1.4.1" +description = "SciPy: Scientific Library for Python" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +numpy = ">=1.13.3" + +[[package]] +name = "simplejson" +version = "3.17.2" +description = "Simple, fast, extensible JSON encoder/decoder for Python" +category = "main" +optional = false +python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "six" +version = "1.15.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "sniffio" +version = "1.2.0" +description = "Sniff out which async library your code is running under" +category = "main" +optional = false +python-versions = ">=3.5" + +[[package]] +name = "soundfile" +version = "0.10.3.post1" +description = "An audio library based on libsndfile, CFFI and NumPy" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +cffi = ">=1.0" + +[package.extras] +numpy = ["numpy"] + +[[package]] +name = "stempeg" +version = "0.2.2" +description = "Read and write stem/multistream audio files" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +ffmpeg-python = ">=0.2.0" +numpy = ">=1.6" + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "tensorboard" +version = "2.4.0" +description = "TensorBoard lets you watch Tensors Flow" 
+category = "main" +optional = false +python-versions = ">= 2.7, != 3.0.*, != 3.1.*" + +[package.dependencies] +absl-py = ">=0.4" +google-auth = ">=1.6.3,<2" +google-auth-oauthlib = ">=0.4.1,<0.5" +grpcio = ">=1.24.3" +markdown = ">=2.6.8" +numpy = ">=1.12.0" +protobuf = ">=3.6.0" +requests = ">=2.21.0,<3" +six = ">=1.10.0" +tensorboard-plugin-wit = ">=1.6.0" +werkzeug = ">=0.11.15" + +[[package]] +name = "tensorboard-plugin-wit" +version = "1.7.0" +description = "What-If Tool TensorBoard plugin." +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "tensorflow" +version = "2.3.0" +description = "TensorFlow is an open source machine learning framework for everyone." +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +absl-py = ">=0.7.0" +astunparse = "1.6.3" +gast = "0.3.3" +google-pasta = ">=0.1.8" +grpcio = ">=1.8.6" +h5py = ">=2.10.0,<2.11.0" +keras-preprocessing = ">=1.1.1,<1.2" +numpy = ">=1.16.0,<1.19.0" +opt-einsum = ">=2.3.2" +protobuf = ">=3.9.2" +scipy = "1.4.1" +six = ">=1.12.0" +tensorboard = ">=2.3.0,<3" +tensorflow-estimator = ">=2.3.0,<2.4.0" +termcolor = ">=1.1.0" +wrapt = ">=1.11.1" + +[[package]] +name = "tensorflow-estimator" +version = "2.3.0" +description = "TensorFlow Estimator." +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "termcolor" +version = "1.1.0" +description = "ANSII Color formatting for output in terminal." +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "threadpoolctl" +version = "2.1.0" +description = "threadpoolctl" +category = "main" +optional = false +python-versions = ">=3.5" + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +category = "dev" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "tqdm" +version = "4.55.1" +description = "Fast, Extensible Progress Meter" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" + +[package.extras] +dev = ["py-make (>=0.1.0)", "twine", "wheel"] +telegram = ["requests"] + +[[package]] +name = "typed-ast" +version = "1.4.2" +description = "a fork of Python 2 and 3 ast modules with type comment support" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "typer" +version = "0.3.2" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +click = ">=7.1.1,<7.2.0" + +[package.extras] +test = ["pytest-xdist (>=1.32.0,<2.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "mypy (==0.782)", "black (>=19.10b0,<20.0b0)", "isort (>=5.0.6,<6.0.0)", "shellingham (>=1.3.0,<2.0.0)", "pytest (>=4.4.0,<5.4.0)", "pytest-cov (>=2.10.0,<3.0.0)", "coverage (>=5.2,<6.0)"] +all = ["colorama (>=0.4.3,<0.5.0)", "shellingham (>=1.3.0,<2.0.0)"] +dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)"] +doc = ["mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=5.4.0,<6.0.0)", "markdown-include (>=0.5.1,<0.6.0)"] + +[[package]] +name = "typing-extensions" +version = "3.7.4.3" +description = "Backported and Experimental Type Hints for Python 3.5+" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "urllib3" +version = "1.26.2" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" + +[package.extras] +brotli = ["brotlipy (>=0.6.0)"] +secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "werkzeug" +version = "1.0.1" +description = "The comprehensive WSGI web application library." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.extras] +dev = ["pytest", "pytest-timeout", "coverage", "tox", "sphinx", "pallets-sphinx-themes", "sphinx-issues"] +watchdog = ["watchdog"] + +[[package]] +name = "wrapt" +version = "1.12.1" +description = "Module for decorators, wrappers and monkey patching." +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "zipp" +version = "3.4.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["pytest (>=3.5,!=3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-cov", "jaraco.test (>=3.2.0)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] + +[extras] +evaluation = ["musdb", "museval"] + +[metadata] +lock-version = "1.1" +python-versions = "^3.7" +content-hash = "d61f00d106e24c5eeb45ecb834e1e156d66f7a75afc4e023207455f14246f2c2" + +[metadata.files] +absl-py = [ + {file = "absl-py-0.11.0.tar.gz", hash = "sha256:673cccb88d810e5627d0c1c818158485d106f65a583880e2f730c997399bcfa7"}, + {file = "absl_py-0.11.0-py3-none-any.whl", hash = "sha256:b3d9eb5119ff6e0a0125f6dabf2f9fae02f8acae7be70576002fac27235611c5"}, +] +appdirs = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] +astunparse = [ + {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"}, + {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"}, +] +atomicwrites = [ + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, +] +attrs = [ + {file = "attrs-20.3.0-py2.py3-none-any.whl", hash = "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6"}, + {file = "attrs-20.3.0.tar.gz", hash = "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700"}, +] +audioread = [ + {file = "audioread-2.1.9.tar.gz", hash = "sha256:a3480e42056c8e80a8192a54f6729a280ef66d27782ee11cbd63e9d4d1523089"}, +] +black = [ + {file = "black-20.8b1.tar.gz", hash = "sha256:1c02557aa099101b9d21496f8a914e9ed2222ef70336404eeeac8edba836fbea"}, +] +cachetools = [ + {file = "cachetools-4.2.0-py3-none-any.whl", hash = "sha256:c6b07a6ded8c78bf36730b3dc452dfff7d95f2a12a2fed856b1a0cb13ca78c61"}, + {file = "cachetools-4.2.0.tar.gz", hash = "sha256:3796e1de094f0eaca982441c92ce96c68c89cced4cd97721ab297ea4b16db90e"}, +] +certifi = [ + {file = "certifi-2020.12.5-py2.py3-none-any.whl", hash = 
"sha256:719a74fb9e33b9bd44cc7f3a8d94bc35e4049deebe19ba7d8e108280cfd59830"}, + {file = "certifi-2020.12.5.tar.gz", hash = "sha256:1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c"}, +] +cffi = [ + {file = "cffi-1.14.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ebb253464a5d0482b191274f1c8bf00e33f7e0b9c66405fbffc61ed2c839c775"}, + {file = "cffi-1.14.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:2c24d61263f511551f740d1a065eb0212db1dbbbbd241db758f5244281590c06"}, + {file = "cffi-1.14.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9f7a31251289b2ab6d4012f6e83e58bc3b96bd151f5b5262467f4bb6b34a7c26"}, + {file = "cffi-1.14.4-cp27-cp27m-win32.whl", hash = "sha256:5cf4be6c304ad0b6602f5c4e90e2f59b47653ac1ed9c662ed379fe48a8f26b0c"}, + {file = "cffi-1.14.4-cp27-cp27m-win_amd64.whl", hash = "sha256:f60567825f791c6f8a592f3c6e3bd93dd2934e3f9dac189308426bd76b00ef3b"}, + {file = "cffi-1.14.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:c6332685306b6417a91b1ff9fae889b3ba65c2292d64bd9245c093b1b284809d"}, + {file = "cffi-1.14.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d9efd8b7a3ef378dd61a1e77367f1924375befc2eba06168b6ebfa903a5e59ca"}, + {file = "cffi-1.14.4-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:51a8b381b16ddd370178a65360ebe15fbc1c71cf6f584613a7ea08bfad946698"}, + {file = "cffi-1.14.4-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:1d2c4994f515e5b485fd6d3a73d05526aa0fcf248eb135996b088d25dfa1865b"}, + {file = "cffi-1.14.4-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:af5c59122a011049aad5dd87424b8e65a80e4a6477419c0c1015f73fb5ea0293"}, + {file = "cffi-1.14.4-cp35-cp35m-win32.whl", hash = "sha256:594234691ac0e9b770aee9fcdb8fa02c22e43e5c619456efd0d6c2bf276f3eb2"}, + {file = "cffi-1.14.4-cp35-cp35m-win_amd64.whl", hash = "sha256:64081b3f8f6f3c3de6191ec89d7dc6c86a8a43911f7ecb422c60e90c70be41c7"}, + {file = "cffi-1.14.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f803eaa94c2fcda012c047e62bc7a51b0bdabda1cad7a92a522694ea2d76e49f"}, + {file = "cffi-1.14.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:105abaf8a6075dc96c1fe5ae7aae073f4696f2905fde6aeada4c9d2926752362"}, + {file = "cffi-1.14.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0638c3ae1a0edfb77c6765d487fee624d2b1ee1bdfeffc1f0b58c64d149e7eec"}, + {file = "cffi-1.14.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:7c6b1dece89874d9541fc974917b631406233ea0440d0bdfbb8e03bf39a49b3b"}, + {file = "cffi-1.14.4-cp36-cp36m-win32.whl", hash = "sha256:155136b51fd733fa94e1c2ea5211dcd4c8879869008fc811648f16541bf99668"}, + {file = "cffi-1.14.4-cp36-cp36m-win_amd64.whl", hash = "sha256:6bc25fc545a6b3d57b5f8618e59fc13d3a3a68431e8ca5fd4c13241cd70d0009"}, + {file = "cffi-1.14.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a7711edca4dcef1a75257b50a2fbfe92a65187c47dab5a0f1b9b332c5919a3fb"}, + {file = "cffi-1.14.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:00e28066507bfc3fe865a31f325c8391a1ac2916219340f87dfad602c3e48e5d"}, + {file = "cffi-1.14.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:798caa2a2384b1cbe8a2a139d80734c9db54f9cc155c99d7cc92441a23871c03"}, + {file = "cffi-1.14.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:a5ed8c05548b54b998b9498753fb9cadbfd92ee88e884641377d8a8b291bcc01"}, + {file = "cffi-1.14.4-cp37-cp37m-win32.whl", hash = "sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e"}, + {file = "cffi-1.14.4-cp37-cp37m-win_amd64.whl", hash = "sha256:9cc46bc107224ff5b6d04369e7c595acb700c3613ad7bcf2e2012f62ece80c35"}, + {file = 
"cffi-1.14.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:df5169c4396adc04f9b0a05f13c074df878b6052430e03f50e68adf3a57aa28d"}, + {file = "cffi-1.14.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:9ffb888f19d54a4d4dfd4b3f29bc2c16aa4972f1c2ab9c4ab09b8ab8685b9c2b"}, + {file = "cffi-1.14.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8d6603078baf4e11edc4168a514c5ce5b3ba6e3e9c374298cb88437957960a53"}, + {file = "cffi-1.14.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:d5ff0621c88ce83a28a10d2ce719b2ee85635e85c515f12bac99a95306da4b2e"}, + {file = "cffi-1.14.4-cp38-cp38-win32.whl", hash = "sha256:b4e248d1087abf9f4c10f3c398896c87ce82a9856494a7155823eb45a892395d"}, + {file = "cffi-1.14.4-cp38-cp38-win_amd64.whl", hash = "sha256:ec80dc47f54e6e9a78181ce05feb71a0353854cc26999db963695f950b5fb375"}, + {file = "cffi-1.14.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:840793c68105fe031f34d6a086eaea153a0cd5c491cde82a74b420edd0a2b909"}, + {file = "cffi-1.14.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:b18e0a9ef57d2b41f5c68beefa32317d286c3d6ac0484efd10d6e07491bb95dd"}, + {file = "cffi-1.14.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:045d792900a75e8b1e1b0ab6787dd733a8190ffcf80e8c8ceb2fb10a29ff238a"}, + {file = "cffi-1.14.4-cp39-cp39-win32.whl", hash = "sha256:ba4e9e0ae13fc41c6b23299545e5ef73055213e466bd107953e4a013a5ddd7e3"}, + {file = "cffi-1.14.4-cp39-cp39-win_amd64.whl", hash = "sha256:f032b34669220030f905152045dfa27741ce1a6db3324a5bc0b96b6c7420c87b"}, + {file = "cffi-1.14.4.tar.gz", hash = "sha256:1a465cbe98a7fd391d47dce4b8f7e5b921e6cd805ef421d04f5f66ba8f06086c"}, +] +chardet = [ + {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"}, + {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"}, +] +click = [ + {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, + {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, +] +colorama = [ + {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, + {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, +] +decorator = [ + {file = "decorator-4.4.2-py2.py3-none-any.whl", hash = "sha256:41fa54c2a0cc4ba648be4fd43cff00aedf5b9465c9bf18d64325bc225f08f760"}, + {file = "decorator-4.4.2.tar.gz", hash = "sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7"}, +] +ffmpeg-python = [ + {file = "ffmpeg-python-0.2.0.tar.gz", hash = "sha256:65225db34627c578ef0e11c8b1eb528bb35e024752f6f10b78c011f6f64c4127"}, + {file = "ffmpeg_python-0.2.0-py3-none-any.whl", hash = "sha256:ac441a0404e053f8b6a1113a77c0f452f1cfc62f6344a769475ffdc0f56c23c5"}, +] +future = [ + {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, +] +gast = [ + {file = "gast-0.3.3-py2.py3-none-any.whl", hash = "sha256:8f46f5be57ae6889a4e16e2ca113b1703ef17f2b0abceb83793eaba9e1351a45"}, + {file = "gast-0.3.3.tar.gz", hash = "sha256:b881ef288a49aa81440d2c5eb8aeefd4c2bb8993d5f50edae7413a85bfdb3b57"}, +] +google-auth = [ + {file = "google-auth-1.24.0.tar.gz", hash = "sha256:0b0e026b412a0ad096e753907559e4bdb180d9ba9f68dd9036164db4fdc4ad2e"}, + {file = "google_auth-1.24.0-py2.py3-none-any.whl", hash = 
"sha256:ce752cc51c31f479dbf9928435ef4b07514b20261b021c7383bee4bda646acb8"}, +] +google-auth-oauthlib = [ + {file = "google-auth-oauthlib-0.4.2.tar.gz", hash = "sha256:65b65bc39ad8cab15039b35e5898455d3d66296d0584d96fe0e79d67d04c51d9"}, + {file = "google_auth_oauthlib-0.4.2-py2.py3-none-any.whl", hash = "sha256:d4d98c831ea21d574699978827490a41b94f05d565c617fe1b420e88f1fc8d8d"}, +] +google-pasta = [ + {file = "google-pasta-0.2.0.tar.gz", hash = "sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e"}, + {file = "google_pasta-0.2.0-py2-none-any.whl", hash = "sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954"}, + {file = "google_pasta-0.2.0-py3-none-any.whl", hash = "sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed"}, +] +grpcio = [ + {file = "grpcio-1.34.0-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:e2ffa46db9103706640c74886ac23ed18d1487a8523cc128da239e1d5a4e3301"}, + {file = "grpcio-1.34.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:843436e69c37eb45b0285fa42f7acc06d147f2e9c1d515b0f901e94d40107e79"}, + {file = "grpcio-1.34.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a403ed4d8fcc441a2c2ec9ede838b0ae5f9da996d950cf2ff9f82242b496e0a7"}, + {file = "grpcio-1.34.0-cp27-cp27m-win32.whl", hash = "sha256:dc45f5750ce50f34f20a0607efae5c797d01681a44465b8287bebef1e9847d5b"}, + {file = "grpcio-1.34.0-cp27-cp27m-win_amd64.whl", hash = "sha256:2fd4a80f267aa258f5a74df5fe243eff80299a4f5b356c1da53f6f5793bbbf4b"}, + {file = "grpcio-1.34.0-cp27-cp27mu-linux_armv7l.whl", hash = "sha256:f2e4d64675351a058f9cb35fe390ca0956bd2926171bfb7c87596a1ee10ff6ba"}, + {file = "grpcio-1.34.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:4a2c85cd4a67c36fe12535fe32eb336635843d1eb31d3fa301444e60a8df9c90"}, + {file = "grpcio-1.34.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:32ad56f6d3d7e699f9a0d62719f2de9092e79f444d875d70f58cf7f8bb19684c"}, + {file = "grpcio-1.34.0-cp35-cp35m-linux_armv7l.whl", hash = "sha256:e69ac6fc9096bbb43f5276655661db746233cd320808e0d302198eb43dc7bd04"}, + {file = "grpcio-1.34.0-cp35-cp35m-macosx_10_10_intel.whl", hash = "sha256:5b105adb44486fb594b8d8142b5d4fbe50cb125c77ac7d270f5d0277ce5c554a"}, + {file = "grpcio-1.34.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:923a3b18badc3749c4d715216934f62f46a818790e325ece6184d07e7d6c7f73"}, + {file = "grpcio-1.34.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:9579f22222ac89ceee64c1101cced6434d9f6b12078b43ece0f9d8ebdb657f73"}, + {file = "grpcio-1.34.0-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:dfa098a6ff8d1b68ed7bd655150ee91f57c29042c093ff51113176aded3f0071"}, + {file = "grpcio-1.34.0-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:32fbc78d558d9468a4b16f79f4130daec8e431bc7a3b1775b0e98f09a7ab45a2"}, + {file = "grpcio-1.34.0-cp35-cp35m-win32.whl", hash = "sha256:205eda06d8aeffc87a1e29ff1f090546adf0b6e766378cc4c13686534397fdb4"}, + {file = "grpcio-1.34.0-cp35-cp35m-win_amd64.whl", hash = "sha256:2ea864ae3d3abc99d3988d1d27dee3f6350b60149ccf810a89cd9a9d02a675d6"}, + {file = "grpcio-1.34.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:5d8108b240fd5b8a0483f95ab2651fe2d633311faae93a12938ea06cf61a5efd"}, + {file = "grpcio-1.34.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:bda0f52eb1279a7119526df2ef33ea2808691120daf9effaf60ca0c07f76058a"}, + {file = "grpcio-1.34.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:c89b6a3eca8eae10eea78896ccfdc9d04aa2f7b2ee96de20246e5c96494c68f5"}, + {file = "grpcio-1.34.0-cp36-cp36m-manylinux2010_x86_64.whl", 
hash = "sha256:fa834f4c70b9df83d5af610097747c224513d59af1f03e8c06bca9a7d81fd1a3"}, + {file = "grpcio-1.34.0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:20606ec7c265f81c5a0226f69842dc8dde66d921968ab9448e59d440cf98bebf"}, + {file = "grpcio-1.34.0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:72b6a89aabf937d706946230f5aa13bdf7d2a42874810fa54436c647577b543e"}, + {file = "grpcio-1.34.0-cp36-cp36m-win32.whl", hash = "sha256:49da07ae43c552280b8b4c70617f9b589588404c2545d6eba2c55179b3d836af"}, + {file = "grpcio-1.34.0-cp36-cp36m-win_amd64.whl", hash = "sha256:beef6be49ada569edf3b73fd4eb57d6c2af7e10c0c82a210dbe51de7c4a1ed53"}, + {file = "grpcio-1.34.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8d92e884f6d67b9a2a4514631d3c9836281044caedb5fd34d4ce2bbec138c87d"}, + {file = "grpcio-1.34.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:e238a554f29d90b0e7fca15e8119b9a7c5f88faacbf9b982751ad54d639b57f8"}, + {file = "grpcio-1.34.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:98b0b6e44c451093354a38b620e6e0df958b0710abd6a0ddd84da84424bce003"}, + {file = "grpcio-1.34.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:bbd3522f821fb5d01049db214fb9f949a8b2d92761c2780a20ff73818efd5360"}, + {file = "grpcio-1.34.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:2f54046ca2a81ff45ec8f6d3d7447ad562adb067c3640c35354e440fd771b625"}, + {file = "grpcio-1.34.0-cp37-cp37m-win32.whl", hash = "sha256:50c4f10e7deff96d197bc6d1988c2a5a0bc6252bbd31d7fb374ce8923f937e7a"}, + {file = "grpcio-1.34.0-cp37-cp37m-win_amd64.whl", hash = "sha256:6fafdba42c26bbdf78948c09a93a8b3a8a509c66c6b4324bc1fb360bf4e82b9d"}, + {file = "grpcio-1.34.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:bd7634f8c49c8467fec5fd9e0d1abb205b0aa61670ff0113ef835ca6548aad3d"}, + {file = "grpcio-1.34.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:69127393fc3513da228bc3908914df2284923e0eacf8d73f21ad387317450317"}, + {file = "grpcio-1.34.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:5e8e6035d4f9ab856ab437e381e652b31dfd42443d2243d45bdf4b90adaf3559"}, + {file = "grpcio-1.34.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:95de4ad9ae39590668e3330d414253f672aedd46cc107d7f71b4a2268f3d6066"}, + {file = "grpcio-1.34.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:a1024006fe61ee7e43e7099faf08f4508ea0c944a1558e8d715a5b4556937ace"}, + {file = "grpcio-1.34.0-cp38-cp38-win32.whl", hash = "sha256:dea35dcf09aee91552cb4b3e250efdbcb79564b5b5517246bcbead8d5871e291"}, + {file = "grpcio-1.34.0-cp38-cp38-win_amd64.whl", hash = "sha256:e95bda60c584b3deb5c37babb44d4300cf4bf3a6c43198a244ddcaddca3fde3a"}, + {file = "grpcio-1.34.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:c88ce184973fe2035ffa176eb08cd492db090505e6b1ddc68b5cc1e0b01a07a0"}, + {file = "grpcio-1.34.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:57a30f9df0f5342e4dad384e7023b9f88742c325838da977828c37f49eb8940a"}, + {file = "grpcio-1.34.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:924d5e8b18942ebea1260e60be7e2bde2a3587ea386190b442790f84180bf372"}, + {file = "grpcio-1.34.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:43fafebcc2e81d012f7147a0ddf9be69864c40fc4edd9844937eba0020508297"}, + {file = "grpcio-1.34.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:9550b7c9d2f11579b484accc6183e02ebe33ce80a0ff15f5c28895df6b3d3108"}, + {file = "grpcio-1.34.0-cp39-cp39-win32.whl", hash = "sha256:d16f7f5a10bf24640fa639974d409c220e587b3e2fa2620af00d43ba36dafc2c"}, + {file = "grpcio-1.34.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:25958bd7c6773e6de79781cc0d6f19d0c82332984dd07ef238889e93485d5afc"}, + {file = "grpcio-1.34.0.tar.gz", hash = "sha256:f98f746cacbaa681de0bcd90d7aa77b440e3e1327a9988f6a2b580d54e27d4c3"}, +] +h11 = [ + {file = "h11-0.12.0-py3-none-any.whl", hash = "sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6"}, + {file = "h11-0.12.0.tar.gz", hash = "sha256:47222cb6067e4a307d535814917cd98fd0a57b6788ce715755fa2b6c28b56042"}, +] +h2 = [ + {file = "h2-3.2.0-py2.py3-none-any.whl", hash = "sha256:61e0f6601fa709f35cdb730863b4e5ec7ad449792add80d1410d4174ed139af5"}, + {file = "h2-3.2.0.tar.gz", hash = "sha256:875f41ebd6f2c44781259005b157faed1a5031df3ae5aa7bcb4628a6c0782f14"}, +] +h5py = [ + {file = "h5py-2.10.0-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:ecf4d0b56ee394a0984de15bceeb97cbe1fe485f1ac205121293fc44dcf3f31f"}, + {file = "h5py-2.10.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:86868dc07b9cc8cb7627372a2e6636cdc7a53b7e2854ad020c9e9d8a4d3fd0f5"}, + {file = "h5py-2.10.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:aac4b57097ac29089f179bbc2a6e14102dd210618e94d77ee4831c65f82f17c0"}, + {file = "h5py-2.10.0-cp27-cp27m-win32.whl", hash = "sha256:7be5754a159236e95bd196419485343e2b5875e806fe68919e087b6351f40a70"}, + {file = "h5py-2.10.0-cp27-cp27m-win_amd64.whl", hash = "sha256:13c87efa24768a5e24e360a40e0bc4c49bcb7ce1bb13a3a7f9902cec302ccd36"}, + {file = "h5py-2.10.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:79b23f47c6524d61f899254f5cd5e486e19868f1823298bc0c29d345c2447172"}, + {file = "h5py-2.10.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:cbf28ae4b5af0f05aa6e7551cee304f1d317dbed1eb7ac1d827cee2f1ef97a99"}, + {file = "h5py-2.10.0-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:c0d4b04bbf96c47b6d360cd06939e72def512b20a18a8547fa4af810258355d5"}, + {file = "h5py-2.10.0-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:549ad124df27c056b2e255ea1c44d30fb7a17d17676d03096ad5cd85edb32dc1"}, + {file = "h5py-2.10.0-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:a5f82cd4938ff8761d9760af3274acf55afc3c91c649c50ab18fcff5510a14a5"}, + {file = "h5py-2.10.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:3dad1730b6470fad853ef56d755d06bb916ee68a3d8272b3bab0c1ddf83bb99e"}, + {file = "h5py-2.10.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:063947eaed5f271679ed4ffa36bb96f57bc14f44dd4336a827d9a02702e6ce6b"}, + {file = "h5py-2.10.0-cp35-cp35m-win32.whl", hash = "sha256:c54a2c0dd4957776ace7f95879d81582298c5daf89e77fb8bee7378f132951de"}, + {file = "h5py-2.10.0-cp35-cp35m-win_amd64.whl", hash = "sha256:6998be619c695910cb0effe5eb15d3a511d3d1a5d217d4bd0bebad1151ec2262"}, + {file = "h5py-2.10.0-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:ff7d241f866b718e4584fa95f520cb19405220c501bd3a53ee11871ba5166ea2"}, + {file = "h5py-2.10.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:54817b696e87eb9e403e42643305f142cd8b940fe9b3b490bbf98c3b8a894cf4"}, + {file = "h5py-2.10.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d3c59549f90a891691991c17f8e58c8544060fdf3ccdea267100fa5f561ff62f"}, + {file = "h5py-2.10.0-cp36-cp36m-win32.whl", hash = "sha256:d7ae7a0576b06cb8e8a1c265a8bc4b73d05fdee6429bffc9a26a6eb531e79d72"}, + {file = "h5py-2.10.0-cp36-cp36m-win_amd64.whl", hash = "sha256:bffbc48331b4a801d2f4b7dac8a72609f0b10e6e516e5c480a3e3241e091c878"}, + {file = "h5py-2.10.0-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:51ae56894c6c93159086ffa2c94b5b3388c0400548ab26555c143e7cfa05b8e5"}, + {file = "h5py-2.10.0-cp37-cp37m-manylinux1_i686.whl", hash = 
"sha256:16ead3c57141101e3296ebeed79c9c143c32bdd0e82a61a2fc67e8e6d493e9d1"}, + {file = "h5py-2.10.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:f0e25bb91e7a02efccb50aba6591d3fe2c725479e34769802fcdd4076abfa917"}, + {file = "h5py-2.10.0-cp37-cp37m-win32.whl", hash = "sha256:f23951a53d18398ef1344c186fb04b26163ca6ce449ebd23404b153fd111ded9"}, + {file = "h5py-2.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:8bb1d2de101f39743f91512a9750fb6c351c032e5cd3204b4487383e34da7f75"}, + {file = "h5py-2.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:64f74da4a1dd0d2042e7d04cf8294e04ddad686f8eba9bb79e517ae582f6668d"}, + {file = "h5py-2.10.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:d35f7a3a6cefec82bfdad2785e78359a0e6a5fbb3f605dd5623ce88082ccd681"}, + {file = "h5py-2.10.0-cp38-cp38-win32.whl", hash = "sha256:6ef7ab1089e3ef53ca099038f3c0a94d03e3560e6aff0e9d6c64c55fb13fc681"}, + {file = "h5py-2.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:769e141512b54dee14ec76ed354fcacfc7d97fea5a7646b709f7400cf1838630"}, + {file = "h5py-2.10.0.tar.gz", hash = "sha256:84412798925dc870ffd7107f045d7659e60f5d46d1c70c700375248bf6bf512d"}, +] +hpack = [ + {file = "hpack-3.0.0-py2.py3-none-any.whl", hash = "sha256:0edd79eda27a53ba5be2dfabf3b15780928a0dff6eb0c60a3d6767720e970c89"}, + {file = "hpack-3.0.0.tar.gz", hash = "sha256:8eec9c1f4bfae3408a3f30500261f7e6a65912dc138526ea054f9ad98892e9d2"}, +] +httpcore = [ + {file = "httpcore-0.12.2-py3-none-any.whl", hash = "sha256:420700af11db658c782f7e8fda34f9dcd95e3ee93944dd97d78cb70247e0cd06"}, + {file = "httpcore-0.12.2.tar.gz", hash = "sha256:dd1d762d4f7c2702149d06be2597c35fb154c5eff9789a8c5823fbcf4d2978d6"}, +] +httpx = [ + {file = "httpx-0.16.1-py3-none-any.whl", hash = "sha256:9cffb8ba31fac6536f2c8cde30df859013f59e4bcc5b8d43901cb3654a8e0a5b"}, + {file = "httpx-0.16.1.tar.gz", hash = "sha256:126424c279c842738805974687e0518a94c7ae8d140cd65b9c4f77ac46ffa537"}, +] +hyperframe = [ + {file = "hyperframe-5.2.0-py2.py3-none-any.whl", hash = "sha256:5187962cb16dcc078f23cb5a4b110098d546c3f41ff2d4038a9896893bbd0b40"}, + {file = "hyperframe-5.2.0.tar.gz", hash = "sha256:a9f5c17f2cc3c719b917c4f33ed1c61bd1f8dfac4b1bd23b7c80b3400971b41f"}, +] +idna = [ + {file = "idna-2.10-py2.py3-none-any.whl", hash = "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"}, + {file = "idna-2.10.tar.gz", hash = "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"}, +] +importlib-metadata = [ + {file = "importlib_metadata-3.3.0-py3-none-any.whl", hash = "sha256:bf792d480abbd5eda85794e4afb09dd538393f7d6e6ffef6e9f03d2014cf9450"}, + {file = "importlib_metadata-3.3.0.tar.gz", hash = "sha256:5c5a2720817414a6c41f0a49993908068243ae02c1635a228126519b509c8aed"}, +] +iniconfig = [ + {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, + {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, +] +isort = [ + {file = "isort-5.7.0-py3-none-any.whl", hash = "sha256:fff4f0c04e1825522ce6949973e83110a6e907750cd92d128b0d14aaaadbffdc"}, + {file = "isort-5.7.0.tar.gz", hash = "sha256:c729845434366216d320e936b8ad6f9d681aab72dc7cbc2d51bedc3582f3ad1e"}, +] +joblib = [ + {file = "joblib-1.0.0-py3-none-any.whl", hash = "sha256:75ead23f13484a2a414874779d69ade40d4fa1abe62b222a23cd50d4bc822f6f"}, + {file = "joblib-1.0.0.tar.gz", hash = "sha256:7ad866067ac1fdec27d51c8678ea760601b70e32ff1881d4dc8e1171f2b64b24"}, +] +jsonschema = [ + {file = 
"jsonschema-3.2.0-py2.py3-none-any.whl", hash = "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163"}, + {file = "jsonschema-3.2.0.tar.gz", hash = "sha256:c8a85b28d377cc7737e46e2d9f2b4f44ee3c0e1deac6bf46ddefc7187d30797a"}, +] +keras-preprocessing = [ + {file = "Keras_Preprocessing-1.1.2-py2.py3-none-any.whl", hash = "sha256:7b82029b130ff61cc99b55f3bd27427df4838576838c5b2f65940e4fcec99a7b"}, + {file = "Keras_Preprocessing-1.1.2.tar.gz", hash = "sha256:add82567c50c8bc648c14195bf544a5ce7c1f76761536956c3d2978970179ef3"}, +] +librosa = [ + {file = "librosa-0.8.0.tar.gz", hash = "sha256:af0b9f2ed4bbf6aecbc448a4cd27c16453c397cb6bef0f0cfba0e63afea2b839"}, +] +llvmlite = [ + {file = "llvmlite-0.35.0rc3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7b3b8b059f0449907c0376c7cecf6e0b4bdacc13797ab9f3cc64bb602e31c0a8"}, + {file = "llvmlite-0.35.0rc3-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:2d61fe18cf7b27f06e7663bd94d330d909e12a7595f220c7bff0f43ea271460c"}, + {file = "llvmlite-0.35.0rc3-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:ecd9ba96592fb5f3a9b1645cc7c73b8a1f2e74573f2afe1af15f8d13556e6a4b"}, + {file = "llvmlite-0.35.0rc3-cp36-cp36m-win32.whl", hash = "sha256:d80e892bf1278f6bc92e892e92f4b9170e02a1dfd9bbd618e23e76c47a1be3f7"}, + {file = "llvmlite-0.35.0rc3-cp36-cp36m-win_amd64.whl", hash = "sha256:ea727570ce8ca621959df9fb39bb8cff103d9817bd5c9ed5980607fa3b67d0c4"}, + {file = "llvmlite-0.35.0rc3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ef23850e8720b52f3d5d5dd86566a9351f1d81d0c06cdb92f21c364aab53f4a8"}, + {file = "llvmlite-0.35.0rc3-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:b1faf7c3ca9d3a5c95cc47682a3efab1a9f64e2862a5570d922e6ec216e21c74"}, + {file = "llvmlite-0.35.0rc3-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6d27b8c12c03dacd84e04db6c4bcf848d4aa7cbba51ee0625e46f7ced89ac603"}, + {file = "llvmlite-0.35.0rc3-cp37-cp37m-win32.whl", hash = "sha256:c8748823e3901833c8aaec89a46d38e302a43a2ffa944c2edcbd60ef7bf521ee"}, + {file = "llvmlite-0.35.0rc3-cp37-cp37m-win_amd64.whl", hash = "sha256:4cae79abf76b9ed801a0a27863c94c844712605868ff6802a2f402051ddf15c4"}, + {file = "llvmlite-0.35.0rc3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:77a645b4ea84267fd497e45db531237dea097e2a0c3f0fa8ce66fbe6cd022924"}, + {file = "llvmlite-0.35.0rc3-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:fd0d534ded3a757611a2334bc9b1f5d2415bb34fc177793805ed4eac91cce0a5"}, + {file = "llvmlite-0.35.0rc3-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:b3ac274cb3bd3caecf8fdfd15c99f293b187a8ea8be2c7706fb6a32d9fa4e284"}, + {file = "llvmlite-0.35.0rc3-cp38-cp38-win32.whl", hash = "sha256:ee4cb5fe63b547cdfd77184e1d8d3992ede14ede47c178434b60851603c05896"}, + {file = "llvmlite-0.35.0rc3-cp38-cp38-win_amd64.whl", hash = "sha256:c7c070bf9e194d3d731bdd7b75c28e39efcdac5ea888efc092922afdec33e938"}, +] +markdown = [ + {file = "Markdown-3.3.3-py3-none-any.whl", hash = "sha256:c109c15b7dc20a9ac454c9e6025927d44460b85bd039da028d85e2b6d0bcc328"}, + {file = "Markdown-3.3.3.tar.gz", hash = "sha256:5d9f2b5ca24bc4c7a390d22323ca4bad200368612b5aaa7796babf971d2b2f18"}, +] +musdb = [ + {file = "musdb-0.3.1-py2.py3-none-any.whl", hash = "sha256:db8f65b40c56938dab9e1b042254c6e3a0d75b139f98a96d6edbb646c0d25c7d"}, + {file = "musdb-0.3.1.tar.gz", hash = "sha256:27aac71095f919c7e0cfb526af1e6f745275645e959f759c42711354ea0fcbf7"}, +] +museval = [ + {file = "museval-0.3.0-py2.py3-none-any.whl", hash = "sha256:8c913fd48224746e480b9c4d854e9b0e2c2a0ba9d457ae3dc4360b99be2677ab"}, + {file = 
"museval-0.3.0.tar.gz", hash = "sha256:2e1334dd3367dea562c1902515f6d55e87dc3072cfc411c616ac6f0a48d31d5f"}, +] +mypy = [ + {file = "mypy-0.790-cp35-cp35m-macosx_10_6_x86_64.whl", hash = "sha256:bd03b3cf666bff8d710d633d1c56ab7facbdc204d567715cb3b9f85c6e94f669"}, + {file = "mypy-0.790-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:2170492030f6faa537647d29945786d297e4862765f0b4ac5930ff62e300d802"}, + {file = "mypy-0.790-cp35-cp35m-win_amd64.whl", hash = "sha256:e86bdace26c5fe9cf8cb735e7cedfe7850ad92b327ac5d797c656717d2ca66de"}, + {file = "mypy-0.790-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e97e9c13d67fbe524be17e4d8025d51a7dca38f90de2e462243ab8ed8a9178d1"}, + {file = "mypy-0.790-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0d34d6b122597d48a36d6c59e35341f410d4abfa771d96d04ae2c468dd201abc"}, + {file = "mypy-0.790-cp36-cp36m-win_amd64.whl", hash = "sha256:72060bf64f290fb629bd4a67c707a66fd88ca26e413a91384b18db3876e57ed7"}, + {file = "mypy-0.790-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:eea260feb1830a627fb526d22fbb426b750d9f5a47b624e8d5e7e004359b219c"}, + {file = "mypy-0.790-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:c614194e01c85bb2e551c421397e49afb2872c88b5830e3554f0519f9fb1c178"}, + {file = "mypy-0.790-cp37-cp37m-win_amd64.whl", hash = "sha256:0a0d102247c16ce93c97066443d11e2d36e6cc2a32d8ccc1f705268970479324"}, + {file = "mypy-0.790-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cf4e7bf7f1214826cf7333627cb2547c0db7e3078723227820d0a2490f117a01"}, + {file = "mypy-0.790-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:af4e9ff1834e565f1baa74ccf7ae2564ae38c8df2a85b057af1dbbc958eb6666"}, + {file = "mypy-0.790-cp38-cp38-win_amd64.whl", hash = "sha256:da56dedcd7cd502ccd3c5dddc656cb36113dd793ad466e894574125945653cea"}, + {file = "mypy-0.790-py3-none-any.whl", hash = "sha256:2842d4fbd1b12ab422346376aad03ff5d0805b706102e475e962370f874a5122"}, + {file = "mypy-0.790.tar.gz", hash = "sha256:2b21ba45ad9ef2e2eb88ce4aeadd0112d0f5026418324176fd494a6824b74975"}, +] +mypy-extensions = [ + {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, + {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, +] +norbert = [ + {file = "norbert-0.2.1-py2.py3-none-any.whl", hash = "sha256:409ac3f173cfb1fdaad21563b8f730d7cbe01af81349bcd96fb2b8b9d5f74339"}, + {file = "norbert-0.2.1.tar.gz", hash = "sha256:bd4cbc2527f0550b81bf4265c1a64b352cab7f71e4e3c823d30b71a7368de74e"}, +] +numba = [ + {file = "numba-0.51.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:af798310eeb318c56cdb83254abbe9a938cc0182d08671d7f9f032dc817e064d"}, + {file = "numba-0.51.2-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:93e18350f2094e7432321c1275730a3143b94af012fb609cc180fa376c44867f"}, + {file = "numba-0.51.2-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:9e2bb1f129bfadd757ad7a9c18ab79c3ab25ce6d6a68e58565d6c52ad07b3566"}, + {file = "numba-0.51.2-cp36-cp36m-win32.whl", hash = "sha256:31cdf6b6d1301d5fb6c4fcb8b4c711ba5c9f60ba2fca008b550da9b56185367c"}, + {file = "numba-0.51.2-cp36-cp36m-win_amd64.whl", hash = "sha256:df6edca13c04a31fdb5addf5205199478a7da372712829157ef491e8a6e7031f"}, + {file = "numba-0.51.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:a628122dacfcba9a3ea68a9e95578c6b6391016e34962c46550ea8e189e0412e"}, + {file = "numba-0.51.2-cp37-cp37m-manylinux2014_i686.whl", hash = 
"sha256:106736d5a8dab6bebce989d4ab1b3f169c264582598f172e6e5b736210d2e834"}, + {file = "numba-0.51.2-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:a12f16fdb4ca5edc94e2ef412e4e768c29217ef9b6fdfc237d064ebe30acfe14"}, + {file = "numba-0.51.2-cp37-cp37m-win32.whl", hash = "sha256:025b033fd31c44bba17802293c81270084b5454b5b055b8c10c394385c232f00"}, + {file = "numba-0.51.2-cp37-cp37m-win_amd64.whl", hash = "sha256:081788f584fa500339e9b74bf02e3c5029d408c114e555ada19cae0b92721416"}, + {file = "numba-0.51.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:5416b584183fd599afda11b947b64f89450fcf26a9c15b408167f412b98a3a94"}, + {file = "numba-0.51.2-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:05da65dca2ac28a192c9d8f20e9e477eb1237205cfc4d131c414f5f8092c6639"}, + {file = "numba-0.51.2-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:aee435e3b7e465dd49971f8ea76aa414532a87736916cb399534e017334d1138"}, + {file = "numba-0.51.2-cp38-cp38-win32.whl", hash = "sha256:bbbe2432433b11d3fadab0226a84c1a81918cb905ba1aeb022249e8d2ba8856c"}, + {file = "numba-0.51.2-cp38-cp38-win_amd64.whl", hash = "sha256:259e7c15b24feec4a99fb41eb8c47b5ad49b544d1a5ad40ad0252ef531ba06fd"}, + {file = "numba-0.51.2.tar.gz", hash = "sha256:16bd59572114adbf5f600ea383880d7b2071ae45477e84a24994e089ea390768"}, +] +numpy = [ + {file = "numpy-1.18.5-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0"}, + {file = "numpy-1.18.5-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583"}, + {file = "numpy-1.18.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271"}, + {file = "numpy-1.18.5-cp35-cp35m-win32.whl", hash = "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3"}, + {file = "numpy-1.18.5-cp35-cp35m-win_amd64.whl", hash = "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1"}, + {file = "numpy-1.18.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc"}, + {file = "numpy-1.18.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675"}, + {file = "numpy-1.18.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8"}, + {file = "numpy-1.18.5-cp36-cp36m-win32.whl", hash = "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5"}, + {file = "numpy-1.18.5-cp36-cp36m-win_amd64.whl", hash = "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161"}, + {file = "numpy-1.18.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824"}, + {file = "numpy-1.18.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf"}, + {file = "numpy-1.18.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f"}, + {file = "numpy-1.18.5-cp37-cp37m-win32.whl", hash = "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f"}, + {file = "numpy-1.18.5-cp37-cp37m-win_amd64.whl", hash = "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233"}, + {file = "numpy-1.18.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b"}, + {file = 
"numpy-1.18.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7"}, + {file = "numpy-1.18.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb"}, + {file = "numpy-1.18.5-cp38-cp38-win32.whl", hash = "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a"}, + {file = "numpy-1.18.5-cp38-cp38-win_amd64.whl", hash = "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f"}, + {file = "numpy-1.18.5.zip", hash = "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b"}, +] +oauthlib = [ + {file = "oauthlib-3.1.0-py2.py3-none-any.whl", hash = "sha256:df884cd6cbe20e32633f1db1072e9356f53638e4361bef4e8b03c9127c9328ea"}, + {file = "oauthlib-3.1.0.tar.gz", hash = "sha256:bee41cc35fcca6e988463cacc3bcb8a96224f470ca547e697b604cc697b2f889"}, +] +opt-einsum = [ + {file = "opt_einsum-3.3.0-py3-none-any.whl", hash = "sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147"}, + {file = "opt_einsum-3.3.0.tar.gz", hash = "sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549"}, +] +packaging = [ + {file = "packaging-20.8-py2.py3-none-any.whl", hash = "sha256:24e0da08660a87484d1602c30bb4902d74816b6985b93de36926f5bc95741858"}, + {file = "packaging-20.8.tar.gz", hash = "sha256:78598185a7008a470d64526a8059de9aaa449238f280fc9eb6b13ba6c4109093"}, +] +pandas = [ + {file = "pandas-1.1.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eb0ac2fd04428f18b547716f70c699a7cc9c65a6947ed8c7e688d96eb91e3db8"}, + {file = "pandas-1.1.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:02ec9f5f0b7df7227931a884569ef0b6d32d76789c84bcac1a719dafd1f912e8"}, + {file = "pandas-1.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:1edf6c254d2d138188e9987159978ee70e23362fe9197f3f100844a197f7e1e4"}, + {file = "pandas-1.1.2-cp36-cp36m-win32.whl", hash = "sha256:b821f239514a9ce46dd1cd6c9298a03ed58d0235d414ea264aacc1b14916bbe4"}, + {file = "pandas-1.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:ab6ea0f3116f408a8a59cd50158bfd19d2a024f4e221f14ab1bcd2da4f0c6fdf"}, + {file = "pandas-1.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:474fa53e3b2f3a543cbca81f7457bd1f44e7eb1be7171067636307e21b624e9c"}, + {file = "pandas-1.1.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:9e135ce9929cd0f0ba24f0545936af17ba935f844d4c3a2b979354a73c9440e0"}, + {file = "pandas-1.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:188cdfbf8399bc144fa95040536b5ce3429d2eda6c9c8b238c987af7df9f128c"}, + {file = "pandas-1.1.2-cp37-cp37m-win32.whl", hash = "sha256:08783a33989a6747317766b75be30a594a9764b9f145bb4bcc06e337930d9807"}, + {file = "pandas-1.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:f7008ec22b92d771b145150978d930a28fab8da3a10131b01bbf39574acdad0b"}, + {file = "pandas-1.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:59df9f0276aa4854d8bff28c5e5aeb74d9c6bb4d9f55d272b7124a7df40e47d0"}, + {file = "pandas-1.1.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:eeb64c5b3d4f2ea072ca8afdeb2b946cd681a863382ca79734f1b520b8d2fa26"}, + {file = "pandas-1.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c9235b37489168ed6b173551c816b50aa89f03c24a8549a8b4d47d8dc79bfb1e"}, + {file = "pandas-1.1.2-cp38-cp38-win32.whl", hash = "sha256:0936991228241db937e87f82ec552a33888dd04a2e0d5a2fa3c689f92fab09e0"}, + {file = "pandas-1.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:026d764d0b86ee53183aa4c0b90774b6146123eeada4e24946d7d24290777be1"}, + {file = 
"pandas-1.1.2.tar.gz", hash = "sha256:b64ffd87a2cfd31b40acd4b92cb72ea9a52a48165aec4c140e78fd69c45d1444"}, +] +pathspec = [ + {file = "pathspec-0.8.1-py2.py3-none-any.whl", hash = "sha256:aa0cb481c4041bf52ffa7b0d8fa6cd3e88a2ca4879c533c9153882ee2556790d"}, + {file = "pathspec-0.8.1.tar.gz", hash = "sha256:86379d6b86d75816baba717e64b1a3a3469deb93bb76d613c9ce79edc5cb68fd"}, +] +pluggy = [ + {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, + {file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, +] +pooch = [ + {file = "pooch-1.3.0-py3-none-any.whl", hash = "sha256:2cec8cbd0515462da1f84446113e77a785029b8514841e0ad344dd57f7924902"}, + {file = "pooch-1.3.0.tar.gz", hash = "sha256:30d448e825904e2d763bbbe418831a788813c32f636b21c8d60ee5f474532898"}, +] +protobuf = [ + {file = "protobuf-3.14.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:629b03fd3caae7f815b0c66b41273f6b1900a579e2ccb41ef4493a4f5fb84f3a"}, + {file = "protobuf-3.14.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:5b7a637212cc9b2bcf85dd828b1178d19efdf74dbfe1ddf8cd1b8e01fdaaa7f5"}, + {file = "protobuf-3.14.0-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:43b554b9e73a07ba84ed6cf25db0ff88b1e06be610b37656e292e3cbb5437472"}, + {file = "protobuf-3.14.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:5e9806a43232a1fa0c9cf5da8dc06f6910d53e4390be1fa06f06454d888a9142"}, + {file = "protobuf-3.14.0-cp35-cp35m-win32.whl", hash = "sha256:1c51fda1bbc9634246e7be6016d860be01747354ed7015ebe38acf4452f470d2"}, + {file = "protobuf-3.14.0-cp35-cp35m-win_amd64.whl", hash = "sha256:4b74301b30513b1a7494d3055d95c714b560fbb630d8fb9956b6f27992c9f980"}, + {file = "protobuf-3.14.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:86a75477addde4918e9a1904e5c6af8d7b691f2a3f65587d73b16100fbe4c3b2"}, + {file = "protobuf-3.14.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ecc33531a213eee22ad60e0e2aaea6c8ba0021f0cce35dbf0ab03dee6e2a23a1"}, + {file = "protobuf-3.14.0-cp36-cp36m-win32.whl", hash = "sha256:72230ed56f026dd664c21d73c5db73ebba50d924d7ba6b7c0d81a121e390406e"}, + {file = "protobuf-3.14.0-cp36-cp36m-win_amd64.whl", hash = "sha256:0fc96785262042e4863b3f3b5c429d4636f10d90061e1840fce1baaf59b1a836"}, + {file = "protobuf-3.14.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4e75105c9dfe13719b7293f75bd53033108f4ba03d44e71db0ec2a0e8401eafd"}, + {file = "protobuf-3.14.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:2a7e2fe101a7ace75e9327b9c946d247749e564a267b0515cf41dfe450b69bac"}, + {file = "protobuf-3.14.0-cp37-cp37m-win32.whl", hash = "sha256:b0d5d35faeb07e22a1ddf8dce620860c8fe145426c02d1a0ae2688c6e8ede36d"}, + {file = "protobuf-3.14.0-cp37-cp37m-win_amd64.whl", hash = "sha256:8971c421dbd7aad930c9bd2694122f332350b6ccb5202a8b7b06f3f1a5c41ed5"}, + {file = "protobuf-3.14.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9616f0b65a30851e62f1713336c931fcd32c057202b7ff2cfbfca0fc7d5e3043"}, + {file = "protobuf-3.14.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:22bcd2e284b3b1d969c12e84dc9b9a71701ec82d8ce975fdda19712e1cfd4e00"}, + {file = "protobuf-3.14.0-py2.py3-none-any.whl", hash = "sha256:0e247612fadda953047f53301a7b0407cb0c3cb4ae25a6fde661597a04039b3c"}, + {file = "protobuf-3.14.0.tar.gz", hash = "sha256:1d63eb389347293d8915fb47bee0951c7b5dab522a4a60118b9a18f33e21f8ce"}, +] +py = [ + {file = "py-1.10.0-py2.py3-none-any.whl", hash = 
"sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"}, + {file = "py-1.10.0.tar.gz", hash = "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3"}, +] +pyaml = [ + {file = "pyaml-20.4.0-py2.py3-none-any.whl", hash = "sha256:67081749a82b72c45e5f7f812ee3a14a03b3f5c25ff36ec3b290514f8c4c4b99"}, + {file = "pyaml-20.4.0.tar.gz", hash = "sha256:29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"}, +] +pyasn1 = [ + {file = "pyasn1-0.4.8-py2.4.egg", hash = "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"}, + {file = "pyasn1-0.4.8-py2.5.egg", hash = "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf"}, + {file = "pyasn1-0.4.8-py2.6.egg", hash = "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00"}, + {file = "pyasn1-0.4.8-py2.7.egg", hash = "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8"}, + {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, + {file = "pyasn1-0.4.8-py3.1.egg", hash = "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86"}, + {file = "pyasn1-0.4.8-py3.2.egg", hash = "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7"}, + {file = "pyasn1-0.4.8-py3.3.egg", hash = "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576"}, + {file = "pyasn1-0.4.8-py3.4.egg", hash = "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12"}, + {file = "pyasn1-0.4.8-py3.5.egg", hash = "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2"}, + {file = "pyasn1-0.4.8-py3.6.egg", hash = "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359"}, + {file = "pyasn1-0.4.8-py3.7.egg", hash = "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776"}, + {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, +] +pyasn1-modules = [ + {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, + {file = "pyasn1_modules-0.2.8-py2.4.egg", hash = "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199"}, + {file = "pyasn1_modules-0.2.8-py2.5.egg", hash = "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"}, + {file = "pyasn1_modules-0.2.8-py2.6.egg", hash = "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb"}, + {file = "pyasn1_modules-0.2.8-py2.7.egg", hash = "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8"}, + {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, + {file = "pyasn1_modules-0.2.8-py3.1.egg", hash = "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d"}, + {file = "pyasn1_modules-0.2.8-py3.2.egg", hash = "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45"}, + {file = "pyasn1_modules-0.2.8-py3.3.egg", hash = "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4"}, + {file = "pyasn1_modules-0.2.8-py3.4.egg", hash = "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811"}, + {file = "pyasn1_modules-0.2.8-py3.5.egg", hash = "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed"}, + {file = "pyasn1_modules-0.2.8-py3.6.egg", hash = 
"sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0"}, + {file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"}, +] +pycparser = [ + {file = "pycparser-2.20-py2.py3-none-any.whl", hash = "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"}, + {file = "pycparser-2.20.tar.gz", hash = "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0"}, +] +pyparsing = [ + {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, + {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, +] +pyrsistent = [ + {file = "pyrsistent-0.17.3.tar.gz", hash = "sha256:2e636185d9eb976a18a8a8e96efce62f2905fea90041958d8cc2a189756ebf3e"}, +] +pytest = [ + {file = "pytest-6.2.1-py3-none-any.whl", hash = "sha256:1969f797a1a0dbd8ccf0fecc80262312729afea9c17f1d70ebf85c5e76c6f7c8"}, + {file = "pytest-6.2.1.tar.gz", hash = "sha256:66e419b1899bc27346cb2c993e12c5e5e8daba9073c1fbce33b9807abc95c306"}, +] +pytest-forked = [ + {file = "pytest-forked-1.3.0.tar.gz", hash = "sha256:6aa9ac7e00ad1a539c41bec6d21011332de671e938c7637378ec9710204e37ca"}, + {file = "pytest_forked-1.3.0-py2.py3-none-any.whl", hash = "sha256:dc4147784048e70ef5d437951728825a131b81714b398d5d52f17c7c144d8815"}, +] +python-dateutil = [ + {file = "python-dateutil-2.8.1.tar.gz", hash = "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"}, + {file = "python_dateutil-2.8.1-py2.py3-none-any.whl", hash = "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"}, +] +pytz = [ + {file = "pytz-2020.5-py2.py3-none-any.whl", hash = "sha256:16962c5fb8db4a8f63a26646d8886e9d769b6c511543557bc84e9569fb9a9cb4"}, + {file = "pytz-2020.5.tar.gz", hash = "sha256:180befebb1927b16f6b57101720075a984c019ac16b1b7575673bea42c6c3da5"}, +] +pyyaml = [ + {file = "PyYAML-5.3.1-cp27-cp27m-win32.whl", hash = "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f"}, + {file = "PyYAML-5.3.1-cp27-cp27m-win_amd64.whl", hash = "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win32.whl", hash = "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win_amd64.whl", hash = "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf"}, + {file = "PyYAML-5.3.1-cp38-cp38-win32.whl", hash = "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97"}, + {file = "PyYAML-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee"}, + {file = "PyYAML-5.3.1-cp39-cp39-win32.whl", hash = "sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a"}, + {file = "PyYAML-5.3.1-cp39-cp39-win_amd64.whl", hash = 
"sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e"}, + {file = "PyYAML-5.3.1.tar.gz", hash = "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"}, +] +regex = [ + {file = "regex-2020.11.13-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8b882a78c320478b12ff024e81dc7d43c1462aa4a3341c754ee65d857a521f85"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a63f1a07932c9686d2d416fb295ec2c01ab246e89b4d58e5fa468089cab44b70"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:6e4b08c6f8daca7d8f07c8d24e4331ae7953333dbd09c648ed6ebd24db5a10ee"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:bba349276b126947b014e50ab3316c027cac1495992f10e5682dc677b3dfa0c5"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:56e01daca75eae420bce184edd8bb341c8eebb19dd3bce7266332258f9fb9dd7"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:6a8ce43923c518c24a2579fda49f093f1397dad5d18346211e46f134fc624e31"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:1ab79fcb02b930de09c76d024d279686ec5d532eb814fd0ed1e0051eb8bd2daa"}, + {file = "regex-2020.11.13-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:9801c4c1d9ae6a70aeb2128e5b4b68c45d4f0af0d1535500884d644fa9b768c6"}, + {file = "regex-2020.11.13-cp36-cp36m-win32.whl", hash = "sha256:49cae022fa13f09be91b2c880e58e14b6da5d10639ed45ca69b85faf039f7a4e"}, + {file = "regex-2020.11.13-cp36-cp36m-win_amd64.whl", hash = "sha256:749078d1eb89484db5f34b4012092ad14b327944ee7f1c4f74d6279a6e4d1884"}, + {file = "regex-2020.11.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b2f4007bff007c96a173e24dcda236e5e83bde4358a557f9ccf5e014439eae4b"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:38c8fd190db64f513fe4e1baa59fed086ae71fa45083b6936b52d34df8f86a88"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5862975b45d451b6db51c2e654990c1820523a5b07100fc6903e9c86575202a0"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:262c6825b309e6485ec2493ffc7e62a13cf13fb2a8b6d212f72bd53ad34118f1"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:bafb01b4688833e099d79e7efd23f99172f501a15c44f21ea2118681473fdba0"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:e32f5f3d1b1c663af7f9c4c1e72e6ffe9a78c03a31e149259f531e0fed826512"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:3bddc701bdd1efa0d5264d2649588cbfda549b2899dc8d50417e47a82e1387ba"}, + {file = "regex-2020.11.13-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:02951b7dacb123d8ea6da44fe45ddd084aa6777d4b2454fa0da61d569c6fa538"}, + {file = "regex-2020.11.13-cp37-cp37m-win32.whl", hash = "sha256:0d08e71e70c0237883d0bef12cad5145b84c3705e9c6a588b2a9c7080e5af2a4"}, + {file = "regex-2020.11.13-cp37-cp37m-win_amd64.whl", hash = "sha256:1fa7ee9c2a0e30405e21031d07d7ba8617bc590d391adfc2b7f1e8b99f46f444"}, + {file = "regex-2020.11.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:baf378ba6151f6e272824b86a774326f692bc2ef4cc5ce8d5bc76e38c813a55f"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e3faaf10a0d1e8e23a9b51d1900b72e1635c2d5b0e1bea1c18022486a8e2e52d"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:2a11a3e90bd9901d70a5b31d7dd85114755a581a5da3fc996abfefa48aee78af"}, + {file = 
"regex-2020.11.13-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:d1ebb090a426db66dd80df8ca85adc4abfcbad8a7c2e9a5ec7513ede522e0a8f"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:b2b1a5ddae3677d89b686e5c625fc5547c6e492bd755b520de5332773a8af06b"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:2c99e97d388cd0a8d30f7c514d67887d8021541b875baf09791a3baad48bb4f8"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:c084582d4215593f2f1d28b65d2a2f3aceff8342aa85afd7be23a9cad74a0de5"}, + {file = "regex-2020.11.13-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:a3d748383762e56337c39ab35c6ed4deb88df5326f97a38946ddd19028ecce6b"}, + {file = "regex-2020.11.13-cp38-cp38-win32.whl", hash = "sha256:7913bd25f4ab274ba37bc97ad0e21c31004224ccb02765ad984eef43e04acc6c"}, + {file = "regex-2020.11.13-cp38-cp38-win_amd64.whl", hash = "sha256:6c54ce4b5d61a7129bad5c5dc279e222afd00e721bf92f9ef09e4fae28755683"}, + {file = "regex-2020.11.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1862a9d9194fae76a7aaf0150d5f2a8ec1da89e8b55890b1786b8f88a0f619dc"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux1_i686.whl", hash = "sha256:4902e6aa086cbb224241adbc2f06235927d5cdacffb2425c73e6570e8d862364"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7a25fcbeae08f96a754b45bdc050e1fb94b95cab046bf56b016c25e9ab127b3e"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:d2d8ce12b7c12c87e41123997ebaf1a5767a5be3ec545f64675388970f415e2e"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:f7d29a6fc4760300f86ae329e3b6ca28ea9c20823df123a2ea8693e967b29917"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:717881211f46de3ab130b58ec0908267961fadc06e44f974466d1887f865bd5b"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:3128e30d83f2e70b0bed9b2a34e92707d0877e460b402faca908c6667092ada9"}, + {file = "regex-2020.11.13-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:8f6a2229e8ad946e36815f2a03386bb8353d4bde368fdf8ca5f0cb97264d3b5c"}, + {file = "regex-2020.11.13-cp39-cp39-win32.whl", hash = "sha256:f8f295db00ef5f8bae530fc39af0b40486ca6068733fb860b42115052206466f"}, + {file = "regex-2020.11.13-cp39-cp39-win_amd64.whl", hash = "sha256:a15f64ae3a027b64496a71ab1f722355e570c3fac5ba2801cafce846bf5af01d"}, + {file = "regex-2020.11.13.tar.gz", hash = "sha256:83d6b356e116ca119db8e7c6fc2983289d87b27b3fac238cfe5dca529d884562"}, +] +requests = [ + {file = "requests-2.25.1-py2.py3-none-any.whl", hash = "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e"}, + {file = "requests-2.25.1.tar.gz", hash = "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804"}, +] +requests-oauthlib = [ + {file = "requests-oauthlib-1.3.0.tar.gz", hash = "sha256:b4261601a71fd721a8bd6d7aa1cc1d6a8a93b4a9f5e96626f8e4d91e8beeaa6a"}, + {file = "requests_oauthlib-1.3.0-py2.py3-none-any.whl", hash = "sha256:7f71572defaecd16372f9006f33c2ec8c077c3cfa6f5911a9a90202beb513f3d"}, + {file = "requests_oauthlib-1.3.0-py3.7.egg", hash = "sha256:fa6c47b933f01060936d87ae9327fead68768b69c6c9ea2109c48be30f2d4dbc"}, +] +resampy = [ + {file = "resampy-0.2.2.tar.gz", hash = "sha256:62af020d8a6674d8117f62320ce9470437bb1d738a5d06cd55591b69b463929e"}, +] +rfc3986 = [ + {file = "rfc3986-1.4.0-py2.py3-none-any.whl", hash = "sha256:af9147e9aceda37c91a05f4deb128d4b4b49d6b199775fd2d2927768abdc8f50"}, + {file = 
"rfc3986-1.4.0.tar.gz", hash = "sha256:112398da31a3344dc25dbf477d8df6cb34f9278a94fee2625d89e4514be8bb9d"}, +] +rsa = [ + {file = "rsa-4.6-py3-none-any.whl", hash = "sha256:6166864e23d6b5195a5cfed6cd9fed0fe774e226d8f854fcb23b7bbef0350233"}, + {file = "rsa-4.6.tar.gz", hash = "sha256:109ea5a66744dd859bf16fe904b8d8b627adafb9408753161e766a92e7d681fa"}, +] +scikit-learn = [ + {file = "scikit-learn-0.24.0.tar.gz", hash = "sha256:076369634ee72b5a5941440661e2f306ff4ac30903802dc52031c7e9199ac640"}, + {file = "scikit_learn-0.24.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:890d7d588f65acb0c4f6c083347c9076916bda5e6bd8400f06244b1afc1009af"}, + {file = "scikit_learn-0.24.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:e534f5f3796db6781c87e9835dcd51b7854c8c5a379c9210b93605965c1941fd"}, + {file = "scikit_learn-0.24.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:d7fe05fcb44eadd6d6c874c768f085f5de1239db3a3b7be4d3d23d12e4120589"}, + {file = "scikit_learn-0.24.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:7f654befc5ad413690cc58f3f34a3e906caf825195ce0fda00a8e9565e1403e6"}, + {file = "scikit_learn-0.24.0-cp36-cp36m-win32.whl", hash = "sha256:afeb06dc69847927634e58579b9cdc72e1390b79497336b2324b1b173f33bd47"}, + {file = "scikit_learn-0.24.0-cp36-cp36m-win_amd64.whl", hash = "sha256:26f66b3726b54dfb76ea51c5d9c2431ed17ebc066cb4527662b9e851a3e7ba61"}, + {file = "scikit_learn-0.24.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c08b27cb78ee8d2dc781a7affed09859441f5b624f9f92da59ac0791c8774dfc"}, + {file = "scikit_learn-0.24.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:905d8934d1e27a686698864a5863ff2c0e13a2ae1adb78a8a848aacc8a49927d"}, + {file = "scikit_learn-0.24.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d819d625832fb2969911a243e009cfa135cb8ef1e150866e417d6e9d75290087"}, + {file = "scikit_learn-0.24.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:18f7131e62265bf2691ed1d0303c640313894ccfe4278427478c6b2f45094b53"}, + {file = "scikit_learn-0.24.0-cp37-cp37m-win32.whl", hash = "sha256:b0d13fd56d26cf3de0314a4fd48037108c638fe126d813f5c1222bb0f08b6a76"}, + {file = "scikit_learn-0.24.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c912247e42114f389858ae05d63f4359d4e667ea72aaabee191aee9ad3f9774a"}, + {file = "scikit_learn-0.24.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:758619e49cd7c17282e6cc60d5cc73c02c072b47c9a10010bb3bb47e0d976e50"}, + {file = "scikit_learn-0.24.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:66f27bf21202a850bcd7b6303916e4907f6e22ec59a14974ede4955aed5c7ed0"}, + {file = "scikit_learn-0.24.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:5e6e3c042cea83f2e20a45e563b8eabc1f8f72446251fe23ebefdf111a173a33"}, + {file = "scikit_learn-0.24.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:2a5348585aa793bc8cc5a72f8e9067c9380834b0aadbd55f924843b071f13282"}, + {file = "scikit_learn-0.24.0-cp38-cp38-win32.whl", hash = "sha256:743b6edd98c98991be46c08e6b21df3861d5ae915f91d59f988384d93f7263e7"}, + {file = "scikit_learn-0.24.0-cp38-cp38-win_amd64.whl", hash = "sha256:2951f87d35e72f007701c6e028aa230f6df6212a3194677c0c950486066a454d"}, + {file = "scikit_learn-0.24.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:44e452ea8491225c5783d49577aad0f36202dfd52aec7f82c0fdfe5fbd5f7400"}, + {file = "scikit_learn-0.24.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:800aaf63f8838c00e85db2267dd226f89858594843fd03932a9eda95746d2c40"}, + {file = "scikit_learn-0.24.0-cp39-cp39-manylinux2010_x86_64.whl", hash = 
"sha256:3eeff086f7329521d27249a082ea3c48c085cedb110db5f65968ab55c3ba2e09"}, + {file = "scikit_learn-0.24.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:4395e91b3548005f4a645018435b5a94f8cce232b5b70753020e606c6a750656"}, + {file = "scikit_learn-0.24.0-cp39-cp39-win32.whl", hash = "sha256:80ca024154b84b6ac4cfc86930ba13fdc348a209753bf2c16129db6f9eb8a80b"}, + {file = "scikit_learn-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:490436b44b3a1957cb625e871764b0aa330b34cc416aea4abc6c38ca63d0d682"}, +] +scipy = [ + {file = "scipy-1.4.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:c5cac0c0387272ee0e789e94a570ac51deb01c796b37fb2aad1fb13f85e2f97d"}, + {file = "scipy-1.4.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:a144811318853a23d32a07bc7fd5561ff0cac5da643d96ed94a4ffe967d89672"}, + {file = "scipy-1.4.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:71eb180f22c49066f25d6df16f8709f215723317cc951d99e54dc88020ea57be"}, + {file = "scipy-1.4.1-cp35-cp35m-win32.whl", hash = "sha256:770254a280d741dd3436919d47e35712fb081a6ff8bafc0f319382b954b77802"}, + {file = "scipy-1.4.1-cp35-cp35m-win_amd64.whl", hash = "sha256:a1aae70d52d0b074d8121333bc807a485f9f1e6a69742010b33780df2e60cfe0"}, + {file = "scipy-1.4.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:bb517872058a1f087c4528e7429b4a44533a902644987e7b2fe35ecc223bc408"}, + {file = "scipy-1.4.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:dba8306f6da99e37ea08c08fef6e274b5bf8567bb094d1dbe86a20e532aca088"}, + {file = "scipy-1.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:386086e2972ed2db17cebf88610aab7d7f6e2c0ca30042dc9a89cf18dcc363fa"}, + {file = "scipy-1.4.1-cp36-cp36m-win32.whl", hash = "sha256:8d3bc3993b8e4be7eade6dcc6fd59a412d96d3a33fa42b0fa45dc9e24495ede9"}, + {file = "scipy-1.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:dc60bb302f48acf6da8ca4444cfa17d52c63c5415302a9ee77b3b21618090521"}, + {file = "scipy-1.4.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:787cc50cab3020a865640aba3485e9fbd161d4d3b0d03a967df1a2881320512d"}, + {file = "scipy-1.4.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0902a620a381f101e184a958459b36d3ee50f5effd186db76e131cbefcbb96f7"}, + {file = "scipy-1.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:00af72998a46c25bdb5824d2b729e7dabec0c765f9deb0b504f928591f5ff9d4"}, + {file = "scipy-1.4.1-cp37-cp37m-win32.whl", hash = "sha256:9508a7c628a165c2c835f2497837bf6ac80eb25291055f56c129df3c943cbaf8"}, + {file = "scipy-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2d6df9eb074af7f08866598e4ef068a2b310d98f87dc23bd1b90ec7bdcec802"}, + {file = "scipy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3092857f36b690a321a662fe5496cb816a7f4eecd875e1d36793d92d3f884073"}, + {file = "scipy-1.4.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:8a07760d5c7f3a92e440ad3aedcc98891e915ce857664282ae3c0220f3301eb6"}, + {file = "scipy-1.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1e3190466d669d658233e8a583b854f6386dd62d655539b77b3fa25bfb2abb70"}, + {file = "scipy-1.4.1-cp38-cp38-win32.whl", hash = "sha256:cc971a82ea1170e677443108703a2ec9ff0f70752258d0e9f5433d00dda01f59"}, + {file = "scipy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:2cce3f9847a1a51019e8c5b47620da93950e58ebc611f13e0d11f4980ca5fecb"}, + {file = "scipy-1.4.1.tar.gz", hash = "sha256:dee1bbf3a6c8f73b6b218cb28eed8dd13347ea2f87d572ce19b289d6fd3fbc59"}, +] +simplejson = [ + {file = "simplejson-3.17.2-cp27-cp27m-macosx_10_13_x86_64.whl", hash = "sha256:2d3eab2c3fe52007d703a26f71cf649a8c771fcdd949a3ae73041ba6797cfcf8"}, + {file = 
"simplejson-3.17.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:813846738277729d7db71b82176204abc7fdae2f566e2d9fcf874f9b6472e3e6"}, + {file = "simplejson-3.17.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:292c2e3f53be314cc59853bd20a35bf1f965f3bc121e007ab6fd526ed412a85d"}, + {file = "simplejson-3.17.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0dd9d9c738cb008bfc0862c9b8fa6743495c03a0ed543884bf92fb7d30f8d043"}, + {file = "simplejson-3.17.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:42b8b8dd0799f78e067e2aaae97e60d58a8f63582939af60abce4c48631a0aa4"}, + {file = "simplejson-3.17.2-cp27-cp27m-win32.whl", hash = "sha256:8042040af86a494a23c189b5aa0ea9433769cc029707833f261a79c98e3375f9"}, + {file = "simplejson-3.17.2-cp27-cp27m-win_amd64.whl", hash = "sha256:034550078a11664d77bc1a8364c90bb7eef0e44c2dbb1fd0a4d92e3997088667"}, + {file = "simplejson-3.17.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:fed0f22bf1313ff79c7fc318f7199d6c2f96d4de3234b2f12a1eab350e597c06"}, + {file = "simplejson-3.17.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:2e7b57c2c146f8e4dadf84977a83f7ee50da17c8861fd7faf694d55e3274784f"}, + {file = "simplejson-3.17.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:da3c55cdc66cfc3fffb607db49a42448785ea2732f055ac1549b69dcb392663b"}, + {file = "simplejson-3.17.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:c1cb29b1fced01f97e6d5631c3edc2dadb424d1f4421dad079cb13fc97acb42f"}, + {file = "simplejson-3.17.2-cp33-cp33m-win32.whl", hash = "sha256:8f713ea65958ef40049b6c45c40c206ab363db9591ff5a49d89b448933fa5746"}, + {file = "simplejson-3.17.2-cp33-cp33m-win_amd64.whl", hash = "sha256:344e2d920a7f27b4023c087ab539877a1e39ce8e3e90b867e0bfa97829824748"}, + {file = "simplejson-3.17.2-cp34-cp34m-win32.whl", hash = "sha256:05b43d568300c1cd43f95ff4bfcff984bc658aa001be91efb3bb21df9d6288d3"}, + {file = "simplejson-3.17.2-cp34-cp34m-win_amd64.whl", hash = "sha256:cff6453e25204d3369c47b97dd34783ca820611bd334779d22192da23784194b"}, + {file = "simplejson-3.17.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8acf76443cfb5c949b6e781c154278c059b09ac717d2757a830c869ba000cf8d"}, + {file = "simplejson-3.17.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:869a183c8e44bc03be1b2bbcc9ec4338e37fa8557fc506bf6115887c1d3bb956"}, + {file = "simplejson-3.17.2-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:5c659a0efc80aaaba57fcd878855c8534ecb655a28ac8508885c50648e6e659d"}, + {file = "simplejson-3.17.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:72d8a3ffca19a901002d6b068cf746be85747571c6a7ba12cbcf427bfb4ed971"}, + {file = "simplejson-3.17.2-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:4b3442249d5e3893b90cb9f72c7d6ce4d2ea144d2c0d9f75b9ae1e5460f3121a"}, + {file = "simplejson-3.17.2-cp35-cp35m-win32.whl", hash = "sha256:e058c7656c44fb494a11443191e381355388443d543f6fc1a245d5d238544396"}, + {file = "simplejson-3.17.2-cp35-cp35m-win_amd64.whl", hash = "sha256:934115642c8ba9659b402c8bdbdedb48651fb94b576e3b3efd1ccb079609b04a"}, + {file = "simplejson-3.17.2-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:ffd4e4877a78c84d693e491b223385e0271278f5f4e1476a4962dca6824ecfeb"}, + {file = "simplejson-3.17.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:10fc250c3edea4abc15d930d77274ddb8df4803453dde7ad50c2f5565a18a4bb"}, + {file = "simplejson-3.17.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:76ac9605bf2f6d9b56abf6f9da9047a8782574ad3531c82eae774947ae99cc3f"}, + {file = "simplejson-3.17.2-cp36-cp36m-manylinux2010_i686.whl", hash = 
"sha256:7f10f8ba9c1b1430addc7dd385fc322e221559d3ae49b812aebf57470ce8de45"}, + {file = "simplejson-3.17.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:bc00d1210567a4cdd215ac6e17dc00cb9893ee521cee701adfd0fa43f7c73139"}, + {file = "simplejson-3.17.2-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:af4868da7dd53296cd7630687161d53a7ebe2e63814234631445697bd7c29f46"}, + {file = "simplejson-3.17.2-cp36-cp36m-win32.whl", hash = "sha256:7d276f69bfc8c7ba6c717ba8deaf28f9d3c8450ff0aa8713f5a3280e232be16b"}, + {file = "simplejson-3.17.2-cp36-cp36m-win_amd64.whl", hash = "sha256:a55c76254d7cf8d4494bc508e7abb993a82a192d0db4552421e5139235604625"}, + {file = "simplejson-3.17.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:9a2b7543559f8a1c9ed72724b549d8cc3515da7daf3e79813a15bdc4a769de25"}, + {file = "simplejson-3.17.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:311f5dc2af07361725033b13cc3d0351de3da8bede3397d45650784c3f21fbcf"}, + {file = "simplejson-3.17.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:2862beabfb9097a745a961426fe7daf66e1714151da8bb9a0c430dde3d59c7c0"}, + {file = "simplejson-3.17.2-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:afebfc3dd3520d37056f641969ce320b071bc7a0800639c71877b90d053e087f"}, + {file = "simplejson-3.17.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d4813b30cb62d3b63ccc60dd12f2121780c7a3068db692daeb90f989877aaf04"}, + {file = "simplejson-3.17.2-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fabde09af43e0cbdee407555383063f8b45bfb52c361bc5da83fcffdb4fd278"}, + {file = "simplejson-3.17.2-cp37-cp37m-win32.whl", hash = "sha256:ceaa28a5bce8a46a130cd223e895080e258a88d51bf6e8de2fc54a6ef7e38c34"}, + {file = "simplejson-3.17.2-cp37-cp37m-win_amd64.whl", hash = "sha256:9551f23e09300a9a528f7af20e35c9f79686d46d646152a0c8fc41d2d074d9b0"}, + {file = "simplejson-3.17.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:c94dc64b1a389a416fc4218cd4799aa3756f25940cae33530a4f7f2f54f166da"}, + {file = "simplejson-3.17.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:b59aa298137ca74a744c1e6e22cfc0bf9dca3a2f41f51bc92eb05695155d905a"}, + {file = "simplejson-3.17.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ad8f41c2357b73bc9e8606d2fa226233bf4d55d85a8982ecdfd55823a6959995"}, + {file = "simplejson-3.17.2-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:845a14f6deb124a3bcb98a62def067a67462a000e0508f256f9c18eff5847efc"}, + {file = "simplejson-3.17.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d0b64409df09edb4c365d95004775c988259efe9be39697d7315c42b7a5e7e94"}, + {file = "simplejson-3.17.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:55d65f9cc1b733d85ef95ab11f559cce55c7649a2160da2ac7a078534da676c8"}, + {file = "simplejson-3.17.2.tar.gz", hash = "sha256:75ecc79f26d99222a084fbdd1ce5aad3ac3a8bd535cd9059528452da38b68841"}, +] +six = [ + {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, + {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, +] +sniffio = [ + {file = "sniffio-1.2.0-py3-none-any.whl", hash = "sha256:471b71698eac1c2112a40ce2752bb2f4a4814c22a54a3eed3676bc0f5ca9f663"}, + {file = "sniffio-1.2.0.tar.gz", hash = "sha256:c4666eecec1d3f50960c6bdf61ab7bc350648da6c126e3cf6898d8cd4ddcd3de"}, +] +soundfile = [ + {file = "SoundFile-0.10.3.post1-py2.py3-none-any.whl", hash = "sha256:2d17e0a6fc2af0d6c1d868bafa5ec80aae6e186a97fec8db07ad6af29842fbc7"}, + {file = 
"SoundFile-0.10.3.post1-py2.py3.cp26.cp27.cp32.cp33.cp34.cp35.cp36.pp27.pp32.pp33-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.whl", hash = "sha256:5e342ee293b896d31da67617fe65d0bdca217af193991b0cb6052353b1e0e506"}, + {file = "SoundFile-0.10.3.post1-py2.py3.cp26.cp27.cp32.cp33.cp34.cp35.cp36.pp27.pp32.pp33-none-win32.whl", hash = "sha256:4555438c2c4f02b39fea2ed40f6ddeda88a80cd1ee9dd129be4d5f5134698cc2"}, + {file = "SoundFile-0.10.3.post1-py2.py3.cp26.cp27.cp32.cp33.cp34.cp35.cp36.pp27.pp32.pp33-none-win_amd64.whl", hash = "sha256:b361d4ac1519a2e516cabafa6bf7e93492f999f35d7d25350cd87fdc3e5cb27e"}, + {file = "SoundFile-0.10.3.post1.tar.gz", hash = "sha256:490cff42650733d1832728b937fe99fa1802896f5ef4d61bcf78cf7ebecb107b"}, +] +stempeg = [ + {file = "stempeg-0.2.2-py3-none-any.whl", hash = "sha256:c97eb344e03a21387b4a5d325191321fc85876fb5825cca52168953d3932ee11"}, + {file = "stempeg-0.2.2.tar.gz", hash = "sha256:3a09c8de50f218c8d5e32fca3ecf59d2c1a27430fea1e20340c7f0468a240c82"}, +] +tensorboard = [ + {file = "tensorboard-2.4.0-py3-none-any.whl", hash = "sha256:cde0c663a85609441cb4d624e7255fd8e2b6b1d679645095aac8a234a2812738"}, +] +tensorboard-plugin-wit = [ + {file = "tensorboard_plugin_wit-1.7.0-py3-none-any.whl", hash = "sha256:ee775f04821185c90d9a0e9c56970ee43d7c41403beb6629385b39517129685b"}, +] +tensorflow = [ + {file = "tensorflow-2.3.0-cp35-cp35m-macosx_10_11_x86_64.whl", hash = "sha256:c6fad4e944e20199e963e158fe626352e349865ea4ca71655f5456193a6d3b9d"}, + {file = "tensorflow-2.3.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:6f74ef59dc59cf8f2002738c65dffa591e2c332e9b1b4ced33ff8d39b6fb477c"}, + {file = "tensorflow-2.3.0-cp35-cp35m-win_amd64.whl", hash = "sha256:797d6ca09d4f69570458180b7813dc12efe9166ba60454b0df7bed531bb5e4f4"}, + {file = "tensorflow-2.3.0-cp36-cp36m-macosx_10_11_x86_64.whl", hash = "sha256:b1699903cf3a9f41c379d79ada2279a206a071b7e05671646d7b5e7fc37e2eae"}, + {file = "tensorflow-2.3.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:5c9f9a36d5b4d0ceb67b985486fe4cc6999a96e2bf89f3ba82ffd8317e5efadd"}, + {file = "tensorflow-2.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:bc9d761a857839344930eef86f0d6409840b1c9ada9cbe56b92287b2077ef752"}, + {file = "tensorflow-2.3.0-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0cfb0fbe875408cdbfc7677f12aa0b23656f3e6d8c5f568b3100450ec29262a7"}, + {file = "tensorflow-2.3.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:92430b6e91f00f38a602c4f547bbbaca598a3a90376f90d5b2acd24bc18fa1d7"}, + {file = "tensorflow-2.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:36a4ce9bbc9865385c1bb606fe34f0da96b0496ce3997e652d2b765a4382fe48"}, + {file = "tensorflow-2.3.0-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:44c8d979b2d19ed56dbe6b03aef87616d6138a58fd80c43e7a758c90105e9adf"}, + {file = "tensorflow-2.3.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:c33a423eb1f39c4c6acc44c044a138979868f0d4c91e380c191bd8fddc7c2e9b"}, + {file = "tensorflow-2.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2d9994157d6a222d9ffd956e99af4b5e46e47338428d2d197e325362283ec835"}, +] +tensorflow-estimator = [ + {file = "tensorflow_estimator-2.3.0-py2.py3-none-any.whl", hash = "sha256:b75e034300ccb169403cf2695adf3368da68863aeb0c14c3760064c713d5c486"}, +] +termcolor = [ + {file = "termcolor-1.1.0.tar.gz", hash = "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"}, +] +threadpoolctl = [ + {file = "threadpoolctl-2.1.0-py3-none-any.whl", hash = 
"sha256:38b74ca20ff3bb42caca8b00055111d74159ee95c4370882bbff2b93d24da725"}, + {file = "threadpoolctl-2.1.0.tar.gz", hash = "sha256:ddc57c96a38beb63db45d6c159b5ab07b6bced12c45a1f07b2b92f272aebfa6b"}, +] +toml = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] +tqdm = [ + {file = "tqdm-4.55.1-py2.py3-none-any.whl", hash = "sha256:b8b46036fd00176d0870307123ef06bb851096964fa7fc578d789f90ce82c3e4"}, + {file = "tqdm-4.55.1.tar.gz", hash = "sha256:556c55b081bd9aa746d34125d024b73f0e2a0e62d5927ff0e400e20ee0a03b9a"}, +] +typed-ast = [ + {file = "typed_ast-1.4.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:7703620125e4fb79b64aa52427ec192822e9f45d37d4b6625ab37ef403e1df70"}, + {file = "typed_ast-1.4.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c9aadc4924d4b5799112837b226160428524a9a45f830e0d0f184b19e4090487"}, + {file = "typed_ast-1.4.2-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:9ec45db0c766f196ae629e509f059ff05fc3148f9ffd28f3cfe75d4afb485412"}, + {file = "typed_ast-1.4.2-cp35-cp35m-win32.whl", hash = "sha256:85f95aa97a35bdb2f2f7d10ec5bbdac0aeb9dafdaf88e17492da0504de2e6400"}, + {file = "typed_ast-1.4.2-cp35-cp35m-win_amd64.whl", hash = "sha256:9044ef2df88d7f33692ae3f18d3be63dec69c4fb1b5a4a9ac950f9b4ba571606"}, + {file = "typed_ast-1.4.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c1c876fd795b36126f773db9cbb393f19808edd2637e00fd6caba0e25f2c7b64"}, + {file = "typed_ast-1.4.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:5dcfc2e264bd8a1db8b11a892bd1647154ce03eeba94b461effe68790d8b8e07"}, + {file = "typed_ast-1.4.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:8db0e856712f79c45956da0c9a40ca4246abc3485ae0d7ecc86a20f5e4c09abc"}, + {file = "typed_ast-1.4.2-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:d003156bb6a59cda9050e983441b7fa2487f7800d76bdc065566b7d728b4581a"}, + {file = "typed_ast-1.4.2-cp36-cp36m-win32.whl", hash = "sha256:4c790331247081ea7c632a76d5b2a265e6d325ecd3179d06e9cf8d46d90dd151"}, + {file = "typed_ast-1.4.2-cp36-cp36m-win_amd64.whl", hash = "sha256:d175297e9533d8d37437abc14e8a83cbc68af93cc9c1c59c2c292ec59a0697a3"}, + {file = "typed_ast-1.4.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf54cfa843f297991b7388c281cb3855d911137223c6b6d2dd82a47ae5125a41"}, + {file = "typed_ast-1.4.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:b4fcdcfa302538f70929eb7b392f536a237cbe2ed9cba88e3bf5027b39f5f77f"}, + {file = "typed_ast-1.4.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:987f15737aba2ab5f3928c617ccf1ce412e2e321c77ab16ca5a293e7bbffd581"}, + {file = "typed_ast-1.4.2-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:37f48d46d733d57cc70fd5f30572d11ab8ed92da6e6b28e024e4a3edfb456e37"}, + {file = "typed_ast-1.4.2-cp37-cp37m-win32.whl", hash = "sha256:36d829b31ab67d6fcb30e185ec996e1f72b892255a745d3a82138c97d21ed1cd"}, + {file = "typed_ast-1.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8368f83e93c7156ccd40e49a783a6a6850ca25b556c0fa0240ed0f659d2fe496"}, + {file = "typed_ast-1.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:963c80b583b0661918718b095e02303d8078950b26cc00b5e5ea9ababe0de1fc"}, + {file = "typed_ast-1.4.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e683e409e5c45d5c9082dc1daf13f6374300806240719f95dc783d1fc942af10"}, + {file = "typed_ast-1.4.2-cp38-cp38-manylinux1_x86_64.whl", hash = 
"sha256:84aa6223d71012c68d577c83f4e7db50d11d6b1399a9c779046d75e24bed74ea"}, + {file = "typed_ast-1.4.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a38878a223bdd37c9709d07cd357bb79f4c760b29210e14ad0fb395294583787"}, + {file = "typed_ast-1.4.2-cp38-cp38-win32.whl", hash = "sha256:a2c927c49f2029291fbabd673d51a2180038f8cd5a5b2f290f78c4516be48be2"}, + {file = "typed_ast-1.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:c0c74e5579af4b977c8b932f40a5464764b2f86681327410aa028a22d2f54937"}, + {file = "typed_ast-1.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:07d49388d5bf7e863f7fa2f124b1b1d89d8aa0e2f7812faff0a5658c01c59aa1"}, + {file = "typed_ast-1.4.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:240296b27397e4e37874abb1df2a608a92df85cf3e2a04d0d4d61055c8305ba6"}, + {file = "typed_ast-1.4.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:d746a437cdbca200622385305aedd9aef68e8a645e385cc483bdc5e488f07166"}, + {file = "typed_ast-1.4.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:14bf1522cdee369e8f5581238edac09150c765ec1cb33615855889cf33dcb92d"}, + {file = "typed_ast-1.4.2-cp39-cp39-win32.whl", hash = "sha256:cc7b98bf58167b7f2db91a4327da24fb93368838eb84a44c472283778fc2446b"}, + {file = "typed_ast-1.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:7147e2a76c75f0f64c4319886e7639e490fee87c9d25cb1d4faef1d8cf83a440"}, + {file = "typed_ast-1.4.2.tar.gz", hash = "sha256:9fc0b3cb5d1720e7141d103cf4819aea239f7d136acf9ee4a69b047b7986175a"}, +] +typer = [ + {file = "typer-0.3.2-py3-none-any.whl", hash = "sha256:ba58b920ce851b12a2d790143009fa00ac1d05b3ff3257061ff69dbdfc3d161b"}, + {file = "typer-0.3.2.tar.gz", hash = "sha256:5455d750122cff96745b0dec87368f56d023725a7ebc9d2e54dd23dc86816303"}, +] +typing-extensions = [ + {file = "typing_extensions-3.7.4.3-py2-none-any.whl", hash = "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"}, + {file = "typing_extensions-3.7.4.3-py3-none-any.whl", hash = "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918"}, + {file = "typing_extensions-3.7.4.3.tar.gz", hash = "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c"}, +] +urllib3 = [ + {file = "urllib3-1.26.2-py2.py3-none-any.whl", hash = "sha256:d8ff90d979214d7b4f8ce956e80f4028fc6860e4431f731ea4a8c08f23f99473"}, + {file = "urllib3-1.26.2.tar.gz", hash = "sha256:19188f96923873c92ccb987120ec4acaa12f0461fa9ce5d3d0772bc965a39e08"}, +] +werkzeug = [ + {file = "Werkzeug-1.0.1-py2.py3-none-any.whl", hash = "sha256:2de2a5db0baeae7b2d2664949077c2ac63fbd16d98da0ff71837f7d1dea3fd43"}, + {file = "Werkzeug-1.0.1.tar.gz", hash = "sha256:6c80b1e5ad3665290ea39320b91e1be1e0d5f60652b964a3070216de83d2e47c"}, +] +wrapt = [ + {file = "wrapt-1.12.1.tar.gz", hash = "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"}, +] +zipp = [ + {file = "zipp-3.4.0-py3-none-any.whl", hash = "sha256:102c24ef8f171fd729d46599845e95c7ab894a4cf45f5de11a44cc7444fb1108"}, + {file = "zipp-3.4.0.tar.gz", hash = "sha256:ed5eee1974372595f9e416cc7bbeeb12335201d8081ca8a0743c954d4446e5cb"}, +] diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..1ba4db8 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,83 @@ +[tool.poetry] +name = "spleeter" +version = "2.1.0" +description = "The Deezer source separation library with pretrained models based on tensorflow." 
+authors = ["Deezer Research "] +license = "MIT License" +readme = "README.md" +repository = "https://github.com/deezer/spleeter" +homepage = "https://github.com/deezer/spleeter" +classifiers = [ + "Environment :: Console", + "Environment :: MacOS X", + "Intended Audience :: Developers", + "Intended Audience :: Information Technology", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "Natural Language :: English", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX :: Linux", + "Operating System :: Unix", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Artistic Software", + "Topic :: Multimedia", + "Topic :: Multimedia :: Sound/Audio", + "Topic :: Multimedia :: Sound/Audio :: Analysis", + "Topic :: Multimedia :: Sound/Audio :: Conversion", + "Topic :: Multimedia :: Sound/Audio :: Sound Synthesis", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Information Analysis", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: Utilities" +] +packages = [ { include = "spleeter" } ] +include = ["LICENSE", "spleeter/resources/*.json"] + +[tool.poetry.dependencies] +python = "^3.7" +ffmpeg-python = "0.2.0" +norbert = "0.2.1" +httpx = {extras = ["http2"], version = "^0.16.1"} +typer = "^0.3.2" +librosa = "0.8.0" +musdb = {version = "0.3.1", optional = true} +museval = {version = "0.3.0", optional = true} +tensorflow = "2.3.0" +pandas = "1.1.2" +numpy = "<1.19.0,>=1.16.0" + +[tool.poetry.dev-dependencies] +pytest = "^6.2.1" +isort = "^5.7.0" +black = "^20.8b1" +mypy = "^0.790" +pytest-forked = "^1.3.0" +musdb = "0.3.1" +museval = "0.3.0" + +[tool.poetry.scripts] +spleeter = 'spleeter.__main__:entrypoint' + +[tool.poetry.extras] +evaluation = ["musdb", "museval"] + +[tool.isort] +profile = "black" +multi_line_output = 3 + +[tool.pytest.ini_options] +addopts = "-W ignore::FutureWarning -W ignore::DeprecationWarning -vv --forked" + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/setup.py b/setup.py deleted file mode 100644 index 5984078..0000000 --- a/setup.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python -# coding: utf8 - -""" Distribution script. """ - -import sys - -from os import path -from setuptools import setup - -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' - -# Default project values. -project_name = 'spleeter' -project_version = '2.0.2' -tensorflow_dependency = 'tensorflow' -tensorflow_version = '2.3.0' -here = path.abspath(path.dirname(__file__)) -readme_path = path.join(here, 'README.md') -with open(readme_path, 'r') as stream: - readme = stream.read() - -# Package setup entrypoint. -setup( - name=project_name, - version=project_version, - description=''' - The Deezer source separation library with - pretrained models based on tensorflow. 
- ''', - long_description=readme, - long_description_content_type='text/markdown', - author='Deezer Research', - author_email='spleeter@deezer.com', - url='https://github.com/deezer/spleeter', - license='MIT License', - packages=[ - 'spleeter', - 'spleeter.audio', - 'spleeter.commands', - 'spleeter.model', - 'spleeter.model.functions', - 'spleeter.model.provider', - 'spleeter.resources', - 'spleeter.utils', - ], - package_data={'spleeter.resources': ['*.json']}, - python_requires='>=3.6, <3.9', - include_package_data=True, - install_requires=[ - 'ffmpeg-python==0.2.0', - 'importlib_resources ; python_version<"3.7"', - 'norbert==0.2.1', - 'numpy<1.19.0,>=1.16.0', - 'pandas==1.1.2', - 'requests', - 'scipy==1.4.1', - 'setuptools>=41.0.0', - 'librosa==0.8.0', - '{}=={}'.format(tensorflow_dependency, tensorflow_version), - ], - extras_require={ - 'evaluation': ['musdb==0.3.1', 'museval==0.3.0'] - }, - entry_points={ - 'console_scripts': ['spleeter=spleeter.__main__:entrypoint'] - }, - classifiers=[ - 'Environment :: Console', - 'Environment :: MacOS X', - 'Intended Audience :: Developers', - 'Intended Audience :: Information Technology', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: MIT License', - 'Natural Language :: English', - 'Operating System :: MacOS', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: POSIX :: Linux', - 'Operating System :: Unix', - 'Programming Language :: Python', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: Implementation :: CPython', - 'Topic :: Artistic Software', - 'Topic :: Multimedia', - 'Topic :: Multimedia :: Sound/Audio', - 'Topic :: Multimedia :: Sound/Audio :: Analysis', - 'Topic :: Multimedia :: Sound/Audio :: Conversion', - 'Topic :: Multimedia :: Sound/Audio :: Sound Synthesis', - 'Topic :: Scientific/Engineering', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - 'Topic :: Scientific/Engineering :: Information Analysis', - 'Topic :: Software Development', - 'Topic :: Software Development :: Libraries', - 'Topic :: Software Development :: Libraries :: Python Modules', - 'Topic :: Utilities'] -) diff --git a/spleeter/__init__.py b/spleeter/__init__.py index c2329ed..9c89afa 100644 --- a/spleeter/__init__.py +++ b/spleeter/__init__.py @@ -13,9 +13,9 @@ by providing train, evaluation and source separation action. """ -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" class SpleeterError(Exception): diff --git a/spleeter/__main__.py b/spleeter/__main__.py index e4f3f4a..1f36d2e 100644 --- a/spleeter/__main__.py +++ b/spleeter/__main__.py @@ -5,54 +5,252 @@ Python oneliner script usage. USAGE: python -m spleeter {train,evaluate,separate} ... + + Notes: + All critical import involving TF, numpy or Pandas are deported to + command function scope to avoid heavy import on CLI evaluation, + leading to large bootstraping time. 
""" -import sys -import warnings +import json +from functools import partial +from glob import glob +from itertools import product +from os.path import join +from pathlib import Path +from typing import Container, Dict, List, Optional + +# pyright: reportMissingImports=false +# pylint: disable=import-error +from typer import Exit, Typer from . import SpleeterError -from .commands import create_argument_parser -from .utils.configuration import load_configuration -from .utils.logging import ( - enable_logging, - enable_tensorflow_logging, - get_logger) +from .options import * +from .utils.logging import configure_logger, logger -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +# pylint: enable=import-error + +spleeter: Typer = Typer(add_completion=False) +""" CLI application. """ -def main(argv): - """ Spleeter runner. Parse provided command line arguments - and run entrypoint for required command (either train, - evaluate or separate). - - :param argv: Provided command line arguments. +@spleeter.command() +def train( + adapter: str = AudioAdapterOption, + data: Path = TrainingDataDirectoryOption, + params_filename: str = ModelParametersOption, + verbose: bool = VerboseOption, +) -> None: """ + Train a source separation model + """ + import tensorflow as tf + + from .audio.adapter import AudioAdapter + from .dataset import get_training_dataset, get_validation_dataset + from .model import model_fn + from .model.provider import ModelProvider + from .utils.configuration import load_configuration + + configure_logger(verbose) + audio_adapter = AudioAdapter.get(adapter) + audio_path = str(data) + params = load_configuration(params_filename) + session_config = tf.compat.v1.ConfigProto() + session_config.gpu_options.per_process_gpu_memory_fraction = 0.45 + estimator = tf.estimator.Estimator( + model_fn=model_fn, + model_dir=params["model_dir"], + params=params, + config=tf.estimator.RunConfig( + save_checkpoints_steps=params["save_checkpoints_steps"], + tf_random_seed=params["random_seed"], + save_summary_steps=params["save_summary_steps"], + session_config=session_config, + log_step_count_steps=10, + keep_checkpoint_max=2, + ), + ) + input_fn = partial(get_training_dataset, params, audio_adapter, audio_path) + train_spec = tf.estimator.TrainSpec( + input_fn=input_fn, max_steps=params["train_max_steps"] + ) + input_fn = partial(get_validation_dataset, params, audio_adapter, audio_path) + evaluation_spec = tf.estimator.EvalSpec( + input_fn=input_fn, steps=None, throttle_secs=params["throttle_secs"] + ) + logger.info("Start model training") + tf.estimator.train_and_evaluate(estimator, train_spec, evaluation_spec) + ModelProvider.writeProbe(params["model_dir"]) + logger.info("Model training done") + + +@spleeter.command() +def separate( + deprecated_files: Optional[str] = AudioInputOption, + files: List[Path] = AudioInputArgument, + adapter: str = AudioAdapterOption, + bitrate: str = AudioBitrateOption, + codec: Codec = AudioCodecOption, + duration: float = AudioDurationOption, + offset: float = AudioOffsetOption, + output_path: Path = AudioOutputOption, + stft_backend: STFTBackend = AudioSTFTBackendOption, + filename_format: str = FilenameFormatOption, + params_filename: str = ModelParametersOption, + mwf: bool = MWFOption, + verbose: bool = VerboseOption, +) -> None: + """ + Separate audio file(s) + """ + from .audio.adapter import AudioAdapter + from .separator import Separator + + configure_logger(verbose) + if deprecated_files is not None: + 
logger.error( + "⚠️ -i option is not supported anymore, audio files must be supplied " + "using input argument instead (see spleeter separate --help)" + ) + raise Exit(20) + audio_adapter: AudioAdapter = AudioAdapter.get(adapter) + separator: Separator = Separator( + params_filename, MWF=mwf, stft_backend=stft_backend + ) + for filename in files: + separator.separate_to_file( + str(filename), + str(output_path), + audio_adapter=audio_adapter, + offset=offset, + duration=duration, + codec=codec, + bitrate=bitrate, + filename_format=filename_format, + synchronous=False, + ) + separator.join() + + +EVALUATION_SPLIT: str = "test" +EVALUATION_METRICS_DIRECTORY: str = "metrics" +EVALUATION_INSTRUMENTS: Container[str] = ("vocals", "drums", "bass", "other") +EVALUATION_METRICS: Container[str] = ("SDR", "SAR", "SIR", "ISR") +EVALUATION_MIXTURE: str = "mixture.wav" +EVALUATION_AUDIO_DIRECTORY: str = "audio" + + +def _compile_metrics(metrics_output_directory) -> Dict: + """ + Compiles metrics from given directory and returns results as dict. + + Parameters: + metrics_output_directory (str): + Directory to get metrics from. + + Returns: + Dict: + Compiled metrics as dict. + """ + import numpy as np + import pandas as pd + + songs = glob(join(metrics_output_directory, "test/*.json")) + index = pd.MultiIndex.from_tuples( + product(EVALUATION_INSTRUMENTS, EVALUATION_METRICS), + names=["instrument", "metric"], + ) + pd.DataFrame([], index=["config1", "config2"], columns=index) + metrics = { + instrument: {k: [] for k in EVALUATION_METRICS} + for instrument in EVALUATION_INSTRUMENTS + } + for song in songs: + with open(song, "r") as stream: + data = json.load(stream) + for target in data["targets"]: + instrument = target["name"] + for metric in EVALUATION_METRICS: + sdr_med = np.median( + [ + frame["metrics"][metric] + for frame in target["frames"] + if not np.isnan(frame["metrics"][metric]) + ] + ) + metrics[instrument][metric].append(sdr_med) + return metrics + + +@spleeter.command() +def evaluate( + adapter: str = AudioAdapterOption, + output_path: Path = AudioOutputOption, + stft_backend: STFTBackend = AudioSTFTBackendOption, + params_filename: str = ModelParametersOption, + mus_dir: Path = MUSDBDirectoryOption, + mwf: bool = MWFOption, + verbose: bool = VerboseOption, +) -> Dict: + """ + Evaluate a model on the musDB test dataset + """ + import numpy as np + + configure_logger(verbose) try: - parser = create_argument_parser() - arguments = parser.parse_args(argv[1:]) - enable_logging() - if arguments.verbose: - enable_tensorflow_logging() - if arguments.command == 'separate': - from .commands.separate import entrypoint - elif arguments.command == 'train': - from .commands.train import entrypoint - elif arguments.command == 'evaluate': - from .commands.evaluate import entrypoint - params = load_configuration(arguments.configuration) - entrypoint(arguments, params) - except SpleeterError as e: - get_logger().error(e) + import musdb + import museval + except ImportError: + logger.error("Extra dependencies musdb and museval not found") + logger.error("Please install musdb and museval first, abort") + raise Exit(10) + # Separate musdb sources. 
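The `separate` command above is a thin wrapper over the library API; a rough programmatic equivalent is sketched below (`spleeter:2stems` is the default configuration name from the deleted option specifications further down; the evaluation code resumes after this note):

    from spleeter.separator import Separator

    # Mirrors the command body above: build a Separator from a configuration
    # descriptor, queue one file asynchronously, then wait for completion.
    separator = Separator("spleeter:2stems")
    separator.separate_to_file("audio_example.mp3", "/tmp/separated_audio", synchronous=False)
    separator.join()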
+ songs = glob(join(mus_dir, EVALUATION_SPLIT, "*/")) + mixtures = [join(song, EVALUATION_MIXTURE) for song in songs] + audio_output_directory = join(output_path, EVALUATION_AUDIO_DIRECTORY) + separate( + deprecated_files=None, + files=mixtures, + adapter=adapter, + bitrate="128k", + codec=Codec.WAV, + duration=600.0, + offset=0, + output_path=join(audio_output_directory, EVALUATION_SPLIT), + stft_backend=stft_backend, + filename_format="{foldername}/{instrument}.{codec}", + params_filename=params_filename, + mwf=mwf, + verbose=verbose, + ) + # Compute metrics with musdb. + metrics_output_directory = join(output_path, EVALUATION_METRICS_DIRECTORY) + logger.info("Starting musdb evaluation (this could be long) ...") + dataset = musdb.DB(root=mus_dir, is_wav=True, subsets=[EVALUATION_SPLIT]) + museval.eval_mus_dir( + dataset=dataset, + estimates_dir=audio_output_directory, + output_dir=metrics_output_directory, + ) + logger.info("musdb evaluation done") + # Compute and pretty print median metrics. + metrics = _compile_metrics(metrics_output_directory) + for instrument, metric in metrics.items(): + logger.info(f"{instrument}:") + for metric, value in metric.items(): + logger.info(f"{metric}: {np.median(value):.3f}") + return metrics def entrypoint(): - """ Command line entrypoint. """ - warnings.filterwarnings('ignore') - main(sys.argv) + """ Application entrypoint. """ + try: + spleeter() + except SpleeterError as e: + logger.error(e) -if __name__ == '__main__': +if __name__ == "__main__": entrypoint() diff --git a/spleeter/audio/__init__.py b/spleeter/audio/__init__.py index 3d973c5..32012e1 100644 --- a/spleeter/audio/__init__.py +++ b/spleeter/audio/__init__.py @@ -10,6 +10,43 @@ - Waveform convertion and transforming functions. """ -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +from enum import Enum + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" + + +class Codec(str, Enum): + """ Enumeration of supported audio codec. """ + + WAV: str = "wav" + MP3: str = "mp3" + OGG: str = "ogg" + M4A: str = "m4a" + WMA: str = "wma" + FLAC: str = "flac" + + +class STFTBackend(str, Enum): + """ Enumeration of supported STFT backend. """ + + AUTO: str = "auto" + TENSORFLOW: str = "tensorflow" + LIBROSA: str = "librosa" + + @classmethod + def resolve(cls: type, backend: str) -> str: + # NOTE: import is resolved here to avoid performance issues on command + # evaluation. + # pyright: reportMissingImports=false + # pylint: disable=import-error + import tensorflow as tf + + if backend not in cls.__members__.values(): + raise ValueError(f"Unsupported backend {backend}") + if backend == cls.AUTO: + if len(tf.config.list_physical_devices("GPU")): + return cls.TENSORFLOW + return cls.LIBROSA + return backend diff --git a/spleeter/audio/adapter.py b/spleeter/audio/adapter.py index 994c8df..21c39cf 100644 --- a/spleeter/audio/adapter.py +++ b/spleeter/audio/adapter.py @@ -3,70 +3,101 @@ """ AudioAdapter class defintion. """ -import subprocess - from abc import ABC, abstractmethod from importlib import import_module -from os.path import exists +from pathlib import Path +from typing import Any, Dict, List, Optional, Union +# pyright: reportMissingImports=false # pylint: disable=import-error import numpy as np import tensorflow as tf -from tensorflow.signal import stft, hann_window -# pylint: enable=import-error +from spleeter.audio import Codec from .. 
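Since `Codec` and `STFTBackend` above subclass both `str` and `Enum`, their members compare equal to plain strings — the property the FFMPEG adapter further below relies on when testing `codec != "wav"`. A quick illustration (sketch):

    from spleeter.audio import Codec, STFTBackend

    assert Codec.MP3 == "mp3"             # str subclass: equal to the raw value
    assert Codec("flac") is Codec.FLAC    # lookup by value
    assert STFTBackend.AUTO in ("auto", "librosa", "tensorflow")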
import SpleeterError -from ..utils.logging import get_logger +from ..types import AudioDescriptor, Signal +from ..utils.logging import logger -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +# pylint: enable=import-error + + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" class AudioAdapter(ABC): """ An abstract class for manipulating audio signal. """ - # Default audio adapter singleton instance. - DEFAULT = None + _DEFAULT: "AudioAdapter" = None + """ Default audio adapter singleton instance. """ @abstractmethod def load( - self, audio_descriptor, offset, duration, - sample_rate, dtype=np.float32): - """ Loads the audio file denoted by the given audio descriptor - and returns it data as a waveform. Aims to be implemented - by client. + self, + audio_descriptor: AudioDescriptor, + offset: Optional[float] = None, + duration: Optional[float] = None, + sample_rate: Optional[float] = None, + dtype: np.dtype = np.float32, + ) -> Signal: + """ + Loads the audio file denoted by the given audio descriptor and + returns it data as a waveform. Aims to be implemented by client. - :param audio_descriptor: Describe song to load, in case of file - based audio adapter, such descriptor would - be a file path. - :param offset: Start offset to load from in seconds. - :param duration: Duration to load in seconds. - :param sample_rate: Sample rate to load audio with. - :param dtype: Numpy data type to use, default to float32. - :returns: Loaded data as (wf, sample_rate) tuple. + Parameters: + audio_descriptor (AudioDescriptor): + Describe song to load, in case of file based audio adapter, + such descriptor would be a file path. + offset (Optional[float]): + Start offset to load from in seconds. + duration (Optional[float]): + Duration to load in seconds. + sample_rate (Optional[float]): + Sample rate to load audio with. + dtype (numpy.dtype): + (Optional) Numpy data type to use, default to `float32`. + + Returns: + Signal: + Loaded data as (wf, sample_rate) tuple. """ pass def load_tf_waveform( - self, audio_descriptor, - offset=0.0, duration=1800., sample_rate=44100, - dtype=b'float32', waveform_name='waveform'): - """ Load the audio and convert it to a tensorflow waveform. + self, + audio_descriptor, + offset: float = 0.0, + duration: float = 1800.0, + sample_rate: int = 44100, + dtype: bytes = b"float32", + waveform_name: str = "waveform", + ) -> Dict[str, Any]: + """ + Load the audio and convert it to a tensorflow waveform. - :param audio_descriptor: Describe song to load, in case of file - based audio adapter, such descriptor would - be a file path. - :param offset: Start offset to load from in seconds. - :param duration: Duration to load in seconds. - :param sample_rate: Sample rate to load audio with. - :param dtype: Numpy data type to use, default to float32. - :param waveform_name: (Optional) Name of the key in output dict. - :returns: TF output dict with waveform as - (T x chan numpy array) and a boolean that - tells whether there were an error while - trying to load the waveform. + Parameters: + audio_descriptor (): + Describe song to load, in case of file based audio adapter, + such descriptor would be a file path. + offset (float): + Start offset to load from in seconds. + duration (float): + Duration to load in seconds. + sample_rate (float): + Sample rate to load audio with. + dtype (bytes): + (Optional)data type to use, default to `b'float32'`. 
+ waveform_name (str): + (Optional) Name of the key in output dict, default to + `'waveform'`. + + Returns: + Dict[str, Any]: + TF output dict with waveform as `(T x chan numpy array)` + and a boolean that tells whether there were an error while + trying to load the waveform. """ # Cast parameters to TF format. offset = tf.cast(offset, tf.float64) @@ -74,76 +105,96 @@ class AudioAdapter(ABC): # Defined safe loading function. def safe_load(path, offset, duration, sample_rate, dtype): - logger = get_logger() - logger.info( - f'Loading audio {path} from {offset} to {offset + duration}') + logger.info(f"Loading audio {path} from {offset} to {offset + duration}") try: (data, _) = self.load( path.numpy(), offset.numpy(), duration.numpy(), sample_rate.numpy(), - dtype=dtype.numpy()) - logger.info('Audio data loaded successfully') + dtype=dtype.numpy(), + ) + logger.info("Audio data loaded successfully") return (data, False) except Exception as e: - logger.exception( - 'An error occurs while loading audio', - exc_info=e) + logger.exception("An error occurs while loading audio", exc_info=e) return (np.float32(-1.0), True) # Execute function and format results. - results = tf.py_function( - safe_load, - [audio_descriptor, offset, duration, sample_rate, dtype], - (tf.float32, tf.bool)), + results = ( + tf.py_function( + safe_load, + [audio_descriptor, offset, duration, sample_rate, dtype], + (tf.float32, tf.bool), + ), + ) waveform, error = results[0] - return { - waveform_name: waveform, - f'{waveform_name}_error': error - } + return {waveform_name: waveform, f"{waveform_name}_error": error} @abstractmethod def save( - self, path, data, sample_rate, - codec=None, bitrate=None): - """ Save the given audio data to the file denoted by - the given path. + self, + path: Union[Path, str], + data: np.ndarray, + sample_rate: float, + codec: Codec = None, + bitrate: str = None, + ) -> None: + """ + Save the given audio data to the file denoted by the given path. - :param path: Path of the audio file to save data in. - :param data: Waveform data to write. - :param sample_rate: Sample rate to write file in. - :param codec: (Optional) Writing codec to use. - :param bitrate: (Optional) Bitrate of the written audio file. + Parameters: + path (Union[Path, str]): + Path like of the audio file to save data in. + data (numpy.ndarray): + Waveform data to write. + sample_rate (float): + Sample rate to write file in. + codec (): + (Optional) Writing codec to use, default to `None`. + bitrate (str): + (Optional) Bitrate of the written audio file, default to + `None`. """ pass + @classmethod + def default(cls: type) -> "AudioAdapter": + """ + Builds and returns a default audio adapter instance. -def get_default_audio_adapter(): - """ Builds and returns a default audio adapter instance. + Returns: + AudioAdapter: + Default adapter instance to use. + """ + if cls._DEFAULT is None: + from .ffmpeg import FFMPEGProcessAudioAdapter - :returns: An audio adapter instance. - """ - if AudioAdapter.DEFAULT is None: - from .ffmpeg import FFMPEGProcessAudioAdapter - AudioAdapter.DEFAULT = FFMPEGProcessAudioAdapter() - return AudioAdapter.DEFAULT + cls._DEFAULT = FFMPEGProcessAudioAdapter() + return cls._DEFAULT + @classmethod + def get(cls: type, descriptor: str) -> "AudioAdapter": + """ + Load dynamically an AudioAdapter from given class descriptor. -def get_audio_adapter(descriptor): - """ Load dynamically an AudioAdapter from given class descriptor. 
+ Parameters: + descriptor (str): + Adapter class descriptor (module.Class) - :param descriptor: Adapter class descriptor (module.Class) - :returns: Created adapter instance. - """ - if descriptor is None: - return get_default_audio_adapter() - module_path = descriptor.split('.') - adapter_class_name = module_path[-1] - module_path = '.'.join(module_path[:-1]) - adapter_module = import_module(module_path) - adapter_class = getattr(adapter_module, adapter_class_name) - if not isinstance(adapter_class, AudioAdapter): - raise SpleeterError( - f'{adapter_class_name} is not a valid AudioAdapter class') - return adapter_class() + Returns: + AudioAdapter: + Created adapter instance. + """ + if not descriptor: + return cls.default() + module_path: List[str] = descriptor.split(".") + adapter_class_name: str = module_path[-1] + module_path: str = ".".join(module_path[:-1]) + adapter_module = import_module(module_path) + adapter_class = getattr(adapter_module, adapter_class_name) + if not issubclass(adapter_class, AudioAdapter): + raise SpleeterError( + f"{adapter_class_name} is not a valid AudioAdapter class" + ) + return adapter_class() diff --git a/spleeter/audio/convertor.py b/spleeter/audio/convertor.py index 0751b03..9d4ab7e 100644 --- a/spleeter/audio/convertor.py +++ b/spleeter/audio/convertor.py @@ -3,39 +3,54 @@ """ This module provides audio data convertion functions. """ +# pyright: reportMissingImports=false # pylint: disable=import-error import numpy as np import tensorflow as tf -# pylint: enable=import-error from ..utils.tensor import from_float32_to_uint8, from_uint8_to_float32 -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +# pylint: enable=import-error + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" -def to_n_channels(waveform, n_channels): - """ Convert a waveform to n_channels by removing or - duplicating channels if needed (in tensorflow). +def to_n_channels(waveform: tf.Tensor, n_channels: int) -> tf.Tensor: + """ + Convert a waveform to n_channels by removing or duplicating channels if + needed (in tensorflow). - :param waveform: Waveform to transform. - :param n_channels: Number of channel to reshape waveform in. - :returns: Reshaped waveform. + Parameters: + waveform (tensorflow.Tensor): + Waveform to transform. + n_channels (int): + Number of channel to reshape waveform in. + + Returns: + tensorflow.Tensor: + Reshaped waveform. """ return tf.cond( tf.shape(waveform)[1] >= n_channels, true_fn=lambda: waveform[:, :n_channels], - false_fn=lambda: tf.tile(waveform, [1, n_channels])[:, :n_channels] + false_fn=lambda: tf.tile(waveform, [1, n_channels])[:, :n_channels], ) -def to_stereo(waveform): - """ Convert a waveform to stereo by duplicating if mono, - or truncating if too many channels. +def to_stereo(waveform: np.ndarray) -> np.ndarray: + """ + Convert a waveform to stereo by duplicating if mono, or truncating + if too many channels. - :param waveform: a (N, d) numpy array. - :returns: A stereo waveform as a (N, 1) numpy array. + Parameters: + waveform (numpy.ndarray): + a `(N, d)` numpy array. + + Returns: + numpy.ndarray: + A stereo waveform as a `(N, 1)` numpy array. """ if waveform.shape[1] == 1: return np.repeat(waveform, 2, axis=-1) @@ -44,45 +59,81 @@ def to_stereo(waveform): return waveform -def gain_to_db(tensor, espilon=10e-10): - """ Convert from gain to decibel in tensorflow. - - :param tensor: Tensor to convert. - :param epsilon: Operation constant. 
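`AudioAdapter.get` above resolves a dotted `module.Class` descriptor through `import_module` and falls back to the memoized default adapter for a falsy descriptor. For example (sketch, assuming FFMPEG binaries are on the PATH):

    from spleeter.audio.adapter import AudioAdapter

    # Dotted descriptor -> dynamic import of the named AudioAdapter subclass.
    adapter = AudioAdapter.get("spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter")
    # Falsy descriptor -> the shared default instance, built once and reused.
    assert AudioAdapter.get("") is AudioAdapter.default()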
- :returns: Converted tensor. +def gain_to_db(tensor: tf.Tensor, espilon: float = 10e-10) -> tf.Tensor: """ - return 20. / np.log(10) * tf.math.log(tf.maximum(tensor, espilon)) + Convert from gain to decibel in tensorflow. + Parameters: + tensor (tensorflow.Tensor): + Tensor to convert + epsilon (float): + Operation constant. -def db_to_gain(tensor): - """ Convert from decibel to gain in tensorflow. - - :param tensor_db: Tensor to convert. - :returns: Converted tensor. + Returns: + tensorflow.Tensor: + Converted tensor. """ - return tf.pow(10., (tensor / 20.)) + return 20.0 / np.log(10) * tf.math.log(tf.maximum(tensor, espilon)) -def spectrogram_to_db_uint(spectrogram, db_range=100., **kwargs): - """ Encodes given spectrogram into uint8 using decibel scale. - - :param spectrogram: Spectrogram to be encoded as TF float tensor. - :param db_range: Range in decibel for encoding. - :returns: Encoded decibel spectrogram as uint8 tensor. +def db_to_gain(tensor: tf.Tensor) -> tf.Tensor: """ - db_spectrogram = gain_to_db(spectrogram) - max_db_spectrogram = tf.reduce_max(db_spectrogram) - db_spectrogram = tf.maximum(db_spectrogram, max_db_spectrogram - db_range) + Convert from decibel to gain in tensorflow. + + Parameters: + tensor (tensorflow.Tensor): + Tensor to convert + + Returns: + tensorflow.Tensor: + Converted tensor. + """ + return tf.pow(10.0, (tensor / 20.0)) + + +def spectrogram_to_db_uint( + spectrogram: tf.Tensor, db_range: float = 100.0, **kwargs +) -> tf.Tensor: + """ + Encodes given spectrogram into uint8 using decibel scale. + + Parameters: + spectrogram (tensorflow.Tensor): + Spectrogram to be encoded as TF float tensor. + db_range (float): + Range in decibel for encoding. + + Returns: + tensorflow.Tensor: + Encoded decibel spectrogram as `uint8` tensor. + """ + db_spectrogram: tf.Tensor = gain_to_db(spectrogram) + max_db_spectrogram: tf.Tensor = tf.reduce_max(db_spectrogram) + db_spectrogram: tf.Tensor = tf.maximum( + db_spectrogram, max_db_spectrogram - db_range + ) return from_float32_to_uint8(db_spectrogram, **kwargs) -def db_uint_spectrogram_to_gain(db_uint_spectrogram, min_db, max_db): - """ Decode spectrogram from uint8 decibel scale. - - :param db_uint_spectrogram: Decibel pectrogram to decode. - :param min_db: Lower bound limit for decoding. - :param max_db: Upper bound limit for decoding. - :returns: Decoded spectrogram as float2 tensor. +def db_uint_spectrogram_to_gain( + db_uint_spectrogram: tf.Tensor, min_db: tf.Tensor, max_db: tf.Tensor +) -> tf.Tensor: """ - db_spectrogram = from_uint8_to_float32(db_uint_spectrogram, min_db, max_db) + Decode spectrogram from uint8 decibel scale. + + Paramters: + db_uint_spectrogram (tensorflow.Tensor): + Decibel spectrogram to decode. + min_db (tensorflow.Tensor): + Lower bound limit for decoding. + max_db (tensorflow.Tensor): + Upper bound limit for decoding. + + Returns: + tensorflow.Tensor: + Decoded spectrogram as `float32` tensor. + """ + db_spectrogram: tf.Tensor = from_uint8_to_float32( + db_uint_spectrogram, min_db, max_db + ) return db_to_gain(db_spectrogram) diff --git a/spleeter/audio/ffmpeg.py b/spleeter/audio/ffmpeg.py index 890e02e..230678c 100644 --- a/spleeter/audio/ffmpeg.py +++ b/spleeter/audio/ffmpeg.py @@ -8,143 +8,178 @@ used within this library. """ +import datetime as dt import os import shutil +from pathlib import Path +from typing import Dict, Optional, Union +# pyright: reportMissingImports=false # pylint: disable=import-error import ffmpeg import numpy as np + +from .. 
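A quick numeric check of the conversion pair above: `gain_to_db` computes `20 * log10(x)` (written as `20 / ln(10) * ln(x)`), and `db_to_gain` inverts it, so a gain of 10 maps to +20 dB (sketch, assuming eager TensorFlow 2):

    import numpy as np
    import tensorflow as tf

    from spleeter.audio.convertor import db_to_gain, gain_to_db

    gain = tf.constant([1.0, 10.0])
    db = gain_to_db(gain)  # -> [0.0, 20.0]
    np.testing.assert_allclose(db.numpy(), [0.0, 20.0], atol=1e-4)
    np.testing.assert_allclose(db_to_gain(db).numpy(), gain.numpy(), rtol=1e-5)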
import SpleeterError +from ..types import Signal +from ..utils.logging import logger +from . import Codec +from .adapter import AudioAdapter + # pylint: enable=import-error -from .adapter import AudioAdapter -from .. import SpleeterError -from ..utils.logging import get_logger - -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' - - -def _check_ffmpeg_install(): - """ Ensure FFMPEG binaries are available. - - :raise SpleeterError: If ffmpeg or ffprobe is not found. - """ - for binary in ('ffmpeg', 'ffprobe'): - if shutil.which(binary) is None: - raise SpleeterError('{} binary not found'.format(binary)) - - -def _to_ffmpeg_time(n): - """ Format number of seconds to time expected by FFMPEG. - :param n: Time in seconds to format. - :returns: Formatted time in FFMPEG format. - """ - m, s = divmod(n, 60) - h, m = divmod(m, 60) - return '%d:%02d:%09.6f' % (h, m, s) - - -def _to_ffmpeg_codec(codec): - ffmpeg_codecs = { - 'm4a': 'aac', - 'ogg': 'libvorbis', - 'wma': 'wmav2', - } - return ffmpeg_codecs.get(codec) or codec +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" class FFMPEGProcessAudioAdapter(AudioAdapter): - """ An AudioAdapter implementation that use FFMPEG binary through + """ + An AudioAdapter implementation that use FFMPEG binary through subprocess in order to perform I/O operation for audio processing. When created, FFMPEG binary path will be checked and expended, raising exception if not found. Such path could be infered using - FFMPEG_PATH environment variable. + `FFMPEG_PATH` environment variable. """ + SUPPORTED_CODECS: Dict[Codec, str] = { + Codec.M4A: "aac", + Codec.OGG: "libvorbis", + Codec.WMA: "wmav2", + } + """ FFMPEG codec name mapping. """ + + def __init__(_) -> None: + """ + Default constructor, ensure FFMPEG binaries are available. + + Raises: + SpleeterError: + If ffmpeg or ffprobe is not found. + """ + for binary in ("ffmpeg", "ffprobe"): + if shutil.which(binary) is None: + raise SpleeterError("{} binary not found".format(binary)) + def load( - self, path, offset=None, duration=None, - sample_rate=None, dtype=np.float32): - """ Loads the audio file denoted by the given path + _, + path: Union[Path, str], + offset: Optional[float] = None, + duration: Optional[float] = None, + sample_rate: Optional[float] = None, + dtype: np.dtype = np.float32, + ) -> Signal: + """ + Loads the audio file denoted by the given path and returns it data as a waveform. - :param path: Path of the audio file to load data from. - :param offset: (Optional) Start offset to load from in seconds. - :param duration: (Optional) Duration to load in seconds. - :param sample_rate: (Optional) Sample rate to load audio with. - :param dtype: (Optional) Numpy data type to use, default to float32. - :returns: Loaded data a (waveform, sample_rate) tuple. - :raise SpleeterError: If any error occurs while loading audio. + Parameters: + path (Union[Path, str]: + Path of the audio file to load data from. + offset (Optional[float]): + Start offset to load from in seconds. + duration (Optional[float]): + Duration to load in seconds. + sample_rate (Optional[float]): + Sample rate to load audio with. + dtype (numpy.dtype): + (Optional) Numpy data type to use, default to `float32`. + + Returns: + Signal: + Loaded data a (waveform, sample_rate) tuple. + + Raises: + SpleeterError: + If any error occurs while loading audio. 
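The new constructor above replaces the removed module-level `_check_ffmpeg_install` helper, so a missing binary now fails once at instantiation time instead of on every `load`/`save` call (sketch):

    from spleeter import SpleeterError
    from spleeter.audio.ffmpeg import FFMPEGProcessAudioAdapter

    try:
        adapter = FFMPEGProcessAudioAdapter()  # probes PATH for ffmpeg and ffprobe
    except SpleeterError as error:
        print(f"FFMPEG binaries unavailable: {error}")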
""" - _check_ffmpeg_install() + if isinstance(path, Path): + path = str(path) if not isinstance(path, str): path = path.decode() try: probe = ffmpeg.probe(path) except ffmpeg._run.Error as e: raise SpleeterError( - 'An error occurs with ffprobe (see ffprobe output below)\n\n{}' - .format(e.stderr.decode())) - if 'streams' not in probe or len(probe['streams']) == 0: - raise SpleeterError('No stream was found with ffprobe') + "An error occurs with ffprobe (see ffprobe output below)\n\n{}".format( + e.stderr.decode() + ) + ) + if "streams" not in probe or len(probe["streams"]) == 0: + raise SpleeterError("No stream was found with ffprobe") metadata = next( - stream - for stream in probe['streams'] - if stream['codec_type'] == 'audio') - n_channels = metadata['channels'] + stream for stream in probe["streams"] if stream["codec_type"] == "audio" + ) + n_channels = metadata["channels"] if sample_rate is None: - sample_rate = metadata['sample_rate'] - output_kwargs = {'format': 'f32le', 'ar': sample_rate} + sample_rate = metadata["sample_rate"] + output_kwargs = {"format": "f32le", "ar": sample_rate} if duration is not None: - output_kwargs['t'] = _to_ffmpeg_time(duration) + output_kwargs["t"] = str(dt.timedelta(seconds=duration)) if offset is not None: - output_kwargs['ss'] = _to_ffmpeg_time(offset) + output_kwargs["ss"] = str(dt.timedelta(seconds=offset)) process = ( - ffmpeg - .input(path) - .output('pipe:', **output_kwargs) - .run_async(pipe_stdout=True, pipe_stderr=True)) + ffmpeg.input(path) + .output("pipe:", **output_kwargs) + .run_async(pipe_stdout=True, pipe_stderr=True) + ) buffer, _ = process.communicate() - waveform = np.frombuffer(buffer, dtype=' None: """ - _check_ffmpeg_install() + Write waveform data to the file denoted by the given path using + FFMPEG process. + + Parameters: + path (Union[Path, str]): + Path like of the audio file to save data in. + data (numpy.ndarray): + Waveform data to write. + sample_rate (float): + Sample rate to write file in. + codec (): + (Optional) Writing codec to use, default to `None`. + bitrate (str): + (Optional) Bitrate of the written audio file, default to + `None`. + + Raises: + IOError: + If any error occurs while using FFMPEG to write data. 
+ """ + if isinstance(path, Path): + path = str(path) directory = os.path.dirname(path) if not os.path.exists(directory): - raise SpleeterError(f'output directory does not exists: {directory}') - get_logger().debug('Writing file %s', path) - input_kwargs = {'ar': sample_rate, 'ac': data.shape[1]} - output_kwargs = {'ar': sample_rate, 'strict': '-2'} + raise SpleeterError(f"output directory does not exists: {directory}") + logger.debug(f"Writing file {path}") + input_kwargs = {"ar": sample_rate, "ac": data.shape[1]} + output_kwargs = {"ar": sample_rate, "strict": "-2"} if bitrate: - output_kwargs['audio_bitrate'] = bitrate - if codec is not None and codec != 'wav': - output_kwargs['codec'] = _to_ffmpeg_codec(codec) + output_kwargs["audio_bitrate"] = bitrate + if codec is not None and codec != "wav": + output_kwargs["codec"] = self.SUPPORTED_CODECS.get(codec, codec) process = ( - ffmpeg - .input('pipe:', format='f32le', **input_kwargs) + ffmpeg.input("pipe:", format="f32le", **input_kwargs) .output(path, **output_kwargs) .overwrite_output() - .run_async(pipe_stdin=True, pipe_stderr=True, quiet=True)) + .run_async(pipe_stdin=True, pipe_stderr=True, quiet=True) + ) try: - process.stdin.write(data.astype(' tf.Tensor: """ - stft_tensor = tf.transpose( + Compute magnitude / power spectrogram from waveform as a + `n_samples x n_channels` tensor. + + Parameters: + waveform (tensorflow.Tensor): + Input waveform as `(times x number of channels)` tensor. + frame_length (int): + Length of a STFT frame to use. + frame_step (int): + HOP between successive frames. + spec_exponent (float): + Exponent of the spectrogram (usually 1 for magnitude + spectrogram, or 2 for power spectrogram). + window_exponent (float): + Exponent applied to the Hann windowing function (may be + useful for making perfect STFT/iSTFT reconstruction). + + Returns: + tensorflow.Tensor: + Computed magnitude / power spectrogram as a + `(T x F x n_channels)` tensor. + """ + stft_tensor: tf.Tensor = tf.transpose( stft( tf.transpose(waveform), frame_length, frame_step, window_fn=lambda f, dtype: hann_window( - f, - periodic=True, - dtype=waveform.dtype) ** window_exponent), - perm=[1, 2, 0]) + f, periodic=True, dtype=waveform.dtype + ) + ** window_exponent, + ), + perm=[1, 2, 0], + ) return tf.abs(stft_tensor) ** spec_exponent def time_stretch( - spectrogram, - factor=1.0, - method=tf.image.ResizeMethod.BILINEAR): - """ Time stretch a spectrogram preserving shape in tensorflow. Note that + spectrogram: tf.Tensor, + factor: float = 1.0, + method: tf.image.ResizeMethod = tf.image.ResizeMethod.BILINEAR, +) -> tf.Tensor: + """ + Time stretch a spectrogram preserving shape in tensorflow. Note that this is an approximation in the frequency domain. - :param spectrogram: Input spectrogram to be time stretched as tensor. - :param factor: (Optional) Time stretch factor, must be >0, default to 1. - :param mehtod: (Optional) Interpolation method, default to BILINEAR. - :returns: Time stretched spectrogram as tensor with same shape. + Parameters: + spectrogram (tensorflow.Tensor): + Input spectrogram to be time stretched as tensor. + factor (float): + (Optional) Time stretch factor, must be > 0, default to `1`. + method (tensorflow.image.ResizeMethod): + (Optional) Interpolation method, default to `BILINEAR`. + + Returns: + tensorflow.Tensor: + Time stretched spectrogram as tensor with same shape. 
""" T = tf.shape(spectrogram)[0] T_ts = tf.cast(tf.cast(T, tf.float32) * factor, tf.int32)[0] F = tf.shape(spectrogram)[1] ts_spec = tf.image.resize_images( - spectrogram, - [T_ts, F], - method=method, - align_corners=True) + spectrogram, [T_ts, F], method=method, align_corners=True + ) return tf.image.resize_image_with_crop_or_pad(ts_spec, T, F) -def random_time_stretch(spectrogram, factor_min=0.9, factor_max=1.1, **kwargs): - """ Time stretch a spectrogram preserving shape with random ratio in - tensorflow. Applies time_stretch to spectrogram with a random ratio drawn - uniformly in [factor_min, factor_max]. - - :param spectrogram: Input spectrogram to be time stretched as tensor. - :param factor_min: (Optional) Min time stretch factor, default to 0.9. - :param factor_max: (Optional) Max time stretch factor, default to 1.1. - :returns: Randomly time stretched spectrogram as tensor with same shape. +def random_time_stretch( + spectrogram: tf.Tensor, factor_min: float = 0.9, factor_max: float = 1.1, **kwargs +) -> tf.Tensor: """ - factor = tf.random_uniform( - shape=(1,), - seed=0) * (factor_max - factor_min) + factor_min + Time stretch a spectrogram preserving shape with random ratio in + tensorflow. Applies time_stretch to spectrogram with a random ratio + drawn uniformly in `[factor_min, factor_max]`. + + Parameters: + spectrogram (tensorflow.Tensor): + Input spectrogram to be time stretched as tensor. + factor_min (float): + (Optional) Min time stretch factor, default to `0.9`. + factor_max (float): + (Optional) Max time stretch factor, default to `1.1`. + + Returns: + tensorflow.Tensor: + Randomly time stretched spectrogram as tensor with same shape. + """ + factor = ( + tf.random_uniform(shape=(1,), seed=0) * (factor_max - factor_min) + factor_min + ) return time_stretch(spectrogram, factor=factor, **kwargs) def pitch_shift( - spectrogram, - semitone_shift=0.0, - method=tf.image.ResizeMethod.BILINEAR): - """ Pitch shift a spectrogram preserving shape in tensorflow. Note that + spectrogram: tf.Tensor, + semitone_shift: float = 0.0, + method: tf.image.ResizeMethod = tf.image.ResizeMethod.BILINEAR, +) -> tf.Tensor: + """ + Pitch shift a spectrogram preserving shape in tensorflow. Note that this is an approximation in the frequency domain. - :param spectrogram: Input spectrogram to be pitch shifted as tensor. - :param semitone_shift: (Optional) Pitch shift in semitone, default to 0.0. - :param mehtod: (Optional) Interpolation method, default to BILINEAR. - :returns: Pitch shifted spectrogram (same shape as spectrogram). + Parameters: + spectrogram (tensorflow.Tensor): + Input spectrogram to be pitch shifted as tensor. + semitone_shift (float): + (Optional) Pitch shift in semitone, default to `0.0`. + method (tensorflow.image.ResizeMethod): + (Optional) Interpolation method, default to `BILINEAR`. + + Returns: + tensorflow.Tensor: + Pitch shifted spectrogram (same shape as spectrogram). """ - factor = 2 ** (semitone_shift / 12.) 
+ factor = 2 ** (semitone_shift / 12.0) T = tf.shape(spectrogram)[0] F = tf.shape(spectrogram)[1] F_ps = tf.cast(tf.cast(F, tf.float32) * factor, tf.int32)[0] ps_spec = tf.image.resize_images( - spectrogram, - [T, F_ps], - method=method, - align_corners=True) + spectrogram, [T, F_ps], method=method, align_corners=True + ) paddings = [[0, 0], [0, tf.maximum(0, F - F_ps)], [0, 0]] - return tf.pad(ps_spec[:, :F, :], paddings, 'CONSTANT') + return tf.pad(ps_spec[:, :F, :], paddings, "CONSTANT") -def random_pitch_shift(spectrogram, shift_min=-1., shift_max=1., **kwargs): - """ Pitch shift a spectrogram preserving shape with random ratio in - tensorflow. Applies pitch_shift to spectrogram with a random shift - amount (expressed in semitones) drawn uniformly in [shift_min, shift_max]. - - :param spectrogram: Input spectrogram to be pitch shifted as tensor. - - :param shift_min: (Optional) Min pitch shift in semitone, default to -1. - :param shift_max: (Optional) Max pitch shift in semitone, default to 1. - :returns: Randomly pitch shifted spectrogram (same shape as spectrogram). +def random_pitch_shift( + spectrogram: tf.Tensor, shift_min: float = -1.0, shift_max: float = 1.0, **kwargs +) -> tf.Tensor: """ - semitone_shift = tf.random_uniform( - shape=(1,), - seed=0) * (shift_max - shift_min) + shift_min + Pitch shift a spectrogram preserving shape with random ratio in + tensorflow. Applies pitch_shift to spectrogram with a random shift + amount (expressed in semitones) drawn uniformly in + `[shift_min, shift_max]`. + + Parameters: + spectrogram (tensorflow.Tensor): + Input spectrogram to be pitch shifted as tensor. + shift_min (float): + (Optional) Min pitch shift in semitone, default to -1. + shift_max (float): + (Optional) Max pitch shift in semitone, default to 1. + + Returns: + tensorflow.Tensor: + Randomly pitch shifted spectrogram (same shape as spectrogram). + """ + semitone_shift = ( + tf.random_uniform(shape=(1,), seed=0) * (shift_max - shift_min) + shift_min + ) return pitch_shift(spectrogram, semitone_shift=semitone_shift, **kwargs) diff --git a/spleeter/commands/__init__.py b/spleeter/commands/__init__.py deleted file mode 100644 index a54e4c1..0000000 --- a/spleeter/commands/__init__.py +++ /dev/null @@ -1,209 +0,0 @@ -#!/usr/bin/env python -# coding: utf8 - -""" This modules provides spleeter command as well as CLI parsing methods. """ - -import json -import logging -from argparse import ArgumentParser -from tempfile import gettempdir -from os.path import exists, join - -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' - - - -# -i opt specification (separate). -OPT_INPUT = { - 'dest': 'inputs', - 'nargs': '+', - 'help': 'List of input audio filenames', - 'required': True -} - -# -o opt specification (evaluate and separate). -OPT_OUTPUT = { - 'dest': 'output_path', - 'default': join(gettempdir(), 'separated_audio'), - 'help': 'Path of the output directory to write audio files in' -} - -# -f opt specification (separate). -OPT_FORMAT = { - 'dest': 'filename_format', - 'default': '{filename}/{instrument}.{codec}', - 'help': ( - 'Template string that will be formatted to generated' - 'output filename. Such template should be Python formattable' - 'string, and could use {filename}, {instrument}, and {codec}' - 'variables.' - ) -} - -# -p opt specification (train, evaluate and separate). 
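The deleted argparse option tables that begin above were superseded by Typer declarations in the new `options` module imported by `__main__` earlier in this diff. That module is not shown here, but the correspondence looks roughly like this hypothetical sketch (`VerboseOption` is a real name from the new CLI; the `Option` arguments are assumptions based on the deleted `OPT_VERBOSE` below):

    from typer import Option

    # Hypothetical Typer counterpart of the deleted OPT_VERBOSE specification.
    VerboseOption: bool = Option(False, "--verbose", help="Shows verbose logs")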
-OPT_PARAMS = { - 'dest': 'configuration', - 'default': 'spleeter:2stems', - 'type': str, - 'action': 'store', - 'help': 'JSON filename that contains params' -} - -# -s opt specification (separate). -OPT_OFFSET = { - 'dest': 'offset', - 'type': float, - 'default': 0., - 'help': 'Set the starting offset to separate audio from.' -} - -# -d opt specification (separate). -OPT_DURATION = { - 'dest': 'duration', - 'type': float, - 'default': 600., - 'help': ( - 'Set a maximum duration for processing audio ' - '(only separate offset + duration first seconds of ' - 'the input file)') -} - -# -w opt specification (separate) -OPT_STFT_BACKEND = { - 'dest': 'stft_backend', - 'type': str, - 'choices' : ["tensorflow", "librosa", "auto"], - 'default': "auto", - 'help': 'Who should be in charge of computing the stfts. Librosa is faster than tensorflow on CPU and uses' - ' less memory. "auto" will use tensorflow when GPU acceleration is available and librosa when not.' -} - - -# -c opt specification (separate). -OPT_CODEC = { - 'dest': 'codec', - 'choices': ('wav', 'mp3', 'ogg', 'm4a', 'wma', 'flac'), - 'default': 'wav', - 'help': 'Audio codec to be used for the separated output' -} - -# -b opt specification (separate). -OPT_BITRATE = { - 'dest': 'bitrate', - 'default': '128k', - 'help': 'Audio bitrate to be used for the separated output' -} - -# -m opt specification (evaluate and separate). -OPT_MWF = { - 'dest': 'MWF', - 'action': 'store_const', - 'const': True, - 'default': False, - 'help': 'Whether to use multichannel Wiener filtering for separation', -} - -# --mus_dir opt specification (evaluate). -OPT_MUSDB = { - 'dest': 'mus_dir', - 'type': str, - 'required': True, - 'help': 'Path to folder with musDB' -} - -# -d opt specification (train). -OPT_DATA = { - 'dest': 'audio_path', - 'type': str, - 'required': True, - 'help': 'Path of the folder containing audio data for training' -} - -# -a opt specification (train, evaluate and separate). -OPT_ADAPTER = { - 'dest': 'audio_adapter', - 'type': str, - 'help': 'Name of the audio adapter to use for audio I/O' -} - -# -a opt specification (train, evaluate and separate). -OPT_VERBOSE = { - 'action': 'store_true', - 'help': 'Shows verbose logs' -} - - -def _add_common_options(parser): - """ Add common option to the given parser. - - :param parser: Parser to add common opt to. - """ - parser.add_argument('-a', '--adapter', **OPT_ADAPTER) - parser.add_argument('-p', '--params_filename', **OPT_PARAMS) - parser.add_argument('--verbose', **OPT_VERBOSE) - - -def _create_train_parser(parser_factory): - """ Creates an argparser for training command - - :param parser_factory: Factory to use to create parser instance. - :returns: Created and configured parser. - """ - parser = parser_factory('train', help='Train a source separation model') - _add_common_options(parser) - parser.add_argument('-d', '--data', **OPT_DATA) - return parser - - -def _create_evaluate_parser(parser_factory): - """ Creates an argparser for evaluation command - - :param parser_factory: Factory to use to create parser instance. - :returns: Created and configured parser. 
- """ - parser = parser_factory( - 'evaluate', - help='Evaluate a model on the musDB test dataset') - _add_common_options(parser) - parser.add_argument('-o', '--output_path', **OPT_OUTPUT) - parser.add_argument('--mus_dir', **OPT_MUSDB) - parser.add_argument('-m', '--mwf', **OPT_MWF) - parser.add_argument('-B', '--stft-backend', **OPT_STFT_BACKEND) - return parser - - -def _create_separate_parser(parser_factory): - """ Creates an argparser for separation command - - :param parser_factory: Factory to use to create parser instance. - :returns: Created and configured parser. - """ - parser = parser_factory('separate', help='Separate audio files') - _add_common_options(parser) - parser.add_argument('-i', '--inputs', **OPT_INPUT) - parser.add_argument('-o', '--output_path', **OPT_OUTPUT) - parser.add_argument('-f', '--filename_format', **OPT_FORMAT) - parser.add_argument('-d', '--duration', **OPT_DURATION) - parser.add_argument('-s', '--offset', **OPT_OFFSET) - parser.add_argument('-c', '--codec', **OPT_CODEC) - parser.add_argument('-b', '--birate', **OPT_BITRATE) - parser.add_argument('-m', '--mwf', **OPT_MWF) - parser.add_argument('-B', '--stft-backend', **OPT_STFT_BACKEND) - return parser - - -def create_argument_parser(): - """ Creates overall command line parser for Spleeter. - - :returns: Created argument parser. - """ - parser = ArgumentParser(prog='spleeter') - subparsers = parser.add_subparsers() - subparsers.dest = 'command' - subparsers.required = True - _create_separate_parser(subparsers.add_parser) - _create_train_parser(subparsers.add_parser) - _create_evaluate_parser(subparsers.add_parser) - return parser diff --git a/spleeter/commands/evaluate.py b/spleeter/commands/evaluate.py deleted file mode 100644 index 93e0990..0000000 --- a/spleeter/commands/evaluate.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python -# coding: utf8 - -""" - Entrypoint provider for performing model evaluation. - - Evaluation is performed against musDB dataset. - - USAGE: python -m spleeter evaluate \ - -p /path/to/params \ - -o /path/to/output/dir \ - [-m] \ - --mus_dir /path/to/musdb dataset -""" - -import sys -import json - -from argparse import Namespace -from itertools import product -from glob import glob -from os.path import join, exists - -# pylint: disable=import-error -import numpy as np -import pandas as pd -# pylint: enable=import-error - -from .separate import entrypoint as separate_entrypoint -from ..utils.logging import get_logger - -try: - import musdb - import museval -except ImportError: - logger = get_logger() - logger.error('Extra dependencies musdb and museval not found') - logger.error('Please install musdb and museval first, abort') - sys.exit(1) - -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' - -_SPLIT = 'test' -_MIXTURE = 'mixture.wav' -_AUDIO_DIRECTORY = 'audio' -_METRICS_DIRECTORY = 'metrics' -_INSTRUMENTS = ('vocals', 'drums', 'bass', 'other') -_METRICS = ('SDR', 'SAR', 'SIR', 'ISR') - - -def _separate_evaluation_dataset(arguments, musdb_root_directory, params): - """ Performs audio separation on the musdb dataset from - the given directory and params. - - :param arguments: Entrypoint arguments. - :param musdb_root_directory: Directory to retrieve dataset from. - :param params: Spleeter configuration to apply to separation. - :returns: Separation output directory path. 
- """ - songs = glob(join(musdb_root_directory, _SPLIT, '*/')) - mixtures = [join(song, _MIXTURE) for song in songs] - audio_output_directory = join( - arguments.output_path, - _AUDIO_DIRECTORY) - separate_entrypoint( - Namespace( - audio_adapter=arguments.audio_adapter, - configuration=arguments.configuration, - inputs=mixtures, - output_path=join(audio_output_directory, _SPLIT), - filename_format='{foldername}/{instrument}.{codec}', - codec='wav', - duration=600., - offset=0., - bitrate='128k', - MWF=arguments.MWF, - verbose=arguments.verbose, - stft_backend=arguments.stft_backend), - params) - return audio_output_directory - - -def _compute_musdb_metrics( - arguments, - musdb_root_directory, - audio_output_directory): - """ Generates musdb metrics fro previsouly computed audio estimation. - - :param arguments: Entrypoint arguments. - :param audio_output_directory: Directory to get audio estimation from. - :returns: Path of generated metrics directory. - """ - metrics_output_directory = join( - arguments.output_path, - _METRICS_DIRECTORY) - get_logger().info('Starting musdb evaluation (this could be long) ...') - dataset = musdb.DB( - root=musdb_root_directory, - is_wav=True, - subsets=[_SPLIT]) - museval.eval_mus_dir( - dataset=dataset, - estimates_dir=audio_output_directory, - output_dir=metrics_output_directory) - get_logger().info('musdb evaluation done') - return metrics_output_directory - - -def _compile_metrics(metrics_output_directory): - """ Compiles metrics from given directory and returns - results as dict. - - :param metrics_output_directory: Directory to get metrics from. - :returns: Compiled metrics as dict. - """ - songs = glob(join(metrics_output_directory, 'test/*.json')) - index = pd.MultiIndex.from_tuples( - product(_INSTRUMENTS, _METRICS), - names=['instrument', 'metric']) - pd.DataFrame([], index=['config1', 'config2'], columns=index) - metrics = { - instrument: {k: [] for k in _METRICS} - for instrument in _INSTRUMENTS} - for song in songs: - with open(song, 'r') as stream: - data = json.load(stream) - for target in data['targets']: - instrument = target['name'] - for metric in _METRICS: - sdr_med = np.median([ - frame['metrics'][metric] - for frame in target['frames'] - if not np.isnan(frame['metrics'][metric])]) - metrics[instrument][metric].append(sdr_med) - return metrics - - -def entrypoint(arguments, params): - """ Command entrypoint. - - :param arguments: Command line parsed argument as argparse.Namespace. - :param params: Deserialized JSON configuration file provided in CLI args. - """ - # Parse and check musdb directory. - musdb_root_directory = arguments.mus_dir - if not exists(musdb_root_directory): - raise IOError(f'musdb directory {musdb_root_directory} not found') - # Separate musdb sources. - audio_output_directory = _separate_evaluation_dataset( - arguments, - musdb_root_directory, - params) - # Compute metrics with musdb. - metrics_output_directory = _compute_musdb_metrics( - arguments, - musdb_root_directory, - audio_output_directory) - # Compute and pretty print median metrics. 
- metrics = _compile_metrics(metrics_output_directory) - for instrument, metric in metrics.items(): - get_logger().info('%s:', instrument) - for metric, value in metric.items(): - get_logger().info('%s: %s', metric, f'{np.median(value):.3f}') - - return metrics diff --git a/spleeter/commands/separate.py b/spleeter/commands/separate.py deleted file mode 100644 index 193d8f6..0000000 --- a/spleeter/commands/separate.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python -# coding: utf8 - -""" - Entrypoint provider for performing source separation. - - USAGE: python -m spleeter separate \ - -p /path/to/params \ - -i inputfile1 inputfile2 ... inputfilen - -o /path/to/output/dir \ - -i /path/to/audio1.wav /path/to/audio2.mp3 -""" - -from ..audio.adapter import get_audio_adapter -from ..separator import Separator - -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' - - - -def entrypoint(arguments, params): - """ Command entrypoint. - - :param arguments: Command line parsed argument as argparse.Namespace. - :param params: Deserialized JSON configuration file provided in CLI args. - """ - # TODO: check with output naming. - audio_adapter = get_audio_adapter(arguments.audio_adapter) - separator = Separator( - arguments.configuration, - MWF=arguments.MWF, - stft_backend=arguments.stft_backend) - for filename in arguments.inputs: - separator.separate_to_file( - filename, - arguments.output_path, - audio_adapter=audio_adapter, - offset=arguments.offset, - duration=arguments.duration, - codec=arguments.codec, - bitrate=arguments.bitrate, - filename_format=arguments.filename_format, - synchronous=False - ) - separator.join() diff --git a/spleeter/commands/train.py b/spleeter/commands/train.py deleted file mode 100644 index 3bffaef..0000000 --- a/spleeter/commands/train.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python -# coding: utf8 - -""" - Entrypoint provider for performing model training. - - USAGE: python -m spleeter train -p /path/to/params -""" - -from functools import partial - -# pylint: disable=import-error -import tensorflow as tf -# pylint: enable=import-error - -from ..audio.adapter import get_audio_adapter -from ..dataset import get_training_dataset, get_validation_dataset -from ..model import model_fn -from ..model.provider import ModelProvider -from ..utils.logging import get_logger - -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' - - -def _create_estimator(params): - """ Creates estimator. - - :param params: TF params to build estimator from. - :returns: Built estimator. - """ - session_config = tf.compat.v1.ConfigProto() - session_config.gpu_options.per_process_gpu_memory_fraction = 0.45 - estimator = tf.estimator.Estimator( - model_fn=model_fn, - model_dir=params['model_dir'], - params=params, - config=tf.estimator.RunConfig( - save_checkpoints_steps=params['save_checkpoints_steps'], - tf_random_seed=params['random_seed'], - save_summary_steps=params['save_summary_steps'], - session_config=session_config, - log_step_count_steps=10, - keep_checkpoint_max=2)) - return estimator - - -def _create_train_spec(params, audio_adapter, audio_path): - """ Creates train spec. - - :param params: TF params to build spec from. - :returns: Built train spec. 
- """ - input_fn = partial(get_training_dataset, params, audio_adapter, audio_path) - train_spec = tf.estimator.TrainSpec( - input_fn=input_fn, - max_steps=params['train_max_steps']) - return train_spec - - -def _create_evaluation_spec(params, audio_adapter, audio_path): - """ Setup eval spec evaluating ever n seconds - - :param params: TF params to build spec from. - :returns: Built evaluation spec. - """ - input_fn = partial( - get_validation_dataset, - params, - audio_adapter, - audio_path) - evaluation_spec = tf.estimator.EvalSpec( - input_fn=input_fn, - steps=None, - throttle_secs=params['throttle_secs']) - return evaluation_spec - - -def entrypoint(arguments, params): - """ Command entrypoint. - - :param arguments: Command line parsed argument as argparse.Namespace. - :param params: Deserialized JSON configuration file provided in CLI args. - """ - audio_adapter = get_audio_adapter(arguments.audio_adapter) - audio_path = arguments.audio_path - estimator = _create_estimator(params) - train_spec = _create_train_spec(params, audio_adapter, audio_path) - evaluation_spec = _create_evaluation_spec( - params, - audio_adapter, - audio_path) - get_logger().info('Start model training') - tf.estimator.train_and_evaluate( - estimator, - train_spec, - evaluation_spec) - ModelProvider.writeProbe(params['model_dir']) - get_logger().info('Model training done') diff --git a/spleeter/dataset.py b/spleeter/dataset.py index 5b11969..4ee096d 100644 --- a/spleeter/dataset.py +++ b/spleeter/dataset.py @@ -14,87 +14,110 @@ (ground truth) """ -import time import os -from os.path import exists, join, sep as SEPARATOR +import time +from os.path import exists +from os.path import sep as SEPARATOR +from typing import Any, Dict, Optional +# pyright: reportMissingImports=false # pylint: disable=import-error -import pandas as pd -import numpy as np import tensorflow as tf -# pylint: enable=import-error -from .audio.convertor import ( - db_uint_spectrogram_to_gain, - spectrogram_to_db_uint) +from .audio.adapter import AudioAdapter +from .audio.convertor import db_uint_spectrogram_to_gain, spectrogram_to_db_uint from .audio.spectrogram import ( compute_spectrogram_tf, random_pitch_shift, - random_time_stretch) -from .utils.logging import get_logger + random_time_stretch, +) +from .utils.logging import logger from .utils.tensor import ( check_tensor_shape, dataset_from_csv, set_tensor_shape, - sync_apply) + sync_apply, +) -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +# pylint: enable=import-error + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" # Default audio parameters to use. -DEFAULT_AUDIO_PARAMS = { - 'instrument_list': ('vocals', 'accompaniment'), - 'mix_name': 'mix', - 'sample_rate': 44100, - 'frame_length': 4096, - 'frame_step': 1024, - 'T': 512, - 'F': 1024 +DEFAULT_AUDIO_PARAMS: Dict = { + "instrument_list": ("vocals", "accompaniment"), + "mix_name": "mix", + "sample_rate": 44100, + "frame_length": 4096, + "frame_step": 1024, + "T": 512, + "F": 1024, } -def get_training_dataset(audio_params, audio_adapter, audio_path): - """ Builds training dataset. +def get_training_dataset( + audio_params: Dict, audio_adapter: AudioAdapter, audio_path: str +) -> Any: + """ + Builds training dataset. - :param audio_params: Audio parameters. - :param audio_adapter: Adapter to load audio from. - :param audio_path: Path of directory containing audio. - :returns: Built dataset. 
+ Parameters: + audio_params (Dict): + Audio parameters. + audio_adapter (AudioAdapter): + Adapter to load audio from. + audio_path (str): + Path of directory containing audio. + + Returns: + Any: + Built dataset. """ builder = DatasetBuilder( audio_params, audio_adapter, audio_path, - chunk_duration=audio_params.get('chunk_duration', 20.0), - random_seed=audio_params.get('random_seed', 0)) + chunk_duration=audio_params.get("chunk_duration", 20.0), + random_seed=audio_params.get("random_seed", 0), + ) return builder.build( - audio_params.get('train_csv'), - cache_directory=audio_params.get('training_cache'), - batch_size=audio_params.get('batch_size'), - n_chunks_per_song=audio_params.get('n_chunks_per_song', 2), + audio_params.get("train_csv"), + cache_directory=audio_params.get("training_cache"), + batch_size=audio_params.get("batch_size"), + n_chunks_per_song=audio_params.get("n_chunks_per_song", 2), random_data_augmentation=False, convert_to_uint=True, - wait_for_cache=False) + wait_for_cache=False, + ) -def get_validation_dataset(audio_params, audio_adapter, audio_path): - """ Builds validation dataset. +def get_validation_dataset( + audio_params: Dict, audio_adapter: AudioAdapter, audio_path: str +) -> Any: + """ + Builds validation dataset. - :param audio_params: Audio parameters. - :param audio_adapter: Adapter to load audio from. - :param audio_path: Path of directory containing audio. - :returns: Built dataset. + Parameters: + audio_params (Dict): + Audio parameters. + audio_adapter (AudioAdapter): + Adapter to load audio from. + audio_path (str): + Path of directory containing audio. + + Returns: + Any: + Built dataset. """ builder = DatasetBuilder( - audio_params, - audio_adapter, - audio_path, - chunk_duration=12.0) + audio_params, audio_adapter, audio_path, chunk_duration=12.0 + ) return builder.build( - audio_params.get('validation_csv'), - batch_size=audio_params.get('batch_size'), - cache_directory=audio_params.get('validation_cache'), + audio_params.get("validation_csv"), + batch_size=audio_params.get("batch_size"), + cache_directory=audio_params.get("validation_cache"), convert_to_uint=True, infinite_generator=False, n_chunks_per_song=1, @@ -108,127 +131,175 @@ def get_validation_dataset(audio_params, audio_adapter, audio_path): class InstrumentDatasetBuilder(object): """ Instrument based filter and mapper provider. """ - def __init__(self, parent, instrument): - """ Default constructor. + def __init__(self, parent, instrument) -> None: + """ + Default constructor. - :param parent: Parent dataset builder. - :param instrument: Target instrument. + Parameters: + parent: + Parent dataset builder. + instrument: + Target instrument. """ self._parent = parent self._instrument = instrument - self._spectrogram_key = f'{instrument}_spectrogram' - self._min_spectrogram_key = f'min_{instrument}_spectrogram' - self._max_spectrogram_key = f'max_{instrument}_spectrogram' + self._spectrogram_key = f"{instrument}_spectrogram" + self._min_spectrogram_key = f"min_{instrument}_spectrogram" + self._max_spectrogram_key = f"max_{instrument}_spectrogram" def load_waveform(self, sample): """ Load waveform for given sample. 
""" - return dict(sample, **self._parent._audio_adapter.load_tf_waveform( - sample[f'{self._instrument}_path'], - offset=sample['start'], - duration=self._parent._chunk_duration, - sample_rate=self._parent._sample_rate, - waveform_name='waveform')) + return dict( + sample, + **self._parent._audio_adapter.load_tf_waveform( + sample[f"{self._instrument}_path"], + offset=sample["start"], + duration=self._parent._chunk_duration, + sample_rate=self._parent._sample_rate, + waveform_name="waveform", + ), + ) def compute_spectrogram(self, sample): """ Compute spectrogram of the given sample. """ - return dict(sample, **{ - self._spectrogram_key: compute_spectrogram_tf( - sample['waveform'], - frame_length=self._parent._frame_length, - frame_step=self._parent._frame_step, - spec_exponent=1., - window_exponent=1.)}) + return dict( + sample, + **{ + self._spectrogram_key: compute_spectrogram_tf( + sample["waveform"], + frame_length=self._parent._frame_length, + frame_step=self._parent._frame_step, + spec_exponent=1.0, + window_exponent=1.0, + ) + }, + ) def filter_frequencies(self, sample): """ """ - return dict(sample, **{ - self._spectrogram_key: - sample[self._spectrogram_key][:, :self._parent._F, :]}) + return dict( + sample, + **{ + self._spectrogram_key: sample[self._spectrogram_key][ + :, : self._parent._F, : + ] + }, + ) def convert_to_uint(self, sample): """ Convert given sample from float to unit. """ - return dict(sample, **spectrogram_to_db_uint( - sample[self._spectrogram_key], - tensor_key=self._spectrogram_key, - min_key=self._min_spectrogram_key, - max_key=self._max_spectrogram_key)) + return dict( + sample, + **spectrogram_to_db_uint( + sample[self._spectrogram_key], + tensor_key=self._spectrogram_key, + min_key=self._min_spectrogram_key, + max_key=self._max_spectrogram_key, + ), + ) def filter_infinity(self, sample): """ Filter infinity sample. """ - return tf.logical_not( - tf.math.is_inf( - sample[self._min_spectrogram_key])) + return tf.logical_not(tf.math.is_inf(sample[self._min_spectrogram_key])) def convert_to_float32(self, sample): """ Convert given sample from unit to float. """ - return dict(sample, **{ - self._spectrogram_key: db_uint_spectrogram_to_gain( - sample[self._spectrogram_key], - sample[self._min_spectrogram_key], - sample[self._max_spectrogram_key])}) + return dict( + sample, + **{ + self._spectrogram_key: db_uint_spectrogram_to_gain( + sample[self._spectrogram_key], + sample[self._min_spectrogram_key], + sample[self._max_spectrogram_key], + ) + }, + ) def time_crop(self, sample): """ """ + def start(sample): """ mid_segment_start """ return tf.cast( tf.maximum( - tf.shape(sample[self._spectrogram_key])[0] - / 2 - self._parent._T / 2, 0), - tf.int32) - return dict(sample, **{ - self._spectrogram_key: sample[self._spectrogram_key][ - start(sample):start(sample) + self._parent._T, :, :]}) + tf.shape(sample[self._spectrogram_key])[0] / 2 + - self._parent._T / 2, + 0, + ), + tf.int32, + ) + + return dict( + sample, + **{ + self._spectrogram_key: sample[self._spectrogram_key][ + start(sample) : start(sample) + self._parent._T, :, : + ] + }, + ) def filter_shape(self, sample): """ Filter badly shaped sample. 
""" return check_tensor_shape( - sample[self._spectrogram_key], ( - self._parent._T, self._parent._F, 2)) + sample[self._spectrogram_key], (self._parent._T, self._parent._F, 2) + ) def reshape_spectrogram(self, sample): - """ """ - return dict(sample, **{ - self._spectrogram_key: set_tensor_shape( - sample[self._spectrogram_key], - (self._parent._T, self._parent._F, 2))}) + """ Reshape given sample. """ + return dict( + sample, + **{ + self._spectrogram_key: set_tensor_shape( + sample[self._spectrogram_key], (self._parent._T, self._parent._F, 2) + ) + }, + ) class DatasetBuilder(object): """ + TO BE DOCUMENTED. """ - # Margin at beginning and end of songs in seconds. - MARGIN = 0.5 + MARGIN: float = 0.5 + """ Margin at beginning and end of songs in seconds. """ - # Wait period for cache (in seconds). - WAIT_PERIOD = 60 + WAIT_PERIOD: int = 60 + """ Wait period for cache (in seconds). """ def __init__( - self, - audio_params, audio_adapter, audio_path, - random_seed=0, chunk_duration=20.0): - """ Default constructor. + self, + audio_params: Dict, + audio_adapter: AudioAdapter, + audio_path: str, + random_seed: int = 0, + chunk_duration: float = 20.0, + ) -> None: + """ + Default constructor. NOTE: Probably need for AudioAdapter. - :param audio_params: Audio parameters to use. - :param audio_adapter: Audio adapter to use. - :param audio_path: - :param random_seed: - :param chunk_duration: + Parameters: + audio_params (Dict): + Audio parameters to use. + audio_adapter (AudioAdapter): + Audio adapter to use. + audio_path (str): + random_seed (int): + chunk_duration (float): """ # Length of segment in frames (if fs=22050 and # frame_step=512, then T=512 corresponds to 11.89s) - self._T = audio_params['T'] + self._T = audio_params["T"] # Number of frequency bins to be used (should # be less than frame_length/2 + 1) - self._F = audio_params['F'] - self._sample_rate = audio_params['sample_rate'] - self._frame_length = audio_params['frame_length'] - self._frame_step = audio_params['frame_step'] - self._mix_name = audio_params['mix_name'] - self._instruments = [self._mix_name] + audio_params['instrument_list'] + self._F = audio_params["F"] + self._sample_rate = audio_params["sample_rate"] + self._frame_length = audio_params["frame_length"] + self._frame_step = audio_params["frame_step"] + self._mix_name = audio_params["mix_name"] + self._instruments = [self._mix_name] + audio_params["instrument_list"] self._instrument_builders = None self._chunk_duration = chunk_duration self._audio_adapter = audio_adapter @@ -238,130 +309,202 @@ class DatasetBuilder(object): def expand_path(self, sample): """ Expands audio paths for the given sample. """ - return dict(sample, **{f'{instrument}_path': tf.strings.join( - (self._audio_path, sample[f'{instrument}_path']), SEPARATOR) - for instrument in self._instruments}) + return dict( + sample, + **{ + f"{instrument}_path": tf.strings.join( + (self._audio_path, sample[f"{instrument}_path"]), SEPARATOR + ) + for instrument in self._instruments + }, + ) def filter_error(self, sample): """ Filter errored sample. """ - return tf.logical_not(sample['waveform_error']) + return tf.logical_not(sample["waveform_error"]) def filter_waveform(self, sample): """ Filter waveform from sample. """ - return {k: v for k, v in sample.items() if not k == 'waveform'} + return {k: v for k, v in sample.items() if not k == "waveform"} def harmonize_spectrogram(self, sample): """ Ensure same size for vocals and mix spectrograms. 
""" + def _reduce(sample): - return tf.reduce_min([ - tf.shape(sample[f'{instrument}_spectrogram'])[0] - for instrument in self._instruments]) - return dict(sample, **{ - f'{instrument}_spectrogram': - sample[f'{instrument}_spectrogram'][:_reduce(sample), :, :] - for instrument in self._instruments}) + return tf.reduce_min( + [ + tf.shape(sample[f"{instrument}_spectrogram"])[0] + for instrument in self._instruments + ] + ) + + return dict( + sample, + **{ + f"{instrument}_spectrogram": sample[f"{instrument}_spectrogram"][ + : _reduce(sample), :, : + ] + for instrument in self._instruments + }, + ) def filter_short_segments(self, sample): """ Filter out too short segment. """ - return tf.reduce_any([ - tf.shape(sample[f'{instrument}_spectrogram'])[0] >= self._T - for instrument in self._instruments]) + return tf.reduce_any( + [ + tf.shape(sample[f"{instrument}_spectrogram"])[0] >= self._T + for instrument in self._instruments + ] + ) def random_time_crop(self, sample): """ Random time crop of 11.88s. """ - return dict(sample, **sync_apply({ - f'{instrument}_spectrogram': sample[f'{instrument}_spectrogram'] - for instrument in self._instruments}, - lambda x: tf.image.random_crop( - x, (self._T, len(self._instruments) * self._F, 2), - seed=self._random_seed))) + return dict( + sample, + **sync_apply( + { + f"{instrument}_spectrogram": sample[f"{instrument}_spectrogram"] + for instrument in self._instruments + }, + lambda x: tf.image.random_crop( + x, + (self._T, len(self._instruments) * self._F, 2), + seed=self._random_seed, + ), + ), + ) def random_time_stretch(self, sample): """ Randomly time stretch the given sample. """ - return dict(sample, **sync_apply({ - f'{instrument}_spectrogram': - sample[f'{instrument}_spectrogram'] - for instrument in self._instruments}, - lambda x: random_time_stretch( - x, factor_min=0.9, factor_max=1.1))) + return dict( + sample, + **sync_apply( + { + f"{instrument}_spectrogram": sample[f"{instrument}_spectrogram"] + for instrument in self._instruments + }, + lambda x: random_time_stretch(x, factor_min=0.9, factor_max=1.1), + ), + ) def random_pitch_shift(self, sample): """ Randomly pitch shift the given sample. """ - return dict(sample, **sync_apply({ - f'{instrument}_spectrogram': - sample[f'{instrument}_spectrogram'] - for instrument in self._instruments}, - lambda x: random_pitch_shift( - x, shift_min=-1.0, shift_max=1.0), concat_axis=0)) + return dict( + sample, + **sync_apply( + { + f"{instrument}_spectrogram": sample[f"{instrument}_spectrogram"] + for instrument in self._instruments + }, + lambda x: random_pitch_shift(x, shift_min=-1.0, shift_max=1.0), + concat_axis=0, + ), + ) def map_features(self, sample): """ Select features and annotation of the given sample. """ input_ = { - f'{self._mix_name}_spectrogram': - sample[f'{self._mix_name}_spectrogram']} + f"{self._mix_name}_spectrogram": sample[f"{self._mix_name}_spectrogram"] + } output = { - f'{instrument}_spectrogram': sample[f'{instrument}_spectrogram'] - for instrument in self._audio_params['instrument_list']} + f"{instrument}_spectrogram": sample[f"{instrument}_spectrogram"] + for instrument in self._audio_params["instrument_list"] + } return (input_, output) - def compute_segments(self, dataset, n_chunks_per_song): - """ Computes segments for each song of the dataset. + def compute_segments(self, dataset: Any, n_chunks_per_song: int) -> Any: + """ + Computes segments for each song of the dataset. - :param dataset: Dataset to compute segments for. 
-        :param n_chunks_per_song: Number of segment per song to compute.
-        :returns: Segmented dataset.
+        Parameters:
+            dataset (Any):
+                Dataset to compute segments for.
+            n_chunks_per_song (int):
+                Number of segments per song to compute.
+
+        Returns:
+            Any:
+                Segmented dataset.
         """
         if n_chunks_per_song <= 0:
-            raise ValueError('n_chunks_per_song must be positif')
+            raise ValueError("n_chunks_per_song must be positive")
         datasets = []
         for k in range(n_chunks_per_song):
             if n_chunks_per_song > 1:
                 datasets.append(
-                    dataset.map(lambda sample: dict(sample, start=tf.maximum(
-                        k * (
-                            sample['duration'] - self._chunk_duration - 2
-                            * self.MARGIN) / (n_chunks_per_song - 1)
-                        + self.MARGIN, 0))))
+                    dataset.map(
+                        lambda sample: dict(
+                            sample,
+                            start=tf.maximum(
+                                k
+                                * (
+                                    sample["duration"]
+                                    - self._chunk_duration
+                                    - 2 * self.MARGIN
+                                )
+                                / (n_chunks_per_song - 1)
+                                + self.MARGIN,
+                                0,
+                            ),
+                        )
+                    )
+                )
             elif n_chunks_per_song == 1:
                 # Take central segment.
                 datasets.append(
-                    dataset.map(lambda sample: dict(sample, start=tf.maximum(
-                        sample['duration'] / 2 - self._chunk_duration / 2,
-                        0))))
+                    dataset.map(
+                        lambda sample: dict(
+                            sample,
+                            start=tf.maximum(
+                                sample["duration"] / 2 - self._chunk_duration / 2, 0
+                            ),
+                        )
+                    )
+                )
         dataset = datasets[-1]
         for d in datasets[:-1]:
             dataset = dataset.concatenate(d)
         return dataset

     @property
-    def instruments(self):
-        """ Instrument dataset builder generator.
+    def instruments(self) -> Any:
+        """
+        Instrument dataset builder generator.

-        :yield InstrumentBuilder instance.
+        Yields:
+            Any:
+                InstrumentBuilder instance.
         """
         if self._instrument_builders is None:
             self._instrument_builders = []
             for instrument in self._instruments:
                 self._instrument_builders.append(
-                    InstrumentDatasetBuilder(self, instrument))
+                    InstrumentDatasetBuilder(self, instrument)
+                )
         for builder in self._instrument_builders:
             yield builder

-    def cache(self, dataset, cache, wait):
-        """ Cache the given dataset if cache is enabled. Eventually waits for
-        cache to be available (useful if another process is already computing
-        cache) if provided wait flag is True.
+    def cache(self, dataset: Any, cache: str, wait: bool) -> Any:
+        """
+        Cache the given dataset if cache is enabled. Optionally waits for
+        the cache to be available (useful if another process is already
+        computing it) when the provided wait flag is `True`.

-        :param dataset: Dataset to be cached if cache is required.
-        :param cache: Path of cache directory to be used, None if no cache.
-        :param wait: If caching is enabled, True is cache should be waited.
-        :returns: Cached dataset if needed, original dataset otherwise.
+        Parameters:
+            dataset (Any):
+                Dataset to be cached if cache is required.
+            cache (str):
+                Path of cache directory to be used, None if no cache.
+            wait (bool):
+                If caching is enabled, whether to wait until the cache
+                is available.
+
+        Returns:
+            Any:
+                Cached dataset if needed, original dataset otherwise.
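The start-offset arithmetic in `compute_segments` above reduces to a short formula; here it is as a pure-Python sketch (names are illustrative):

```python
# Pure-Python sketch of the chunk start offsets computed above: n starts
# spread evenly across the usable span, respecting MARGIN at both ends.
def chunk_starts(duration, chunk_duration=20.0, margin=0.5, n=2):
    if n <= 0:
        raise ValueError("n must be positive")
    if n == 1:
        # Central segment.
        return [max(duration / 2 - chunk_duration / 2, 0.0)]
    usable = duration - chunk_duration - 2 * margin
    return [max(k * usable / (n - 1) + margin, 0.0) for k in range(n)]

print(chunk_starts(180.0))  # [0.5, 159.5] for a three-minute song
```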
""" if cache is not None: if wait: - while not exists(f'{cache}.index'): - get_logger().info( - 'Cache not available, wait %s', - self.WAIT_PERIOD) + while not exists(f"{cache}.index"): + logger.info(f"Cache not available, wait {self.WAIT_PERIOD}") time.sleep(self.WAIT_PERIOD) cache_path = os.path.split(cache)[0] os.makedirs(cache_path, exist_ok=True) @@ -369,11 +512,19 @@ class DatasetBuilder(object): return dataset def build( - self, csv_path, - batch_size=8, shuffle=True, convert_to_uint=True, - random_data_augmentation=False, random_time_crop=True, - infinite_generator=True, cache_directory=None, - wait_for_cache=False, num_parallel_calls=4, n_chunks_per_song=2,): + self, + csv_path: str, + batch_size: int = 8, + shuffle: bool = True, + convert_to_uint: bool = True, + random_data_augmentation: bool = False, + random_time_crop: bool = True, + infinite_generator: bool = True, + cache_directory: Optional[str] = None, + wait_for_cache: bool = False, + num_parallel_calls: int = 4, + n_chunks_per_song: float = 2, + ) -> Any: """ TO BE DOCUMENTED. """ @@ -385,7 +536,8 @@ class DatasetBuilder(object): buffer_size=200000, seed=self._random_seed, # useless since it is cached : - reshuffle_each_iteration=True) + reshuffle_each_iteration=True, + ) # Expand audio path. dataset = dataset.map(self.expand_path) # Load waveform, compute spectrogram, and filtering error, @@ -393,11 +545,11 @@ class DatasetBuilder(object): N = num_parallel_calls for instrument in self.instruments: dataset = ( - dataset - .map(instrument.load_waveform, num_parallel_calls=N) + dataset.map(instrument.load_waveform, num_parallel_calls=N) .filter(self.filter_error) .map(instrument.compute_spectrogram, num_parallel_calls=N) - .map(instrument.filter_frequencies)) + .map(instrument.filter_frequencies) + ) dataset = dataset.map(self.filter_waveform) # Convert to uint before caching in order to save space. if convert_to_uint: @@ -428,26 +580,25 @@ class DatasetBuilder(object): # after croping but before converting back to float. if shuffle: dataset = dataset.shuffle( - buffer_size=256, seed=self._random_seed, - reshuffle_each_iteration=True) + buffer_size=256, seed=self._random_seed, reshuffle_each_iteration=True + ) # Convert back to float32 if convert_to_uint: for instrument in self.instruments: dataset = dataset.map( - instrument.convert_to_float32, num_parallel_calls=N) + instrument.convert_to_float32, num_parallel_calls=N + ) M = 8 # Parallel call post caching. # Must be applied with the same factor on mix and vocals. if random_data_augmentation: - dataset = ( - dataset - .map(self.random_time_stretch, num_parallel_calls=M) - .map(self.random_pitch_shift, num_parallel_calls=M)) + dataset = dataset.map(self.random_time_stretch, num_parallel_calls=M).map( + self.random_pitch_shift, num_parallel_calls=M + ) # Filter by shape (remove badly shaped tensors). for instrument in self.instruments: - dataset = ( - dataset - .filter(instrument.filter_shape) - .map(instrument.reshape_spectrogram)) + dataset = dataset.filter(instrument.filter_shape).map( + instrument.reshape_spectrogram + ) # Select features and annotation. 
dataset = dataset.map(self.map_features) # Make batch (done after selection to avoid diff --git a/spleeter/model/__init__.py b/spleeter/model/__init__.py index 8b8f511..f8fa5d0 100644 --- a/spleeter/model/__init__.py +++ b/spleeter/model/__init__.py @@ -5,17 +5,19 @@ import importlib +# pyright: reportMissingImports=false # pylint: disable=import-error import tensorflow as tf - -from tensorflow.signal import stft, inverse_stft, hann_window -# pylint: enable=import-error +from tensorflow.signal import hann_window, inverse_stft, stft from ..utils.tensor import pad_and_partition, pad_and_reshape -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +# pylint: enable=import-error + + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" placeholder = tf.compat.v1.placeholder @@ -23,29 +25,28 @@ placeholder = tf.compat.v1.placeholder def get_model_function(model_type): """ - Get tensorflow function of the model to be applied to the input tensor. - For instance "unet.softmax_unet" will return the softmax_unet function - in the "unet.py" submodule of the current module (spleeter.model). + Get tensorflow function of the model to be applied to the input tensor. + For instance "unet.softmax_unet" will return the softmax_unet function + in the "unet.py" submodule of the current module (spleeter.model). - Params: - - model_type: str - the relative module path to the model function. + Params: + - model_type: str + the relative module path to the model function. - Returns: - A tensorflow function to be applied to the input tensor to get the - multitrack output. + Returns: + A tensorflow function to be applied to the input tensor to get the + multitrack output. """ - relative_path_to_module = '.'.join(model_type.split('.')[:-1]) - model_name = model_type.split('.')[-1] - main_module = '.'.join((__name__, 'functions')) - path_to_module = f'{main_module}.{relative_path_to_module}' + relative_path_to_module = ".".join(model_type.split(".")[:-1]) + model_name = model_type.split(".")[-1] + main_module = ".".join((__name__, "functions")) + path_to_module = f"{main_module}.{relative_path_to_module}" module = importlib.import_module(path_to_module) model_function = getattr(module, model_name) return model_function class InputProvider(object): - def __init__(self, params): self.params = params @@ -61,16 +62,16 @@ class InputProvider(object): class WaveformInputProvider(InputProvider): - @property def input_names(self): return ["audio_id", "waveform"] def get_input_dict_placeholders(self): - shape = (None, self.params['n_channels']) + shape = (None, self.params["n_channels"]) features = { - 'waveform': placeholder(tf.float32, shape=shape, name="waveform"), - 'audio_id': placeholder(tf.string, name="audio_id")} + "waveform": placeholder(tf.float32, shape=shape, name="waveform"), + "audio_id": placeholder(tf.string, name="audio_id"), + } return features def get_feed_dict(self, features, waveform, audio_id): @@ -78,7 +79,6 @@ class WaveformInputProvider(InputProvider): class SpectralInputProvider(InputProvider): - def __init__(self, params): super().__init__(params) self.stft_input_name = "{}_stft".format(self.params["mix_name"]) @@ -89,11 +89,17 @@ class SpectralInputProvider(InputProvider): def get_input_dict_placeholders(self): features = { - self.stft_input_name: placeholder(tf.complex64, - shape=(None, self.params["frame_length"]//2+1, - self.params['n_channels']), - name=self.stft_input_name), - 'audio_id': 
placeholder(tf.string, name="audio_id")} + self.stft_input_name: placeholder( + tf.complex64, + shape=( + None, + self.params["frame_length"] // 2 + 1, + self.params["n_channels"], + ), + name=self.stft_input_name, + ), + "audio_id": placeholder(tf.string, name="audio_id"), + } return features def get_feed_dict(self, features, stft, audio_id): @@ -101,11 +107,13 @@ class SpectralInputProvider(InputProvider): class InputProviderFactory(object): - @staticmethod def get(params): stft_backend = params["stft_backend"] - assert stft_backend in ("tensorflow", "librosa"), "Unexpected backend {}".format(stft_backend) + assert stft_backend in ( + "tensorflow", + "librosa", + ), "Unexpected backend {}".format(stft_backend) if stft_backend == "tensorflow": return WaveformInputProvider(params) else: @@ -113,7 +121,7 @@ class InputProviderFactory(object): class EstimatorSpecBuilder(object): - """ A builder class that allows to builds a multitrack unet model + """A builder class that allows to builds a multitrack unet model estimator. The built model estimator has a different behaviour when used in a train/eval mode and in predict mode. @@ -137,22 +145,22 @@ class EstimatorSpecBuilder(object): """ # Supported model functions. - DEFAULT_MODEL = 'unet.unet' + DEFAULT_MODEL = "unet.unet" # Supported loss functions. - L1_MASK = 'L1_mask' - WEIGHTED_L1_MASK = 'weighted_L1_mask' + L1_MASK = "L1_mask" + WEIGHTED_L1_MASK = "weighted_L1_mask" # Supported optimizers. - ADADELTA = 'Adadelta' - SGD = 'SGD' + ADADELTA = "Adadelta" + SGD = "SGD" # Math constants. - WINDOW_COMPENSATION_FACTOR = 2./3. + WINDOW_COMPENSATION_FACTOR = 2.0 / 3.0 EPSILON = 1e-10 def __init__(self, features, params): - """ Default constructor. Depending on built model + """Default constructor. Depending on built model usage, the provided features should be different: * In train/eval mode: features is a dictionary with a @@ -169,20 +177,20 @@ class EstimatorSpecBuilder(object): self._features = features self._params = params # Get instrument name. - self._mix_name = params['mix_name'] - self._instruments = params['instrument_list'] + self._mix_name = params["mix_name"] + self._instruments = params["instrument_list"] # Get STFT/signals parameters - self._n_channels = params['n_channels'] - self._T = params['T'] - self._F = params['F'] - self._frame_length = params['frame_length'] - self._frame_step = params['frame_step'] + self._n_channels = params["n_channels"] + self._T = params["T"] + self._F = params["F"] + self._frame_length = params["frame_length"] + self._frame_step = params["frame_step"] def include_stft_computations(self): return self._params["stft_backend"] == "tensorflow" def _build_model_outputs(self): - """ Created a batch_sizexTxFxn_channels input tensor containing + """Created a batch_sizexTxFxn_channels input tensor containing mix magnitude spectrogram, then an output dict from it according to the selected model in internal parameters. 
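`get_model_function` above turns a dotted configuration string into a callable with plain `importlib`; the same lookup, sketched outside the module (assumes spleeter is installed):

```python
import importlib

# "unet.softmax_unet" -> the softmax_unet function defined in the
# spleeter.model.functions.unet submodule.
def resolve(model_type: str):
    module_path, _, name = model_type.rpartition(".")
    module = importlib.import_module(f"spleeter.model.functions.{module_path}")
    return getattr(module, name)

print(resolve("unet.softmax_unet").__name__)  # softmax_unet
```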
@@ -191,22 +199,21 @@ class EstimatorSpecBuilder(object): """ input_tensor = self.spectrogram_feature - model = self._params.get('model', None) + model = self._params.get("model", None) if model is not None: - model_type = model.get('type', self.DEFAULT_MODEL) + model_type = model.get("type", self.DEFAULT_MODEL) else: model_type = self.DEFAULT_MODEL try: apply_model = get_model_function(model_type) except ModuleNotFoundError: - raise ValueError(f'No model function {model_type} found') + raise ValueError(f"No model function {model_type} found") self._model_outputs = apply_model( - input_tensor, - self._instruments, - self._params['model']['params']) + input_tensor, self._instruments, self._params["model"]["params"] + ) def _build_loss(self, labels): - """ Construct tensorflow loss and metrics + """Construct tensorflow loss and metrics :param output_dict: dictionary of network outputs (key: instrument name, value: estimated spectrogram of the instrument) @@ -215,7 +222,7 @@ class EstimatorSpecBuilder(object): :returns: tensorflow (loss, metrics) tuple. """ output_dict = self.model_outputs - loss_type = self._params.get('loss_type', self.L1_MASK) + loss_type = self._params.get("loss_type", self.L1_MASK) if loss_type == self.L1_MASK: losses = { name: tf.reduce_mean(tf.abs(output - labels[name])) @@ -224,11 +231,9 @@ class EstimatorSpecBuilder(object): elif loss_type == self.WEIGHTED_L1_MASK: losses = { name: tf.reduce_mean( - tf.reduce_mean( - labels[name], - axis=[1, 2, 3], - keep_dims=True) * - tf.abs(output - labels[name])) + tf.reduce_mean(labels[name], axis=[1, 2, 3], keep_dims=True) + * tf.abs(output - labels[name]) + ) for name, output in output_dict.items() } else: @@ -236,20 +241,20 @@ class EstimatorSpecBuilder(object): loss = tf.reduce_sum(list(losses.values())) # Add metrics for monitoring each instrument. metrics = {k: tf.compat.v1.metrics.mean(v) for k, v in losses.items()} - metrics['absolute_difference'] = tf.compat.v1.metrics.mean(loss) + metrics["absolute_difference"] = tf.compat.v1.metrics.mean(loss) return loss, metrics def _build_optimizer(self): - """ Builds an optimizer instance from internal parameter values. + """Builds an optimizer instance from internal parameter values. Default to AdamOptimizer if not specified. :returns: Optimizer instance from internal configuration. """ - name = self._params.get('optimizer') + name = self._params.get("optimizer") if name == self.ADADELTA: return tf.compat.v1.train.AdadeltaOptimizer() - rate = self._params['learning_rate'] + rate = self._params["learning_rate"] if name == self.SGD: return tf.compat.v1.train.GradientDescentOptimizer(rate) return tf.compat.v1.train.AdamOptimizer(rate) @@ -260,15 +265,15 @@ class EstimatorSpecBuilder(object): @property def stft_name(self): - return f'{self._mix_name}_stft' + return f"{self._mix_name}_stft" @property def spectrogram_name(self): - return f'{self._mix_name}_spectrogram' + return f"{self._mix_name}_spectrogram" def _build_stft_feature(self): - """ Compute STFT of waveform and slice the STFT in segment - with the right length to feed the network. + """Compute STFT of waveform and slice the STFT in segment + with the right length to feed the network. 
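The `L1_mask` branch above is a per-instrument mean absolute error summed over instruments; isolated as a function with a toy check:

```python
import tensorflow as tf

# L1_mask loss as built above: mean absolute error per instrument
# spectrogram, summed over all instruments.
def l1_mask_loss(outputs, labels):
    losses = {
        name: tf.reduce_mean(tf.abs(output - labels[name]))
        for name, output in outputs.items()
    }
    return tf.reduce_sum(list(losses.values()))

estimate = {"vocals_spectrogram": tf.ones((1, 4, 4, 2))}
target = {"vocals_spectrogram": tf.zeros((1, 4, 4, 2))}
print(l1_mask_loss(estimate, target))  # 1.0
```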
""" stft_name = self.stft_name @@ -276,25 +281,30 @@ class EstimatorSpecBuilder(object): if stft_name not in self._features: # pad input with a frame of zeros - waveform = tf.concat([ - tf.zeros((self._frame_length, self._n_channels)), - self._features['waveform'] - ], - 0 - ) + waveform = tf.concat( + [ + tf.zeros((self._frame_length, self._n_channels)), + self._features["waveform"], + ], + 0, + ) stft_feature = tf.transpose( stft( tf.transpose(waveform), self._frame_length, self._frame_step, window_fn=lambda frame_length, dtype: ( - hann_window(frame_length, periodic=True, dtype=dtype)), - pad_end=True), - perm=[1, 2, 0]) - self._features[f'{self._mix_name}_stft'] = stft_feature + hann_window(frame_length, periodic=True, dtype=dtype) + ), + pad_end=True, + ), + perm=[1, 2, 0], + ) + self._features[f"{self._mix_name}_stft"] = stft_feature if spec_name not in self._features: self._features[spec_name] = tf.abs( - pad_and_partition(self._features[stft_name], self._T))[:, :, :self._F, :] + pad_and_partition(self._features[stft_name], self._T) + )[:, :, : self._F, :] @property def model_outputs(self): @@ -333,25 +343,29 @@ class EstimatorSpecBuilder(object): return self._masked_stfts def _inverse_stft(self, stft_t, time_crop=None): - """ Inverse and reshape the given STFT + """Inverse and reshape the given STFT :param stft_t: input STFT :returns: inverse STFT (waveform) """ - inversed = inverse_stft( - tf.transpose(stft_t, perm=[2, 0, 1]), - self._frame_length, - self._frame_step, - window_fn=lambda frame_length, dtype: ( - hann_window(frame_length, periodic=True, dtype=dtype)) - ) * self.WINDOW_COMPENSATION_FACTOR + inversed = ( + inverse_stft( + tf.transpose(stft_t, perm=[2, 0, 1]), + self._frame_length, + self._frame_step, + window_fn=lambda frame_length, dtype: ( + hann_window(frame_length, periodic=True, dtype=dtype) + ), + ) + * self.WINDOW_COMPENSATION_FACTOR + ) reshaped = tf.transpose(inversed) if time_crop is None: - time_crop = tf.shape(self._features['waveform'])[0] - return reshaped[self._frame_length:self._frame_length+time_crop, :] + time_crop = tf.shape(self._features["waveform"])[0] + return reshaped[self._frame_length : self._frame_length + time_crop, :] def _build_mwf_output_waveform(self): - """ Perform separation with multichannel Wiener Filtering using Norbert. + """Perform separation with multichannel Wiener Filtering using Norbert. Note: multichannel Wiener Filtering is not coded in Tensorflow and thus may be quite slow. @@ -359,36 +373,42 @@ class EstimatorSpecBuilder(object): value: estimated waveform of the instrument) """ import norbert # pylint: disable=import-error + output_dict = self.model_outputs x = self.stft_feature v = tf.stack( [ pad_and_reshape( - output_dict[f'{instrument}_spectrogram'], + output_dict[f"{instrument}_spectrogram"], self._frame_length, - self._F)[:tf.shape(x)[0], ...] + self._F, + )[: tf.shape(x)[0], ...] 
for instrument in self._instruments ], - axis=3) + axis=3, + ) input_args = [v, x] - stft_function = tf.py_function( - lambda v, x: norbert.wiener(v.numpy(), x.numpy()), - input_args, - tf.complex64), + stft_function = ( + tf.py_function( + lambda v, x: norbert.wiener(v.numpy(), x.numpy()), + input_args, + tf.complex64, + ), + ) return { instrument: self._inverse_stft(stft_function[0][:, :, :, k]) for k, instrument in enumerate(self._instruments) } def _extend_mask(self, mask): - """ Extend mask, from reduced number of frequency bin to the number of + """Extend mask, from reduced number of frequency bin to the number of frequency bin in the STFT. :param mask: restricted mask :returns: extended mask :raise ValueError: If invalid mask_extension parameter is set. """ - extension = self._params['mask_extension'] + extension = self._params["mask_extension"] # Extend with average # (dispatch according to energy in the processed band) if extension == "average": @@ -397,13 +417,9 @@ class EstimatorSpecBuilder(object): # (avoid extension artifacts but not conservative separation) elif extension == "zeros": mask_shape = tf.shape(mask) - extension_row = tf.zeros(( - mask_shape[0], - mask_shape[1], - 1, - mask_shape[-1])) + extension_row = tf.zeros((mask_shape[0], mask_shape[1], 1, mask_shape[-1])) else: - raise ValueError(f'Invalid mask_extension parameter {extension}') + raise ValueError(f"Invalid mask_extension parameter {extension}") n_extra_row = self._frame_length // 2 + 1 - self._F extension = tf.tile(extension_row, [1, 1, n_extra_row, 1]) return tf.concat([mask, extension], axis=2) @@ -415,29 +431,31 @@ class EstimatorSpecBuilder(object): """ output_dict = self.model_outputs stft_feature = self.stft_feature - separation_exponent = self._params['separation_exponent'] - output_sum = tf.reduce_sum( - [e ** separation_exponent for e in output_dict.values()], - axis=0 - ) + self.EPSILON + separation_exponent = self._params["separation_exponent"] + output_sum = ( + tf.reduce_sum( + [e ** separation_exponent for e in output_dict.values()], axis=0 + ) + + self.EPSILON + ) out = {} for instrument in self._instruments: - output = output_dict[f'{instrument}_spectrogram'] + output = output_dict[f"{instrument}_spectrogram"] # Compute mask with the model. - instrument_mask = (output ** separation_exponent - + (self.EPSILON / len(output_dict))) / output_sum + instrument_mask = ( + output ** separation_exponent + (self.EPSILON / len(output_dict)) + ) / output_sum # Extend mask; instrument_mask = self._extend_mask(instrument_mask) # Stack back mask. old_shape = tf.shape(instrument_mask) new_shape = tf.concat( - [[old_shape[0] * old_shape[1]], old_shape[2:]], - axis=0) + [[old_shape[0] * old_shape[1]], old_shape[2:]], axis=0 + ) instrument_mask = tf.reshape(instrument_mask, new_shape) # Remove padded part (for mask having the same size as STFT); - instrument_mask = instrument_mask[ - :tf.shape(stft_feature)[0], ...] + instrument_mask = instrument_mask[: tf.shape(stft_feature)[0], ...] 
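The epsilon-stabilised ratio mask assembled in the loop above (and stored just below) follows a standard formula; the same arithmetic in NumPy, with an illustrative exponent of 2:

```python
import numpy as np

# Ratio masks as above: each instrument's spectrogram (raised to the
# separation exponent p) over the sum across instruments, with a small
# epsilon for numerical stability; the masks sum to ~1 per bin.
def ratio_masks(spectrograms, p=2.0, eps=1e-10):
    total = sum(s ** p for s in spectrograms.values()) + eps
    return {
        name: (s ** p + eps / len(spectrograms)) / total
        for name, s in spectrograms.items()
    }

specs = {k: np.random.rand(8, 8) for k in ("vocals", "accompaniment")}
masks = ratio_masks(specs)
print(np.allclose(sum(masks.values()), 1.0))  # True
```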
out[instrument] = instrument_mask self._masks = out @@ -449,7 +467,7 @@ class EstimatorSpecBuilder(object): self._masked_stfts = out def _build_manual_output_waveform(self, masked_stft): - """ Perform ratio mask separation + """Perform ratio mask separation :param output_dict: dictionary of estimated spectrogram (key: instrument name, value: estimated spectrogram of the instrument) @@ -463,14 +481,14 @@ class EstimatorSpecBuilder(object): return output_waveform def _build_output_waveform(self, masked_stft): - """ Build output waveform from given output dict in order to be used in + """Build output waveform from given output dict in order to be used in prediction context. Regarding of the configuration building method will be using MWF. :returns: Built output waveform. """ - if self._params.get('MWF', False): + if self._params.get("MWF", False): output_waveform = self._build_mwf_output_waveform() else: output_waveform = self._build_manual_output_waveform(masked_stft) @@ -482,11 +500,11 @@ class EstimatorSpecBuilder(object): else: self._outputs = self.masked_stfts - if 'audio_id' in self._features: - self._outputs['audio_id'] = self._features['audio_id'] + if "audio_id" in self._features: + self._outputs["audio_id"] = self._features["audio_id"] def build_predict_model(self): - """ Builder interface for creating model instance that aims to perform + """Builder interface for creating model instance that aims to perform prediction / inference over given track. The output of such estimator will be a dictionary with a "" key per separated instrument , associated to the estimated separated waveform of the instrument. @@ -495,11 +513,11 @@ class EstimatorSpecBuilder(object): """ return tf.estimator.EstimatorSpec( - tf.estimator.ModeKeys.PREDICT, - predictions=self.outputs) + tf.estimator.ModeKeys.PREDICT, predictions=self.outputs + ) def build_evaluation_model(self, labels): - """ Builder interface for creating model instance that aims to perform + """Builder interface for creating model instance that aims to perform model evaluation. The output of such estimator will be a dictionary with a key "_spectrogram" per separated instrument, associated to the estimated separated instrument magnitude spectrogram. @@ -509,12 +527,11 @@ class EstimatorSpecBuilder(object): """ loss, metrics = self._build_loss(labels) return tf.estimator.EstimatorSpec( - tf.estimator.ModeKeys.EVAL, - loss=loss, - eval_metric_ops=metrics) + tf.estimator.ModeKeys.EVAL, loss=loss, eval_metric_ops=metrics + ) def build_train_model(self, labels): - """ Builder interface for creating model instance that aims to perform + """Builder interface for creating model instance that aims to perform model training. The output of such estimator will be a dictionary with a key "_spectrogram" per separated instrument, associated to the estimated separated instrument magnitude spectrogram. @@ -525,8 +542,8 @@ class EstimatorSpecBuilder(object): loss, metrics = self._build_loss(labels) optimizer = self._build_optimizer() train_operation = optimizer.minimize( - loss=loss, - global_step=tf.compat.v1.train.get_global_step()) + loss=loss, global_step=tf.compat.v1.train.get_global_step() + ) return tf.estimator.EstimatorSpec( mode=tf.estimator.ModeKeys.TRAIN, loss=loss, @@ -539,9 +556,9 @@ def model_fn(features, labels, mode, params, config): """ :param features: - :param labels: + :param labels: :param mode: Estimator mode. - :param params: + :param params: :param config: TF configuration (not used). :returns: Built EstimatorSpec. 
:raise ValueError: If estimator mode is not supported. @@ -553,4 +570,4 @@ def model_fn(features, labels, mode, params, config): return builder.build_evaluation_model(labels) elif mode == tf.estimator.ModeKeys.TRAIN: return builder.build_train_model(labels) - raise ValueError(f'Unknown mode {mode}') + raise ValueError(f"Unknown mode {mode}") diff --git a/spleeter/model/functions/__init__.py b/spleeter/model/functions/__init__.py index 684f923..ddbd3af 100644 --- a/spleeter/model/functions/__init__.py +++ b/spleeter/model/functions/__init__.py @@ -3,25 +3,45 @@ """ This package provide model functions. """ -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +from typing import Callable, Dict, Iterable, Optional + +# pyright: reportMissingImports=false +# pylint: disable=import-error +import tensorflow as tf + +# pylint: enable=import-error + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" -def apply(function, input_tensor, instruments, params={}): - """ Apply given function to the input tensor. - - :param function: Function to be applied to tensor. - :param input_tensor: Tensor to apply blstm to. - :param instruments: Iterable that provides a collection of instruments. - :param params: (Optional) dict of BLSTM parameters. - :returns: Created output tensor dict. +def apply( + function: Callable, + input_tensor: tf.Tensor, + instruments: Iterable[str], + params: Optional[Dict] = None, +) -> Dict: """ - output_dict = {} + Apply given function to the input tensor. + + Parameters: + function: + Function to be applied to tensor. + input_tensor (tensorflow.Tensor): + Tensor to apply blstm to. + instruments (Iterable[str]): + Iterable that provides a collection of instruments. + params: + (Optional) dict of BLSTM parameters. + + Returns: + Created output tensor dict. + """ + output_dict: Dict = {} for instrument in instruments: - out_name = f'{instrument}_spectrogram' + out_name = f"{instrument}_spectrogram" output_dict[out_name] = function( - input_tensor, - output_name=out_name, - params=params) + input_tensor, output_name=out_name, params=params or {} + ) return output_dict diff --git a/spleeter/model/functions/blstm.py b/spleeter/model/functions/blstm.py index b81122b..6dc63bc 100644 --- a/spleeter/model/functions/blstm.py +++ b/spleeter/model/functions/blstm.py @@ -20,7 +20,11 @@ selection (LSTM layer dropout rate, regularization strength). """ +from typing import Dict, Optional + +# pyright: reportMissingImports=false # pylint: disable=import-error +import tensorflow as tf from tensorflow.compat.v1.keras.initializers import he_uniform from tensorflow.compat.v1.keras.layers import CuDNNLSTM from tensorflow.keras.layers import ( @@ -28,34 +32,48 @@ from tensorflow.keras.layers import ( Dense, Flatten, Reshape, - TimeDistributed) -# pylint: enable=import-error + TimeDistributed, +) from . import apply -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +# pylint: enable=import-error + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" -def apply_blstm(input_tensor, output_name='output', params={}): - """ Apply BLSTM to the given input_tensor. - - :param input_tensor: Input of the model. - :param output_name: (Optional) name of the output, default to 'output'. - :param params: (Optional) dict of BLSTM parameters. - :returns: Output tensor. 
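The reworked `apply` helper above fans a model function out over instruments, producing one `<name>_spectrogram` output per instrument. A hedged usage sketch with a trivial stand-in model (assumes spleeter 2.1 is installed):

```python
import tensorflow as tf
from spleeter.model.functions import apply

# Any callable with this signature can be fanned out over instruments.
def identity_model(input_tensor, output_name="output", params=None):
    return tf.identity(input_tensor, name=output_name)

mix = tf.zeros((1, 512, 1024, 2))  # (batch, T, F, channels), illustrative
outputs = apply(identity_model, mix, ("vocals", "accompaniment"))
print(sorted(outputs))  # ['accompaniment_spectrogram', 'vocals_spectrogram']
```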
+def apply_blstm( + input_tensor: tf.Tensor, output_name: str = "output", params: Optional[Dict] = None +) -> tf.Tensor: """ - units = params.get('lstm_units', 250) + Apply BLSTM to the given input_tensor. + + Parameters: + input_tensor (tensorflow.Tensor): + Input of the model. + output_name (str): + (Optional) name of the output, default to 'output'. + params (Optional[Dict]): + (Optional) dict of BLSTM parameters. + + Returns: + tensorflow.Tensor: + Output tensor. + """ + if params is None: + params = {} + units: int = params.get("lstm_units", 250) kernel_initializer = he_uniform(seed=50) flatten_input = TimeDistributed(Flatten())((input_tensor)) def create_bidirectional(): return Bidirectional( CuDNNLSTM( - units, - kernel_initializer=kernel_initializer, - return_sequences=True)) + units, kernel_initializer=kernel_initializer, return_sequences=True + ) + ) l1 = create_bidirectional()((flatten_input)) l2 = create_bidirectional()((l1)) @@ -63,14 +81,18 @@ def apply_blstm(input_tensor, output_name='output', params={}): dense = TimeDistributed( Dense( int(flatten_input.shape[2]), - activation='relu', - kernel_initializer=kernel_initializer))((l3)) - output = TimeDistributed( - Reshape(input_tensor.shape[2:]), - name=output_name)(dense) + activation="relu", + kernel_initializer=kernel_initializer, + ) + )((l3)) + output: tf.Tensor = TimeDistributed( + Reshape(input_tensor.shape[2:]), name=output_name + )(dense) return output -def blstm(input_tensor, output_name='output', params={}): +def blstm( + input_tensor: tf.Tensor, output_name: str = "output", params: Optional[Dict] = None +) -> tf.Tensor: """ Model function applier. """ return apply(apply_blstm, input_tensor, output_name, params) diff --git a/spleeter/model/functions/unet.py b/spleeter/model/functions/unet.py index 7f9dbea..9fccb15 100644 --- a/spleeter/model/functions/unet.py +++ b/spleeter/model/functions/unet.py @@ -2,92 +2,109 @@ # coding: utf8 """ -This module contains building functions for U-net source -separation models in a similar way as in A. Jansson et al. "Singing -voice separation with deep u-net convolutional networks", ISMIR 2017. -Each instrument is modeled by a single U-net convolutional -/ deconvolutional network that take a mix spectrogram as input and the -estimated sound spectrogram as output. + This module contains building functions for U-net source + separation models in a similar way as in A. Jansson et al. : + + "Singing voice separation with deep u-net convolutional networks", + ISMIR 2017 + + Each instrument is modeled by a single U-net + convolutional / deconvolutional network that take a mix spectrogram + as input and the estimated sound spectrogram as output. """ from functools import partial +from typing import Any, Dict, Iterable, Optional +# pyright: reportMissingImports=false # pylint: disable=import-error import tensorflow as tf - +from tensorflow.compat.v1 import logging +from tensorflow.compat.v1.keras.initializers import he_uniform from tensorflow.keras.layers import ( + ELU, BatchNormalization, Concatenate, Conv2D, Conv2DTranspose, Dropout, - ELU, LeakyReLU, Multiply, ReLU, - Softmax) -from tensorflow.compat.v1 import logging -from tensorflow.compat.v1.keras.initializers import he_uniform -# pylint: enable=import-error + Softmax, +) from . 
import apply -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +# pylint: enable=import-error + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" -def _get_conv_activation_layer(params): +def _get_conv_activation_layer(params: Dict) -> Any: """ + > To be documented. - :param params: - :returns: Required Activation function. + Parameters: + params (Dict): + + Returns: + Any: + Required Activation function. """ - conv_activation = params.get('conv_activation') - if conv_activation == 'ReLU': + conv_activation: str = params.get("conv_activation") + if conv_activation == "ReLU": return ReLU() - elif conv_activation == 'ELU': + elif conv_activation == "ELU": return ELU() return LeakyReLU(0.2) -def _get_deconv_activation_layer(params): +def _get_deconv_activation_layer(params: Dict) -> Any: """ + > To be documented. - :param params: - :returns: Required Activation function. + Parameters: + params (Dict): + + Returns: + Any: + Required Activation function. """ - deconv_activation = params.get('deconv_activation') - if deconv_activation == 'LeakyReLU': + deconv_activation: str = params.get("deconv_activation") + if deconv_activation == "LeakyReLU": return LeakyReLU(0.2) - elif deconv_activation == 'ELU': + elif deconv_activation == "ELU": return ELU() return ReLU() def apply_unet( - input_tensor, - output_name='output', - params={}, - output_mask_logit=False): - """ Apply a convolutionnal U-net to model a single instrument (one U-net + input_tensor: tf.Tensor, + output_name: str = "output", + params: Optional[Dict] = None, + output_mask_logit: bool = False, +) -> Any: + """ + Apply a convolutionnal U-net to model a single instrument (one U-net is used for each instrument). - :param input_tensor: - :param output_name: (Optional) , default to 'output' - :param params: (Optional) , default to empty dict. - :param output_mask_logit: (Optional) , default to False. + Parameters: + input_tensor (tensorflow.Tensor): + output_name (str): + params (Optional[Dict]): + output_mask_logit (bool): """ - logging.info(f'Apply unet for {output_name}') - conv_n_filters = params.get('conv_n_filters', [16, 32, 64, 128, 256, 512]) + logging.info(f"Apply unet for {output_name}") + conv_n_filters = params.get("conv_n_filters", [16, 32, 64, 128, 256, 512]) conv_activation_layer = _get_conv_activation_layer(params) deconv_activation_layer = _get_deconv_activation_layer(params) kernel_initializer = he_uniform(seed=50) conv2d_factory = partial( - Conv2D, - strides=(2, 2), - padding='same', - kernel_initializer=kernel_initializer) + Conv2D, strides=(2, 2), padding="same", kernel_initializer=kernel_initializer + ) # First layer. 
conv1 = conv2d_factory(conv_n_filters[0], (5, 5))(input_tensor) batch1 = BatchNormalization(axis=-1)(conv1) @@ -117,8 +134,9 @@ def apply_unet( conv2d_transpose_factory = partial( Conv2DTranspose, strides=(2, 2), - padding='same', - kernel_initializer=kernel_initializer) + padding="same", + kernel_initializer=kernel_initializer, + ) # up1 = conv2d_transpose_factory(conv_n_filters[4], (5, 5))((conv6)) up1 = deconv_activation_layer(up1) @@ -157,46 +175,60 @@ def apply_unet( 2, (4, 4), dilation_rate=(2, 2), - activation='sigmoid', - padding='same', - kernel_initializer=kernel_initializer)((batch12)) + activation="sigmoid", + padding="same", + kernel_initializer=kernel_initializer, + )((batch12)) output = Multiply(name=output_name)([up7, input_tensor]) return output return Conv2D( 2, (4, 4), dilation_rate=(2, 2), - padding='same', - kernel_initializer=kernel_initializer)((batch12)) + padding="same", + kernel_initializer=kernel_initializer, + )((batch12)) -def unet(input_tensor, instruments, params={}): +def unet( + input_tensor: tf.Tensor, instruments: Iterable[str], params: Optional[Dict] = None +) -> Dict: """ Model function applier. """ return apply(apply_unet, input_tensor, instruments, params) -def softmax_unet(input_tensor, instruments, params={}): - """ Apply softmax to multitrack unet in order to have mask suming to one. +def softmax_unet( + input_tensor: tf.Tensor, instruments: Iterable[str], params: Optional[Dict] = None +) -> Dict: + """ + Apply softmax to multitrack unet in order to have mask suming to one. - :param input_tensor: Tensor to apply blstm to. - :param instruments: Iterable that provides a collection of instruments. - :param params: (Optional) dict of BLSTM parameters. - :returns: Created output tensor dict. + Parameters: + input_tensor (tensorflow.Tensor): + Tensor to apply blstm to. + instruments (Iterable[str]): + Iterable that provides a collection of instruments. + params (Optional[Dict]): + (Optional) dict of BLSTM parameters. + + Returns: + Dict: + Created output tensor dict. """ logit_mask_list = [] for instrument in instruments: - out_name = f'{instrument}_spectrogram' + out_name = f"{instrument}_spectrogram" logit_mask_list.append( apply_unet( input_tensor, output_name=out_name, params=params, - output_mask_logit=True)) + output_mask_logit=True, + ) + ) masks = Softmax(axis=4)(tf.stack(logit_mask_list, axis=4)) output_dict = {} for i, instrument in enumerate(instruments): - out_name = f'{instrument}_spectrogram' - output_dict[out_name] = Multiply(name=out_name)([ - masks[..., i], - input_tensor]) + out_name = f"{instrument}_spectrogram" + output_dict[out_name] = Multiply(name=out_name)([masks[..., i], input_tensor]) return output_dict diff --git a/spleeter/model/provider/__init__.py b/spleeter/model/provider/__init__.py index 3921907..ab446ad 100644 --- a/spleeter/model/provider/__init__.py +++ b/spleeter/model/provider/__init__.py @@ -5,77 +5,91 @@ This package provides tools for downloading model from network using remote storage abstraction. 
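`softmax_unet` above stacks the per-instrument logit masks and applies a softmax over the instrument axis (axis 4), which is what guarantees the masks sum to one in every time-frequency bin:

```python
import numpy as np

# Softmax over the instrument axis, as applied to the stacked logit
# masks in softmax_unet; shape is (batch, T, F, channels, instruments).
logits = np.random.randn(1, 16, 16, 2, 2)
exp = np.exp(logits - logits.max(axis=-1, keepdims=True))  # stable softmax
masks = exp / exp.sum(axis=-1, keepdims=True)
print(np.allclose(masks.sum(axis=-1), 1.0))  # True
```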
- :Example: + Examples: + ```python >>> provider = MyProviderImplementation() >>> provider.get('/path/to/local/storage', params) + ``` """ from abc import ABC, abstractmethod from os import environ, makedirs from os.path import exists, isabs, join, sep -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" class ModelProvider(ABC): """ - A ModelProvider manages model files on disk and - file download is not available. + A ModelProvider manages model files on disk and + file download is not available. """ - DEFAULT_MODEL_PATH = environ.get('MODEL_PATH', 'pretrained_models') - MODEL_PROBE_PATH = '.probe' + DEFAULT_MODEL_PATH: str = environ.get("MODEL_PATH", "pretrained_models") + MODEL_PROBE_PATH: str = ".probe" @abstractmethod - def download(self, name, path): - """ Download model denoted by the given name to disk. + def download(_, name: str, path: str) -> None: + """ + Download model denoted by the given name to disk. - :param name: Name of the model to download. - :param path: Path of the directory to save model into. + Parameters: + name (str): + Name of the model to download. + path (str): + Path of the directory to save model into. """ pass @staticmethod - def writeProbe(directory): - """ Write a model probe file into the given directory. - - :param directory: Directory to write probe into. + def writeProbe(directory: str) -> None: """ - probe = join(directory, ModelProvider.MODEL_PROBE_PATH) - with open(probe, 'w') as stream: - stream.write('OK') + Write a model probe file into the given directory. - def get(self, model_directory): - """ Ensures required model is available at given location. + Parameters: + directory (str): + Directory to write probe into. + """ + probe: str = join(directory, ModelProvider.MODEL_PROBE_PATH) + with open(probe, "w") as stream: + stream.write("OK") - :param model_directory: Expected model_directory to be available. - :raise IOError: If model can not be retrieved. + def get(self, model_directory: str) -> str: + """ + Ensures required model is available at given location. + + Parameters: + model_directory (str): + Expected model_directory to be available. + + Raises: + IOError: + If model can not be retrieved. """ # Expend model directory if needed. if not isabs(model_directory): model_directory = join(self.DEFAULT_MODEL_PATH, model_directory) # Download it if not exists. - model_probe = join(model_directory, self.MODEL_PROBE_PATH) + model_probe: str = join(model_directory, self.MODEL_PROBE_PATH) if not exists(model_probe): if not exists(model_directory): makedirs(model_directory) - self.download( - model_directory.split(sep)[-1], - model_directory) + self.download(model_directory.split(sep)[-1], model_directory) self.writeProbe(model_directory) return model_directory + @classmethod + def default(_: type) -> "ModelProvider": + """ + Builds and returns a default model provider. -def get_default_model_provider(): - """ Builds and returns a default model provider. + Returns: + ModelProvider: + A default model provider instance to use. + """ + from .github import GithubModelProvider - :returns: A default model provider instance to use. 
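`ModelProvider.get` above gates every download behind a `.probe` marker file. Stripped of the class machinery, the caching logic looks roughly like this (`download` stands for any callable that fills the directory):

```python
from os import makedirs
from os.path import exists, isabs, join

# Probe-gated model cache as in ModelProvider.get above: the ".probe"
# file marks a model directory as completely downloaded.
def ensure_model(download, model_directory, default_path="pretrained_models"):
    if not isabs(model_directory):
        model_directory = join(default_path, model_directory)
    probe = join(model_directory, ".probe")
    if not exists(probe):
        makedirs(model_directory, exist_ok=True)
        download(model_directory)  # fetch and unpack into the directory
        with open(probe, "w") as stream:
            stream.write("OK")
    return model_directory
```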
- """ - from .github import GithubModelProvider - host = environ.get('GITHUB_HOST', 'https://github.com') - repository = environ.get('GITHUB_REPOSITORY', 'deezer/spleeter') - release = environ.get('GITHUB_RELEASE', GithubModelProvider.LATEST_RELEASE) - return GithubModelProvider(host, repository, release) + return GithubModelProvider.from_environ() diff --git a/spleeter/model/provider/github.py b/spleeter/model/provider/github.py index 65a10b4..3a9b190 100644 --- a/spleeter/model/provider/github.py +++ b/spleeter/model/provider/github.py @@ -4,41 +4,48 @@ """ A ModelProvider backed by Github Release feature. - :Example: + Examples: + ```python >>> from spleeter.model.provider import github >>> provider = github.GithubModelProvider( 'github.com', 'Deezer/spleeter', 'latest') >>> provider.download('2stems', '/path/to/local/storage') + ``` """ import hashlib -import tarfile import os - +import tarfile +from os import environ from tempfile import NamedTemporaryFile +from typing import Dict -import requests +# pyright: reportMissingImports=false +# pylint: disable=import-error +import httpx +from ...utils.logging import logger from . import ModelProvider -from ...utils.logging import get_logger -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +# pylint: enable=import-error + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" def compute_file_checksum(path): - """ Computes given path file sha256. + """Computes given path file sha256. :param path: Path of the file to compute checksum for. :returns: File checksum. """ sha256 = hashlib.sha256() - with open(path, 'rb') as stream: - for chunk in iter(lambda: stream.read(4096), b''): + with open(path, "rb") as stream: + for chunk in iter(lambda: stream.read(4096), b""): sha256.update(chunk) return sha256.hexdigest() @@ -46,69 +53,104 @@ def compute_file_checksum(path): class GithubModelProvider(ModelProvider): """ A ModelProvider implementation backed on Github for remote storage. """ - LATEST_RELEASE = 'v1.4.0' - RELEASE_PATH = 'releases/download' - CHECKSUM_INDEX = 'checksum.json' + DEFAULT_HOST: str = "https://github.com" + DEFAULT_REPOSITORY: str = "deezer/spleeter" - def __init__(self, host, repository, release): - """ Default constructor. + CHECKSUM_INDEX: str = "checksum.json" + LATEST_RELEASE: str = "v1.4.0" + RELEASE_PATH: str = "releases/download" - :param host: Host to the Github instance to reach. - :param repository: Repository path within target Github. - :param release: Release name to get models from. + def __init__(self, host: str, repository: str, release: str) -> None: + """Default constructor. + + Parameters: + host (str): + Host to the Github instance to reach. + repository (str): + Repository path within target Github. + release (str): + Release name to get models from. """ - self._host = host - self._repository = repository - self._release = release + self._host: str = host + self._repository: str = repository + self._release: str = release - def checksum(self, name): - """ Downloads and returns reference checksum for the given model name. - - :param name: Name of the model to get checksum for. - :returns: Checksum of the required model. - :raise ValueError: If the given model name is not indexed. 
+ @classmethod + def from_environ(cls: type) -> "GithubModelProvider": """ - url = '{}/{}/{}/{}/{}'.format( - self._host, - self._repository, - self.RELEASE_PATH, - self._release, - self.CHECKSUM_INDEX) - response = requests.get(url) + Factory method that creates provider from envvars. + + Returns: + GithubModelProvider: + Created instance. + """ + return cls( + environ.get("GITHUB_HOST", cls.DEFAULT_HOST), + environ.get("GITHUB_REPOSITORY", cls.DEFAULT_REPOSITORY), + environ.get("GITHUB_RELEASE", cls.LATEST_RELEASE), + ) + + def checksum(self, name: str) -> str: + """ + Downloads and returns reference checksum for the given model name. + + Parameters: + name (str): + Name of the model to get checksum for. + Returns: + str: + Checksum of the required model. + + Raises: + ValueError: + If the given model name is not indexed. + """ + url: str = "/".join( + ( + self._host, + self._repository, + self.RELEASE_PATH, + self._release, + self.CHECKSUM_INDEX, + ) + ) + response: httpx.Response = httpx.get(url) response.raise_for_status() - index = response.json() + index: Dict = response.json() if name not in index: - raise ValueError('No checksum for model {}'.format(name)) + raise ValueError(f"No checksum for model {name}") return index[name] - def download(self, name, path): - """ Download model denoted by the given name to disk. - - :param name: Name of the model to download. - :param path: Path of the directory to save model into. + def download(self, name: str, path: str) -> None: """ - url = '{}/{}/{}/{}/{}.tar.gz'.format( - self._host, - self._repository, - self.RELEASE_PATH, - self._release, - name) - get_logger().info('Downloading model archive %s', url) - with requests.get(url, stream=True) as response: - response.raise_for_status() - archive = NamedTemporaryFile(delete=False) - try: - with archive as stream: - # Note: check for chunk size parameters ? - for chunk in response.iter_content(chunk_size=8192): - if chunk: + Download model denoted by the given name to disk. + + Parameters: + name (str): + Name of the model to download. + path (str): + Path of the directory to save model into. 
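+
+        Examples:
+            A usage sketch (the storage path is illustrative):
+            ```python
+            >>> provider = GithubModelProvider.from_environ()
+            >>> provider.download('2stems', '/path/to/local/storage')
+            ```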
+ """ + url: str = "/".join( + (self._host, self._repository, self.RELEASE_PATH, self._release, name) + ) + url = f"{url}.tar.gz" + logger.info(f"Downloading model archive {url}") + with httpx.Client(http2=True) as client: + with client.stream("GET", url) as response: + response.raise_for_status() + archive = NamedTemporaryFile(delete=False) + try: + with archive as stream: + for chunk in response.iter_raw(): stream.write(chunk) - get_logger().info('Validating archive checksum') - if compute_file_checksum(archive.name) != self.checksum(name): - raise IOError('Downloaded file is corrupted, please retry') - get_logger().info('Extracting downloaded %s archive', name) - with tarfile.open(name=archive.name) as tar: - tar.extractall(path=path) - finally: - os.unlink(archive.name) - get_logger().info('%s model file(s) extracted', name) + logger.info("Validating archive checksum") + checksum: str = compute_file_checksum(archive.name) + if checksum != self.checksum(name): + raise IOError("Downloaded file is corrupted, please retry") + logger.info(f"Extracting downloaded {name} archive") + with tarfile.open(name=archive.name) as tar: + tar.extractall(path=path) + finally: + os.unlink(archive.name) + logger.info(f"{name} model file(s) extracted") diff --git a/spleeter/options.py b/spleeter/options.py new file mode 100644 index 0000000..caf6781 --- /dev/null +++ b/spleeter/options.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +# coding: utf8 + +""" This modules provides spleeter command as well as CLI parsing methods. """ + +from os.path import join +from tempfile import gettempdir + +from typer import Argument, Option +from typer.models import ArgumentInfo, OptionInfo + +from .audio import Codec, STFTBackend + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" + +AudioInputArgument: ArgumentInfo = Argument( + ..., + help="List of input audio file path", + exists=True, + file_okay=True, + dir_okay=False, + readable=True, + resolve_path=True, +) + +AudioInputOption: OptionInfo = Option( + None, "--inputs", "-i", help="(DEPRECATED) placeholder for deprecated input option" +) + +AudioAdapterOption: OptionInfo = Option( + "spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter", + "--adapter", + "-a", + help="Name of the audio adapter to use for audio I/O", +) + +AudioOutputOption: OptionInfo = Option( + join(gettempdir(), "separated_audio"), + "--output_path", + "-o", + help="Path of the output directory to write audio files in", +) + +AudioOffsetOption: OptionInfo = Option( + 0.0, "--offset", "-s", help="Set the starting offset to separate audio from" +) + +AudioDurationOption: OptionInfo = Option( + 600.0, + "--duration", + "-d", + help=( + "Set a maximum duration for processing audio " + "(only separate offset + duration first seconds of " + "the input file)" + ), +) + +AudioSTFTBackendOption: OptionInfo = Option( + STFTBackend.AUTO, + "--stft-backend", + "-B", + case_sensitive=False, + help=( + "Who should be in charge of computing the stfts. Librosa is faster " + 'than tensorflow on CPU and uses less memory. 
"auto" will use ' + "tensorflow when GPU acceleration is available and librosa when not" + ), +) + +AudioCodecOption: OptionInfo = Option( + Codec.WAV, "--codec", "-c", help="Audio codec to be used for the separated output" +) + +AudioBitrateOption: OptionInfo = Option( + "128k", "--bitrate", "-b", help="Audio bitrate to be used for the separated output" +) + +FilenameFormatOption: OptionInfo = Option( + "{filename}/{instrument}.{codec}", + "--filename_format", + "-f", + help=( + "Template string that will be formatted to generated" + "output filename. Such template should be Python formattable" + "string, and could use {filename}, {instrument}, and {codec}" + "variables" + ), +) + +ModelParametersOption: OptionInfo = Option( + "spleeter:2stems", + "--params_filename", + "-p", + help="JSON filename that contains params", +) + + +MWFOption: OptionInfo = Option( + False, "--mwf", help="Whether to use multichannel Wiener filtering for separation" +) + +MUSDBDirectoryOption: OptionInfo = Option( + ..., + "--mus_dir", + exists=True, + dir_okay=True, + file_okay=False, + readable=True, + resolve_path=True, + help="Path to musDB dataset directory", +) + +TrainingDataDirectoryOption: OptionInfo = Option( + ..., + "--data", + "-d", + exists=True, + dir_okay=True, + file_okay=False, + readable=True, + resolve_path=True, + help="Path of the folder containing audio data for training", +) + +VerboseOption: OptionInfo = Option(False, "--verbose", help="Enable verbose logs") diff --git a/spleeter/py.typed b/spleeter/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/spleeter/resources/__init__.py b/spleeter/resources/__init__.py index 01939fb..f3c9084 100644 --- a/spleeter/resources/__init__.py +++ b/spleeter/resources/__init__.py @@ -3,6 +3,6 @@ """ Packages that provides static resources file for the library. """ -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" diff --git a/spleeter/separator.py b/spleeter/separator.py index 131bdac..0c90ece 100644 --- a/spleeter/separator.py +++ b/spleeter/separator.py @@ -4,60 +4,63 @@ """ Module that provides a class wrapper for source separation. - :Example: + Examples: + ```python >>> from spleeter.separator import Separator >>> separator = Separator('spleeter:2stems') >>> separator.separate(waveform, lambda instrument, data: ...) >>> separator.separate_to_file(...) + ``` """ import atexit import os -import logging - from multiprocessing import Pool -from os.path import basename, join, splitext, dirname -from time import time -from typing import Container, NoReturn +from os.path import basename, dirname, join, splitext +from typing import Dict, Generator, Optional +# pyright: reportMissingImports=false +# pylint: disable=import-error import numpy as np import tensorflow as tf - -from librosa.core import stft, istft +from librosa.core import istft, stft from scipy.signal.windows import hann +from spleeter.model.provider import ModelProvider + from . 
-from .audio.adapter import get_default_audio_adapter
+from .audio import Codec, STFTBackend
+from .audio.adapter import AudioAdapter
 from .audio.convertor import to_stereo
+from .model import EstimatorSpecBuilder, InputProviderFactory, model_fn
+from .model.provider import ModelProvider
+from .types import AudioDescriptor
 from .utils.configuration import load_configuration
-from .utils.estimator import create_estimator, get_default_model_dir
-from .model import EstimatorSpecBuilder, InputProviderFactory

-__email__ = 'spleeter@deezer.com'
-__author__ = 'Deezer Research'
-__license__ = 'MIT License'
+# pylint: enable=import-error

-SUPPORTED_BACKEND: Container[str] = ('auto', 'tensorflow', 'librosa')
-""" """
+__email__ = "spleeter@deezer.com"
+__author__ = "Deezer Research"
+__license__ = "MIT License"


-class DataGenerator():
+class DataGenerator(object):
     """
-    Generator object that store a sample and generate it once while called.
-    Used to feed a tensorflow estimator without knowing the whole data at
-    build time.
+    Generator object that stores a sample and generates it once when called.
+    Used to feed a tensorflow estimator without knowing the whole data at
+    build time.
     """

-    def __init__(self):
+    def __init__(self) -> None:
         """ Default constructor. """
         self._current_data = None

-    def update_data(self, data):
+    def update_data(self, data) -> None:
         """ Replace internal data. """
         self._current_data = data

-    def __call__(self):
+    def __call__(self) -> Generator:
         """ Generation process. """
         buffer = self._current_data
         while buffer:
@@ -65,34 +68,52 @@ class DataGenerator():
         buffer = self._current_data


-def get_backend(backend: str) -> str:
+def create_estimator(params, MWF):
     """
+    Initialize tensorflow estimator that will perform separation
+
+    Params:
+    - params: a dictionary of parameters for building the model
+
+    Returns:
+        a tensorflow estimator
     """
-    if backend not in SUPPORTED_BACKEND:
-        raise ValueError(f'Unsupported backend {backend}')
-    if backend == 'auto':
-        if len(tf.config.list_physical_devices('GPU')):
-            return 'tensorflow'
-        return 'librosa'
-    return backend
+    # Load model.
+    provider: ModelProvider = ModelProvider.default()
+    params["model_dir"] = provider.get(params["model_dir"])
+    params["MWF"] = MWF
+    # Setup config
+    session_config = tf.compat.v1.ConfigProto()
+    session_config.gpu_options.per_process_gpu_memory_fraction = 0.7
+    config = tf.estimator.RunConfig(session_config=session_config)
+    # Setup estimator
+    estimator = tf.estimator.Estimator(
+        model_fn=model_fn, model_dir=params["model_dir"], params=params, config=config
+    )
+    return estimator


 class Separator(object):
     """ A wrapper class for performing separation. """

     def __init__(
-            self,
-            params_descriptor,
-            MWF: bool = False,
-            stft_backend: str = 'auto',
-            multiprocess: bool = True):
-        """ Default constructor.
+        self,
+        params_descriptor: str,
+        MWF: bool = False,
+        stft_backend: STFTBackend = STFTBackend.AUTO,
+        multiprocess: bool = True,
+    ) -> None:
+        """
+        Default constructor.

-        :param params_descriptor: Descriptor for TF params to be used.
-        :param MWF: (Optional) True if MWF should be used, False otherwise.
+        Parameters:
+            params_descriptor (str):
+                Descriptor for TF params to be used.
+            MWF (bool):
+                (Optional) `True` if MWF should be used, `False` otherwise.
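+            stft_backend (STFTBackend):
+                (Optional) Backend to use for STFT computation,
+                `STFTBackend.AUTO` by default.
+            multiprocess (bool):
+                (Optional) `True` to enable multiprocessing for file export,
+                `False` otherwise.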
""" self._params = load_configuration(params_descriptor) - self._sample_rate = self._params['sample_rate'] + self._sample_rate = self._params["sample_rate"] self._MWF = MWF self._tf_graph = tf.Graph() self._prediction_generator = None @@ -106,19 +127,21 @@ class Separator(object): else: self._pool = None self._tasks = [] - self._params['stft_backend'] = get_backend(stft_backend) + self._params["stft_backend"] = STFTBackend.resolve(stft_backend) self._data_generator = DataGenerator() - def __del__(self): - """ """ + def __del__(self) -> None: if self._session: self._session.close() - def _get_prediction_generator(self): - """ Lazy loading access method for internal prediction generator + def _get_prediction_generator(self) -> Generator: + """ + Lazy loading access method for internal prediction generator returned by the predict method of a tensorflow estimator. - :returns: generator of prediction. + Returns: + Generator: + Generator of prediction. """ if self._prediction_generator is None: estimator = create_estimator(self._params, self._MWF) @@ -126,82 +149,74 @@ class Separator(object): def get_dataset(): return tf.data.Dataset.from_generator( self._data_generator, - output_types={ - 'waveform': tf.float32, - 'audio_id': tf.string}, - output_shapes={ - 'waveform': (None, 2), - 'audio_id': ()}) + output_types={"waveform": tf.float32, "audio_id": tf.string}, + output_shapes={"waveform": (None, 2), "audio_id": ()}, + ) self._prediction_generator = estimator.predict( - get_dataset, - yield_single_examples=False) + get_dataset, yield_single_examples=False + ) return self._prediction_generator - def join(self, timeout: int = 200) -> NoReturn: - """ Wait for all pending tasks to be finished. + def join(self, timeout: int = 200) -> None: + """ + Wait for all pending tasks to be finished. - :param timeout: (Optional) task waiting timeout. + Parameters: + timeout (int): + (Optional) task waiting timeout. """ while len(self._tasks) > 0: task = self._tasks.pop() task.get() task.wait(timeout=timeout) - def _separate_tensorflow(self, waveform: np.ndarray, audio_descriptor): - """ Performs source separation over the given waveform with tensorflow - backend. - - :param waveform: Waveform to apply separation on. - :returns: Separated waveforms. + def _stft( + self, data: np.ndarray, inverse: bool = False, length: Optional[int] = None + ) -> np.ndarray: """ - if not waveform.shape[-1] == 2: - waveform = to_stereo(waveform) - prediction_generator = self._get_prediction_generator() - # NOTE: update data in generator before performing separation. - self._data_generator.update_data({ - 'waveform': waveform, - 'audio_id': np.array(audio_descriptor)}) - # NOTE: perform separation. - prediction = next(prediction_generator) - prediction.pop('audio_id') - return prediction - - def _stft(self, data, inverse: bool = False, length=None): - """ Single entrypoint for both stft and istft. This computes stft and + Single entrypoint for both stft and istft. This computes stft and istft with librosa on stereo data. The two channels are processed - separately and are concatenated together in the result. The expected - input formats are: (n_samples, 2) for stft and (T, F, 2) for istft. + separately and are concatenated together in the result. The + expected input formats are: (n_samples, 2) for stft and (T, F, 2) + for istft. - :param data: np.array with either the waveform or the complex - spectrogram depending on the parameter inverse - :param inverse: should a stft or an istft be computed. 
- :returns: Stereo data as numpy array for the transform. - The channels are stored in the last dimension. + Parameters: + data (numpy.array): + Array with either the waveform or the complex spectrogram + depending on the parameter inverse + inverse (bool): + (Optional) Should a stft or an istft be computed. + length (Optional[int]): + + Returns: + numpy.ndarray: + Stereo data as numpy array for the transform. The channels + are stored in the last dimension. """ assert not (inverse and length is None) data = np.asfortranarray(data) - N = self._params['frame_length'] - H = self._params['frame_step'] + N = self._params["frame_length"] + H = self._params["frame_step"] win = hann(N, sym=False) fstft = istft if inverse else stft - win_len_arg = { - 'win_length': None, - 'length': None} if inverse else {'n_fft': N} + win_len_arg = {"win_length": None, "length": None} if inverse else {"n_fft": N} n_channels = data.shape[-1] out = [] for c in range(n_channels): - d = np.concatenate( - (np.zeros((N, )), data[:, c], np.zeros((N, ))) - ) if not inverse else data[:, :, c].T + d = ( + np.concatenate((np.zeros((N,)), data[:, c], np.zeros((N,)))) + if not inverse + else data[:, :, c].T + ) s = fstft(d, hop_length=H, window=win, center=False, **win_len_arg) if inverse: - s = s[N:N+length] - s = np.expand_dims(s.T, 2-inverse) + s = s[N : N + length] + s = np.expand_dims(s.T, 2 - inverse) out.append(s) if len(out) == 1: return out[0] - return np.concatenate(out, axis=2-inverse) + return np.concatenate(out, axis=2 - inverse) def _get_input_provider(self): if self._input_provider is None: @@ -216,22 +231,29 @@ class Separator(object): def _get_builder(self): if self._builder is None: - self._builder = EstimatorSpecBuilder( - self._get_features(), - self._params) + self._builder = EstimatorSpecBuilder(self._get_features(), self._params) return self._builder def _get_session(self): if self._session is None: saver = tf.compat.v1.train.Saver() - latest_checkpoint = tf.train.latest_checkpoint( - get_default_model_dir(self._params['model_dir'])) + provider = ModelProvider.default() + model_directory: str = provider.get(self._params["model_dir"]) + latest_checkpoint = tf.train.latest_checkpoint(model_directory) self._session = tf.compat.v1.Session() saver.restore(self._session, latest_checkpoint) return self._session - def _separate_librosa(self, waveform: np.ndarray, audio_id): - """ Performs separation with librosa backend for STFT. + def _separate_librosa( + self, waveform: np.ndarray, audio_descriptor: AudioDescriptor + ) -> Dict: + """ + Performs separation with librosa backend for STFT. + + Parameters: + waveform (numpy.ndarray): + Waveform to be separated (as a numpy array) + audio_descriptor (AudioDescriptor): """ with self._tf_graph.as_default(): out = {} @@ -248,65 +270,115 @@ class Separator(object): outputs = sess.run( outputs, feed_dict=self._get_input_provider().get_feed_dict( - features, - stft, - audio_id)) + features, stft, audio_descriptor + ), + ) for inst in self._get_builder().instruments: out[inst] = self._stft( - outputs[inst], - inverse=True, - length=waveform.shape[0]) + outputs[inst], inverse=True, length=waveform.shape[0] + ) return out - def separate(self, waveform: np.ndarray, audio_descriptor=''): - """ Performs separation on a waveform. - - :param waveform: Waveform to be separated (as a numpy array) - :param audio_descriptor: (Optional) string describing the waveform - (e.g. filename). 
+    def _separate_tensorflow(
+        self, waveform: np.ndarray, audio_descriptor: AudioDescriptor
+    ) -> Dict:
         """
-        if self._params['stft_backend'] == 'tensorflow':
+        Performs source separation over the given waveform with tensorflow
+        backend.
+
+        Parameters:
+            waveform (numpy.ndarray):
+                Waveform to be separated (as a numpy array)
+            audio_descriptor (AudioDescriptor):
+
+        Returns:
+            Separated waveforms.
+        """
+        if not waveform.shape[-1] == 2:
+            waveform = to_stereo(waveform)
+        prediction_generator = self._get_prediction_generator()
+        # NOTE: update data in generator before performing separation.
+        self._data_generator.update_data(
+            {"waveform": waveform, "audio_id": np.array(audio_descriptor)}
+        )
+        # NOTE: perform separation.
+        prediction = next(prediction_generator)
+        prediction.pop("audio_id")
+        return prediction
+
+    def separate(
+        self, waveform: np.ndarray, audio_descriptor: Optional[str] = None
+    ) -> Dict:
+        """
+        Performs separation on a waveform.
+
+        Parameters:
+            waveform (numpy.ndarray):
+                Waveform to be separated (as a numpy array)
+            audio_descriptor (str):
+                (Optional) string describing the waveform (e.g. filename).
+        """
+        backend: str = self._params["stft_backend"]
+        if backend == STFTBackend.TENSORFLOW:
             return self._separate_tensorflow(waveform, audio_descriptor)
-        else:
+        elif backend == STFTBackend.LIBROSA:
             return self._separate_librosa(waveform, audio_descriptor)
+        raise ValueError(f"Unsupported STFT backend {backend}")

     def separate_to_file(
-            self,
-            audio_descriptor,
-            destination,
-            audio_adapter=get_default_audio_adapter(),
-            offset=0,
-            duration=600.,
-            codec='wav',
-            bitrate='128k',
-            filename_format='{filename}/{instrument}.{codec}',
-            synchronous=True):
-        """ Performs source separation and export result to file using
+        self,
+        audio_descriptor: AudioDescriptor,
+        destination: str,
+        audio_adapter: Optional[AudioAdapter] = None,
+        offset: int = 0,
+        duration: float = 600.0,
+        codec: Codec = Codec.WAV,
+        bitrate: str = "128k",
+        filename_format: str = "{filename}/{instrument}.{codec}",
+        synchronous: bool = True,
+    ) -> None:
+        """
+        Performs source separation and exports result to file using
         given audio adapter.

-        Filename format should be a Python formattable string that could use
-        following parameters : {instrument}, {filename}, {foldername} and
-        {codec}.
+        Filename format should be a Python formattable string that could
+        use the following parameters:

-        :param audio_descriptor: Describe song to separate, used by audio
-                                 adapter to retrieve and load audio data,
-                                 in case of file based audio adapter, such
-                                 descriptor would be a file path.
-        :param destination: Target directory to write output to.
-        :param audio_adapter: (Optional) Audio adapter to use for I/O.
-        :param offset: (Optional) Offset of loaded song.
-        :param duration: (Optional) Duration of loaded song
-                         (default: 600s).
-        :param codec: (Optional) Export codec.
-        :param bitrate: (Optional) Export bitrate.
-        :param filename_format: (Optional) Filename format.
-        :param synchronous: (Optional) True is should by synchronous.
+        - {instrument}
+        - {filename}
+        - {foldername}
+        - {codec}
+
+        Parameters:
+            audio_descriptor (AudioDescriptor):
+                Describe song to separate, used by audio adapter to
+                retrieve and load audio data, in case of file based
+                audio adapter, such descriptor would be a file path.
+            destination (str):
+                Target directory to write output to.
+            audio_adapter (Optional[AudioAdapter]):
+                (Optional) Audio adapter to use for I/O.
+            offset (int):
+                (Optional) Offset of loaded song.
+            duration (float):
+                (Optional) Duration of loaded song (default: 600s).
+            codec (Codec):
+                (Optional) Export codec.
+            bitrate (str):
+                (Optional) Export bitrate.
+            filename_format (str):
+                (Optional) Filename format.
+            synchronous (bool):
+                (Optional) `True` if it should be synchronous.
         """
-        waveform, sample_rate = audio_adapter.load(
+        if audio_adapter is None:
+            audio_adapter = AudioAdapter.default()
+        waveform, _ = audio_adapter.load(
             audio_descriptor,
             offset=offset,
             duration=duration,
-            sample_rate=self._sample_rate)
+            sample_rate=self._sample_rate,
+        )
         sources = self.separate(waveform, audio_descriptor)
         self.save_to_file(
             sources,
@@ -316,69 +388,78 @@ class Separator(object):
             codec,
             audio_adapter,
             bitrate,
-            synchronous)
+            synchronous,
+        )

     def save_to_file(
-            self,
-            sources,
-            audio_descriptor,
-            destination,
-            filename_format='{filename}/{instrument}.{codec}',
-            codec='wav',
-            audio_adapter=get_default_audio_adapter(),
-            bitrate='128k',
-            synchronous=True):
-        """ Export dictionary of sources to files.
-
-        :param sources:           Dictionary of sources to be exported. The
-                                  keys are the name of the instruments, and
-                                  the values are Nx2 numpy arrays containing
-                                  the corresponding intrument waveform, as
-                                  returned by the separate method
-        :param audio_descriptor:  Describe song to separate, used by audio
-                                  adapter to retrieve and load audio data,
-                                  in case of file based audio adapter, such
-                                  descriptor would be a file path.
-        :param destination:       Target directory to write output to.
-        :param filename_format:   (Optional) Filename format.
-        :param codec:             (Optional) Export codec.
-        :param audio_adapter:     (Optional) Audio adapter to use for I/O.
-        :param bitrate:           (Optional) Export bitrate.
-        :param synchronous:       (Optional) True is should by synchronous.
-
+        self,
+        sources: Dict,
+        audio_descriptor: AudioDescriptor,
+        destination: str,
+        filename_format: str = "{filename}/{instrument}.{codec}",
+        codec: Codec = Codec.WAV,
+        audio_adapter: Optional[AudioAdapter] = None,
+        bitrate: str = "128k",
+        synchronous: bool = True,
+    ) -> None:
         """
+        Export dictionary of sources to files.
+
+        Parameters:
+            sources (Dict):
+                Dictionary of sources to be exported. The keys are the name
+                of the instruments, and the values are `N x 2` numpy arrays
+                containing the corresponding instrument waveform, as
+                returned by the separate method
+            audio_descriptor (AudioDescriptor):
+                Describe song to separate, used by audio adapter to
+                retrieve and load audio data, in case of file based audio
+                adapter, such descriptor would be a file path.
+            destination (str):
+                Target directory to write output to.
+            filename_format (str):
+                (Optional) Filename format.
+            codec (Codec):
+                (Optional) Export codec.
+            audio_adapter (Optional[AudioAdapter]):
+                (Optional) Audio adapter to use for I/O.
+            bitrate (str):
+                (Optional) Export bitrate.
+            synchronous (bool):
+                (Optional) `True` if it should be synchronous.
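+
+        Examples:
+            A minimal sketch, assuming `separator` is an initialized
+            `Separator` instance and `waveform` a loaded stereo array
+            (the output directory is illustrative):
+            ```python
+            >>> sources = separator.separate(waveform)
+            >>> separator.save_to_file(sources, 'audio_example.mp3', '/tmp/separated')
+            ```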
+ """ + if audio_adapter is None: + audio_adapter = AudioAdapter.default() foldername = basename(dirname(audio_descriptor)) filename = splitext(basename(audio_descriptor))[0] generated = [] for instrument, data in sources.items(): - path = join(destination, filename_format.format( - filename=filename, - instrument=instrument, - foldername=foldername, - codec=codec, - )) + path = join( + destination, + filename_format.format( + filename=filename, + instrument=instrument, + foldername=foldername, + codec=codec, + ), + ) directory = os.path.dirname(path) if not os.path.exists(directory): os.makedirs(directory) if path in generated: - raise SpleeterError(( - f'Separated source path conflict : {path},' - 'please check your filename format')) + raise SpleeterError( + ( + f"Separated source path conflict : {path}," + "please check your filename format" + ) + ) generated.append(path) if self._pool: - task = self._pool.apply_async(audio_adapter.save, ( - path, - data, - self._sample_rate, - codec, - bitrate)) + task = self._pool.apply_async( + audio_adapter.save, (path, data, self._sample_rate, codec, bitrate) + ) self._tasks.append(task) else: - audio_adapter.save( - path, - data, - self._sample_rate, - codec, - bitrate) + audio_adapter.save(path, data, self._sample_rate, codec, bitrate) if synchronous and self._pool: self.join() diff --git a/spleeter/types.py b/spleeter/types.py new file mode 100644 index 0000000..0fa6f78 --- /dev/null +++ b/spleeter/types.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# coding: utf8 + +""" Custom types definition. """ + +from typing import Any, Tuple + +# pyright: reportMissingImports=false +# pylint: disable=import-error +import numpy as np + +# pylint: enable=import-error + +AudioDescriptor: type = Any +Signal: type = Tuple[np.ndarray, float] diff --git a/spleeter/utils/__init__.py b/spleeter/utils/__init__.py index 8828652..f2ef6d3 100644 --- a/spleeter/utils/__init__.py +++ b/spleeter/utils/__init__.py @@ -3,6 +3,6 @@ """ This package provides utility function and classes. """ -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" diff --git a/spleeter/utils/configuration.py b/spleeter/utils/configuration.py index 36f1043..ba6318b 100644 --- a/spleeter/utils/configuration.py +++ b/spleeter/utils/configuration.py @@ -3,45 +3,49 @@ """ Module that provides configuration loading function. """ +import importlib.resources as loader import json - -try: - import importlib.resources as loader -except ImportError: - # Try backported to PY<37 `importlib_resources`. - import importlib_resources as loader - from os.path import exists +from typing import Dict -from .. import resources, SpleeterError +from .. import SpleeterError, resources + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" + +_EMBEDDED_CONFIGURATION_PREFIX: str = "spleeter:" -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +def load_configuration(descriptor: str) -> Dict: + """ + Load configuration from the given descriptor. Could be either a + `spleeter:` prefixed embedded configuration name or a file system path + to read configuration from. -_EMBEDDED_CONFIGURATION_PREFIX = 'spleeter:' + Parameters: + descriptor (str): + Configuration descriptor to use for lookup. + Returns: + Dict: + Loaded description as dict. 
-def load_configuration(descriptor):
-    """ Load configuration from the given descriptor. Could be
-    either a `spleeter:` prefixed embedded configuration name
-    or a file system path to read configuration from.
-
-    :param descriptor: Configuration descriptor to use for lookup.
-    :returns: Loaded description as dict.
-    :raise ValueError: If required embedded configuration does not exists.
-    :raise SpleeterError: If required configuration file does not exists.
+    Raises:
+        ValueError:
+            If required embedded configuration does not exist.
+        SpleeterError:
+            If required configuration file does not exist.
     """
     # Embedded configuration reading.
     if descriptor.startswith(_EMBEDDED_CONFIGURATION_PREFIX):
-        name = descriptor[len(_EMBEDDED_CONFIGURATION_PREFIX):]
-        if not loader.is_resource(resources, f'{name}.json'):
-            raise SpleeterError(f'No embedded configuration {name} found')
-        with loader.open_text(resources, f'{name}.json') as stream:
+        name = descriptor[len(_EMBEDDED_CONFIGURATION_PREFIX) :]
+        if not loader.is_resource(resources, f"{name}.json"):
+            raise SpleeterError(f"No embedded configuration {name} found")
+        with loader.open_text(resources, f"{name}.json") as stream:
             return json.load(stream)
     # Standard file reading.
     if not exists(descriptor):
-        raise SpleeterError(f'Configuration file {descriptor} not found')
-    with open(descriptor, 'r') as stream:
+        raise SpleeterError(f"Configuration file {descriptor} not found")
+    with open(descriptor, "r") as stream:
         return json.load(stream)
diff --git a/spleeter/utils/estimator.py b/spleeter/utils/estimator.py
deleted file mode 100644
index aefc355..0000000
--- a/spleeter/utils/estimator.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-# coding: utf8
-
-""" Utility functions for creating estimator. """
-
-import tensorflow as tf  # pylint: disable=import-error
-
-from ..model import model_fn
-from ..model.provider import get_default_model_provider
-
-
-def get_default_model_dir(model_dir):
-    """
-    Transforms a string like 'spleeter:2stems' into an actual path.
-    :param model_dir:
-    :return:
-    """
-    model_provider = get_default_model_provider()
-    return model_provider.get(model_dir)
-
-
-def create_estimator(params, MWF):
-    """
-    Initialize tensorflow estimator that will perform separation
-
-    Params:
-    - params: a dictionary of parameters for building the model
-
-    Returns:
-        a tensorflow estimator
-    """
-    # Load model.
-    params['model_dir'] = get_default_model_dir(params['model_dir'])
-    params['MWF'] = MWF
-    # Setup config
-    session_config = tf.compat.v1.ConfigProto()
-    session_config.gpu_options.per_process_gpu_memory_fraction = 0.7
-    config = tf.estimator.RunConfig(session_config=session_config)
-    # Setup estimator
-    estimator = tf.estimator.Estimator(
-        model_fn=model_fn,
-        model_dir=params['model_dir'],
-        params=params,
-        config=config
-    )
-    return estimator
diff --git a/spleeter/utils/logging.py b/spleeter/utils/logging.py
index 6fee540..27ef34e 100644
--- a/spleeter/utils/logging.py
+++ b/spleeter/utils/logging.py
@@ -4,58 +4,53 @@

 """ Centralized logging facilities for Spleeter.
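+
+    Examples:
+        A usage sketch of the module level logger:
+        ```python
+        >>> from spleeter.utils.logging import configure_logger, logger
+        >>> configure_logger(verbose=False)
+        >>> logger.info('Separation done')
+        ```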
""" import logging - +import warnings from os import environ -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +# pyright: reportMissingImports=false +# pylint: disable=import-error +from typer import echo -_FORMAT = '%(levelname)s:%(name)s:%(message)s' +# pylint: enable=import-error + +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" + +environ["TF_CPP_MIN_LOG_LEVEL"] = "3" -class _LoggerHolder(object): - """ Logger singleton instance holder. """ +class TyperLoggerHandler(logging.Handler): + """ A custom logger handler that use Typer echo. """ - INSTANCE = None + def emit(self, record: logging.LogRecord) -> None: + echo(self.format(record)) -def get_tensorflow_logger(): +formatter = logging.Formatter("%(levelname)s:%(name)s:%(message)s") +handler = TyperLoggerHandler() +handler.setFormatter(formatter) +logger: logging.Logger = logging.getLogger("spleeter") +logger.addHandler(handler) +logger.setLevel(logging.INFO) + + +def configure_logger(verbose: bool) -> None: """ + Configure application logger. + + Parameters: + verbose (bool): + `True` to use verbose logger, `False` otherwise. """ - # pylint: disable=import-error - from tensorflow.compat.v1 import logging - # pylint: enable=import-error - return logging + from tensorflow import get_logger + from tensorflow.compat.v1 import logging as tf_logging - -def get_logger(): - """ Returns library scoped logger. - - :returns: Library logger. - """ - if _LoggerHolder.INSTANCE is None: - formatter = logging.Formatter(_FORMAT) - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logger = logging.getLogger('spleeter') - logger.addHandler(handler) - logger.setLevel(logging.INFO) - _LoggerHolder.INSTANCE = logger - return _LoggerHolder.INSTANCE - - -def enable_tensorflow_logging(): - """ Enable tensorflow logging. """ - environ['TF_CPP_MIN_LOG_LEVEL'] = '1' - tf_logger = get_tensorflow_logger() - tf_logger.set_verbosity(tf_logger.INFO) - logger = get_logger() - logger.setLevel(logging.DEBUG) - - -def enable_logging(): - """ Configure default logging. """ - environ['TF_CPP_MIN_LOG_LEVEL'] = '3' - tf_logger = get_tensorflow_logger() - tf_logger.set_verbosity(tf_logger.ERROR) + tf_logger = get_logger() + tf_logger.handlers = [handler] + if verbose: + tf_logging.set_verbosity(tf_logging.INFO) + logger.setLevel(logging.DEBUG) + else: + warnings.filterwarnings("ignore") + tf_logging.set_verbosity(tf_logging.ERROR) diff --git a/spleeter/utils/tensor.py b/spleeter/utils/tensor.py index cc5e7e8..d155845 100644 --- a/spleeter/utils/tensor.py +++ b/spleeter/utils/tensor.py @@ -3,43 +3,54 @@ """ Utility function for tensorflow. """ +from typing import Any, Callable, Dict + +import pandas as pd + +# pyright: reportMissingImports=false # pylint: disable=import-error import tensorflow as tf -import pandas as pd + # pylint: enable=import-error -__email__ = 'spleeter@deezer.com' -__author__ = 'Deezer Research' -__license__ = 'MIT License' +__email__ = "spleeter@deezer.com" +__author__ = "Deezer Research" +__license__ = "MIT License" -def sync_apply(tensor_dict, func, concat_axis=1): - """ Return a function that applies synchronously the provided func on the +def sync_apply( + tensor_dict: tf.Tensor, func: Callable, concat_axis: int = 1 +) -> Dict[str, tf.Tensor]: + """ + Return a function that applies synchronously the provided func on the provided dictionnary of tensor. This means that func is applied to the - concatenation of the tensors in tensor_dict. 
This is useful for performing - random operation that needs the same drawn value on multiple tensor, such - as a random time-crop on both input data and label (the same crop should be - applied to both input data and label, so random crop cannot be applied - separately on each of them). + concatenation of the tensors in tensor_dict. This is useful for + performing random operation that needs the same drawn value on multiple + tensor, such as a random time-crop on both input data and label (the + same crop should be applied to both input data and label, so random + crop cannot be applied separately on each of them). - IMPORTANT NOTE: all tensor are assumed to be the same shape. + Notes: + All tensor are assumed to be the same shape. - Params: - - tensor_dict: dictionary (key: strings, values: tf.tensor) - a dictionary of tensor. - - func: function - function to be applied to the concatenation of the tensors in - tensor_dict - - concat_axis: int - The axis on which to perform the concatenation. + Parameters: + tensor_dict (Dict[str, tensorflow.Tensor]): + A dictionary of tensor. + func (Callable): + Function to be applied to the concatenation of the tensors in + `tensor_dict`. + concat_axis (int): + The axis on which to perform the concatenation. - Returns: - processed tensors dictionary with the same name (keys) as input - tensor_dict. + Returns: + Dict[str, tensorflow.Tensor]: + Processed tensors dictionary with the same name (keys) as input + tensor_dict. """ if concat_axis not in {0, 1}: raise NotImplementedError( - 'Function only implemented for concat_axis equal to 0 or 1') + "Function only implemented for concat_axis equal to 0 or 1" + ) tensor_list = list(tensor_dict.values()) concat_tensor = tf.concat(tensor_list, concat_axis) processed_concat_tensor = func(concat_tensor) @@ -47,90 +58,104 @@ def sync_apply(tensor_dict, func, concat_axis=1): D = tensor_shape[concat_axis] if concat_axis == 0: return { - name: processed_concat_tensor[index * D:(index + 1) * D, :, :] + name: processed_concat_tensor[index * D : (index + 1) * D, :, :] for index, name in enumerate(tensor_dict) } return { - name: processed_concat_tensor[:, index * D:(index + 1) * D, :] + name: processed_concat_tensor[:, index * D : (index + 1) * D, :] for index, name in enumerate(tensor_dict) } def from_float32_to_uint8( - tensor, - tensor_key='tensor', - min_key='min', - max_key='max'): + tensor: tf.Tensor, + tensor_key: str = "tensor", + min_key: str = "min", + max_key: str = "max", +) -> tf.Tensor: """ - :param tensor: - :param tensor_key: - :param min_key: - :param max_key: - :returns: + Parameters: + tensor (tensorflow.Tensor): + tensor_key (str): + min_key (str): + max_key (str): + + Returns: + tensorflow.Tensor: """ tensor_min = tf.reduce_min(tensor) tensor_max = tf.reduce_max(tensor) return { tensor_key: tf.cast( - (tensor - tensor_min) / (tensor_max - tensor_min + 1e-16) - * 255.9999, dtype=tf.uint8), + (tensor - tensor_min) / (tensor_max - tensor_min + 1e-16) * 255.9999, + dtype=tf.uint8, + ), min_key: tensor_min, - max_key: tensor_max + max_key: tensor_max, } -def from_uint8_to_float32(tensor, tensor_min, tensor_max): +def from_uint8_to_float32( + tensor: tf.Tensor, tensor_min: tf.Tensor, tensor_max: tf.Tensor +) -> tf.Tensor: """ - :param tensor: - :param tensor_min: - :param tensor_max: - :returns: + Parameters: + tensor (tensorflow.Tensor): + tensor_min (tensorflow.Tensor): + tensor_max (tensorflow.Tensor): + + Returns: + tensorflow.Tensor: """ return ( - tf.cast(tensor, tf.float32) - * (tensor_max - 
tensor_min) - / 255.9999 + tensor_min) + tf.cast(tensor, tf.float32) * (tensor_max - tensor_min) / 255.9999 + tensor_min + ) -def pad_and_partition(tensor, segment_len): - """ Pad and partition a tensor into segment of len segment_len +def pad_and_partition(tensor: tf.Tensor, segment_len: int) -> tf.Tensor: + """ + Pad and partition a tensor into segment of len `segment_len` along the first dimension. The tensor is padded with 0 in order - to ensure that the first dimension is a multiple of segment_len. + to ensure that the first dimension is a multiple of `segment_len`. Tensor must be of known fixed rank - :Example: + Examples: - >>> tensor = [[1, 2, 3], [4, 5, 6]] - >>> segment_len = 2 - >>> pad_and_partition(tensor, segment_len) - [[[1, 2], [4, 5]], [[3, 0], [6, 0]]] + ```python + >>> tensor = [[1, 2, 3], [4, 5, 6]] + >>> segment_len = 2 + >>> pad_and_partition(tensor, segment_len) + [[[1, 2], [4, 5]], [[3, 0], [6, 0]]] + ```` - :param tensor: - :param segment_len: - :returns: + Parameters: + tensor (tensorflow.Tensor): + segment_len (int): + + Returns: + tensorflow.Tensor: """ tensor_size = tf.math.floormod(tf.shape(tensor)[0], segment_len) pad_size = tf.math.floormod(segment_len - tensor_size, segment_len) - padded = tf.pad( - tensor, - [[0, pad_size]] + [[0, 0]] * (len(tensor.shape)-1)) + padded = tf.pad(tensor, [[0, pad_size]] + [[0, 0]] * (len(tensor.shape) - 1)) split = (tf.shape(padded)[0] + segment_len - 1) // segment_len return tf.reshape( - padded, - tf.concat( - [[split, segment_len], tf.shape(padded)[1:]], - axis=0)) + padded, tf.concat([[split, segment_len], tf.shape(padded)[1:]], axis=0) + ) -def pad_and_reshape(instr_spec, frame_length, F): +def pad_and_reshape(instr_spec, frame_length, F) -> Any: """ - :param instr_spec: - :param frame_length: - :param F: - :returns: + Parameters: + instr_spec: + frame_length: + F: + + Returns: + Any: """ spec_shape = tf.shape(instr_spec) extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1])) @@ -138,53 +163,67 @@ def pad_and_reshape(instr_spec, frame_length, F): extension = tf.tile(extension_row, [1, 1, n_extra_row, 1]) extended_spec = tf.concat([instr_spec, extension], axis=2) old_shape = tf.shape(extended_spec) - new_shape = tf.concat([ - [old_shape[0] * old_shape[1]], - old_shape[2:]], - axis=0) + new_shape = tf.concat([[old_shape[0] * old_shape[1]], old_shape[2:]], axis=0) processed_instr_spec = tf.reshape(extended_spec, new_shape) return processed_instr_spec -def dataset_from_csv(csv_path, **kwargs): - """ Load dataset from a CSV file using Pandas. kwargs if any are +def dataset_from_csv(csv_path: str, **kwargs) -> Any: + """ + Load dataset from a CSV file using Pandas. kwargs if any are forwarded to the `pandas.read_csv` function. - :param csv_path: Path of the CSV file to load dataset from. - :returns: Loaded dataset. + Parameters: + csv_path (str): + Path of the CSV file to load dataset from. + + Returns: + Any: + Loaded dataset. """ df = pd.read_csv(csv_path, **kwargs) - dataset = ( - tf.data.Dataset.from_tensor_slices( - {key: df[key].values for key in df}) - ) + dataset = tf.data.Dataset.from_tensor_slices({key: df[key].values for key in df}) return dataset -def check_tensor_shape(tensor_tf, target_shape): - """ Return a Tensorflow boolean graph that indicates whether +def check_tensor_shape(tensor_tf: tf.Tensor, target_shape: Any) -> bool: + """ + Return a Tensorflow boolean graph that indicates whether sample[features_key] has the specified target shape. Only check not None entries of target_shape. 
- :param tensor_tf: Tensor to check shape for. - :param target_shape: Target shape to compare tensor to. - :returns: True if shape is valid, False otherwise (as TF boolean). + Parameters: + tensor_tf (tensorflow.Tensor): + Tensor to check shape for. + target_shape (Any): + Target shape to compare tensor to. + + Returns: + bool: + `True` if shape is valid, `False` otherwise (as TF boolean). """ result = tf.constant(True) for i, target_length in enumerate(target_shape): if target_length: result = tf.logical_and( - result, - tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i])) + result, tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]) + ) return result -def set_tensor_shape(tensor, tensor_shape): - """ Set shape for a tensor (not in place, as opposed to tf.set_shape) +def set_tensor_shape(tensor: tf.Tensor, tensor_shape: Any) -> tf.Tensor: + """ + Set shape for a tensor (not in place, as opposed to tf.set_shape) - :param tensor: Tensor to reshape. - :param tensor_shape: Shape to apply to the tensor. - :returns: A reshaped tensor. + Parameters: + tensor (tensorflow.Tensor): + Tensor to reshape. + tensor_shape (Any): + Shape to apply to the tensor. + + Returns: + tensorflow.Tensor: + A reshaped tensor. """ # NOTE: That SOUND LIKE IN PLACE HERE ? tensor.set_shape(tensor_shape) diff --git a/tests/test_eval.py b/tests/test_eval.py index f3764b6..9ff4e67 100644 --- a/tests/test_eval.py +++ b/tests/test_eval.py @@ -7,82 +7,82 @@ __email__ = 'spleeter@deezer.com' __author__ = 'Deezer Research' __license__ = 'MIT License' -import filecmp -import itertools from os import makedirs -from os.path import splitext, basename, exists, join +from os.path import join from tempfile import TemporaryDirectory import pytest import numpy as np -import tensorflow as tf +from spleeter.__main__ import evaluate +from spleeter.audio.adapter import AudioAdapter -from spleeter.audio.adapter import get_default_audio_adapter -from spleeter.commands import create_argument_parser - -from spleeter.commands import evaluate - -from spleeter.utils.configuration import load_configuration - -BACKENDS = ["tensorflow", "librosa"] -TEST_CONFIGURATIONS = {el:el for el in BACKENDS} +BACKENDS = ['tensorflow', 'librosa'] +TEST_CONFIGURATIONS = {el: el for el in BACKENDS} res_4stems = { - "vocals": { - "SDR": 3.25e-05, - "SAR": -11.153575, - "SIR": -1.3849, - "ISR": 2.75e-05 - }, - "drums": { - "SDR": -0.079505, - "SAR": -15.7073575, - "SIR": -4.972755, - "ISR": 0.0013575 - }, - "bass":{ - "SDR": 2.5e-06, - "SAR": -10.3520575, - "SIR": -4.272325, - "ISR": 2.5e-06 - }, - "other":{ - "SDR": -1.359175, - "SAR": -14.7076775, - "SIR": -4.761505, - "ISR": -0.01528 - } - } + 'vocals': { + 'SDR': 3.25e-05, + 'SAR': -11.153575, + 'SIR': -1.3849, + 'ISR': 2.75e-05 + }, + 'drums': { + 'SDR': -0.079505, + 'SAR': -15.7073575, + 'SIR': -4.972755, + 'ISR': 0.0013575 + }, + 'bass': { + 'SDR': 2.5e-06, + 'SAR': -10.3520575, + 'SIR': -4.272325, + 'ISR': 2.5e-06 + }, + 'other': { + 'SDR': -1.359175, + 'SAR': -14.7076775, + 'SIR': -4.761505, + 'ISR': -0.01528 + } +} + def generate_fake_eval_dataset(path): """ generate fake evaluation dataset """ - aa = get_default_audio_adapter() + aa = AudioAdapter.default() n_songs = 2 fs = 44100 duration = 3 n_channels = 2 rng = np.random.RandomState(seed=0) for song in range(n_songs): - song_path = join(path, "test", f"song{song}") + song_path = join(path, 'test', f'song{song}') makedirs(song_path, exist_ok=True) - for instr in ["mixture", "vocals", "bass", "drums", "other"]: - filename = 
join(song_path, f"{instr}.wav") + for instr in ['mixture', 'vocals', 'bass', 'drums', 'other']: + filename = join(song_path, f'{instr}.wav') data = rng.rand(duration*fs, n_channels)-0.5 aa.save(filename, data, fs) - @pytest.mark.parametrize('backend', TEST_CONFIGURATIONS) def test_evaluate(backend): - with TemporaryDirectory() as directory: - generate_fake_eval_dataset(directory) - p = create_argument_parser() - arguments = p.parse_args(["evaluate", "-p", "spleeter:4stems", "--mus_dir", directory, "-B", backend]) - params = load_configuration(arguments.configuration) - metrics = evaluate.entrypoint(arguments, params) - for instrument, metric in metrics.items(): - for m, value in metric.items(): - assert np.allclose(np.median(value), res_4stems[instrument][m], atol=1e-3) \ No newline at end of file + with TemporaryDirectory() as dataset: + with TemporaryDirectory() as evaluation: + generate_fake_eval_dataset(dataset) + metrics = evaluate( + adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter', + output_path=evaluation, + stft_backend=backend, + params_filename='spleeter:4stems', + mus_dir=dataset, + mwf=False, + verbose=False) + for instrument, metric in metrics.items(): + for m, value in metric.items(): + assert np.allclose( + np.median(value), + res_4stems[instrument][m], + atol=1e-3) diff --git a/tests/test_ffmpeg_adapter.py b/tests/test_ffmpeg_adapter.py index 8eb284a..9fe8e00 100644 --- a/tests/test_ffmpeg_adapter.py +++ b/tests/test_ffmpeg_adapter.py @@ -10,6 +10,11 @@ __license__ = 'MIT License' from os.path import join from tempfile import TemporaryDirectory +from spleeter import SpleeterError +from spleeter.audio.adapter import AudioAdapter +from spleeter.audio.ffmpeg import FFMPEGProcessAudioAdapter + +# pyright: reportMissingImports=false # pylint: disable=import-error from pytest import fixture, raises @@ -17,12 +22,6 @@ import numpy as np import ffmpeg # pylint: enable=import-error -from spleeter import SpleeterError -from spleeter.audio.adapter import AudioAdapter -from spleeter.audio.adapter import get_default_audio_adapter -from spleeter.audio.adapter import get_audio_adapter -from spleeter.audio.ffmpeg import FFMPEGProcessAudioAdapter - TEST_AUDIO_DESCRIPTOR = 'audio_example.mp3' TEST_OFFSET = 0 TEST_DURATION = 600. @@ -32,7 +31,7 @@ TEST_SAMPLE_RATE = 44100 @fixture(scope='session') def adapter(): """ Target test audio adapter fixture. """ - return get_default_audio_adapter() + return AudioAdapter.default() @fixture(scope='session') @@ -48,7 +47,7 @@ def audio_data(adapter): def test_default_adapter(adapter): """ Test adapter as default adapter. """ assert isinstance(adapter, FFMPEGProcessAudioAdapter) - assert adapter is AudioAdapter.DEFAULT + assert adapter is AudioAdapter._DEFAULT def test_load(audio_data): diff --git a/tests/test_github_model_provider.py b/tests/test_github_model_provider.py index 248b1d5..6313999 100644 --- a/tests/test_github_model_provider.py +++ b/tests/test_github_model_provider.py @@ -5,12 +5,12 @@ from pytest import raises -from spleeter.model.provider import get_default_model_provider +from spleeter.model.provider import ModelProvider def test_checksum(): """ Test archive checksum index retrieval. 
""" - provider = get_default_model_provider() + provider = ModelProvider.default() assert provider.checksum('2stems') == \ 'f3a90b39dd2874269e8b05a48a86745df897b848c61f3958efc80a39152bd692' assert provider.checksum('4stems') == \ diff --git a/tests/test_separator.py b/tests/test_separator.py index e757abf..947b037 100644 --- a/tests/test_separator.py +++ b/tests/test_separator.py @@ -17,7 +17,7 @@ import numpy as np import tensorflow as tf from spleeter import SpleeterError -from spleeter.audio.adapter import get_default_audio_adapter +from spleeter.audio.adapter import AudioAdapter from spleeter.separator import Separator TEST_AUDIO_DESCRIPTORS = ['audio_example.mp3', 'audio_example_mono.mp3'] @@ -41,7 +41,7 @@ print("RUNNING TESTS WITH TF VERSION {}".format(tf.__version__)) @pytest.mark.parametrize('test_file', TEST_AUDIO_DESCRIPTORS) def test_separator_backends(test_file): - adapter = get_default_audio_adapter() + adapter = AudioAdapter.default() waveform, _ = adapter.load(test_file) separator_lib = Separator( @@ -64,11 +64,13 @@ def test_separator_backends(test_file): assert np.allclose(out_tf[instrument], out_lib[instrument], atol=1e-5) -@pytest.mark.parametrize('test_file, configuration, backend', TEST_CONFIGURATIONS) +@pytest.mark.parametrize( + 'test_file, configuration, backend', + TEST_CONFIGURATIONS) def test_separate(test_file, configuration, backend): """ Test separation from raw data. """ instruments = MODEL_TO_INST[configuration] - adapter = get_default_audio_adapter() + adapter = AudioAdapter.default() waveform, _ = adapter.load(test_file) separator = Separator( configuration, stft_backend=backend, multiprocess=False) @@ -85,7 +87,9 @@ def test_separate(test_file, configuration, backend): assert not np.allclose(track, prediction[compared]) -@pytest.mark.parametrize('test_file, configuration, backend', TEST_CONFIGURATIONS) +@pytest.mark.parametrize( + 'test_file, configuration, backend', + TEST_CONFIGURATIONS) def test_separate_to_file(test_file, configuration, backend): """ Test file based separation. """ instruments = MODEL_TO_INST[configuration] @@ -102,7 +106,9 @@ def test_separate_to_file(test_file, configuration, backend): '{}/{}.wav'.format(name, instrument))) -@pytest.mark.parametrize('test_file, configuration, backend', TEST_CONFIGURATIONS) +@pytest.mark.parametrize( + 'test_file, configuration, backend', + TEST_CONFIGURATIONS) def test_filename_format(test_file, configuration, backend): """ Test custom filename format. """ instruments = MODEL_TO_INST[configuration] @@ -120,7 +126,9 @@ def test_filename_format(test_file, configuration, backend): 'export/{}/{}.wav'.format(name, instrument))) -@pytest.mark.parametrize('test_file, configuration', MODELS_AND_TEST_FILES) +@pytest.mark.parametrize( + 'test_file, configuration', + MODELS_AND_TEST_FILES) def test_filename_conflict(test_file, configuration): """ Test error handling with static pattern. 
""" separator = Separator(configuration, multiprocess=False) diff --git a/tests/test_train.py b/tests/test_train.py index 8d9533a..47ce747 100644 --- a/tests/test_train.py +++ b/tests/test_train.py @@ -7,107 +7,102 @@ __email__ = 'research@deezer.com' __author__ = 'Deezer Research' __license__ = 'MIT License' -import filecmp -import itertools +import json import os + from os import makedirs -from os.path import splitext, basename, exists, join +from os.path import join from tempfile import TemporaryDirectory import numpy as np import pandas as pd -import json -import tensorflow as tf +from spleeter.audio.adapter import AudioAdapter +from spleeter.__main__ import spleeter +from typer.testing import CliRunner -from spleeter.audio.adapter import get_default_audio_adapter -from spleeter.commands import create_argument_parser - -from spleeter.commands import train - -from spleeter.utils.configuration import load_configuration TRAIN_CONFIG = { - "mix_name": "mix", - "instrument_list": ["vocals", "other"], - "sample_rate":44100, - "frame_length":4096, - "frame_step":1024, - "T":128, - "F":128, - "n_channels":2, - "chunk_duration":4, - "n_chunks_per_song":1, - "separation_exponent":2, - "mask_extension":"zeros", - "learning_rate": 1e-4, - "batch_size":2, - "train_max_steps": 10, - "throttle_secs":20, - "save_checkpoints_steps":100, - "save_summary_steps":5, - "random_seed":0, - "model":{ - "type":"unet.unet", - "params":{ - "conv_activation":"ELU", - "deconv_activation":"ELU" + 'mix_name': 'mix', + 'instrument_list': ['vocals', 'other'], + 'sample_rate': 44100, + 'frame_length': 4096, + 'frame_step': 1024, + 'T': 128, + 'F': 128, + 'n_channels': 2, + 'chunk_duration': 4, + 'n_chunks_per_song': 1, + 'separation_exponent': 2, + 'mask_extension': 'zeros', + 'learning_rate': 1e-4, + 'batch_size': 2, + 'train_max_steps': 10, + 'throttle_secs': 20, + 'save_checkpoints_steps': 100, + 'save_summary_steps': 5, + 'random_seed': 0, + 'model': { + 'type': 'unet.unet', + 'params': { + 'conv_activation': 'ELU', + 'deconv_activation': 'ELU' } } } -def generate_fake_training_dataset(path, instrument_list=["vocals", "other"]): +def generate_fake_training_dataset(path, instrument_list=['vocals', 'other']): """ generates a fake training dataset in path: - generates audio files - generates a csv file describing the dataset """ - aa = get_default_audio_adapter() + aa = AudioAdapter.default() n_songs = 2 fs = 44100 duration = 6 n_channels = 2 rng = np.random.RandomState(seed=0) - dataset_df = pd.DataFrame(columns=["mix_path"]+[f"{instr}_path" for instr in instrument_list]+["duration"]) + dataset_df = pd.DataFrame( + columns=['mix_path'] + [ + f'{instr}_path' for instr in instrument_list] + ['duration']) for song in range(n_songs): - song_path = join(path, "train", f"song{song}") + song_path = join(path, 'train', f'song{song}') makedirs(song_path, exist_ok=True) - dataset_df.loc[song, f"duration"] = duration - for instr in instrument_list+["mix"]: - filename = join(song_path, f"{instr}.wav") + dataset_df.loc[song, f'duration'] = duration + for instr in instrument_list+['mix']: + filename = join(song_path, f'{instr}.wav') data = rng.rand(duration*fs, n_channels)-0.5 aa.save(filename, data, fs) - dataset_df.loc[song, f"{instr}_path"] = join("train", f"song{song}", f"{instr}.wav") - - dataset_df.to_csv(join(path, "train", "train.csv"), index=False) - + dataset_df.loc[song, f'{instr}_path'] = join( + 'train', + f'song{song}', + f'{instr}.wav') + dataset_df.to_csv(join(path, 'train', 'train.csv'), index=False) def 
test_train(): - - with TemporaryDirectory() as path: - # generate training dataset generate_fake_training_dataset(path) - # set training command aruments - p = create_argument_parser() - arguments = p.parse_args(["train", "-p", "useless_config.json", "-d", path]) - TRAIN_CONFIG["train_csv"] = join(path, "train", "train.csv") - TRAIN_CONFIG["validation_csv"] = join(path, "train", "train.csv") - TRAIN_CONFIG["model_dir"] = join(path, "model") - TRAIN_CONFIG["training_cache"] = join(path, "cache", "training") - TRAIN_CONFIG["validation_cache"] = join(path, "cache", "validation") - + runner = CliRunner() + TRAIN_CONFIG['train_csv'] = join(path, 'train', 'train.csv') + TRAIN_CONFIG['validation_csv'] = join(path, 'train', 'train.csv') + TRAIN_CONFIG['model_dir'] = join(path, 'model') + TRAIN_CONFIG['training_cache'] = join(path, 'cache', 'training') + TRAIN_CONFIG['validation_cache'] = join(path, 'cache', 'validation') + with open('useless_config.json', 'w') as stream: + json.dump(TRAIN_CONFIG, stream) # execute training - res = train.entrypoint(arguments, TRAIN_CONFIG) - + result = runner.invoke(spleeter, [ + 'train', + '-p', 'useless_config.json', + '-d', path + ]) # assert that model checkpoint was created. - assert os.path.exists(join(path,'model','model.ckpt-10.index')) - assert os.path.exists(join(path,'model','checkpoint')) - assert os.path.exists(join(path,'model','model.ckpt-0.meta')) - -if __name__=="__main__": - test_train() \ No newline at end of file + assert os.path.exists(join(path, 'model', 'model.ckpt-10.index')) + assert os.path.exists(join(path, 'model', 'checkpoint')) + assert os.path.exists(join(path, 'model', 'model.ckpt-0.meta')) + assert result.exit_code == 0