From 3fcc4ea28f024db77e0be2c8481372ca9c609ddc Mon Sep 17 00:00:00 2001
From: romi1502
Date: Fri, 24 Jul 2020 15:02:34 +0200
Subject: [PATCH] Added eval test for both backends

---
 tests/test_eval.py | 97 +++++++++++++++++++++++++++++++-------------
 1 file changed, 65 insertions(+), 32 deletions(-)

diff --git a/tests/test_eval.py b/tests/test_eval.py
index 6a3634b..82e0034 100644
--- a/tests/test_eval.py
+++ b/tests/test_eval.py
@@ -25,33 +25,64 @@ from spleeter.commands import evaluate
 from spleeter.utils.configuration import load_configuration
 
 
-res_4stems = { "vocals": {
-        "SDR": -0.007,
-        "SAR": -19.231,
-        "SIR": -4.528,
-        "ISR": 0.000
+BACKENDS = ["tensorflow", "librosa"]
+TEST_CONFIGURATIONS = {el: el for el in BACKENDS}
+
+res_4stems = {
+    "librosa": {
+        "vocals": {
+            "SDR": -0.007,
+            "SAR": -19.231,
+            "SIR": -4.528,
+            "ISR": 0.000
+        },
+        "drums": {
+            "SDR": -0.071,
+            "SAR": -14.496,
+            "SIR": -4.987,
+            "ISR": 0.001
+        },
+        "bass": {
+            "SDR": -0.001,
+            "SAR": -12.426,
+            "SIR": -7.198,
+            "ISR": -0.001
+        },
+        "other": {
+            "SDR": -1.453,
+            "SAR": -14.899,
+            "SIR": -4.678,
+            "ISR": -0.015
+        }
     },
-    "drums": {
-        "SDR": -0.071,
-        "SAR": -14.496,
-        "SIR": -4.987,
-        "ISR": 0.001
-    },
-    "bass":{
-        "SDR": -0.001,
-        "SAR": -12.426,
-        "SIR": -7.198,
-        "ISR": -0.001
-    },
-    "other":{
-        "SDR": -1.453,
-        "SAR": -14.899,
-        "SIR": -4.678,
-        "ISR": -0.015
+    "tensorflow": {
+        "vocals": {
+            "SDR": 3.25e-05,
+            "SAR": -11.153575,
+            "SIR": -1.3849,
+            "ISR": 2.75e-05
+        },
+        "drums": {
+            "SDR": -0.079505,
+            "SAR": -15.7073575,
+            "SIR": -4.972755,
+            "ISR": 0.0013575
+        },
+        "bass": {
+            "SDR": 2.5e-06,
+            "SAR": -10.3520575,
+            "SIR": -4.272325,
+            "ISR": 2.5e-06
+        },
+        "other": {
+            "SDR": -1.359175,
+            "SAR": -14.7076775,
+            "SIR": -4.761505,
+            "ISR": -0.01528
+        }
     }
 }
 
-
 def generate_fake_eval_dataset(path):
     aa = get_default_audio_adapter()
     n_songs = 2
@@ -68,12 +99,14 @@ def generate_fake_eval_dataset(path):
         aa.save(filename, data, fs)
 
 
-def test_evaluate(path="FAKE_MUSDB_DIR"):
-    generate_fake_eval_dataset(path)
-    p = create_argument_parser()
-    arguments = p.parse_args(["evaluate", "-p", "spleeter:4stems", "--mus_dir", path])
-    params = load_configuration(arguments.configuration)
-    metrics = evaluate.entrypoint(arguments, params)
-    for instrument, metric in metrics.items():
-        for metric, value in metric.items():
-            assert np.allclose(np.median(value), res_4stems[instrument][metric], atol=1e-3)
\ No newline at end of file
+@pytest.mark.parametrize('backend', TEST_CONFIGURATIONS)
+def test_evaluate(backend):
+    with TemporaryDirectory() as directory:
+        generate_fake_eval_dataset(directory)
+        p = create_argument_parser()
+        arguments = p.parse_args(["evaluate", "-p", "spleeter:4stems", "--mus_dir", directory, "-B", backend])
+        params = load_configuration(arguments.configuration)
+        metrics = evaluate.entrypoint(arguments, params)
+        for instrument, instrument_metrics in metrics.items():
+            for metric, value in instrument_metrics.items():
+                assert np.allclose(np.median(value), res_4stems[backend][instrument][metric], atol=1e-3)
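
A note on the parametrization in this patch: pytest.mark.parametrize accepts any iterable of argument values, and iterating a dict yields its keys, so TEST_CONFIGURATIONS = {el: el for el in BACKENDS} runs test_evaluate once per backend name ("tensorflow" and "librosa"). The test body also relies on pytest and tempfile.TemporaryDirectory being imported at the top of tests/test_eval.py; those import lines fall outside the hunks shown here, so treat that as an assumption. Below is a minimal self-contained sketch of the same pattern, with an illustrative test name and assertions that are not part of the patch:

    import os
    from tempfile import TemporaryDirectory

    import pytest

    BACKENDS = ["tensorflow", "librosa"]
    # Iterating this dict yields its keys: one parametrized case per backend.
    TEST_CONFIGURATIONS = {el: el for el in BACKENDS}


    @pytest.mark.parametrize('backend', TEST_CONFIGURATIONS)
    def test_backend_in_tempdir(backend):
        # Each case gets a fresh scratch directory, deleted when the block exits.
        with TemporaryDirectory() as directory:
            assert backend in BACKENDS
            assert os.path.isdir(directory)

Run with pytest -v and each key shows up as its own case, e.g. test_backend_in_tempdir[tensorflow] and test_backend_in_tempdir[librosa].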