fix evaluation testing

Author: Faylixe
Date:   2020-12-11 13:05:49 +01:00
parent 944ad7cb87
commit 9a248653b7

@@ -69,19 +69,20 @@ def generate_fake_eval_dataset(path):
 @pytest.mark.parametrize('backend', TEST_CONFIGURATIONS)
 def test_evaluate(backend):
-    with TemporaryDirectory() as directory:
-        generate_fake_eval_dataset(directory)
-        metrics = evaluate(
-            adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter',
-            output_path='eval',
-            stft_backend=backend,
-            params_filename='spleeter:4stems',
-            mus_dir=directory,
-            mwf=False,
-            verbose=False)
-        for instrument, metric in metrics.items():
-            for m, value in metric.items():
-                assert np.allclose(
-                    np.median(value),
-                    res_4stems[instrument][m],
-                    atol=1e-3)
+    with TemporaryDirectory() as dataset:
+        with TemporaryDirectory() as evaluation:
+            generate_fake_eval_dataset(dataset)
+            metrics = evaluate(
+                adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter',
+                output_path=evaluation,
+                stft_backend=backend,
+                params_filename='spleeter:4stems',
+                mus_dir=dataset,
+                mwf=False,
+                verbose=False)
+            for instrument, metric in metrics.items():
+                for m, value in metric.items():
+                    assert np.allclose(
+                        np.median(value),
+                        res_4stems[instrument][m],
+                        atol=1e-3)
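
For context, a minimal sketch of the pattern the changed test adopts: the fake dataset and the evaluation output each get their own TemporaryDirectory, so the test no longer writes to the hard-coded 'eval' path and cleans up after itself. The wrapper name run_isolated_evaluation is hypothetical; the evaluate call and its arguments mirror the diff above, and generate_fake_eval_dataset is assumed to be the helper defined earlier in the same test module.

from tempfile import TemporaryDirectory

def run_isolated_evaluation(backend):
    # Hypothetical wrapper; both directories are removed automatically when
    # the with-blocks exit, so no 'eval' folder is left in the working tree.
    with TemporaryDirectory() as dataset:
        with TemporaryDirectory() as evaluation:
            generate_fake_eval_dataset(dataset)   # fake musDB-style input
            return evaluate(
                adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter',
                output_path=evaluation,           # was the hard-coded 'eval'
                stft_backend=backend,
                params_filename='spleeter:4stems',
                mus_dir=dataset,
                mwf=False,
                verbose=False)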