fix evaluation testing

This commit is contained in:
Faylixe
2020-12-11 13:05:49 +01:00
parent 944ad7cb87
commit 9a248653b7


@@ -69,14 +69,15 @@ def generate_fake_eval_dataset(path):
 @pytest.mark.parametrize('backend', TEST_CONFIGURATIONS)
 def test_evaluate(backend):
-    with TemporaryDirectory() as directory:
-        generate_fake_eval_dataset(directory)
-        metrics = evaluate(
-            adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter',
-            output_path='eval',
-            stft_backend=backend,
-            params_filename='spleeter:4stems',
-            mus_dir=directory,
-            mwf=False,
-            verbose=False)
-        for instrument, metric in metrics.items():
+    with TemporaryDirectory() as dataset:
+        with TemporaryDirectory() as evaluation:
+            generate_fake_eval_dataset(dataset)
+            metrics = evaluate(
+                adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter',
+                output_path=evaluation,
+                stft_backend=backend,
+                params_filename='spleeter:4stems',
+                mus_dir=dataset,
+                mwf=False,
+                verbose=False)
+            for instrument, metric in metrics.items():
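
For context, a minimal sketch of the pattern this change adopts: both the input fixture and the produced output live in their own TemporaryDirectory, so the test no longer writes a hardcoded 'eval' folder into the working tree and both directories are cleaned up automatically. The helper names (make_fixture, run_job) are hypothetical stand-ins, not part of Spleeter.

    # Sketch only: isolate test input and output in separate temp directories.
    import os
    from tempfile import TemporaryDirectory


    def make_fixture(path):
        # Stand-in for generate_fake_eval_dataset: write a dummy input file.
        with open(os.path.join(path, 'input.txt'), 'w') as stream:
            stream.write('fake dataset')


    def run_job(input_dir, output_dir):
        # Stand-in for evaluate(): read the input, emit output, return metrics.
        with open(os.path.join(output_dir, 'result.txt'), 'w') as stream:
            stream.write('processed %s' % os.listdir(input_dir))
        return {'vocals': {'SDR': 0.0}}


    def test_job_runs_in_isolated_directories():
        with TemporaryDirectory() as dataset:
            with TemporaryDirectory() as evaluation:
                make_fixture(dataset)
                metrics = run_job(dataset, evaluation)
                assert 'vocals' in metrics

Both TemporaryDirectory contexts delete their contents when the test exits, whether it passes or fails, which keeps repeated CI runs from polluting the repository checkout.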