Fix evaluation testing

This commit is contained in:
Faylixe
2020-12-11 13:05:49 +01:00
parent 944ad7cb87
commit 9a248653b7

View File

@@ -69,14 +69,15 @@ def generate_fake_eval_dataset(path):
@pytest.mark.parametrize('backend', TEST_CONFIGURATIONS)
def test_evaluate(backend):
with TemporaryDirectory() as directory:
generate_fake_eval_dataset(directory)
with TemporaryDirectory() as dataset:
with TemporaryDirectory() as evaluation:
generate_fake_eval_dataset(dataset)
metrics = evaluate(
adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter',
output_path='eval',
output_path=evaluation,
stft_backend=backend,
params_filename='spleeter:4stems',
mus_dir=directory,
mus_dir=dataset,
mwf=False,
verbose=False)
for instrument, metric in metrics.items():