return proc.returncode, ''.join(output)
-def read_stats(stats_path, stats: dict):
- """Read statistics from a file and extend provided statistics"""
- with open(stats_path, "r") as file:
- parsed_data = yaml.safe_load(file)
- return dict((step_name, stats.get(step_name, []) + [duration])
- for step_name, duration in parsed_data.items())
-
-
def aggregate_stats(stats: dict):
"""Aggregate provided statistics"""
    return {step_name: {"avg": statistics.mean(duration_list),
                        "stdev": statistics.stdev(duration_list) if len(duration_list) > 1 else 0}
            for step_name, duration_list in stats.items()}
-def write_aggregated_stats(stats_path, stats: dict):
- """Write aggregated statistics to a file in YAML format"""
- with open(stats_path, "w") as file:
- yaml.safe_dump(stats, file)
-
-
def prepare_executable_cmd(args: dict):
"""Generate common part of cmd from arguments to execute"""
    return [str(args["executable"].resolve(strict=True)),
            "-m", str(args["model"].resolve(strict=True)),
            "-d", args["device"]]
"Statistics aggregation is skipped.".format(args["executable"], retcode, msg))
return retcode, {}
- stats = read_stats(tmp_stats_path, stats)
+ # Read raw statistics
+ with open(tmp_stats_path, "r") as file:
+ raw_data = yaml.safe_load(file)
+ log.debug("Raw statistics after run of executable #{}: {}".format(run_iter, raw_data))
+
+ # Combine statistics from several runs
+    stats = {step_name: stats.get(step_name, []) + [duration]
+             for step_name, duration in raw_data.items()}
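+    # e.g. stats == {"step_name": [duration_run_1, duration_run_2, ...]} (illustrative step name)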
# Aggregate results
aggregated_stats = aggregate_stats(stats)
+ log.debug("Aggregated statistics after full run: {}".format(aggregated_stats))
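+    # e.g. aggregated_stats == {"step_name": {"avg": ..., "stdev": ...}} (illustrative)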
return 0, aggregated_stats
if args.stats_path:
# Save aggregated results to a file
- write_aggregated_stats(args.stats_path, aggr_stats)
+ with open(args.stats_path, "w") as file:
+ yaml.safe_dump(aggr_stats, file)
logging.info("Aggregated statistics saved to a file: '{}'".format(
args.stats_path.resolve()))
    else:
        logging.info("Aggregated statistics:\n{}".format(yaml.safe_dump(aggr_stats)))
import hashlib
import shutil
import logging
+import tempfile
from test_runner.utils import upload_timetest_data, \
DATABASE, DB_COLLECTIONS
@pytest.fixture(scope="function")
+def temp_dir(pytestconfig):
+    """Create a temporary directory for test purposes.
+ It will be cleaned up after every test run.
+ """
+ temp_dir = tempfile.TemporaryDirectory()
+ yield Path(temp_dir.name)
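+    # Explicit cleanup() removes the directory once the test body has finished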
+ temp_dir.cleanup()
+
+
+@pytest.fixture(scope="function")
def cl_cache_dir(pytestconfig):
"""Generate directory to save OpenCL cache before test run and clean up after run.
from pathlib import Path
import logging
import os
+import shutil
from scripts.run_timetest import run_timetest
from test_runner.utils import expand_env_vars
REFS_FACTOR = 1.2 # 120%
-def test_timetest(instance, executable, niter, cl_cache_dir, test_info):
+def test_timetest(instance, executable, niter, cl_cache_dir, test_info, temp_dir):
"""Parameterized test.
:param instance: test instance. Should not be changed during test run
:param executable: timetest executable to run
:param niter: number of times to run executable
+ :param cl_cache_dir: directory to store OpenCL cache
:param test_info: custom `test_info` field of built-in `request` pytest fixture
+ :param temp_dir: path to a temporary directory. Will be cleaned up after test run
"""
# Prepare model to get model_path
model_path = instance["model"].get("path")
assert model_path, "Model path is empty"
+ model_path = Path(expand_env_vars(model_path))
+
+ # Copy model to a local temporary directory
+ model_dir = temp_dir / "model"
+ shutil.copytree(model_path.parent, model_dir)
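+    # copytree copies the whole model folder, so companion files stored next to the model (e.g. weights) come along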
+ model_path = model_dir / model_path.name
# Run executable
exe_args = {
"executable": Path(executable),
- "model": Path(expand_env_vars(model_path)),
+        "model": model_path,
"device": instance["device"]["name"],
"niter": niter
}