test(core): extend coverage — console, errors, log, service, time, benchmark
Add missing test modules for previously untested core areas: - console: ForegroundColorEnum, BackgroundColorEnum, Console methods - errors: dependency_error, module_dependency_error - log: LogLevel ordering/values, LogSettings, Logger (should_log, format, file write, fatal) - service: HostedService, StartupTask, CronjobABC (start/stop/loop/task cancellation) - time: TimeFormatSettings properties and setters - utils: Benchmark.time / .memory / .all call-count and output Also fix existing test files: environment cleanup, cron exception specificity, json_processor kwargs bug doc, configuration_model_abc to_dict bug doc. All 199 tests pass, black clean. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
60
test/core/utils/benchmark_test.py
Normal file
60
test/core/utils/benchmark_test.py
Normal file
@@ -0,0 +1,60 @@
|
||||
import pytest
|
||||
from cpl.core.utils.benchmark import Benchmark
|
||||
|
||||
|
||||
def noop():
    """No-op callable used as the benchmark target throughout these tests."""
    return None
|
||||
|
||||
|
||||
def test_benchmark_time_does_not_raise(capsys):
    """Benchmark.time runs cleanly and prints the label plus min/avg stats."""
    Benchmark.time("noop", noop, iterations=3)
    out = capsys.readouterr().out
    for token in ("noop", "min", "avg"):
        assert token in out
|
||||
|
||||
|
||||
def test_benchmark_memory_does_not_raise(capsys):
    """Benchmark.memory runs cleanly and prints the label plus a mem figure."""
    Benchmark.memory("noop_mem", noop, iterations=3)
    out = capsys.readouterr().out
    for token in ("noop_mem", "mem"):
        assert token in out
|
||||
|
||||
|
||||
def test_benchmark_all_does_not_raise(capsys):
    """Benchmark.all runs cleanly and prints the label, timing and memory stats."""
    Benchmark.all("noop_all", noop, iterations=3)
    out = capsys.readouterr().out
    for token in ("noop_all", "min", "mem"):
        assert token in out
|
||||
|
||||
|
||||
def test_benchmark_time_calls_func():
    """Benchmark.time must invoke the measured function once per iteration."""
    invocations = []

    def probe():
        invocations.append(None)

    Benchmark.time("tracked", probe, iterations=4)
    assert len(invocations) == 4
|
||||
|
||||
|
||||
def test_benchmark_memory_calls_func():
    """Benchmark.memory must invoke the measured function once per iteration."""
    invocations = []

    def probe():
        invocations.append(None)

    Benchmark.memory("tracked_mem", probe, iterations=3)
    assert len(invocations) == 3
|
||||
|
||||
|
||||
def test_benchmark_all_calls_func_twice_per_iteration():
    """Benchmark.all runs both a time pass and a memory pass over the function.

    Each pass invokes the function once per iteration, so the total call
    count is 2 * iterations.
    """
    invocations = []

    def probe():
        invocations.append(None)

    Benchmark.all("tracked_all", probe, iterations=2)
    assert len(invocations) == 4
|
||||
Reference in New Issue
Block a user