From 2762362c42847fd506cc9e75fa9e7f24d7e3fa51 Mon Sep 17 00:00:00 2001
From: Alexander Senier
Date: Sat, 10 Feb 2024 20:34:21 +0100
Subject: [PATCH] Refactor coverage and corpus into common state

Closes #15
---
 CHANGELOG.md              |   7 ++
 cobrafuzz/corpus.py       |  48 ------------
 cobrafuzz/fuzzer.py       |  71 ++++++++-----------
 cobrafuzz/state.py        | 100 +++++++++++++++++++++++++++
 tests/unit/test_corpus.py | 140 --------------------------------------
 tests/unit/test_fuzzer.py |  14 ++++
 tests/unit/test_state.py  | 108 +++++++++++++++++++++++++++
 tests/unit/test_util.py   |  62 +++++++++++++++++
 8 files changed, 320 insertions(+), 230 deletions(-)
 create mode 100644 cobrafuzz/state.py
 create mode 100644 tests/unit/test_state.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6092815..fda0311 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [Unreleased]
+
+### Added
+
+- Saving fuzzer state to file (#15)
+
 ## [2.1.0] - 2024-02-10
 
 ### Added
@@ -66,6 +72,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Rename to cobrafuzz
 - Enable GitHub CI
 
+[Unreleased]: https://github.com/senier/cobrafuzz/compare/v2.1.0...main
 [2.1.0]: https://github.com/senier/cobrafuzz/compare/v2.0.0...v2.1.0
 [2.0.0]: https://github.com/senier/cobrafuzz/compare/v1.0.12...v2.0.0
 [1.0.12]: https://github.com/senier/cobrafuzz/compare/v1.0.11...v1.0.12
diff --git a/cobrafuzz/corpus.py b/cobrafuzz/corpus.py
index bebac66..8eea447 100644
--- a/cobrafuzz/corpus.py
+++ b/cobrafuzz/corpus.py
@@ -1,10 +1,8 @@
 from __future__ import annotations
 
-import hashlib
 import secrets
 import struct
 from dataclasses import dataclass
-from pathlib import Path
 from typing import Optional
 
 from . import util
@@ -207,49 +205,3 @@ def mutate(buf: bytearray, max_input_size: Optional[int] = None) -> bytearray:
     if max_input_size and len(res) > max_input_size:
         res = res[:max_input_size]
     return res
-
-
-class Corpus:
-    def __init__(
-        self,
-        seeds: Optional[list[Path]] = None,
-        max_input_size: int = 4096,
-        save_dir: Optional[Path] = None,
-    ):
-        self._max_input_size = max_input_size
-        self._save_dir = save_dir
-        self._seeds = seeds or []
-
-        self._inputs: list[bytearray] = []
-        for path in [p for p in self._seeds if p.is_file()] + [
-            f for p in self._seeds if not p.is_file() for f in p.glob("*") if f.is_file()
-        ]:
-            with path.open("rb") as f:
-                self._inputs.append(bytearray(f.read()))
-        if not self._inputs:
-            self._inputs.append(bytearray(0))
-
-    @property
-    def length(self) -> int:
-        return len(self._inputs)
-
-    def put(self, buf: bytearray) -> None:
-        self._inputs.append(buf)
-
-    def save(self) -> None:
-        if not self._save_dir:
-            return
-
-        if not self._save_dir.exists():
-            self._save_dir.mkdir()
-
-        for buf in self._inputs:
-            fname = self._save_dir / hashlib.sha256(buf).hexdigest()
-            with fname.open("wb") as f:
-                f.write(buf)
-
-    def generate_input(self) -> bytearray:
-        return mutate(
-            buf=self._inputs[util.rand(len(self._inputs))],
-            max_input_size=self._max_input_size,
-        )
diff --git a/cobrafuzz/fuzzer.py b/cobrafuzz/fuzzer.py
index 2042c86..fdeea0c 100644
--- a/cobrafuzz/fuzzer.py
+++ b/cobrafuzz/fuzzer.py
@@ -12,7 +12,7 @@
 
 import dill as pickle  # type: ignore[import-untyped]
 
-from cobrafuzz import corpus, tracer
+from cobrafuzz import state as st, tracer
 
 logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
 logging.getLogger().setLevel(logging.DEBUG)
@@ -21,25 +21,6 @@
 MPProcess = Union[mp.context.ForkProcess, mp.context.ForkServerProcess, mp.context.SpawnProcess]
 
 
-class Coverage:
-    def __init__(self) -> None:
-        self._covered: set[tuple[Optional[str], Optional[int], str, int]] = set()
-
-    def store_and_check_improvement(
-        self,
-        data: set[tuple[Optional[str], Optional[int], str, int]],
-    ) -> bool:
-        covered = len(self._covered)
-        self._covered |= data
-        if len(self._covered) > covered:
-            return True
-        return False
-
-    @property
-    def total(self) -> int:
-        return len(self._covered)
-
-
 @dataclass
 class Update:
     data: bytes
@@ -89,9 +70,8 @@ def worker(  # noqa: PLR0913
     result_queue: mp.Queue[Status],
     close_stdout: bool,
     close_stderr: bool,
-    max_input_size: int,
     stat_frequency: int,
-    seeds: list[Path],
+    state: st.State,
 ) -> None:
     class NullFile(io.StringIO):
         """No-op to trash stdout away."""
@@ -109,9 +89,6 @@ def write(self, arg: str) -> int:
     runs = 0
     last_status = time.time()
 
-    corp = corpus.Corpus(seeds=seeds, max_input_size=max_input_size)
-    cov = Coverage()
-
     target = cast(Callable[[bytes], None], pickle.loads(target_bytes))  # noqa: S301
 
     tracer.initialize()
@@ -121,11 +98,11 @@ def write(self, arg: str) -> int:
 
         while not update_queue.empty():
            update = update_queue.get()
-            cov.store_and_check_improvement(update.covered)
-            corp.put(bytearray(update.data))
+            state.store_coverage(update.covered)
+            state.put_input(bytearray(update.data))
 
         runs += 1
-        data = corp.generate_input()
+        data = state.get_input()
 
         try:
             target(data)
@@ -134,7 +111,7 @@ def write(self, arg: str) -> int:
                 runs = 0
                 last_status = time.time()
         else:
-            new_path = cov.store_and_check_improvement(data=tracer.get_covered())
+            new_path = state.store_coverage(data=tracer.get_covered())
             if new_path:
                 result_queue.put(
                     Report(wid=wid, runs=runs, data=data, covered=tracer.get_covered()),
@@ -163,6 +140,7 @@ def __init__(  # noqa: PLR0913, ref:#2
         regression: bool = False,
         seeds: Optional[list[Path]] = None,
         start_method: Optional[str] = None,
+        state_file: Optional[Path] = None,
     ):
         """
        Fuzz-test target and store crash artifacts into crash_dir.
@@ -186,6 +164,8 @@ def __init__(  # noqa: PLR0913, ref:#2
             start_method: Multiprocessing start method to use (spawn, forkserver or fork).
                 Defaults to "spawn". Do not use "fork" as it is unreliable and may
                 lead to deadlocks.
+            state_file: File to load state from. Will be updated periodically. If no file is
+                specified, the state will be held in memory and discarded on exit.
         """
 
         self._current_crashes = 0
@@ -216,6 +196,7 @@ def __init__(  # noqa: PLR0913, ref:#2
         self._max_time = max_time
         self._num_workers: int = num_workers or self._mp_ctx.cpu_count() - 1
         self._seeds = seeds or []
+        self._state_file = state_file
 
         if regression:
             for error_file in crash_dir.glob("*"):
@@ -269,7 +250,7 @@ def _write_sample(self, buf: bytes, prefix: str = "crash-") -> None:
         if len(buf) < 200:
             logging.info("sample = %s", buf.hex())
 
-    def _initialize_process(self, wid: int) -> tuple[MPProcess, mp.Queue[Update]]:
+    def _initialize_process(self, wid: int, state: st.State) -> tuple[MPProcess, mp.Queue[Update]]:
         queue: mp.Queue[Update] = self._mp_ctx.Queue()
         result = self._mp_ctx.Process(
             target=worker,
@@ -280,9 +261,8 @@ def _initialize_process(self, wid: int) -> tuple[MPProcess, mp.Queue[Update]]:
                 self._result_queue,
                 self._close_stdout,
                 self._close_stderr,
-                self._max_input_size,
                 self._stat_frequency,
-                self._seeds,
+                state,
             ),
         )
         result.start()
@@ -290,16 +270,20 @@
 
     def start(self) -> None:  # noqa: PLR0912
         start_time = time.time()
-        coverage = Coverage()
-        corp = corpus.Corpus(self._seeds, self._max_input_size)
+        state = st.State(self._seeds, self._max_input_size)
 
-        self._workers = [self._initialize_process(wid) for wid in range(self._num_workers)]
+        if self._state_file:
+            state.load(self._state_file)
+
+        self._workers = [
+            self._initialize_process(wid=wid, state=state) for wid in range(self._num_workers)
+        ]
 
         logging.info(
             "#0 READ units: %d workers: %d seeds: %d",
-            corp.length,
+            state.size,
             self._num_workers,
-            corp.length,
+            len(self._seeds),
         )
 
         while True:
@@ -327,16 +311,19 @@
             self._current_runs += result.runs
 
             if isinstance(result, Error):
-                improvement = coverage.store_and_check_improvement(result.covered)
+                improvement = state.store_coverage(result.covered)
                 if improvement:
                     self._current_crashes += 1
                     self._write_sample(result.data)
 
             elif isinstance(result, Report):
-                improvement = coverage.store_and_check_improvement(result.covered)
+                improvement = state.store_coverage(result.covered)
                 if improvement:
-                    self._log_stats("NEW", coverage.total, corp.length)
-                    corp.put(bytearray(result.data))
+                    self._log_stats("NEW", state.total_coverage, state.size)
+                    state.put_input(bytearray(result.data))
+
+                    if self._state_file:
+                        state.save(self._state_file)
 
                 for wid, (_, queue) in enumerate(self._workers):
                     if wid != result.wid:
@@ -349,7 +336,7 @@ def start(self) -> None:  # noqa: PLR0912
                 assert False, f"Unhandled result type: {type(result)}"
 
             if (time.time() - self._last_stats_time) > self._stat_frequency:
-                self._log_stats("PULSE", coverage.total, corp.length)
+                self._log_stats("PULSE", state.total_coverage, state.size)
 
         for _, queue in self._workers:
             queue.cancel_join_thread()
diff --git a/cobrafuzz/state.py b/cobrafuzz/state.py
new file mode 100644
index 0000000..2178196
--- /dev/null
+++ b/cobrafuzz/state.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import ast
+import json
+import logging
+from pathlib import Path
+from typing import Optional
+
+from cobrafuzz import corpus, util
+
+
+class LoadError(Exception):
+    pass
+
+
+class State:
+    def __init__(
+        self,
+        seeds: Optional[list[Path]] = None,
+        max_input_size: int = 4096,
+    ):
+        self._VERSION = 1
+        self._max_input_size = max_input_size
+        self._covered: set[tuple[Optional[str], Optional[int], str, int]] = set()
+        self._inputs: list[bytearray] = []
+
+        for path in [p for p in seeds or [] if p.is_file()] + [
+            f for p in seeds or [] if not p.is_file() for f in p.glob("*") if f.is_file()
+        ]:
+            with path.open("rb") as f:
+                self._inputs.append(bytearray(f.read()))
+        if not self._inputs:
+            self._inputs.append(bytearray(0))
+
+    def save(self, filename: Path) -> None:
+        with filename.open(mode="w+") as sf:
+            json.dump(
+                obj={
+                    "version": self._VERSION,
+                    "coverage": list(self._covered),
+                    "population": [str(bytes(i))[2:-1] for i in self._inputs],
+                },
+                fp=sf,
+                ensure_ascii=True,
+            )
+
+    def load(self, filename: Path) -> None:
+        try:
+            with filename.open() as sf:
+                data = json.load(sf)
+                if "version" not in data or data["version"] != self._VERSION:
+                    raise LoadError(
+                        f"Invalid version in state file {filename} (expected {self._VERSION})",
+                    )
+                self._covered |= {tuple(e) for e in data["coverage"]}
+                self._inputs.extend(
+                    bytearray(ast.literal_eval(f"b'{i}'")) for i in data["population"]
+                )
+        except FileNotFoundError:
+            pass
+        except (json.JSONDecodeError, TypeError):
+            filename.unlink()
+            logging.info("Malformed state file: %s", filename)
+        except OSError as e:
+            logging.info("Error opening state file: %s", e)
+
+    def store_coverage(
+        self,
+        data: set[tuple[Optional[str], Optional[int], str, int]],
+    ) -> bool:
+        """
+        Store coverage information. Return True if coverage has increased.
+
+        Arguments:
+        ---------
+        data: Coverage information to store.
+ """ + + covered = len(self._covered) + self._covered |= data + if len(self._covered) > covered: + return True + return False + + @property + def total_coverage(self) -> int: + return len(self._covered) + + @property + def size(self) -> int: + return len(self._inputs) + + def put_input(self, buf: bytearray) -> None: + self._inputs.append(buf) + + def get_input(self) -> bytearray: + return corpus.mutate( + buf=list(self._inputs)[util.rand(len(self._inputs))], + max_input_size=self._max_input_size, + ) diff --git a/tests/unit/test_corpus.py b/tests/unit/test_corpus.py index e2b2356..46a6ac0 100644 --- a/tests/unit/test_corpus.py +++ b/tests/unit/test_corpus.py @@ -1,152 +1,12 @@ from __future__ import annotations -import hashlib import secrets -from pathlib import Path -import numpy as np import pytest -from scipy.stats import chisquare from cobrafuzz import corpus, util -def test_length() -> None: - c = corpus.Corpus() - assert c.length == 1 - - -def test_add_file_constructor(tmpdir: Path) -> None: - filename = Path(tmpdir) / "input.dat" - with filename.open("wb") as f: - f.write(b"deadbeef") - c = corpus.Corpus(seeds=[filename]) - assert c._inputs == [bytearray(b"deadbeef")] # noqa: SLF001 - - -def test_add_files_constructor(tmpdir: Path) -> None: - basedir = Path(tmpdir) / "inputs" - basedir.mkdir() - (basedir / "subdir").mkdir() - - with (basedir / "f1").open("wb") as f: - f.write(b"deadbeef") - with (basedir / "f2").open("wb") as f: - f.write(b"deadc0de") - - c = corpus.Corpus(seeds=[basedir]) - assert sorted(c._inputs) == sorted( # noqa: SLF001 - [ - bytearray(b"deadc0de"), - bytearray(b"deadbeef"), - ], - ) - - -def test_create_save_dir(tmpdir: Path) -> None: - dirname = Path(tmpdir) / "input" - c = corpus.Corpus(save_dir=dirname) - c.save() - assert dirname.exists() - assert dirname.is_dir() - assert c._inputs == [bytearray(0)] # noqa: SLF001 - - -def test_create_no_save_dir() -> None: - c = corpus.Corpus() - c.save() - assert c._inputs == [bytearray(0)] # noqa: SLF001 - - -def test_rand_uniform() -> None: - assert util.rand(0) == 0 - assert util.rand(1) == 0 - - data = [util.rand(10) for _ in range(1, 1000000)] - result = chisquare(f_obs=list(np.bincount(data))) - assert result.pvalue > 0.05 - - -def test_rand_exponential() -> None: - expected = [round(200000 / 2 ** (n + 1)) for n in range(32)] - data = list( - np.bincount( - [util.rand_exp() for _ in range(sum(expected))], - minlength=32, - ), - ) - - # There should be more than 13 samples in each bin, - # c.f. https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html - # Starting at the position *before* the element that is <= 13, bin all remaining elements. 
- data_valid_samples = [i for i, v in enumerate(data) if v < 13] - assert len(data_valid_samples) > 0 - - expected_valid_samples = [i for i, v in enumerate(expected) if v < 13] - assert len(expected_valid_samples) > 0 - - index = min(data_valid_samples[0], expected_valid_samples[0]) - 1 - data = data[:index] + [sum(data[index:])] - expected = expected[:index] + [sum(expected[index:])] - - result = chisquare(f_obs=data, f_exp=expected) - assert result.pvalue > 0.05, result - - -def test_choose_length() -> None: - n = 1000 - lengths = [util.choose_len(n) for _ in range(10000)] - - assert n > 32 - assert len([v for v in lengths if v < 1]) == 0 - assert len([v for v in lengths if v > n]) == 0 - - data = [ - len([v for v in lengths if 1 <= v <= 8]), - len([v for v in lengths if 9 <= v <= 32]), - len([v for v in lengths if 33 <= v <= n]), - ] - - # Expected distribution for range 1..8, 9..32 and 33..n - expected = [ - round((0.9 + 0.0225 + (8 / (100 * n))) * sum(data)), - round((0.0675 + (24 / (100 * n))) * sum(data)), - round(((n - 32) / (100 * n)) * sum(data)), - ] - - result = chisquare(f_obs=data, f_exp=expected) - assert result.pvalue > 0.05, result - - -def test_put_corpus_not_saved() -> None: - c = corpus.Corpus() - c.put(bytearray(b"deadbeef")) - assert c._inputs == [bytearray(0), bytearray(b"deadbeef")] # noqa: SLF001 - - -def test_put_corpus_saved(tmpdir: Path) -> None: - c = corpus.Corpus(save_dir=Path(tmpdir)) - c.put(bytearray(b"deadbeef")) - outfile = Path(tmpdir) / hashlib.sha256(b"deadbeef").hexdigest() - assert c._inputs == [bytearray(0), bytearray(b"deadbeef")] # noqa: SLF001 - c.save() - assert outfile.exists() - with outfile.open("rb") as of: - assert of.read() == b"deadbeef" - - -def test_generate_input(tmpdir: Path, monkeypatch: pytest.MonkeyPatch) -> None: - filename = Path(tmpdir) / "input.dat" - with filename.open("wb") as f: - f.write(b"deadbeef") - c = corpus.Corpus(seeds=[filename]) - with monkeypatch.context() as mp: - mp.setattr(corpus, "mutate", lambda buf, max_input_size: buf[:max_input_size]) - mp.setattr(util, "rand", lambda _: 0) - assert c.generate_input() == bytearray(b"deadbeef") - assert c.generate_input() == bytearray(b"deadbeef") - - def test_mutate(monkeypatch: pytest.MonkeyPatch) -> None: with monkeypatch.context() as mp: mp.setattr(util, "rand_exp", lambda: 2) diff --git a/tests/unit/test_fuzzer.py b/tests/unit/test_fuzzer.py index fbbee07..e20b7cd 100644 --- a/tests/unit/test_fuzzer.py +++ b/tests/unit/test_fuzzer.py @@ -65,3 +65,17 @@ def test_regression(tmp_path: Path) -> None: (tmp_path / "subdir").mkdir() with pytest.raises(SystemExit, match="^0$"): fuzzer.Fuzzer(target=crashing_target, crash_dir=tmp_path, regression=True) + + +def test_state(tmp_path: Path) -> None: + state_file = tmp_path / "state.json" + for _ in range(2): + f = fuzzer.Fuzzer( + target=non_crashing_target, + crash_dir=tmp_path, + max_runs=10, + state_file=state_file, + ) + with pytest.raises(SystemExit, match="^0$"): + f.start() + assert state_file.exists() diff --git a/tests/unit/test_state.py b/tests/unit/test_state.py new file mode 100644 index 0000000..0c4074f --- /dev/null +++ b/tests/unit/test_state.py @@ -0,0 +1,108 @@ +import json +import logging +from pathlib import Path + +import pytest + +from cobrafuzz import corpus, state, util + + +def test_length() -> None: + c = state.State() + assert c.size == 1 + + +def test_add_file_constructor(tmp_path: Path) -> None: + filename = tmp_path / "input.dat" + with filename.open("wb") as f: + f.write(b"deadbeef") + c = 
+    assert c._inputs == [bytearray(b"deadbeef")]  # noqa: SLF001
+
+
+def test_add_files_constructor(tmp_path: Path) -> None:
+    basedir = tmp_path / "inputs"
+    basedir.mkdir()
+    (basedir / "subdir").mkdir()
+
+    with (basedir / "f1").open("wb") as f:
+        f.write(b"deadbeef")
+    with (basedir / "f2").open("wb") as f:
+        f.write(b"deadc0de")
+
+    c = state.State(seeds=[basedir])
+    assert sorted(c._inputs) == sorted(  # noqa: SLF001
+        [
+            bytearray(b"deadc0de"),
+            bytearray(b"deadbeef"),
+        ],
+    )
+
+
+def test_put_state_not_saved() -> None:
+    c = state.State()
+    c.put_input(bytearray(b"deadbeef"))
+    assert c._inputs == [bytearray(0), bytearray(b"deadbeef")]  # noqa: SLF001
+
+
+def test_put_state_saved(tmp_path: Path) -> None:
+    c1 = state.State()
+    c1.put_input(bytearray(b"deadbeef"))
+    assert c1._inputs == [bytearray(0), bytearray(b"deadbeef")]  # noqa: SLF001
+
+    statefile = tmp_path / "state.json"
+    c1.save(statefile)
+    assert statefile.exists()
+
+    c2 = state.State()
+    c2.load(statefile)
+    assert c2._inputs == [bytearray(0), bytearray(0), bytearray(b"deadbeef")]  # noqa: SLF001
+
+
+def test_generate_input(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+    filename = tmp_path / "input.dat"
+    with filename.open("wb") as f:
+        f.write(b"deadbeef")
+    c = state.State(seeds=[filename])
+    with monkeypatch.context() as mp:
+        mp.setattr(corpus, "mutate", lambda buf, max_input_size: buf[:max_input_size])
+        mp.setattr(util, "rand", lambda _: 0)
+        assert c.get_input() == bytearray(b"deadbeef")
+        assert c.get_input() == bytearray(b"deadbeef")
+
+
+def test_fail_load_invalid_version(tmp_path: Path) -> None:
+    filename = tmp_path / "state.json"
+    with filename.open("w") as f:
+        json.dump(obj={"version": 99999}, fp=f)
+    c = state.State()
+    with pytest.raises(
+        state.LoadError,
+        match=rf"^Invalid version in state file {filename} \(expected {c._VERSION}\)$",  # noqa: SLF001
+    ):
+        c.load(filename=filename)
+
+
+def test_fail_load_malformed_state_file(caplog: pytest.LogCaptureFixture, tmp_path: Path) -> None:
+    filename = tmp_path / "state.json"
+    with filename.open("w") as f:
+        f.write("MALFORMED!")
+    c = state.State()
+    with caplog.at_level(logging.INFO):
+        c.load(filename=filename)
+    assert f"Malformed state file: {filename}" in caplog.text, caplog.text
+    assert not filename.exists()
+
+
+def test_fail_load_missing_file(tmp_path: Path) -> None:
+    missing = tmp_path / "missing"
+    c = state.State()
+    c.load(filename=missing)
+    assert not missing.exists()
+
+
+def test_fail_load_invalid_file_path(caplog: pytest.LogCaptureFixture, tmp_path: Path) -> None:
+    c = state.State()
+    with caplog.at_level(logging.INFO):
+        c.load(filename=tmp_path)
+    assert f"[Errno 21] Is a directory: '{tmp_path}'" in caplog.text, caplog.text
diff --git a/tests/unit/test_util.py b/tests/unit/test_util.py
index 9a6aace..51c5962 100644
--- a/tests/unit/test_util.py
+++ b/tests/unit/test_util.py
@@ -1,4 +1,6 @@
+import numpy as np
 import pytest
+from scipy.stats import chisquare
 
 from cobrafuzz import util
 
@@ -88,3 +90,63 @@ def test_insert_valid(data: bytes, start: int, data_to_insert: bytes, expected:
     tmp = bytearray(data)
     util.insert(tmp, start, data_to_insert)
     assert tmp == expected
+
+
+def test_rand_uniform() -> None:
+    assert util.rand(0) == 0
+    assert util.rand(1) == 0
+
+    data = [util.rand(10) for _ in range(1, 1000000)]
+    result = chisquare(f_obs=list(np.bincount(data)))
+    assert result.pvalue > 0.05
+
+
+def test_rand_exponential() -> None:
+    expected = [round(200000 / 2 ** (n + 1)) for n in range(32)]
+    data = list(
+        np.bincount(
+            [util.rand_exp() for _ in range(sum(expected))],
+            minlength=32,
+        ),
+    )
+
+    # There should be more than 13 samples in each bin,
+    # c.f. https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html
+    # Starting at the position *before* the element that is <= 13, bin all remaining elements.
+    data_valid_samples = [i for i, v in enumerate(data) if v < 13]
+    assert len(data_valid_samples) > 0
+
+    expected_valid_samples = [i for i, v in enumerate(expected) if v < 13]
+    assert len(expected_valid_samples) > 0
+
+    index = min(data_valid_samples[0], expected_valid_samples[0]) - 1
+    data = data[:index] + [sum(data[index:])]
+    expected = expected[:index] + [sum(expected[index:])]
+
+    result = chisquare(f_obs=data, f_exp=expected)
+    assert result.pvalue > 0.05, result
+
+
+def test_choose_length() -> None:
+    n = 1000
+    lengths = [util.choose_len(n) for _ in range(10000)]
+
+    assert n > 32
+    assert len([v for v in lengths if v < 1]) == 0
+    assert len([v for v in lengths if v > n]) == 0
+
+    data = [
+        len([v for v in lengths if 1 <= v <= 8]),
+        len([v for v in lengths if 9 <= v <= 32]),
+        len([v for v in lengths if 33 <= v <= n]),
+    ]
+
+    # Expected distribution for range 1..8, 9..32 and 33..n
+    expected = [
+        round((0.9 + 0.0225 + (8 / (100 * n))) * sum(data)),
+        round((0.0675 + (24 / (100 * n))) * sum(data)),
+        round(((n - 32) / (100 * n)) * sum(data)),
+    ]
+
+    result = chisquare(f_obs=data, f_exp=expected)
+    assert result.pvalue > 0.05, result
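
Usage sketch (not part of the patch): how the new state_file option is meant to
be used, following the API exercised by test_state in tests/unit/test_fuzzer.py
above. The target function and the file names are hypothetical.

    from pathlib import Path

    from cobrafuzz.fuzzer import Fuzzer


    def target(data: bytes) -> None:
        # Hypothetical target: raise (i.e. "crash") on a magic prefix.
        if data.startswith(b"FUZZ"):
            raise ValueError("crash found")


    # On the first run the state file does not exist yet and State.load()
    # falls through (FileNotFoundError is ignored); subsequent runs resume
    # from the saved corpus and coverage. start() exits via SystemExit.
    Fuzzer(
        target=target,
        crash_dir=Path("crashes"),
        max_runs=10_000,
        state_file=Path("state.json"),
    ).start()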
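
The State round trip can also be driven directly; a minimal sketch mirroring
test_put_state_saved from tests/unit/test_state.py above (the file name is
arbitrary):

    from pathlib import Path

    from cobrafuzz.state import State

    s1 = State()                          # starts out with a single empty input
    s1.put_input(bytearray(b"deadbeef"))
    s1.save(Path("state.json"))           # JSON: version, coverage, population

    s2 = State()
    s2.load(Path("state.json"))           # merges the saved inputs and coverage
    assert s2.size == 3                   # own empty input plus the two saved entries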