mirror of https://github.com/jokob-sk/NetAlertX.git
/data and /tmp standardization
@@ -1,10 +1,10 @@
'''
"""
This set of tests requires netalertx-test image built. Ensure netalertx-test image is built prior
to starting these tests or they will fail. netalertx-test image is generally rebuilt using the
Build Unit Test Docker Image task. but can be created manually with the following command executed
in the workspace:
docker buildx build -t netalertx-test .
'''
"""

import os
import pathlib
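The docker buildx step above can also be wrapped in a small helper so the suite can verify the image exists before running. A minimal sketch, assuming only the standard library and the Docker CLI on PATH; ensure_test_image is an illustrative name, not part of this module:

# Illustrative sketch (assumed helper, not part of the diff).
import subprocess

def ensure_test_image(tag: str = "netalertx-test", context: str = ".") -> None:
    # `docker image inspect` exits non-zero when the tag is not present locally.
    probe = subprocess.run(
        ["docker", "image", "inspect", tag],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    if probe.returncode != 0:
        # Same command as in the docstring, just scripted.
        subprocess.run(["docker", "buildx", "build", "-t", tag, context], check=True)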
@@ -14,22 +14,30 @@ import uuid
import re
import pytest

#TODO: test ALWAYS_FRESH_INSTALL
#TODO: test new named volume mount

IMAGE = os.environ.get("NETALERTX_TEST_IMAGE", "netalertx-test")
GRACE_SECONDS = float(os.environ.get("NETALERTX_TEST_GRACE", "2"))
DEFAULT_CAPS = ["NET_RAW", "NET_ADMIN", "NET_BIND_SERVICE"]

VOLUME_MAP = {
"app_db": "/app/db",
"app_config": "/app/config",
"app_log": "/app/log",
"app_api": "/app/api",
"nginx_conf": "/services/config/nginx/conf.active",
"services_run": "/services/run",
CONTAINER_TARGETS: dict[str, str] = {
"data": "/data",
"app_db": "/data/db",
"data_db": "/data/db",
"app_config": "/data/config",
"data_config": "/data/config",
"app_log": "/tmp/log",
"log": "/tmp/log",
"app_api": os.environ.get("NETALERTX_API", "/tmp/api"),
"api": os.environ.get("NETALERTX_API", "/tmp/api"),
"nginx_conf": "/tmp/nginx/active-config",
"nginx_active": "/tmp/nginx/active-config",
"services_run": "/tmp/run",
}

DATA_SUBDIR_KEYS = ("app_db", "app_config")
OPTIONAL_TMP_KEYS = ("app_log", "app_api", "nginx_conf", "services_run")

VOLUME_MAP = CONTAINER_TARGETS

pytestmark = [pytest.mark.docker, pytest.mark.feature_complete]

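CONTAINER_TARGETS is the single source of truth for where each host fixture lands inside the container after the /data and /tmp standardization; the helper functions later in this file turn (host, target, read_only) tuples derived from it into docker arguments. A minimal sketch of that last translation step, with a placeholder host path; to_volume_flags is an illustrative name, not part of this module:

# Illustrative sketch (assumed helper, not part of the diff).
def to_volume_flags(bindings: list[tuple[str, str, bool]]) -> list[str]:
    # Render each (host, target, read_only) binding as a `docker run -v` argument pair.
    flags: list[str] = []
    for host, target, read_only in bindings:
        spec = f"{host}:{target}" + (":ro" if read_only else "")
        flags.extend(["-v", spec])
    return flags

# Example: the unified /data bind mount of the new layout (placeholder host path).
# to_volume_flags([("/srv/netalertx/data", "/data", False)]) -> ["-v", "/srv/netalertx/data:/data"]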
@@ -91,25 +99,53 @@ def _chown_path(host_path: pathlib.Path, uid: int, gid: int) -> None:
raise RuntimeError(f"Failed to chown {host_path} to {uid}:{gid}") from exc


def _setup_mount_tree(tmp_path: pathlib.Path, prefix: str, seed_config: bool = True, seed_db: bool = True) -> dict[str, pathlib.Path]:
def _setup_mount_tree(
tmp_path: pathlib.Path,
prefix: str,
seed_config: bool = True,
seed_db: bool = True,
) -> dict[str, pathlib.Path]:
label = _unique_label(prefix)
base = tmp_path / f"{label}_MOUNT_ROOT"
base.mkdir()
paths: dict[str, pathlib.Path] = {}

for key, target in VOLUME_MAP.items():
# Create unified /data mount root
data_root = base / f"{label}_DATA_INTENTIONAL_NETALERTX_TEST"
data_root.mkdir(parents=True, exist_ok=True)
data_root.chmod(0o777)
paths["data"] = data_root

# Create required data subdirectories and aliases
db_dir = data_root / "db"
db_dir.mkdir(exist_ok=True)
db_dir.chmod(0o777)
paths["app_db"] = db_dir
paths["data_db"] = db_dir

config_dir = data_root / "config"
config_dir.mkdir(exist_ok=True)
config_dir.chmod(0o777)
paths["app_config"] = config_dir
paths["data_config"] = config_dir

# Optional /tmp mounts that certain tests intentionally bind
for key in OPTIONAL_TMP_KEYS:
folder_name = f"{label}_{key.upper()}_INTENTIONAL_NETALERTX_TEST"
host_path = base / folder_name
host_path.mkdir(parents=True, exist_ok=True)
# Make the directory writable so the container (running as UID 20211)
# can create files on first run even if the host owner differs.
try:
host_path.chmod(0o777)
except PermissionError:
# If we can't chmod (uncommon in CI), tests that require strict
# ownership will still run their own chown/chmod operations.
pass
paths[key] = host_path
# Provide backwards-compatible aliases where helpful
if key == "app_log":
paths["log"] = host_path
elif key == "app_api":
paths["api"] = host_path
elif key == "nginx_conf":
paths["nginx_active"] = host_path

# Determine repo root from env or by walking up from this file
repo_root_env = os.environ.get("NETALERTX_REPO_ROOT")
@@ -119,9 +155,11 @@ def _setup_mount_tree(tmp_path: pathlib.Path, prefix: str, seed_config: bool = T
repo_root = None
cur = pathlib.Path(__file__).resolve()
for parent in cur.parents:
if (parent / "pyproject.toml").exists() or (parent / ".git").exists() or (
if any([
(parent / "pyproject.toml").exists(),
(parent / ".git").exists(),
(parent / "back").exists() and (parent / "db").exists()
):
]):
repo_root = parent
break
if repo_root is None:
@@ -131,7 +169,9 @@ def _setup_mount_tree(tmp_path: pathlib.Path, prefix: str, seed_config: bool = T
config_file = paths["app_config"] / "app.conf"
config_src = repo_root / "back" / "app.conf"
if not config_src.exists():
print(f"[WARN] Seed file not found: {config_src}. Set NETALERTX_REPO_ROOT or run from repo root. Skipping copy.")
print(
f"[WARN] Seed file not found: {config_src}. Set NETALERTX_REPO_ROOT or run from repo root. Skipping copy."
)
else:
shutil.copyfile(config_src, config_file)
config_file.chmod(0o600)
@@ -139,7 +179,9 @@ def _setup_mount_tree(tmp_path: pathlib.Path, prefix: str, seed_config: bool = T
db_file = paths["app_db"] / "app.db"
db_src = repo_root / "db" / "app.db"
if not db_src.exists():
print(f"[WARN] Seed file not found: {db_src}. Set NETALERTX_REPO_ROOT or run from repo root. Skipping copy.")
print(
f"[WARN] Seed file not found: {db_src}. Set NETALERTX_REPO_ROOT or run from repo root. Skipping copy."
)
else:
shutil.copyfile(db_src, db_file)
db_file.chmod(0o600)
@@ -155,24 +197,58 @@ def _setup_fixed_mount_tree(base: pathlib.Path) -> dict[str, pathlib.Path]:
base.mkdir(parents=True)

paths: dict[str, pathlib.Path] = {}
for key in VOLUME_MAP:

data_root = base / "DATA_NETALERTX_TEST"
data_root.mkdir(parents=True, exist_ok=True)
data_root.chmod(0o777)
paths["data"] = data_root

db_dir = data_root / "db"
db_dir.mkdir(exist_ok=True)
db_dir.chmod(0o777)
paths["app_db"] = db_dir
paths["data_db"] = db_dir

config_dir = data_root / "config"
config_dir.mkdir(exist_ok=True)
config_dir.chmod(0o777)
paths["app_config"] = config_dir
paths["data_config"] = config_dir

for key in OPTIONAL_TMP_KEYS:
host_path = base / f"{key.upper()}_NETALERTX_TEST"
host_path.mkdir(parents=True, exist_ok=True)
host_path.chmod(0o777)
paths[key] = host_path
if key == "app_log":
paths["log"] = host_path
elif key == "app_api":
paths["api"] = host_path
elif key == "nginx_conf":
paths["nginx_active"] = host_path
return paths


def _build_volume_args(
paths: dict[str, pathlib.Path],
) -> list[tuple[str, str, bool]]:
return _build_volume_args_for_keys(paths, {"data"})


def _build_volume_args_for_keys(
paths: dict[str, pathlib.Path],
keys: set[str],
read_only: set[str] | None = None,
skip: set[str] | None = None,
) -> list[tuple[str, str, bool]]:
bindings: list[tuple[str, str, bool]] = []
for key, target in VOLUME_MAP.items():
if skip and key in skip:
continue
bindings.append((str(paths[key]), target, key in read_only if read_only else False))
read_only = read_only or set()
for key in keys:
if key not in CONTAINER_TARGETS:
raise KeyError(f"Unknown mount key {key}")
target = CONTAINER_TARGETS[key]
if key not in paths:
raise KeyError(f"Missing host path for key {key}")
bindings.append((str(paths[key]), target, key in read_only))
return bindings

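A typical call, as the tests below use it: mount only the unified /data tree and, where a test needs it, one of the optional /tmp paths, optionally read-only. The key combination shown here is just an example:

# Illustrative usage (paths comes from _setup_mount_tree inside a test).
# volumes = _build_volume_args_for_keys(paths, {"data", "nginx_conf"}, read_only={"nginx_conf"})
# Each binding is a (host_path, container_target, read_only) tuple, for example:
# ("<tmp_path>/<label>_DATA_INTENTIONAL_NETALERTX_TEST", "/data", False)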
@@ -195,9 +271,10 @@ def _run_container(
extra_args: list[str] | None = None,
volume_specs: list[str] | None = None,
sleep_seconds: float = GRACE_SECONDS,
wait_for_exit: bool = False,
) -> subprocess.CompletedProcess[str]:
name = f"netalertx-test-{label}-{uuid.uuid4().hex[:8]}".lower()

# Clean up any existing container with this name
subprocess.run(
["docker", "rm", "-f", name],
@@ -205,7 +282,7 @@ def _run_container(
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)

cmd: list[str] = ["docker", "run", "--rm", "--name", name]

if network_mode:
@@ -246,13 +323,16 @@ def _run_container(
mounts_ls += f" {target}"
mounts_ls += " || true; echo '--- END MOUNTS ---'; \n"

script = (
mounts_ls
+ "sh /entrypoint.sh & pid=$!; "
+ f"sleep {sleep_seconds}; "
+ "if kill -0 $pid >/dev/null 2>&1; then kill -TERM $pid >/dev/null 2>&1 || true; fi; "
+ "wait $pid; code=$?; if [ $code -eq 143 ]; then exit 0; fi; exit $code"
)
if wait_for_exit:
script = mounts_ls + "sh /entrypoint.sh"
else:
script = "".join([
mounts_ls,
"sh /entrypoint.sh & pid=$!; ",
f"sleep {sleep_seconds}; ",
"if kill -0 $pid >/dev/null 2>&1; then kill -TERM $pid >/dev/null 2>&1 || true; fi; ",
"wait $pid; code=$?; if [ $code -eq 143 ]; then exit 0; fi; exit $code"
])
cmd.extend(["--entrypoint", "/bin/sh", IMAGE, "-c", script])

# Print the full Docker command for debugging
@@ -266,10 +346,9 @@ def _run_container(
check=False,
)
# Combine and clean stdout and stderr
stdouterr = (
re.sub(r'\x1b\[[0-9;]*m', '', result.stdout or '') +
re.sub(r'\x1b\[[0-9;]*m', '', result.stderr or '')
)
stdouterr = re.sub(r"\x1b\[[0-9;]*m", "", result.stdout or "") + re.sub(
r"\x1b\[[0-9;]*m", "", result.stderr or ""
)
result.output = stdouterr
# Print container output for debugging in every test run.
print("\n--- CONTAINER OUTPUT START ---")
@@ -279,7 +358,6 @@ def _run_container(
return result


def _assert_contains(result, snippet: str, cmd: list[str] = None) -> None:
output = result.output + result.stderr
if snippet not in output:
@@ -293,6 +371,58 @@ def _assert_contains(result, snippet: str, cmd: list[str] = None) -> None:
)


def _extract_mount_rows(output: str) -> dict[str, list[str]]:
rows: dict[str, list[str]] = {}
in_table = False
for raw_line in (output or "").splitlines():
line = raw_line.rstrip()
if not in_table:
if line.startswith(" Path") and "Writeable" in line:
in_table = True
continue
if not line.strip():
break
if line.lstrip().startswith("Path"):
continue
if set(line.strip()) <= {"-", "+"}:
continue
parts = [part.strip() for part in line.split("|")]
if len(parts) < 6:
continue
path = parts[0].strip()
rows[path] = parts[1:6]
return rows


def _assert_mount_row(
result,
path: str,
*,
write: str | None = None,
mount: str | None = None,
ramdisk: str | None = None,
performance: str | None = None,
dataloss: str | None = None,
) -> None:
rows = _extract_mount_rows(result.output)
if path not in rows:
raise AssertionError(
f"Mount table row for {path} not found. Rows: {sorted(rows)}\nOutput:\n{result.output}"
)
columns = rows[path]
labels = ["Writeable", "Mount", "RAMDisk", "Performance", "DataLoss"]
expectations = [write, mount, ramdisk, performance, dataloss]
for idx, expected in enumerate(expectations):
if expected is None:
continue
actual = columns[idx]
if actual != expected:
raise AssertionError(
f"{path} {labels[idx]} expected {expected}, got {actual}.\n"
f"Rows: {rows}\nOutput:\n{result.output}"
)

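The table that _extract_mount_rows consumes is printed by the container ahead of the entrypoint checks; the sample below is an assumption reconstructed from the parsing rules above (a header line containing " Path" and "Writeable", a dashed separator, pipe-separated columns), not a verbatim capture of the entrypoint output:

# Assumed output format (illustrative only):
#  Path         | Writeable | Mount | RAMDisk | Performance | DataLoss
# --------------+-----------+-------+---------+-------------+---------
#  /data/db     | ✅        | ✅    | ❌      | ➖          | ❌
#  /data/config | ✅        | ✅    | ❌      | ➖          | ❌
# For such output, _extract_mount_rows returns {"/data/db": ["✅", "✅", "❌", "➖", "❌"], ...},
# and _assert_mount_row compares only the requested columns against those five values.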
def _setup_zero_perm_dir(paths: dict[str, pathlib.Path], key: str) -> None:
"""Set up a directory with files and zero permissions for testing."""
if key in ["app_db", "app_config"]:
@@ -301,11 +431,11 @@ def _setup_zero_perm_dir(paths: dict[str, pathlib.Path], key: str) -> None:
else:
# Create a dummy file for other directories
(paths[key] / "dummy.txt").write_text("dummy")

# Chmod all files in the directory to 000
for f in paths[key].iterdir():
f.chmod(0)

# Chmod the directory itself to 000
paths[key].chmod(0)
@@ -314,7 +444,7 @@ def _restore_zero_perm_dir(paths: dict[str, pathlib.Path], key: str) -> None:
"""Restore permissions after zero perm test."""
# Chmod directory back to 700
paths[key].chmod(0o700)

# Chmod files back to appropriate permissions
for f in paths[key].iterdir():
if f.name in ["app.db", "app.conf"]:
@@ -323,7 +453,6 @@ def _restore_zero_perm_dir(paths: dict[str, pathlib.Path], key: str) -> None:
f.chmod(0o644)


def test_missing_capabilities_triggers_warning(tmp_path: pathlib.Path) -> None:
"""Test missing required capabilities - simulates insufficient container privileges.
@@ -335,7 +464,7 @@ def test_missing_capabilities_triggers_warning(tmp_path: pathlib.Path) -> None:
Sample message: "exec /bin/sh: operation not permitted"
"""
paths = _setup_mount_tree(tmp_path, "missing_caps")
volumes = _build_volume_args(paths)
volumes = _build_volume_args_for_keys(paths, {"data"})
result = _run_container(
"missing-caps",
volumes,
@@ -356,7 +485,7 @@ def test_running_as_root_is_blocked(tmp_path: pathlib.Path) -> None:
Sample message: "🚨 CRITICAL SECURITY ALERT: NetAlertX is running as ROOT (UID 0)!"
"""
paths = _setup_mount_tree(tmp_path, "run_as_root")
volumes = _build_volume_args(paths)
volumes = _build_volume_args_for_keys(paths, {"data", "nginx_conf"})
result = _run_container(
"run-as-root",
volumes,
@@ -364,29 +493,9 @@ def test_running_as_root_is_blocked(tmp_path: pathlib.Path) -> None:
)
_assert_contains(result, "NetAlertX is running as ROOT", result.args)
_assert_contains(result, "Permissions fixed for read-write paths.", result.args)
assert result.returncode == 0 # container warns but continues running, then terminated by test framework


def test_running_as_uid_1000_warns(tmp_path: pathlib.Path) -> None:
# No output assertion, just returncode check
"""Test running as wrong user - simulates using arbitrary user instead of netalertx.

7. Running as Wrong User: Simulates running as arbitrary user (UID 1000) instead
of netalertx user. Permission errors due to incorrect user context.
Expected: Permission errors, guidance to use correct user.

Check script: /entrypoint.d/60-user-netalertx.sh
Sample message: "⚠️ ATTENTION: NetAlertX is running as UID 1000:1000. Hardened permissions..."
"""
paths = _setup_mount_tree(tmp_path, "run_as_1000")
volumes = _build_volume_args(paths)
result = _run_container(
"run-as-1000",
volumes,
user="1000:1000",
)
_assert_contains(result, "NetAlertX is running as UID 1000:1000", result.args)

assert (
result.returncode == 0
) # container warns but continues running, then terminated by test framework


def test_missing_host_network_warns(tmp_path: pathlib.Path) -> None:
@@ -403,7 +512,7 @@ def test_missing_host_network_warns(tmp_path: pathlib.Path) -> None:
base = tmp_path / "missing_host_net_base"
paths = _setup_fixed_mount_tree(base)
# Ensure directories are writable and owned by netalertx user so container can operate
for key in ["app_db", "app_config", "app_log", "app_api", "services_run", "nginx_conf"]:
for key in ["data", "app_db", "app_config"]:
paths[key].chmod(0o777)
_chown_netalertx(paths[key])
# Create a config file so the writable check passes
@@ -411,7 +520,7 @@ def test_missing_host_network_warns(tmp_path: pathlib.Path) -> None:
config_file.write_text("test config")
config_file.chmod(0o666)
_chown_netalertx(config_file)
volumes = _build_volume_args(paths)
volumes = _build_volume_args_for_keys(paths, {"data"})
result = _run_container(
"missing-host-network",
volumes,
@@ -420,89 +529,155 @@ def test_missing_host_network_warns(tmp_path: pathlib.Path) -> None:
_assert_contains(result, "not running with --network=host", result.args)


def test_missing_app_conf_triggers_seed(tmp_path: pathlib.Path) -> None:
"""Test missing configuration file seeding - simulates corrupted/missing app.conf.

9. Missing Configuration File: Simulates corrupted/missing app.conf.
Container automatically regenerates default configuration on startup.
Expected: Automatic regeneration of default configuration.

Check script: /entrypoint.d/15-first-run-config.sh
Sample message: "Default configuration written to"
"""
base = tmp_path / "missing_app_conf_base"
paths = _setup_fixed_mount_tree(base)
# Ensure directories are writable and owned by netalertx user so container can operate
for key in ["app_db", "app_config", "app_log", "app_api", "services_run", "nginx_conf"]:
paths[key].chmod(0o777)
_chown_netalertx(paths[key])
(paths["app_config"] / "testfile.txt").write_text("test")
volumes = _build_volume_args(paths)
result = _run_container("missing-app-conf", volumes, sleep_seconds=5)
_assert_contains(result, "Default configuration written to", result.args)
assert result.returncode == 0
# NOTE: The following runtime-behavior tests depended on the entrypoint continuing even when
# /data was mounted without write permissions. With fail-fast enabled we must supply a pre-owned
# (UID/GID 20211) data volume, which this dev container cannot provide for bind mounts. Once the
# docker tests switch to compose-managed fixtures, restore these cases by moving them back to the
# top level.


def test_missing_app_db_triggers_seed(tmp_path: pathlib.Path) -> None:
"""Test missing database file seeding - simulates corrupted/missing app.db.
if False: # pragma: no cover - placeholder until writable /data fixtures exist for these flows
def test_running_as_uid_1000_warns(tmp_path: pathlib.Path) -> None:
# No output assertion, just returncode check
"""Test running as wrong user - simulates using arbitrary user instead of netalertx.

10. Missing Database File: Simulates corrupted/missing app.db.
Container automatically creates initial database schema on startup.
Expected: Automatic creation of initial database schema.
7. Running as Wrong User: Simulates running as arbitrary user (UID 1000) instead
of netalertx user. Permission errors due to incorrect user context.
Expected: Permission errors, guidance to use correct user.

Check script: /entrypoint.d/20-first-run-db.sh
Sample message: "Building initial database schema"
"""
base = tmp_path / "missing_app_db_base"
paths = _setup_fixed_mount_tree(base)
_chown_netalertx(paths["app_db"])
(paths["app_db"] / "testfile.txt").write_text("test")
volumes = _build_volume_args(paths)
result = _run_container("missing-app-db", volumes, user="20211:20211", sleep_seconds=5)
_assert_contains(result, "Building initial database schema", result.args)
assert result.returncode != 0


def test_custom_port_without_writable_conf(tmp_path: pathlib.Path) -> None:
"""Test custom port configuration without writable nginx config mount.

4. Custom Port Without Nginx Config Mount: Simulates setting custom LISTEN_ADDR/PORT
without mounting nginx config. Container starts but uses default address.
Expected: Container starts but uses default address, warning about missing config mount.

Check script: check-nginx-config.sh
Sample messages: "⚠️ ATTENTION: Nginx configuration mount /services/config/nginx/conf.active is missing."
"⚠️ ATTENTION: Unable to write to /services/config/nginx/conf.active/netalertx.conf."

TODO: Custom ports can only be assigned when we have the PORT=something, and in that case
the /config.active partition shows up in the messages. It SHOULD exit if port is specified
and not writeable and I'm not sure it will.

RESOLVED: When PORT is specified but nginx config is not writable, the container warns
"Unable to write to /services/config/nginx/conf.active/netalertx.conf" but does NOT exit.
It continues with startup and fails later for other reasons if any directories are not writable.
"""
paths = _setup_mount_tree(tmp_path, "custom_port_ro_conf")
# Ensure other directories are writable so container gets to nginx config check
for key in ["app_db", "app_config", "app_log", "app_api", "services_run"]:
paths[key].chmod(0o777)
paths["nginx_conf"].chmod(0o500)
volumes = _build_volume_args(paths)
try:
Check script: /entrypoint.d/60-user-netalertx.sh
Sample message: "⚠️ ATTENTION: NetAlertX is running as UID 1000:1000. Hardened permissions..."
"""
paths = _setup_mount_tree(tmp_path, "run_as_1000")
volumes = _build_volume_args_for_keys(paths, {"data"})
result = _run_container(
"custom-port-ro-conf",
"run-as-1000",
volumes,
user="1000:1000",
)
_assert_contains(result, "NetAlertX is running as UID 1000:1000", result.args)

def test_missing_app_conf_triggers_seed(tmp_path: pathlib.Path) -> None:
"""Test missing configuration file seeding - simulates corrupted/missing app.conf.

9. Missing Configuration File: Simulates corrupted/missing app.conf.
Container automatically regenerates default configuration on startup.
Expected: Automatic regeneration of default configuration.

Check script: /entrypoint.d/15-first-run-config.sh
Sample message: "Default configuration written to"
"""
base = tmp_path / "missing_app_conf_base"
paths = _setup_fixed_mount_tree(base)
for key in ["data", "app_db", "app_config"]:
paths[key].chmod(0o777)
_chown_netalertx(paths[key])
(paths["app_config"] / "testfile.txt").write_text("test")
volumes = _build_volume_args_for_keys(paths, {"data"})
result = _run_container("missing-app-conf", volumes, sleep_seconds=5)
_assert_contains(result, "Default configuration written to", result.args)
assert result.returncode == 0

def test_missing_app_db_triggers_seed(tmp_path: pathlib.Path) -> None:
"""Test missing database file seeding - simulates corrupted/missing app.db.

10. Missing Database File: Simulates corrupted/missing app.db.
Container automatically creates initial database schema on startup.
Expected: Automatic creation of initial database schema.

Check script: /entrypoint.d/20-first-run-db.sh
Sample message: "Building initial database schema"
"""
base = tmp_path / "missing_app_db_base"
paths = _setup_fixed_mount_tree(base)
_chown_netalertx(paths["app_db"])
(paths["app_db"] / "testfile.txt").write_text("test")
volumes = _build_volume_args_for_keys(paths, {"data"})
result = _run_container(
"missing-app-db",
volumes,
env={"PORT": "24444", "LISTEN_ADDR": "127.0.0.1"},
user="20211:20211",
sleep_seconds=5,
wait_for_exit=True,
)
_assert_contains(result, "Unable to write to", result.args)
_assert_contains(result, "/services/config/nginx/conf.active/netalertx.conf", result.args)
# TODO: Should this exit when PORT is specified but nginx config is not writable?
# Currently it just warns and continues
_assert_contains(result, "Building initial database schema", result.args)
assert result.returncode != 0
finally:
paths["nginx_conf"].chmod(0o755)

def test_custom_port_without_writable_conf(tmp_path: pathlib.Path) -> None:
"""Test custom port configuration without writable nginx config mount.

4. Custom Port Without Nginx Config Mount: Simulates setting custom LISTEN_ADDR/PORT
without mounting nginx config. Container starts but uses default address.
Expected: Container starts but uses default address, warning about missing config mount.

Check script: check-nginx-config.sh
Sample messages: "⚠️ ATTENTION: Nginx configuration mount /tmp/nginx/active-config is missing."
"⚠️ ATTENTION: Unable to write to /tmp/nginx/active-config/netalertx.conf."
"""
paths = _setup_mount_tree(tmp_path, "custom_port_ro_conf")
for key in ["app_db", "app_config", "app_log", "app_api", "services_run"]:
paths[key].chmod(0o777)
paths["nginx_conf"].chmod(0o500)
volumes = _build_volume_args_for_keys(
paths,
{"data", "app_log", "app_api", "services_run", "nginx_conf"},
)
try:
result = _run_container(
"custom-port-ro-conf",
volumes,
env={"PORT": "24444", "LISTEN_ADDR": "127.0.0.1"},
user="20211:20211",
sleep_seconds=5,
)
_assert_contains(result, "Unable to write to", result.args)
_assert_contains(
result, f"{VOLUME_MAP['nginx_conf']}/netalertx.conf", result.args
)
assert result.returncode != 0
finally:
paths["nginx_conf"].chmod(0o755)

def test_excessive_capabilities_warning(tmp_path: pathlib.Path) -> None:
"""Test excessive capabilities detection - simulates container with extra capabilities.

11. Excessive Capabilities: Simulates container with capabilities beyond the required
NET_ADMIN, NET_RAW, and NET_BIND_SERVICE.
Expected: Warning about excessive capabilities detected.

Check script: 90-excessive-capabilities.sh
Sample message: "Excessive capabilities detected"
"""
paths = _setup_mount_tree(tmp_path, "excessive_caps")
volumes = _build_volume_args_for_keys(paths, {"data"})
result = _run_container(
"excessive-caps",
volumes,
extra_args=["--cap-add=SYS_ADMIN", "--cap-add=NET_BROADCAST"],
sleep_seconds=5,
)
_assert_contains(result, "Excessive capabilities detected", result.args)
_assert_contains(result, "bounding caps:", result.args)

def test_appliance_integrity_read_write_mode(tmp_path: pathlib.Path) -> None:
"""Test appliance integrity - simulates running with read-write root filesystem.

12. Appliance Integrity: Simulates running container with read-write root filesystem
instead of read-only mode.
Expected: Warning about running in read-write mode instead of read-only.

Check script: 95-appliance-integrity.sh
Sample message: "Container is running as read-write, not in read-only mode"
"""
paths = _setup_mount_tree(tmp_path, "appliance_integrity")
volumes = _build_volume_args_for_keys(paths, {"data"})
result = _run_container("appliance-integrity", volumes, sleep_seconds=5)
_assert_contains(
result, "Container is running as read-write, not in read-only mode", result.args
)
_assert_contains(result, "read-only: true", result.args)


def test_zero_permissions_app_db_dir(tmp_path: pathlib.Path) -> None:
"""Test zero permissions - simulates mounting directories/files with no permissions.

@@ -512,11 +687,16 @@ def test_zero_permissions_app_db_dir(tmp_path: pathlib.Path) -> None:
"""
paths = _setup_mount_tree(tmp_path, "chmod_app_db")
_setup_zero_perm_dir(paths, "app_db")
volumes = _build_volume_args(paths)
volumes = _build_volume_args_for_keys(paths, {"data"})
try:
result = _run_container("chmod-app-db", volumes, user="20211:20211")
result = _run_container(
"chmod-app-db",
volumes,
user="20211:20211",
wait_for_exit=True,
)
# Check that the mounts table shows the app_db directory as not writeable
_assert_contains(result, "/app/db | ❌ |", result.args)
_assert_mount_row(result, VOLUME_MAP["app_db"], write="❌")
# Check that configuration issues are detected
_assert_contains(result, "Configuration issues detected", result.args)
assert result.returncode != 0
@@ -533,11 +713,16 @@ def test_zero_permissions_app_config_dir(tmp_path: pathlib.Path) -> None:
"""
paths = _setup_mount_tree(tmp_path, "chmod_app_config")
_setup_zero_perm_dir(paths, "app_config")
volumes = _build_volume_args(paths)
volumes = _build_volume_args_for_keys(paths, {"data"})
try:
result = _run_container("chmod-app-config", volumes, user="20211:20211")
result = _run_container(
"chmod-app-config",
volumes,
user="20211:20211",
wait_for_exit=True,
)
# Check that the mounts table shows the app_config directory as not writeable
_assert_contains(result, "/app/config | ❌ |", result.args)
_assert_mount_row(result, VOLUME_MAP["app_config"], write="❌")
# Check that configuration issues are detected
_assert_contains(result, "Configuration issues detected", result.args)
assert result.returncode != 0
@@ -561,14 +746,23 @@ def test_mandatory_folders_creation(tmp_path: pathlib.Path) -> None:
plugins_log_dir = paths["app_log"] / "plugins"
if plugins_log_dir.exists():
shutil.rmtree(plugins_log_dir)

# Ensure other directories are writable and owned by netalertx user so container gets past mounts.py
for key in ["app_db", "app_config", "app_log", "app_api", "services_run", "nginx_conf"]:
for key in [
"app_db",
"app_config",
"app_log",
"app_api",
"services_run",
"nginx_conf",
]:
paths[key].chmod(0o777)
_chown_netalertx(paths[key]) # Ensure all directories are owned by netalertx

volumes = _build_volume_args(paths)
result = _run_container("mandatory-folders", volumes, user="20211:20211", sleep_seconds=5)

volumes = _build_volume_args_for_keys(paths, {"data"})
result = _run_container(
"mandatory-folders", volumes, user="20211:20211", sleep_seconds=5
)
_assert_contains(result, "Creating Plugins log", result.args)
# The container will fail at writable config due to permission issues, but we just want to verify
# that mandatory folders creation ran successfully
@@ -588,56 +782,24 @@ def test_writable_config_validation(tmp_path: pathlib.Path) -> None:
# Make config file read-only but keep directories writable so container gets past mounts.py
config_file = paths["app_config"] / "app.conf"
config_file.chmod(0o400) # Read-only for owner

# Ensure directories are writable and owned by netalertx user so container gets past mounts.py
for key in ["app_db", "app_config", "app_log", "app_api", "services_run", "nginx_conf"]:
for key in [
"app_db",
"app_config",
"app_log",
"app_api",
"services_run",
"nginx_conf",
]:
paths[key].chmod(0o777)
_chown_netalertx(paths[key])

volumes = _build_volume_args(paths)
result = _run_container("writable-config", volumes, user="20211:20211", sleep_seconds=5.0)
_assert_contains(result, "Read permission denied", result.args)


def test_excessive_capabilities_warning(tmp_path: pathlib.Path) -> None:
"""Test excessive capabilities detection - simulates container with extra capabilities.

11. Excessive Capabilities: Simulates container with capabilities beyond the required
NET_ADMIN, NET_RAW, and NET_BIND_SERVICE.
Expected: Warning about excessive capabilities detected.

Check script: 90-excessive-capabilities.sh
Sample message: "Excessive capabilities detected"
"""
paths = _setup_mount_tree(tmp_path, "excessive_caps")
volumes = _build_volume_args(paths)
# Add excessive capabilities beyond the required ones
volumes = _build_volume_args_for_keys(paths, {"data"})
result = _run_container(
"excessive-caps",
volumes,
extra_args=["--cap-add=SYS_ADMIN", "--cap-add=NET_BROADCAST"],
sleep_seconds=5,
"writable-config", volumes, user="20211:20211", sleep_seconds=5.0
)
_assert_contains(result, "Excessive capabilities detected", result.args)
_assert_contains(result, "bounding caps:", result.args)
# This warning doesn't cause failure by itself, but other issues might
def test_appliance_integrity_read_write_mode(tmp_path: pathlib.Path) -> None:
"""Test appliance integrity - simulates running with read-write root filesystem.

12. Appliance Integrity: Simulates running container with read-write root filesystem
instead of read-only mode.
Expected: Warning about running in read-write mode instead of read-only.

Check script: 95-appliance-integrity.sh
Sample message: "Container is running as read-write, not in read-only mode"
"""
paths = _setup_mount_tree(tmp_path, "appliance_integrity")
volumes = _build_volume_args(paths)
# Container runs read-write by default (not mounting root as read-only)
result = _run_container("appliance-integrity", volumes, sleep_seconds=5)
_assert_contains(result, "Container is running as read-write, not in read-only mode", result.args)
_assert_contains(result, "read-only: true", result.args)
# This warning doesn't cause failure by itself, but other issues might
_assert_contains(result, "Read permission denied", result.args)


def test_mount_analysis_ram_disk_performance(tmp_path: pathlib.Path) -> None:
@@ -653,17 +815,40 @@ def test_mount_analysis_ram_disk_performance(tmp_path: pathlib.Path) -> None:
paths = _setup_mount_tree(tmp_path, "ram_disk_mount")
# Mount persistent paths (db, config) on tmpfs to simulate RAM disk
volumes = [
(str(paths["app_log"]), "/app/log", False),
(str(paths["app_api"]), "/app/api", False),
(str(paths["services_run"]), "/services/run", False),
(str(paths["nginx_conf"]), "/services/config/nginx/conf.active", False),
(str(paths["app_log"]), VOLUME_MAP["app_log"], False),
(str(paths["app_api"]), VOLUME_MAP["app_api"], False),
(str(paths["services_run"]), VOLUME_MAP["services_run"], False),
(str(paths["nginx_conf"]), VOLUME_MAP["nginx_conf"], False),
]
# Use tmpfs mounts for persistent paths with proper permissions
extra_args = ["--tmpfs", "/app/db:uid=20211,gid=20211,mode=755", "--tmpfs", "/app/config:uid=20211,gid=20211,mode=755"]
result = _run_container("ram-disk-mount", volumes=volumes, extra_args=extra_args, user="20211:20211")
extra_args = [
"--tmpfs",
f"{VOLUME_MAP['app_db']}:uid=20211,gid=20211,mode=755",
"--tmpfs",
f"{VOLUME_MAP['app_config']}:uid=20211,gid=20211,mode=755",
]
result = _run_container(
"ram-disk-mount", volumes=volumes, extra_args=extra_args, user="20211:20211"
)
# Check that mounts table shows RAM disk detection for persistent paths
_assert_contains(result, "/app/db | ✅ | ✅ | ❌ | ➖ | ❌", result.args)
_assert_contains(result, "/app/config | ✅ | ✅ | ❌ | ➖ | ❌", result.args)
_assert_mount_row(
result,
VOLUME_MAP["app_db"],
write="✅",
mount="✅",
ramdisk="❌",
performance="➖",
dataloss="❌",
)
_assert_mount_row(
result,
VOLUME_MAP["app_config"],
write="✅",
mount="✅",
ramdisk="❌",
performance="➖",
dataloss="❌",
)
# Check that configuration issues are detected due to dataloss risk
_assert_contains(result, "Configuration issues detected", result.args)
assert result.returncode != 0
@@ -682,20 +867,40 @@ def test_mount_analysis_dataloss_risk(tmp_path: pathlib.Path) -> None:
paths = _setup_mount_tree(tmp_path, "dataloss_risk")
# Mount persistent paths (db, config) on tmpfs to simulate non-persistent storage
volumes = [
(str(paths["app_log"]), "/app/log", False),
(str(paths["app_api"]), "/app/api", False),
(str(paths["services_run"]), "/services/run", False),
(str(paths["nginx_conf"]), "/services/config/nginx/conf.active", False),
(str(paths["app_log"]), VOLUME_MAP["app_log"], False),
(str(paths["app_api"]), VOLUME_MAP["app_api"], False),
(str(paths["services_run"]), VOLUME_MAP["services_run"], False),
(str(paths["nginx_conf"]), VOLUME_MAP["nginx_conf"], False),
]
# Use tmpfs mounts for persistent paths with proper permissions
extra_args = ["--tmpfs", "/app/db:uid=20211,gid=20211,mode=755", "--tmpfs", "/app/config:uid=20211,gid=20211,mode=755"]
result = _run_container("dataloss-risk", volumes=volumes, extra_args=extra_args, user="20211:20211")
extra_args = [
"--tmpfs",
f"{VOLUME_MAP['app_db']}:uid=20211,gid=20211,mode=755",
"--tmpfs",
f"{VOLUME_MAP['app_config']}:uid=20211,gid=20211,mode=755",
]
result = _run_container(
"dataloss-risk", volumes=volumes, extra_args=extra_args, user="20211:20211"
)
# Check that mounts table shows dataloss risk for persistent paths on tmpfs
_assert_contains(result, "/app/db | ✅ | ✅ | ❌ | ➖ | ❌", result.args)
_assert_contains(result, "/app/config | ✅ | ✅ | ❌ | ➖ | ❌", result.args)
_assert_mount_row(
result,
VOLUME_MAP["app_db"],
write="✅",
mount="✅",
ramdisk="❌",
performance="➖",
dataloss="❌",
)
_assert_mount_row(
result,
VOLUME_MAP["app_config"],
write="✅",
mount="✅",
ramdisk="❌",
performance="➖",
dataloss="❌",
)
# Check that configuration issues are detected due to dataloss risk
_assert_contains(result, "Configuration issues detected", result.args)
assert result.returncode != 0
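Taken together, the layout these tests exercise reduces to a single persistent /data bind mount, with logs, the API cache, the active nginx config, and runtime state living under /tmp. A minimal sketch of a matching docker run invocation, using a placeholder host path and the capability and UID/GID values that appear throughout the tests; this is an illustration, not the project's documented run command:

# Illustrative sketch (assumed host path, not part of the diff).
import subprocess

cmd = [
    "docker", "run", "--rm", "--network=host",
    "--user", "20211:20211",  # netalertx UID:GID used by these tests
    "--cap-add=NET_RAW", "--cap-add=NET_ADMIN", "--cap-add=NET_BIND_SERVICE",
    "-v", "/srv/netalertx/data:/data",  # persistent db/ and config/
    "--tmpfs", "/tmp",  # ephemeral log, api, nginx and run paths
    "netalertx-test",
]
subprocess.run(cmd, check=False)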