adjust tests and allow other users

This commit is contained in:
Adam Outler
2025-12-21 00:42:35 +00:00
parent 0889741864
commit f9b724931f
11 changed files with 614 additions and 163 deletions

View File

@@ -221,15 +221,17 @@ RUN addgroup -g ${READONLY_GID} "${READ_ONLY_GROUP}" && \
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \ RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
chmod -R 004 ${READ_ONLY_FOLDERS} && \ chmod -R 004 ${READ_ONLY_FOLDERS} && \
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \ find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \ install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \
chown -R ${NETALERTX_USER}:${NETALERTX_GROUP} ${READ_WRITE_FOLDERS} && \
chmod -R 600 ${READ_WRITE_FOLDERS} && \
find ${READ_WRITE_FOLDERS} -type d -exec chmod 700 {} + && \
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \ chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \
chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \ chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
for dir in ${READ_WRITE_FOLDERS}; do \ # Do not bake first-run artifacts into the image. If present, Docker volume copy-up
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 "$dir"; \ # will persist restrictive ownership/modes into fresh named volumes, breaking
done && \ # arbitrary non-root UID/GID runs.
rm -f \
"${NETALERTX_CONFIG}/app.conf" \
"${NETALERTX_DB_FILE}" \
"${NETALERTX_DB_FILE}-shm" \
"${NETALERTX_DB_FILE}-wal" || true && \
apk del apk-tools && \ apk del apk-tools && \
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \ rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \ /lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \

View File

@@ -218,15 +218,17 @@ RUN addgroup -g ${READONLY_GID} "${READ_ONLY_GROUP}" && \
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \ RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
chmod -R 004 ${READ_ONLY_FOLDERS} && \ chmod -R 004 ${READ_ONLY_FOLDERS} && \
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \ find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \ install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \
chown -R ${NETALERTX_USER}:${NETALERTX_GROUP} ${READ_WRITE_FOLDERS} && \
chmod -R 600 ${READ_WRITE_FOLDERS} && \
find ${READ_WRITE_FOLDERS} -type d -exec chmod 700 {} + && \
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \ chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \
chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \ chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
for dir in ${READ_WRITE_FOLDERS}; do \ # Do not bake first-run artifacts into the image. If present, Docker volume copy-up
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 "$dir"; \ # will persist restrictive ownership/modes into fresh named volumes, breaking
done && \ # arbitrary non-root UID/GID runs.
rm -f \
"${NETALERTX_CONFIG}/app.conf" \
"${NETALERTX_DB_FILE}" \
"${NETALERTX_DB_FILE}-shm" \
"${NETALERTX_DB_FILE}-wal" || true && \
apk del apk-tools && \ apk del apk-tools && \
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \ rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \ /lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \

View File

@@ -37,7 +37,7 @@ def read_config_file():
configFile = read_config_file() configFile = read_config_file()
timeZoneSetting = configFile['TIMEZONE'] timeZoneSetting = configFile.get('TIMEZONE', default_tz)
if timeZoneSetting not in all_timezones: if timeZoneSetting not in all_timezones:
timeZoneSetting = default_tz timeZoneSetting = default_tz
timeZone = pytz.timezone(timeZoneSetting) timeZone = pytz.timezone(timeZoneSetting)

View File

@@ -1,5 +1,20 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
"""
Mount Diagnostic Tool
Analyzes container mount points for permission issues, persistence risks, and performance problems.
TODO: Future Enhancements (Roadmap Step 3 & 4)
1. Text-based Output: Replace emoji status indicators (✅, ❌) with plain text (e.g., [OK], [FAIL])
to ensure compatibility with all terminal types and logging systems.
2. OverlayFS/Copy-up Support: Improve detection logic for filesystems like Synology's OverlayFS
where files may appear writable but fail on specific operations (locking, mmap).
3. Root-to-User Context: Ensure this tool remains accurate when the container starts as root
to fix permissions and then drops privileges to the 'netalertx' user. The check should
reflect the *effective* permissions of the application user.
"""
import os import os
import sys import sys
from dataclasses import dataclass from dataclasses import dataclass
@@ -80,7 +95,21 @@ def _resolve_writeable_state(target_path: str) -> bool:
seen.add(current) seen.add(current)
if os.path.exists(current): if os.path.exists(current):
return os.access(current, os.W_OK) if not os.access(current, os.W_OK):
return False
# OverlayFS/Copy-up check: Try to actually write a file to verify
if os.path.isdir(current):
test_file = os.path.join(current, f".netalertx_write_test_{os.getpid()}")
try:
with open(test_file, "w") as f:
f.write("test")
os.remove(test_file)
return True
except OSError:
return False
return True
parent_dir = os.path.dirname(current) parent_dir = os.path.dirname(current)
if not parent_dir or parent_dir == current: if not parent_dir or parent_dir == current:

View File

@@ -7,7 +7,7 @@ if [ ! -f "${NETALERTX_CONFIG}/app.conf" ]; then
>&2 echo "ERROR: Failed to create config directory ${NETALERTX_CONFIG}" >&2 echo "ERROR: Failed to create config directory ${NETALERTX_CONFIG}"
exit 1 exit 1
} }
install -m 600 -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} /app/back/app.conf "${NETALERTX_CONFIG}/app.conf" || { install -m 600 /app/back/app.conf "${NETALERTX_CONFIG}/app.conf" || {
>&2 echo "ERROR: Failed to deploy default config to ${NETALERTX_CONFIG}/app.conf" >&2 echo "ERROR: Failed to deploy default config to ${NETALERTX_CONFIG}/app.conf"
exit 2 exit 2
} }

View File

@@ -13,9 +13,7 @@ mkdir -p "$(dirname "$NETALERTX_CONFIG")" || {
rm -f "$OVERRIDE_FILE" rm -f "$OVERRIDE_FILE"
# Check if APP_CONF_OVERRIDE is set # Check if APP_CONF_OVERRIDE is set
if [ -z "$APP_CONF_OVERRIDE" ]; then if [ -n "$APP_CONF_OVERRIDE" ]; then
>&2 echo "APP_CONF_OVERRIDE is not set. Skipping override config file creation."
else
# Save the APP_CONF_OVERRIDE env variable as a JSON file # Save the APP_CONF_OVERRIDE env variable as a JSON file
echo "$APP_CONF_OVERRIDE" > "$OVERRIDE_FILE" || { echo "$APP_CONF_OVERRIDE" > "$OVERRIDE_FILE" || {
>&2 echo "ERROR: Failed to write override config to $OVERRIDE_FILE" >&2 echo "ERROR: Failed to write override config to $OVERRIDE_FILE"

View File

@@ -50,8 +50,7 @@ fi
RED='\033[1;31m' RED='\033[1;31m'
GREY='\033[90m' GREY='\033[90m'
RESET='\033[0m' RESET='\033[0m'
printf "%s" "${RED}" NAX='
echo '
_ _ _ ___ _ _ __ __ _ _ _ ___ _ _ __ __
| \ | | | | / _ \| | | | \ \ / / | \ | | | | / _ \| | | | \ \ / /
| \| | ___| |_/ /_\ \ | ___ _ __| |_ \ V / | \| | ___| |_/ /_\ \ | ___ _ __| |_ \ V /
@@ -60,13 +59,12 @@ echo '
\_| \_/\___|\__\_| |_/_|\___|_| \__\/ \/ \_| \_/\___|\__\_| |_/_|\___|_| \__\/ \/
' '
printf "%s" "${RESET}" printf "%b%s%b" "${RED}" "${NAX}" "${RESET}"
echo ' Network intruder and presence detector. echo ' Network intruder and presence detector.
https://netalertx.com https://netalertx.com
' '
set -u set -u
FAILED_STATUS="" FAILED_STATUS=""
echo "Startup pre-checks" echo "Startup pre-checks"
for script in "${ENTRYPOINT_CHECKS}"/*; do for script in "${ENTRYPOINT_CHECKS}"/*; do
@@ -123,7 +121,6 @@ fi
# Set APP_CONF_OVERRIDE based on GRAPHQL_PORT if not already set # Set APP_CONF_OVERRIDE based on GRAPHQL_PORT if not already set
if [ -n "${GRAPHQL_PORT:-}" ] && [ -z "${APP_CONF_OVERRIDE:-}" ]; then if [ -n "${GRAPHQL_PORT:-}" ] && [ -z "${APP_CONF_OVERRIDE:-}" ]; then
export APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"'"${GRAPHQL_PORT}"'"}' export APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"'"${GRAPHQL_PORT}"'"}'
echo "Setting APP_CONF_OVERRIDE to $APP_CONF_OVERRIDE"
fi fi
@@ -283,15 +280,6 @@ add_service "${SYSTEM_SERVICES}/start-php-fpm.sh" "php-fpm83"
add_service "${SYSTEM_SERVICES}/start-nginx.sh" "nginx" add_service "${SYSTEM_SERVICES}/start-nginx.sh" "nginx"
add_service "${SYSTEM_SERVICES}/start-backend.sh" "python3" add_service "${SYSTEM_SERVICES}/start-backend.sh" "python3"
################################################################################
# Development Mode Debug Switch
################################################################################
# If NETALERTX_DEBUG=1, skip automatic service restart on failure
# Useful for devcontainer debugging where individual services need to be debugged
if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
echo "NETALERTX_DEBUG is set to 1, will not shut down other services if one fails."
fi
################################################################################ ################################################################################
# Service Monitoring Loop (Production Mode) # Service Monitoring Loop (Production Mode)
################################################################################ ################################################################################

View File

@@ -0,0 +1,66 @@
#!/bin/sh
# 0-storage-permission.sh: Fix permissions if running as root.
#
# This script checks if running as root and fixes ownership and permissions
# for read-write paths to ensure proper operation.
#
# Behavior summary (grounded in the code below):
#   * When the effective UID is 0, a large warning banner is printed to stderr,
#     ownership of every configured read-write path is handed to TARGET_USER,
#     and the script then parks forever (sleep infinity) so the operator can
#     restart the container as the non-root user.
#   * When not running as root, the script does nothing and exits normally.
# --- Color Codes ---
MAGENTA=$(printf '\033[1;35m')
RESET=$(printf '\033[0m')
# --- Main Logic ---
# Define paths that need read-write access
# NOTE(review): these variables are expected to come from the container
# environment (Dockerfile ENV) — empty entries are skipped in the loop below.
READ_WRITE_PATHS="
${NETALERTX_DATA}
${NETALERTX_DB}
${NETALERTX_API}
${NETALERTX_LOG}
${SYSTEM_SERVICES_RUN}
${NETALERTX_CONFIG}
${NETALERTX_CONFIG_FILE}
${NETALERTX_DB_FILE}
"
# Fallback to the image's dedicated user when NETALERTX_USER is unset.
TARGET_USER="${NETALERTX_USER:-netalertx}"
# If running as root, fix permissions first
if [ "$(id -u)" -eq 0 ]; then
>&2 printf "%s" "${MAGENTA}"
>&2 cat <<'EOF'
══════════════════════════════════════════════════════════════════════════════
🚨 CRITICAL SECURITY ALERT: NetAlertX is running as ROOT (UID 0)! 🚨
This configuration bypasses all built-in security hardening measures.
You've granted a network monitoring application unrestricted access to
your host system. A successful compromise here could jeopardize your
entire infrastructure.
IMMEDIATE ACTION REQUIRED: Switch to the dedicated 'netalertx' user:
* Remove any 'user:' directive specifying UID 0 from docker-compose.yml or
* switch to the default USER in the image (20211:20211)
IMPORTANT: This corrective mode automatically adjusts ownership of
/data/db and /data/config directories to the netalertx user, ensuring
proper operation in subsequent runs.
Remember: Never operate security-critical tools as root unless you're
actively trying to get pwned.
https://github.com/jokob-sk/NetAlertX/blob/main/docs/docker-troubleshooting/running-as-root.md
══════════════════════════════════════════════════════════════════════════════
EOF
>&2 printf "%s" "${RESET}"
# Set ownership and permissions for each read-write path individually
# The while-read loop (instead of unquoted word-splitting) keeps paths with
# spaces intact; blank lines from unset env vars are skipped.
printf '%s\n' "${READ_WRITE_PATHS}" | while IFS= read -r path; do
[ -n "${path}" ] || continue
echo "DEBUG: Processing $path"
chown -v -R "${TARGET_USER}" "${path}" || echo "DEBUG: chown failed for $path"
find "${path}" -type d -exec chmod -v u+rwx {} \;
find "${path}" -type f -exec chmod -v u+rw {} \;
done
echo Permissions fixed for read-write paths. Please restart the container as user ${TARGET_USER}.
# Park the script forever; 'sleep &' + 'wait' keeps the shell responsive
# to signals so the container can still be stopped cleanly.
sleep infinity & wait $!
fi

View File

@@ -45,40 +45,91 @@ def _unique_label(prefix: str) -> str:
return f"{prefix.upper()}__NETALERTX_INTENTIONAL__{uuid.uuid4().hex[:6]}" return f"{prefix.upper()}__NETALERTX_INTENTIONAL__{uuid.uuid4().hex[:6]}"
def test_nonroot_custom_uid_logs_note( def _repo_root() -> pathlib.Path:
tmp_path: pathlib.Path, env = os.environ.get("NETALERTX_REPO_ROOT")
uid_gid: tuple[int, int], if env:
) -> None: return pathlib.Path(env)
cur = pathlib.Path(__file__).resolve()
for parent in cur.parents:
if any(
[
(parent / "pyproject.toml").exists(),
(parent / ".git").exists(),
(parent / "back").exists() and (parent / "db").exists(),
]
):
return parent
return cur.parents[2]
def _docker_visible_tmp_root() -> pathlib.Path:
"""Return a docker-daemon-visible scratch directory for bind mounts.
Pytest's default tmp_path lives under /tmp inside the devcontainer, which may
not be visible to the Docker daemon that evaluates bind mount source paths.
We use /tmp/pytest-docker-mounts instead of the repo.
"""
root = pathlib.Path("/tmp/pytest-docker-mounts")
root.mkdir(parents=True, exist_ok=True)
try:
root.chmod(0o777)
except PermissionError:
# Best-effort; the directory only needs to be writable by the current user.
pass
return root
def _docker_visible_path(path: pathlib.Path) -> pathlib.Path:
"""Map a path into `_docker_visible_tmp_root()` when it lives under /tmp."""
try:
if str(path).startswith("/tmp/"):
return _docker_visible_tmp_root() / path.name
except Exception:
pass
return path
def _setup_mount_tree( def _setup_mount_tree(
tmp_path: pathlib.Path, tmp_path: pathlib.Path,
prefix: str, prefix: str,
paths = _setup_mount_tree(tmp_path, f"note_uid_{uid}") *,
for key in ["data", "app_db", "app_config"]: seed_config: bool = True,
paths[key].chmod(0o777) seed_db: bool = True,
volumes = _build_volume_args_for_keys(paths, {"data"}) ) -> dict[str, pathlib.Path]:
result = _run_container( """Create a compose-like host tree with permissive perms for arbitrary UID/GID."""
f"note-uid-{uid}",
volumes, label = _unique_label(prefix)
user=f"{uid}:{gid}", base = _docker_visible_tmp_root() / f"{label}_MOUNT_ROOT"
sleep_seconds=5, base.mkdir()
) base.chmod(0o777)
_assert_contains(result, f"NetAlertX note: current UID {uid} GID {gid}", result.args)
assert "expected UID" in result.output paths: dict[str, pathlib.Path] = {}
assert result.returncode == 0
data_root = base / f"{label}_DATA_INTENTIONAL_NETALERTX_TEST"
data_root.mkdir(parents=True, exist_ok=True)
data_root.chmod(0o777)
paths["data"] = data_root
db_dir = data_root / "db"
db_dir.mkdir(exist_ok=True)
db_dir.chmod(0o777)
paths["app_db"] = db_dir
paths["data_db"] = db_dir
config_dir = data_root / "config"
config_dir.mkdir(exist_ok=True)
config_dir.chmod(0o777)
paths["app_config"] = config_dir paths["app_config"] = config_dir
paths["data_config"] = config_dir paths["data_config"] = config_dir
# Optional /tmp mounts that certain tests intentionally bind
for key in OPTIONAL_TMP_KEYS: for key in OPTIONAL_TMP_KEYS:
folder_name = f"{label}_{key.upper()}_INTENTIONAL_NETALERTX_TEST" folder_name = f"{label}_{key.upper()}_INTENTIONAL_NETALERTX_TEST"
host_path = base / folder_name host_path = base / folder_name
host_path.mkdir(parents=True, exist_ok=True) host_path.mkdir(parents=True, exist_ok=True)
try: host_path.chmod(0o777)
host_path.chmod(0o777)
except PermissionError:
pass
paths[key] = host_path paths[key] = host_path
# Provide backwards-compatible aliases where helpful
if key == "app_log": if key == "app_log":
paths["log"] = host_path paths["log"] = host_path
elif key == "app_api": elif key == "app_api":
@@ -86,54 +137,45 @@ def _setup_mount_tree(
elif key == "nginx_conf": elif key == "nginx_conf":
paths["nginx_active"] = host_path paths["nginx_active"] = host_path
# Determine repo root from env or by walking up from this file repo_root = _repo_root()
repo_root_env = os.environ.get("NETALERTX_REPO_ROOT")
if repo_root_env:
repo_root = pathlib.Path(repo_root_env)
else:
repo_root = None
cur = pathlib.Path(__file__).resolve()
for parent in cur.parents:
if any([
(parent / "pyproject.toml").exists(),
(parent / ".git").exists(),
(parent / "back").exists() and (parent / "db").exists()
]):
repo_root = parent
break
if repo_root is None:
repo_root = cur.parents[2]
if seed_config: if seed_config:
config_file = paths["app_config"] / "app.conf"
config_src = repo_root / "back" / "app.conf" config_src = repo_root / "back" / "app.conf"
if not config_src.exists(): config_dst = paths["app_config"] / "app.conf"
print( if config_src.exists():
f"[WARN] Seed file not found: {config_src}. Set NETALERTX_REPO_ROOT or run from repo root. Skipping copy." shutil.copyfile(config_src, config_dst)
) config_dst.chmod(0o666)
else:
shutil.copyfile(config_src, config_file)
config_file.chmod(0o600)
if seed_db: if seed_db:
db_file = paths["app_db"] / "app.db"
db_src = repo_root / "db" / "app.db" db_src = repo_root / "db" / "app.db"
if not db_src.exists(): db_dst = paths["app_db"] / "app.db"
print( if db_src.exists():
f"[WARN] Seed file not found: {db_src}. Set NETALERTX_REPO_ROOT or run from repo root. Skipping copy." shutil.copyfile(db_src, db_dst)
) db_dst.chmod(0o666)
else:
shutil.copyfile(db_src, db_file)
db_file.chmod(0o600)
_chown_netalertx(base) # Ensure every mount point is world-writable so arbitrary UID/GID can write
for p in paths.values():
if p.is_dir():
p.chmod(0o777)
for child in p.iterdir():
if child.is_dir():
child.chmod(0o777)
else:
child.chmod(0o666)
else:
p.chmod(0o666)
return paths return paths
def _setup_fixed_mount_tree(base: pathlib.Path) -> dict[str, pathlib.Path]: def _setup_fixed_mount_tree(base: pathlib.Path) -> dict[str, pathlib.Path]:
base = _docker_visible_path(base)
if base.exists(): if base.exists():
shutil.rmtree(base) shutil.rmtree(base)
base.mkdir(parents=True) base.mkdir(parents=True)
try:
base.chmod(0o777)
except PermissionError:
pass
paths: dict[str, pathlib.Path] = {} paths: dict[str, pathlib.Path] = {}
@@ -191,6 +233,41 @@ def _build_volume_args_for_keys(
return bindings return bindings
def _chown_path(host_path: pathlib.Path, uid: int, gid: int) -> None:
"""Chown a host path using the test image with host user namespace."""
if not host_path.exists():
raise RuntimeError(f"Cannot chown missing path {host_path}")
cmd = [
"docker",
"run",
"--rm",
"--userns",
"host",
"--user",
"0:0",
"--entrypoint",
"/bin/chown",
"-v",
f"{host_path}:/mnt",
IMAGE,
"-R",
f"{uid}:{gid}",
"/mnt",
]
try:
subprocess.run(
cmd,
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
except subprocess.CalledProcessError as exc:
raise RuntimeError(f"Failed to chown {host_path} to {uid}:{gid}") from exc
def _chown_root(host_path: pathlib.Path) -> None: def _chown_root(host_path: pathlib.Path) -> None:
_chown_path(host_path, 0, 0) _chown_path(host_path, 0, 0)
@@ -199,6 +276,161 @@ def _chown_netalertx(host_path: pathlib.Path) -> None:
_chown_path(host_path, 20211, 20211) _chown_path(host_path, 20211, 20211)
def _docker_volume_rm(volume_name: str) -> None:
    """Force-remove a named docker volume, ignoring all failures."""
    rm_cmd = ["docker", "volume", "rm", "-f", volume_name]
    # check=False: removing a nonexistent volume must not abort the test run.
    subprocess.run(
        rm_cmd,
        check=False,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
def _docker_volume_create(volume_name: str) -> None:
    """Create a named docker volume; raises on failure (check=True)."""
    create_cmd = ["docker", "volume", "create", volume_name]
    subprocess.run(
        create_cmd,
        check=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
def _fresh_named_volume(prefix: str) -> str:
    """Return a unique docker volume name, removing any stale volume first.

    Pre-deleting guarantees the test exercises Docker's fresh-volume
    copy-up behavior instead of reusing leftover state.
    """
    volume_name = _unique_label(prefix).lower().replace("__", "-")
    _docker_volume_rm(volume_name)
    return volume_name
def _ensure_volume_copy_up(volume_name: str) -> None:
    """Initialize a named volume via Docker's image-to-volume copy-up.

    Mounting the volume into a no-op NetAlertX container triggers Docker's
    copy-up from the image. Writing into the volume with any other helper
    first would skip that copy-up, leaving the volume root as root:root 0755
    and breaking arbitrary UID/GID runs.
    """
    noop_cmd = [
        "docker", "run", "--rm",
        "--userns", "host",
        "-v", f"{volume_name}:/data",
        "--entrypoint", "/bin/sh",
        IMAGE,
        "-c", "true",
    ]
    subprocess.run(
        noop_cmd,
        check=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
def _seed_volume_text_file(
    volume_name: str,
    container_path: str,
    content: str,
    *,
    chmod_mode: str = "644",
    user: str | None = None,
) -> None:
    """Create or overwrite a text file inside a named docker volume.

    The content travels over stdin into a tiny Alpine helper container, so
    no bind mounts are needed (bind sources are resolved on the Docker
    daemon host, which may differ from the test host).
    """
    # Build the in-container script: ensure the parent dir, stream stdin
    # into the target file, then apply the requested mode.
    shell_script = (
        f"set -eu; mkdir -p \"$(dirname '{container_path}')\"; "
        f"cat > '{container_path}'; chmod {chmod_mode} '{container_path}'"
    )
    run_cmd = ["docker", "run", "--rm", "--userns", "host"]
    if user:
        run_cmd += ["--user", user]
    run_cmd += ["-v", f"{volume_name}:/data", "alpine:3.22", "sh", "-c", shell_script]
    subprocess.run(
        run_cmd,
        input=content,
        text=True,
        check=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
def _volume_has_file(volume_name: str, container_path: str) -> bool:
    """Return True when *container_path* exists as a regular file in the volume."""
    probe = subprocess.run(
        [
            "docker", "run", "--rm",
            "--userns", "host",
            "-v", f"{volume_name}:/data",
            "alpine:3.22",
            "sh", "-c",
            f"test -f '{container_path}'",
        ],
        check=False,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    # `test -f` exits 0 only for an existing regular file.
    return probe.returncode == 0
@pytest.mark.parametrize(
    "uid_gid",
    [
        # Two arbitrary, unrelated non-root UID/GID pairs.
        (1001, 1001),
        (1502, 1502),
    ],
)
def test_nonroot_custom_uid_logs_note(
    tmp_path: pathlib.Path,
    uid_gid: tuple[int, int],
) -> None:
    """Ensure arbitrary non-root UID/GID can run with compose-like mounts."""
    uid, gid = uid_gid
    vol = _fresh_named_volume(f"note_uid_{uid}")
    try:
        # Fresh named volume at /data: matches default docker-compose UX.
        result = _run_container(
            f"note-uid-{uid}",
            volumes=None,
            volume_specs=[f"{vol}:/data"],
            user=f"{uid}:{gid}",
            sleep_seconds=5,
        )
    finally:
        # Always clean up the volume, even when the container run fails.
        _docker_volume_rm(vol)
    # The entrypoint must log the actual UID/GID and a note about the
    # expected (image-default) UID, and still exit successfully.
    _assert_contains(result, f"NetAlertX note: current UID {uid} GID {gid}", result.args)
    assert "expected UID" in result.output
    assert result.returncode == 0
def _run_container( def _run_container(
label: str, label: str,
volumes: list[tuple[str, str, bool]] | None = None, volumes: list[tuple[str, str, bool]] | None = None,
@@ -212,9 +444,23 @@ def _run_container(
sleep_seconds: float = GRACE_SECONDS, sleep_seconds: float = GRACE_SECONDS,
wait_for_exit: bool = False, wait_for_exit: bool = False,
pre_entrypoint: str | None = None, pre_entrypoint: str | None = None,
userns_mode: str | None = "host",
image: str = IMAGE,
) -> subprocess.CompletedProcess[str]: ) -> subprocess.CompletedProcess[str]:
name = f"netalertx-test-{label}-{uuid.uuid4().hex[:8]}".lower() name = f"netalertx-test-{label}-{uuid.uuid4().hex[:8]}".lower()
tmp_uid = 20211
tmp_gid = 20211
if user:
try:
u_str, g_str = user.split(":", 1)
tmp_uid = int(u_str)
tmp_gid = int(g_str)
except Exception:
# Keep defaults if user format is unexpected.
tmp_uid = 20211
tmp_gid = 20211
# Clean up any existing container with this name # Clean up any existing container with this name
subprocess.run( subprocess.run(
["docker", "rm", "-f", name], ["docker", "rm", "-f", name],
@@ -225,21 +471,35 @@ def _run_container(
cmd: list[str] = ["docker", "run", "--rm", "--name", name] cmd: list[str] = ["docker", "run", "--rm", "--name", name]
# Avoid flakiness in host-network runs when the host already uses the
# default NetAlertX ports. Tests can still override explicitly via `env`.
effective_env: dict[str, str] = dict(env or {})
if network_mode == "host":
if "PORT" not in effective_env:
effective_env["PORT"] = str(30000 + (int(uuid.uuid4().hex[:4], 16) % 20000))
if "GRAPHQL_PORT" not in effective_env:
gql = 30000 + (int(uuid.uuid4().hex[4:8], 16) % 20000)
if str(gql) == effective_env["PORT"]:
gql = 30000 + ((gql + 1) % 20000)
effective_env["GRAPHQL_PORT"] = str(gql)
if network_mode: if network_mode:
cmd.extend(["--network", network_mode]) cmd.extend(["--network", network_mode])
cmd.extend(["--userns", "host"]) if userns_mode:
# Add default ramdisk to /tmp with permissions 777 cmd.extend(["--userns", userns_mode])
cmd.extend(["--tmpfs", "/tmp:mode=777"]) # Match docker-compose UX: /tmp is tmpfs with 1700 and owned by the runtime UID/GID.
cmd.extend(["--tmpfs", f"/tmp:mode=1700,uid={tmp_uid},gid={tmp_gid}"])
if user: if user:
cmd.extend(["--user", user]) cmd.extend(["--user", user])
if drop_caps: if drop_caps is not None:
for cap in drop_caps: for cap in drop_caps:
cmd.extend(["--cap-drop", cap]) cmd.extend(["--cap-drop", cap])
else: else:
cmd.extend(["--cap-drop", "ALL"])
for cap in DEFAULT_CAPS: for cap in DEFAULT_CAPS:
cmd.extend(["--cap-add", cap]) cmd.extend(["--cap-add", cap])
if env: if effective_env:
for key, value in env.items(): for key, value in effective_env.items():
cmd.extend(["-e", f"{key}={value}"]) cmd.extend(["-e", f"{key}={value}"])
if extra_args: if extra_args:
cmd.extend(extra_args) cmd.extend(extra_args)
@@ -280,7 +540,7 @@ def _run_container(
"if kill -0 $pid >/dev/null 2>&1; then kill -TERM $pid >/dev/null 2>&1 || true; fi; ", "if kill -0 $pid >/dev/null 2>&1; then kill -TERM $pid >/dev/null 2>&1 || true; fi; ",
"wait $pid; code=$?; if [ $code -eq 143 ]; then exit 0; fi; exit $code" "wait $pid; code=$?; if [ $code -eq 143 ]; then exit 0; fi; exit $code"
]) ])
cmd.extend(["--entrypoint", "/bin/sh", IMAGE, "-c", script]) cmd.extend(["--entrypoint", "/bin/sh", image, "-c", script])
# Print the full Docker command for debugging # Print the full Docker command for debugging
print("\n--- DOCKER CMD ---\n", " ".join(cmd), "\n--- END CMD ---\n") print("\n--- DOCKER CMD ---\n", " ".join(cmd), "\n--- END CMD ---\n")
@@ -456,14 +716,17 @@ def test_missing_host_network_warns(tmp_path: pathlib.Path) -> None:
Check script: check-network-mode.sh Check script: check-network-mode.sh
Sample message: "⚠️ ATTENTION: NetAlertX is not running with --network=host. Bridge networking..." Sample message: "⚠️ ATTENTION: NetAlertX is not running with --network=host. Bridge networking..."
""" """
paths = _setup_mount_tree(tmp_path, "missing_host_network") vol = _fresh_named_volume("missing_host_network")
volumes = _build_volume_args_for_keys(paths, {"data"}) try:
result = _run_container( result = _run_container(
"missing-host-network", "missing-host-network",
volumes, volumes=None,
network_mode=None, volume_specs=[f"{vol}:/data"],
sleep_seconds=5, network_mode=None,
) sleep_seconds=15,
)
finally:
_docker_volume_rm(vol)
_assert_contains(result, "not running with --network=host", result.args) _assert_contains(result, "not running with --network=host", result.args)
@@ -485,14 +748,16 @@ def test_missing_app_conf_triggers_seed(tmp_path: pathlib.Path) -> None:
Check script: /entrypoint.d/15-first-run-config.sh Check script: /entrypoint.d/15-first-run-config.sh
Sample message: "Default configuration written to" Sample message: "Default configuration written to"
""" """
base = tmp_path / "missing_app_conf_base" vol = _fresh_named_volume("missing_app_conf")
paths = _setup_fixed_mount_tree(base) try:
for key in ["data", "app_db", "app_config"]: result = _run_container(
paths[key].chmod(0o777) "missing-app-conf",
_chown_netalertx(paths[key]) volumes=None,
(paths["app_config"] / "testfile.txt").write_text("test") volume_specs=[f"{vol}:/data"],
volumes = _build_volume_args_for_keys(paths, {"data"}) sleep_seconds=15,
result = _run_container("missing-app-conf", volumes, sleep_seconds=5) )
finally:
_docker_volume_rm(vol)
_assert_contains(result, "Default configuration written to", result.args) _assert_contains(result, "Default configuration written to", result.args)
assert result.returncode == 0 assert result.returncode == 0
@@ -507,20 +772,28 @@ def test_missing_app_db_triggers_seed(tmp_path: pathlib.Path) -> None:
Check script: /entrypoint.d/20-first-run-db.sh Check script: /entrypoint.d/20-first-run-db.sh
Sample message: "Building initial database schema" Sample message: "Building initial database schema"
""" """
paths = _setup_mount_tree(tmp_path, "missing_app_db", seed_db=False) vol = _fresh_named_volume("missing_app_db")
config_file = paths["app_config"] / "app.conf" try:
config_file.write_text("TIMEZONE='UTC'\n") _ensure_volume_copy_up(vol)
config_file.chmod(0o600) # Seed only app.conf; leave app.db missing to trigger first-run DB schema creation.
volumes = _build_volume_args_for_keys(paths, {"data"}) _seed_volume_text_file(
result = _run_container( vol,
"missing-app-db", "/data/config/app.conf",
volumes, "TIMEZONE='UTC'\n",
user="20211:20211", chmod_mode="644",
sleep_seconds=5, user="20211:20211",
wait_for_exit=True, )
) result = _run_container(
assert (paths["app_db"] / "app.db").exists() "missing-app-db",
assert result.returncode != 0 volumes=None,
volume_specs=[f"{vol}:/data"],
user="20211:20211",
sleep_seconds=20,
)
assert _volume_has_file(vol, "/data/db/app.db")
finally:
_docker_volume_rm(vol)
assert result.returncode == 0
def test_custom_port_without_writable_conf(tmp_path: pathlib.Path) -> None: def test_custom_port_without_writable_conf(tmp_path: pathlib.Path) -> None:
@@ -534,26 +807,23 @@ def test_custom_port_without_writable_conf(tmp_path: pathlib.Path) -> None:
Sample messages: "⚠️ ATTENTION: Nginx configuration mount /tmp/nginx/active-config is missing." Sample messages: "⚠️ ATTENTION: Nginx configuration mount /tmp/nginx/active-config is missing."
"⚠️ ATTENTION: Unable to write to /tmp/nginx/active-config/netalertx.conf." "⚠️ ATTENTION: Unable to write to /tmp/nginx/active-config/netalertx.conf."
""" """
paths = _setup_mount_tree(tmp_path, "custom_port_ro_conf") vol = _fresh_named_volume("custom_port_ro_conf")
for key in ["data", "app_db", "app_config", "app_log", "app_api", "services_run"]:
paths[key].chmod(0o777)
volumes = _build_volume_args_for_keys(paths, {"data"})
volumes += _build_volume_args_for_keys(
paths,
{"app_log", "app_api", "services_run"},
)
extra_args = [ extra_args = [
"--tmpfs", "--tmpfs",
f"{VOLUME_MAP['nginx_conf']}:uid=20211,gid=20211,mode=500", f"{VOLUME_MAP['nginx_conf']}:uid=20211,gid=20211,mode=500",
] ]
result = _run_container( try:
"custom-port-ro-conf", result = _run_container(
volumes, "custom-port-ro-conf",
env={"PORT": "24444", "LISTEN_ADDR": "127.0.0.1"}, volumes=None,
user="20211:20211", volume_specs=[f"{vol}:/data"],
extra_args=extra_args, env={"PORT": "24444", "LISTEN_ADDR": "127.0.0.1"},
sleep_seconds=5, user="20211:20211",
) extra_args=extra_args,
sleep_seconds=15,
)
finally:
_docker_volume_rm(vol)
_assert_contains(result, "Unable to write to", result.args) _assert_contains(result, "Unable to write to", result.args)
_assert_contains( _assert_contains(
result, f"{VOLUME_MAP['nginx_conf']}/netalertx.conf", result.args result, f"{VOLUME_MAP['nginx_conf']}/netalertx.conf", result.args
@@ -570,14 +840,17 @@ def test_excessive_capabilities_warning(tmp_path: pathlib.Path) -> None:
Check script: 90-excessive-capabilities.sh Check script: 90-excessive-capabilities.sh
Sample message: "Excessive capabilities detected" Sample message: "Excessive capabilities detected"
""" """
paths = _setup_mount_tree(tmp_path, "excessive_caps") vol = _fresh_named_volume("excessive_caps")
volumes = _build_volume_args_for_keys(paths, {"data"}) try:
result = _run_container( result = _run_container(
"excessive-caps", "excessive-caps",
volumes, volumes=None,
extra_args=["--cap-add=SYS_ADMIN", "--cap-add=NET_BROADCAST"], volume_specs=[f"{vol}:/data"],
sleep_seconds=5, extra_args=["--cap-add=SYS_ADMIN", "--cap-add=NET_BROADCAST"],
) sleep_seconds=15,
)
finally:
_docker_volume_rm(vol)
_assert_contains(result, "Excessive capabilities detected", result.args) _assert_contains(result, "Excessive capabilities detected", result.args)
_assert_contains(result, "bounding caps:", result.args) _assert_contains(result, "bounding caps:", result.args)
@@ -591,13 +864,16 @@ def test_appliance_integrity_read_write_mode(tmp_path: pathlib.Path) -> None:
Check script: 95-appliance-integrity.sh Check script: 95-appliance-integrity.sh
Sample message: "Container is running as read-write, not in read-only mode" Sample message: "Container is running as read-write, not in read-only mode"
""" """
paths = _setup_mount_tree(tmp_path, "appliance_integrity") vol = _fresh_named_volume("appliance_integrity")
volumes = _build_volume_args_for_keys(paths, {"data"}) try:
result = _run_container( result = _run_container(
"appliance-integrity", "appliance-integrity",
volumes, volumes=None,
sleep_seconds=5, volume_specs=[f"{vol}:/data"],
) sleep_seconds=15,
)
finally:
_docker_volume_rm(vol)
_assert_contains( _assert_contains(
result, "Container is running as read-write, not in read-only mode", result.args result, "Container is running as read-write, not in read-only mode", result.args
) )
@@ -830,3 +1106,84 @@ def test_mount_analysis_dataloss_risk(tmp_path: pathlib.Path) -> None:
# Check that configuration issues are detected due to dataloss risk # Check that configuration issues are detected due to dataloss risk
_assert_contains(result, "Configuration issues detected", result.args) _assert_contains(result, "Configuration issues detected", result.args)
assert result.returncode != 0 assert result.returncode != 0
def test_restrictive_permissions_handling(tmp_path: pathlib.Path) -> None:
    """Test handling of restrictive permissions on bind mounts.

    Simulates a user mounting a directory with restrictive permissions
    (e.g., 755 root:root). The container should either fail gracefully or
    handle it if running as root (which triggers the fix script).
    If running as non-root (default), it should fail to write if it doesn't
    have access.
    """
    paths = _setup_mount_tree(tmp_path, "restrictive_perms")

    # Helper to chown without "--userns host" (workaround for a potential
    # devcontainer hang when host user namespaces are requested).
    def _chown_root_safe(host_path: pathlib.Path) -> None:
        """Recursively chown *host_path* to root:root via a throwaway container."""
        cmd = [
            "docker", "run", "--rm",
            # "--userns", "host",  # Removed to avoid hang
            "--user", "0:0",
            "--entrypoint", "/bin/chown",
            "-v", f"{host_path}:/mnt",
            IMAGE,
            "-R", "0:0", "/mnt",
        ]
        subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    # Set up a restrictive directory (root owned, 755)
    target_dir = paths["app_db"]
    _chown_root_safe(target_dir)
    target_dir.chmod(0o755)

    # Mount ALL volumes to avoid 'find' errors in 0-storage-permission.sh
    keys = {"data", "app_db", "app_config", "app_log", "app_api", "services_run", "nginx_conf"}
    volumes = _build_volume_args_for_keys(paths, keys)

    # Case 1: Running as non-root (default) - Should fail to write.
    # We disable host network/userns to avoid potential hangs in devcontainer
    # environments.
    result = _run_container(
        "restrictive-perms-user",
        volumes,
        user="20211:20211",
        sleep_seconds=5,
        network_mode=None,
        userns_mode=None
    )
    assert result.returncode != 0 or "Permission denied" in result.output or "Unable to write" in result.output

    # Case 2: Running as root - Should trigger the fix script
    result_root = _run_container(
        "restrictive-perms-root",
        volumes,
        user="0:0",
        sleep_seconds=5,
        network_mode=None,
        userns_mode=None
    )
    _assert_contains(result_root, "NetAlertX is running as ROOT", result_root.args)
    _assert_contains(result_root, "Permissions fixed for read-write paths", result_root.args)

    # Verify the fix actually happened: the unprivileged user must now be able
    # to write to the DB directory.
    # BUG FIX: the -v volume flags must appear BEFORE the image name. In the
    # previous version they were appended after IMAGE and the "-c" script, so
    # Docker passed them as positional arguments to /bin/sh and the bind
    # mounts were silently dropped — the check ran against an empty /data.
    check_cmd = [
        "docker", "run", "--rm",
        "--entrypoint", "/bin/sh",
        "--user", "20211:20211",
    ]
    for host_path, target, _readonly in volumes:
        check_cmd.extend(["-v", f"{host_path}:{target}"])
    check_cmd.extend([
        IMAGE,
        "-c", "ls -ldn /data/db && touch /data/db/test_write_after_fix",
    ])
    check_result = subprocess.run(check_cmd, capture_output=True, text=True)
    if check_result.returncode != 0:
        print(f"Check command failed. Cmd: {check_cmd}")
        print(f"Stderr: {check_result.stderr}")
        print(f"Stdout: {check_result.stdout}")
    assert check_result.returncode == 0, f"Should be able to write after root fix script runs. Stderr: {check_result.stderr}. Stdout: {check_result.stdout}"

View File

@@ -49,11 +49,11 @@ def test_skip_tests_env_var():
@pytest.mark.feature_complete @pytest.mark.feature_complete
def test_app_conf_override_from_graphql_port(): def test_app_conf_override_from_graphql_port():
# If GRAPHQL_PORT is set and APP_CONF_OVERRIDE is not set, the entrypoint should set # If GRAPHQL_PORT is set and APP_CONF_OVERRIDE is not set, the entrypoint should set
# APP_CONF_OVERRIDE to a JSON string containing the GRAPHQL_PORT value and print a message # APP_CONF_OVERRIDE to a JSON string containing the GRAPHQL_PORT value.
# about it.
# The script should exit successfully. # The script should exit successfully.
result = _run_entrypoint(env={"GRAPHQL_PORT": "20212", "SKIP_TESTS": "1"}, check_only=True) result = _run_entrypoint(env={"GRAPHQL_PORT": "20212", "SKIP_TESTS": "1"}, check_only=True)
assert 'Setting APP_CONF_OVERRIDE to {"GRAPHQL_PORT":"20212"}' in result.stdout assert 'Setting APP_CONF_OVERRIDE to' not in result.stdout
assert 'APP_CONF_OVERRIDE detected' in result.stderr
assert result.returncode == 0 assert result.returncode == 0

View File

@@ -5,6 +5,14 @@ Pytest-based Mount Diagnostic Tests for NetAlertX
Tests all possible mount configurations for each path to validate the diagnostic tool. Tests all possible mount configurations for each path to validate the diagnostic tool.
Uses pytest framework for proper test discovery and execution. Uses pytest framework for proper test discovery and execution.
TODO: Future Robustness & Compatibility Tests
1. Symlink Attacks: Verify behavior when a writable directory is mounted via a symlink.
Hypothesis: The tool might misidentify the mount status or path.
2. OverlayFS/Copy-up Scenarios: Investigate behavior on filesystems like Synology's OverlayFS.
Hypothesis: Files might appear writable but fail on specific operations (locking, mmap).
3. Text-based Output: Refactor output to support text-based status (e.g., [OK], [FAIL])
instead of emojis for better compatibility with terminals that don't support unicode.
All tests use the mounts table. For reference, the mounts table looks like this: All tests use the mounts table. For reference, the mounts table looks like this:
Path | Writeable | Mount | RAMDisk | Performance | DataLoss Path | Writeable | Mount | RAMDisk | Performance | DataLoss
@@ -604,3 +612,4 @@ def test_table_parsing():
performance=True, performance=True,
dataloss=True, dataloss=True,
) )