Address coderabbit-discovered issues

This commit is contained in:
Adam Outler
2025-11-01 18:18:32 +00:00
parent 79887f0bd7
commit 70373b1fbd
8 changed files with 136 additions and 198 deletions

View File

@@ -33,6 +33,4 @@ If you don't need a custom port, simply omit the PORT environment variable and t
Docker Compose setup can be complex. We recommend starting with the default docker-compose.yml as a base and modifying it incrementally.
For detailed Docker Compose configuration guidance, see: [DOCKER_COMPOSE.md](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_COMPOSE.md)
For detailed Docker Compose configuration guidance, see: [DOCKER_COMPOSE.md](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_COMPOSE.md)

View File

@@ -51,13 +51,13 @@ EOF
>&2 printf "%s" "${RESET}"
# Set ownership to netalertx user for all read-write paths
chown -R netalertx ${READ_WRITE_PATHS}
chown -R netalertx ${READ_WRITE_PATHS} 2>/dev/null || true
# Set directory and file permissions for all read-write paths
find ${READ_WRITE_PATHS} -type d -exec chmod u+rwx {} + 2>/dev/null
find ${READ_WRITE_PATHS} -type f -exec chmod u+rw {} + 2>/dev/null
find ${READ_WRITE_PATHS} -type d -exec chmod u+rwx {}
find ${READ_WRITE_PATHS} -type f -exec chmod u+rw {}
echo Permissions fixed for read-write paths. Please restart the container as user 20211.
sleep infinity & wait $!; exit 211
sleep infinity & wait $!
fi

View File

@@ -46,8 +46,8 @@ fi
YELLOW=$(printf '\033[1;33m')
RESET=$(printf '\033[0m')
printf "%s" "${YELLOW}"
cat <<EOF
>&2 printf "%s" "${YELLOW}"
>&2 cat <<EOF
══════════════════════════════════════════════════════════════════════════════
⚠️ ATTENTION: NetAlertX is not running with --network=host.
@@ -62,5 +62,5 @@ cat <<EOF
https://github.com/jokob-sk/NetAlertX/blob/main/docs/docker-troubleshooting/network-mode.md
══════════════════════════════════════════════════════════════════════════════
EOF
printf "%s" "${RESET}"
>&2 printf "%s" "${RESET}"
exit 0

View File

@@ -1,12 +1,17 @@
#!/bin/bash
# Bash is used in this check for simplicity of math operations.
# excessive-capabilities.sh checks that no more than the necessary
# NET_ADMIN NET_BIND_SERVICE and NET_RAW capabilities are present.
# Get bounding capabilities from /proc/self/status (what can be acquired)
BND_HEX=$(grep '^CapBnd:' /proc/self/status | awk '{print $2}' | tr -d '\t')
BND_HEX=$(grep '^CapBnd:' /proc/self/status 2>/dev/null | awk '{print $2}' | tr -d '\t')
if [ -z "$BND_HEX" ]; then
exit 0
fi
# Convert hex to decimal
BND_DEC=$(( 16#$BND_HEX ))
BND_DEC=$(( 16#$BND_HEX )) || exit 0
# Allowed capabilities: NET_BIND_SERVICE (10), NET_ADMIN (12), NET_RAW (13)
ALLOWED_DEC=$(( ( 1 << 10 ) | ( 1 << 12 ) | ( 1 << 13 ) ))

View File

@@ -36,17 +36,21 @@ extract_comments() {
# Function to run docker-compose test
run_test() {
local file="$1"
local dirname=$(dirname "$file")
local basename=$(basename "$file")
local dirname
dirname=$(dirname "$file")
local basename
basename=$(basename "$file")
local basename
basename=$(basename "$file")
echo "Testing: $basename" >> "$LOG_FILE"
echo "Directory: $dirname" >> "$LOG_FILE"
echo "" >> "$LOG_FILE"
cd "$dirname" || exit 1
# Change to the directory containing the docker-compose file
cd "$dirname"
# Run docker-compose up with timeout
timeout 10s docker-compose -f "$basename" up >> "$LOG_FILE" 2>&1
echo "Running docker-compose up..." >> "$LOG_FILE"
timeout 10s docker-compose -f "$basename" up 2>&1 >> "$LOG_FILE"
@@ -58,12 +62,12 @@ run_test() {
echo "==========================================" >> "$LOG_FILE"
echo "" >> "$LOG_FILE"
}
# Find all docker-compose files
find "$SCRIPT_DIR" -name "docker-compose*.yml" -type f | sort | while read -r file; do
find "$SCRIPT_DIR" -name "docker-compose*.yml" -type f -print0 | sort -z | while IFS= read -r -d '' file; do
extract_comments "$file"
run_test "$file"
done
run_test "$file"
done
echo "All tests completed - $(date)" >> "$LOG_FILE"
echo "Results saved to: $LOG_FILE"

View File

@@ -8,65 +8,14 @@ Ensure netalertx-test image is built prior to starting these tests.
import os
import pathlib
import subprocess
import time
import pytest
IMAGE = os.environ.get("NETALERTX_TEST_IMAGE", "netalertx-test")
import yaml
# Path to test configurations
CONFIG_DIR = pathlib.Path(__file__).parent / "configurations"
pytestmark = [pytest.mark.docker, pytest.mark.compose]
def _run_docker_compose(compose_file: pathlib.Path, project_name: str, timeout: int = 30, env_vars: dict = None) -> subprocess.CompletedProcess:
    """Run ``docker compose up`` for a compose file and capture its output.

    Args:
        compose_file: Path to the docker-compose YAML file.
        project_name: Compose project name (isolates containers and volumes).
        timeout: Seconds passed to compose ``--timeout``; the subprocess
            itself is allowed ``timeout + 10`` seconds before being killed.
        env_vars: Optional extra environment variables merged over os.environ.

    Returns:
        The CompletedProcess, with an extra ``output`` attribute holding
        stdout and stderr concatenated for convenient substring assertions.

    Raises:
        subprocess.TimeoutExpired: If the compose run exceeds the deadline.
            Cleanup (``down -v``) is still performed before propagating.
    """
    cmd = [
        "docker", "compose",
        "-f", str(compose_file),
        "-p", project_name,
        "up",
        "--abort-on-container-exit",
        "--timeout", str(timeout),
    ]
    env = os.environ.copy()
    if env_vars:
        env.update(env_vars)
    try:
        result = subprocess.run(
            cmd,
            cwd=compose_file.parent,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            timeout=timeout + 10,
            env=env,
            check=False,
        )
    finally:
        # Always tear down containers and volumes, whether the run
        # completed or timed out (previously duplicated in two places).
        subprocess.run(
            ["docker", "compose", "-f", str(compose_file), "-p", project_name, "down", "-v"],
            cwd=compose_file.parent,
            check=False,
        )
    # Combine stdout and stderr so callers can assert on either stream.
    result.output = result.stdout + result.stderr
    return result
import os
import pathlib
import subprocess
import tempfile
import time
import pytest
import yaml
IMAGE = os.environ.get("NETALERTX_TEST_IMAGE", "netalertx-test")
# Docker Compose configurations for different test scenarios
@@ -215,46 +164,6 @@ def test_missing_capabilities_compose() -> None:
assert result.returncode != 0
def test_host_network_compose(tmp_path: pathlib.Path) -> None:
"""Test host networking mode using docker compose.
Simulates running with network_mode: host.
Expected: Container starts successfully with host networking.
"""
base_dir = tmp_path / "host_network"
base_dir.mkdir()
# Create test data directories
_create_test_data_dirs(base_dir)
# Create compose file
compose_config = COMPOSE_CONFIGS["host_network"].copy()
compose_file = base_dir / "docker-compose.yml"
with open(compose_file, 'w') as f:
yaml.dump(compose_config, f)
# Run docker compose
result = _run_docker_compose(compose_file, "netalertx-host-net")
# Check that it doesn't fail with network-related errors
assert "not running with --network=host" not in result.output
# Container should start (may fail later for other reasons, but network should be OK)
def test_host_network_compose() -> None:
    """Test host networking mode using docker compose.

    Uses docker-compose.readonly.yml with host networking.
    Expected: Container starts successfully with host networking.
    """
    compose_file = CONFIG_DIR / "docker-compose.readonly.yml"
    result = _run_docker_compose(compose_file, "netalertx-host-net")
    # Check that it doesn't fail with network-related errors.
    assert "not running with --network=host" not in result.output
    # Container should start (may fail later for other reasons, but the
    # network mode itself must not be reported as the problem).
def test_custom_port_with_unwritable_nginx_config_compose() -> None:
"""Test custom port configuration with unwritable nginx config using docker compose.

View File

@@ -227,6 +227,83 @@ def create_test_scenarios() -> List[TestScenario]:
return scenarios
# Expected diagnostic-table row per scenario name: (path, assert_table_row kwargs).
# Data-driven replacement for an 18-branch if/elif chain; behavior is identical.
_TABLE_EXPECTATIONS = {
    # db scenarios: ramdisk detected / not mounted imply data-loss risk.
    'db_ramdisk': ('/app/db', {'mount': True, 'ramdisk': False, 'dataloss': False}),
    'db_no-mount': ('/app/db', {'mount': False, 'dataloss': False}),
    'db_unwritable': ('/app/db', {'writeable': False}),
    # config scenarios mirror the db expectations.
    'config_ramdisk': ('/app/config', {'mount': True, 'ramdisk': False, 'dataloss': False}),
    'config_no-mount': ('/app/config', {'mount': False, 'dataloss': False}),
    'config_unwritable': ('/app/config', {'writeable': False}),
    # api/log/run: mount state plus performance flag (not a ramdisk).
    'api_mounted': ('/app/api', {'mount': True, 'performance': False}),
    'api_no-mount': ('/app/api', {'mount': False, 'performance': False}),
    'api_unwritable': ('/app/api', {'writeable': False}),
    'log_mounted': ('/app/log', {'mount': True, 'performance': False}),
    'log_no-mount': ('/app/log', {'mount': False, 'performance': False}),
    'log_unwritable': ('/app/log', {'writeable': False}),
    'run_mounted': ('/services/run', {'mount': True, 'performance': False}),
    'run_no-mount': ('/services/run', {'mount': False, 'performance': False}),
    'run_unwritable': ('/services/run', {'writeable': False}),
    # active nginx config: unwritable only warns (ramdisk/performance flags).
    'active_config_mounted': ('/services/config/nginx/conf.active', {'mount': True, 'performance': False}),
    'active_config_no-mount': ('/services/config/nginx/conf.active', {'mount': False, 'performance': False}),
    'active_config_unwritable': ('/services/config/nginx/conf.active', {'ramdisk': False, 'performance': False}),
}


def validate_scenario_table_output(output: str, test_scenario: TestScenario) -> None:
    """Validate the diagnostic table for scenarios that should report issues.

    Args:
        output: Combined container/diagnostic output containing the table.
        test_scenario: Scenario under test; only scenarios with
            ``expected_issues`` (and a known name) are validated.

    Fails the test (via pytest.fail) if the expected table row is absent.
    """
    if not test_scenario.expected_issues:
        return
    expectation = _TABLE_EXPECTATIONS.get(test_scenario.name)
    if expectation is None:
        # Unknown scenario name: original chain also validated nothing here.
        return
    path, row_kwargs = expectation
    try:
        assert_table_row(output, path, **row_kwargs)
    except AssertionError as e:
        pytest.fail(f"Table validation failed for {test_scenario.name}: {e}")
@pytest.mark.parametrize("test_scenario", create_test_scenarios(), ids=lambda s: s.name)
@pytest.mark.docker
def test_mount_diagnostic(netalertx_test_image, test_scenario):
@@ -291,98 +368,43 @@ def test_mount_diagnostic(netalertx_test_image, test_scenario):
logs = result_logs.stdout + result_logs.stderr
# For tests that expect issues, validate the table content
if test_scenario.expected_issues:
# Parse and validate the table for the specific path being tested
try:
if test_scenario.name.startswith('db_'):
if test_scenario.name == 'db_ramdisk':
# db on ramdisk: mount=True, ramdisk=False (detected), dataloss=False (risk)
assert_table_row(logs, '/app/db', mount=True, ramdisk=False, dataloss=False)
elif test_scenario.name == 'db_no-mount':
# db not mounted: mount=False, dataloss=False (risk)
assert_table_row(logs, '/app/db', mount=False, dataloss=False)
elif test_scenario.name == 'db_unwritable':
# db read-only: writeable=False
assert_table_row(logs, '/app/db', writeable=False)
elif test_scenario.name.startswith('config_'):
if test_scenario.name == 'config_ramdisk':
# config on ramdisk: mount=True, ramdisk=False (detected), dataloss=False (risk)
assert_table_row(logs, '/app/config', mount=True, ramdisk=False, dataloss=False)
elif test_scenario.name == 'config_no-mount':
# config not mounted: mount=False, dataloss=False (risk)
assert_table_row(logs, '/app/config', mount=False, dataloss=False)
elif test_scenario.name == 'config_unwritable':
# config read-only: writeable=False
assert_table_row(logs, '/app/config', writeable=False)
elif test_scenario.name.startswith('api_'):
if test_scenario.name == 'api_mounted':
# api with volume mount: mount=True, performance=False (not ramdisk)
assert_table_row(logs, '/app/api', mount=True, performance=False)
elif test_scenario.name == 'api_no-mount':
# api not mounted: mount=False, performance=False (not ramdisk)
assert_table_row(logs, '/app/api', mount=False, performance=False)
elif test_scenario.name == 'api_unwritable':
# api read-only: writeable=False
assert_table_row(logs, '/app/api', writeable=False)
elif test_scenario.name.startswith('log_'):
if test_scenario.name == 'log_mounted':
# log with volume mount: mount=True, performance=False (not ramdisk)
assert_table_row(logs, '/app/log', mount=True, performance=False)
elif test_scenario.name == 'log_no-mount':
# log not mounted: mount=False, performance=False (not ramdisk)
assert_table_row(logs, '/app/log', mount=False, performance=False)
elif test_scenario.name == 'log_unwritable':
# log read-only: writeable=False
assert_table_row(logs, '/app/log', writeable=False)
elif test_scenario.name.startswith('run_'):
if test_scenario.name == 'run_mounted':
# run with volume mount: mount=True, performance=False (not ramdisk)
assert_table_row(logs, '/services/run', mount=True, performance=False)
elif test_scenario.name == 'run_no-mount':
# run not mounted: mount=False, performance=False (not ramdisk)
assert_table_row(logs, '/services/run', mount=False, performance=False)
elif test_scenario.name == 'run_unwritable':
# run read-only: writeable=False
assert_table_row(logs, '/services/run', writeable=False)
elif test_scenario.name.startswith('active_config_'):
if test_scenario.name == 'active_config_mounted':
# active_config with volume mount: mount=True, performance=False (not ramdisk)
assert_table_row(logs, '/services/config/nginx/conf.active', mount=True, performance=False)
elif test_scenario.name == 'active_config_no-mount':
# active_config not mounted: mount=False, performance=False (not ramdisk)
assert_table_row(logs, '/services/config/nginx/conf.active', mount=False, performance=False)
elif test_scenario.name == 'active_config_unwritable':
# active_config unwritable: RAM disk issues detected
assert_table_row(logs, '/services/config/nginx/conf.active', ramdisk=False, performance=False)
except AssertionError as e:
pytest.fail(f"Table validation failed for {test_scenario.name}: {e}")
validate_scenario_table_output(logs, test_scenario)
return # Test passed - container correctly detected issues and exited
# Container is still running - run diagnostic tool
cmd_exec = [
"docker", "exec", "--user", "netalertx", container_name,
"python3", "/entrypoint.d/10-mounts.py"
]
result_exec = subprocess.run(cmd_exec, capture_output=True, text=True, timeout=30)
# Diagnostic tool returns 1 if there are write errors, 0 otherwise
expected_tool_exit = 1 if "unwritable" in test_scenario.name else 0
assert result_exec.returncode == expected_tool_exit, f"Diagnostic tool failed: {result_exec.stderr}"
# Container is still running - run diagnostic tool
cmd_exec = [
"docker", "exec", "--user", "netalertx", container_name,
"python3", "/entrypoint.d/10-mounts.py"
]
result_exec = subprocess.run(cmd_exec, capture_output=True, text=True, timeout=30)
diagnostic_output = result_exec.stdout + result_exec.stderr
# For good configurations (no issues expected), verify table output but no warning
if not test_scenario.expected_issues:
# Should have table output but no warning message
assert "Path" in result_exec.stdout, f"Good config {test_scenario.name} should show table, got: {result_exec.stdout}"
assert "⚠️" not in result_exec.stderr, f"Good config {test_scenario.name} should not show warning, got stderr: {result_exec.stderr}"
return # Test passed - good configuration correctly produces no warnings
# The diagnostic tool returns 1 for unwritable paths except active_config, which only warns
if test_scenario.name.startswith('active_config_') and 'unwritable' in test_scenario.name:
expected_tool_exit = 0
elif 'unwritable' in test_scenario.name:
expected_tool_exit = 1
else:
expected_tool_exit = 0
assert result_exec.returncode == expected_tool_exit, (
f"Diagnostic tool failed: {result_exec.stderr}"
)
if test_scenario.expected_issues:
validate_scenario_table_output(diagnostic_output, test_scenario)
assert "⚠️" in diagnostic_output, (
f"Issue scenario {test_scenario.name} should include a warning symbol, got: {result_exec.stderr}"
)
else:
# Should have table output but no warning message
assert "Path" in result_exec.stdout, f"Good config {test_scenario.name} should show table, got: {result_exec.stdout}"
assert "⚠️" not in diagnostic_output, (
f"Good config {test_scenario.name} should not show warning, got stderr: {result_exec.stderr}"
)
return # Test passed - diagnostic output validated
finally:
# Stop container

View File

@@ -119,7 +119,7 @@ def _run_container(
cmd.extend(["-v", mount])
# Copy the script content and run it
script_path = "/workspaces/NetAlertX/install/production-filesystem/entrypoint.d/99-ports-available.sh"
script_path = "install/production-filesystem/entrypoint.d/99-ports-available.sh"
with open(script_path, 'r') as f:
script_content = f.read()