Compare commits

..

21 Commits

Author SHA1 Message Date
jokob-sk
8acb0a876a DOCS: cleanup
Signed-off-by: jokob-sk <jokob.sk@gmail.com>
2025-11-26 10:20:19 +11:00
jokob-sk
d1be41eca4 DOCS: cleanup
Signed-off-by: jokob-sk <jokob.sk@gmail.com>
2025-11-26 10:02:15 +11:00
jokob-sk
00e953a7ce DOCS: cleanup
Signed-off-by: jokob-sk <jokob.sk@gmail.com>
2025-11-26 09:52:12 +11:00
jokob-sk
b9ef9ad041 DOCS: tmpfs cleanup
Signed-off-by: jokob-sk <jokob.sk@gmail.com>
2025-11-26 09:25:37 +11:00
jokob-sk
e90fbf17d3 DOCS: Network parent
Signed-off-by: jokob-sk <jokob.sk@gmail.com>
2025-11-25 08:16:39 +11:00
jokob-sk
139447b253 BE: mylog() better code readability
Signed-off-by: jokob-sk <jokob.sk@gmail.com>
2025-11-25 07:54:17 +11:00
Jokob @NetAlertX
fa9fc2c8e3 Merge pull request #1304 from adamoutler/hadolint-fixes
Fix Hadolint Linting Issues Across Dockerfiles
2025-11-24 17:28:50 +11:00
Adam Outler
30071c6848 Merge branch 'main' into hadolint-fixes 2025-11-23 19:25:45 -05:00
Adam Outler
b0bd3c8191 fix hadolint errors 2025-11-24 00:20:42 +00:00
Jokob @NetAlertX
c753da9e15 Merge pull request #1303 from adamoutler/shell-check-fixes
ShellCheck Lint: Fix All Reported Issues in Service Scripts
2025-11-24 10:24:54 +11:00
Adam Outler
4770ee5942 undo previous change for unwritable 2025-11-23 23:19:12 +00:00
Adam Outler
5cd53bc8f9 Storage permission fix 2025-11-23 22:58:45 +00:00
Adam Outler
5e47ccc9ef Shell Check fixes 2025-11-23 22:13:01 +00:00
Jokob @NetAlertX
f5d7c0f9a0 Merge pull request #1302 from adamoutler/supercronic
Replace crond with Supercronic, improve cron logging & backend restart behavior
2025-11-24 07:29:50 +11:00
Adam Outler
35b7e80be4 Remove additional "tests" from instructions. 2025-11-23 16:43:28 +00:00
Adam Outler
07eeac0a0b remove redefined variable 2025-11-23 16:38:03 +00:00
Adam Outler
240d86bf1e docker tests 2025-11-23 16:31:04 +00:00
Adam Outler
274fd50a92 Adjust healthchecks and fix docker test scripts 2025-11-23 15:56:42 +00:00
Adam Outler
bbf49c3686 Don't kill container on backend restart commanded 2025-11-23 01:27:51 +00:00
Adam Outler
e3458630ba Convert from crond to supercronic 2025-11-23 01:14:21 +00:00
Jokob @NetAlertX
2f6f1e49e9 Merge pull request #1300 from jokob-sk/linting-fixes
BE: linting fixes
2025-11-22 21:55:54 +11:00
92 changed files with 1927 additions and 1613 deletions

View File

@@ -35,7 +35,7 @@ RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev o
# Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy
# into hardened stage without worrying about permissions and keeps image size small. Keeping the commands
# together makes for a slightly smaller image size.
RUN pip install -r /tmp/requirements.txt && \
RUN pip install --no-cache-dir -r /tmp/requirements.txt && \
chmod -R u-rwx,g-rwx /opt
# second stage is the main runtime stage with just the minimum required to run the application
@@ -71,7 +71,7 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
ENV LOG_CROND=${NETALERTX_LOG}/crond.log
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log
# System Services configuration files
@@ -81,11 +81,11 @@ ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
ENV SYSTEM_NGINX_CONFIG_TEMPLATE=${SYSTEM_NGINX_CONFIG}/netalertx.conf.template
ENV SYSTEM_SERVICES_CONFIG_CRON=${SYSTEM_SERVICES_CONFIG}/cron
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
ENV SYSTEM_SERVICES_ACTIVE_CONFIG_FILE=${SYSTEM_SERVICES_ACTIVE_CONFIG}/nginx.conf
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
ENV SYSTEM_SERVICES_CROND=${SYSTEM_SERVICES_CONFIG}/crond
ENV SYSTEM_SERVICES_RUN=/tmp/run
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
@@ -119,7 +119,7 @@ ENV LANG=C.UTF-8
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
nginx shadow && \
nginx supercronic shadow && \
rm -Rf /var/cache/apk/* && \
rm -Rf /etc/nginx && \
addgroup -g 20211 ${NETALERTX_GROUP} && \
@@ -150,26 +150,26 @@ COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV}
# This is done after the copy of the venv to ensure the venv is in place
# although it may be quicker to do it before the copy, it keeps the image
# layers smaller to do it after.
RUN if [ -f .VERSION ]; then \
cp .VERSION ${NETALERTX_APP}/.VERSION; \
RUN if [ -f '.VERSION' ]; then \
cp '.VERSION' "${NETALERTX_APP}/.VERSION"; \
else \
echo "DEVELOPMENT 00000000" > ${NETALERTX_APP}/.VERSION; \
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/.VERSION"; \
fi && \
chown 20212:20212 ${NETALERTX_APP}/.VERSION && \
apk add libcap && \
chown 20212:20212 "${NETALERTX_APP}/.VERSION" && \
apk add --no-cache libcap && \
setcap cap_net_raw+ep /bin/busybox && \
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && \
setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
setcap cap_net_raw,cap_net_admin+eip "$(readlink -f ${VIRTUAL_ENV_BIN}/python)" && \
/bin/sh /build/init-nginx.sh && \
/bin/sh /build/init-php-fpm.sh && \
/bin/sh /build/init-crond.sh && \
/bin/sh /build/init-cron.sh && \
/bin/sh /build/init-backend.sh && \
rm -rf /build && \
apk del libcap && \
date +%s > ${NETALERTX_FRONT}/buildtimestamp.txt
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
ENTRYPOINT ["/bin/sh","/entrypoint.sh"]
@@ -186,13 +186,15 @@ ENV UMASK=0077
# AI may claim this is stupid, but it's actually least possible permissions as
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
RUN addgroup -g 20212 ${READ_ONLY_GROUP} && \
adduser -u 20212 -G ${READ_ONLY_GROUP} -D -h /app ${READ_ONLY_USER}
RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \
adduser -u 20212 -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"
# reduce permissions to minimum necessary for all NetAlertX files and folders
# Permissions 005 and 004 are not typos, they enable read-only. Everyone can
# read the read-only files, and nobody can write to them, even the readonly user.
# hadolint ignore=SC2114
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
chmod -R 004 ${READ_ONLY_FOLDERS} && \
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
@@ -211,7 +213,7 @@ RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
/srv /media && \
sed -i "/^\(${READ_ONLY_USER}\|${NETALERTX_USER}\):/!d" /etc/passwd && \
sed -i "/^\(${READ_ONLY_GROUP}\|${NETALERTX_GROUP}\):/!d" /etc/group && \
echo -ne '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
USER netalertx
@@ -230,6 +232,7 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
# Open and wide to avoid permission issues during development allowing max
# flexibility.
# hadolint ignore=DL3006
FROM runner AS netalertx-devcontainer
ENV INSTALL_DIR=/app
@@ -243,9 +246,14 @@ ENV PYDEVD_DISABLE_FILE_VALIDATION=1
COPY .devcontainer/resources/devcontainer-overlay/ /
USER root
# Install common tools, create user, and set up sudo
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
docker-cli-compose
docker-cli-compose shellcheck
# Install hadolint (Dockerfile linter)
RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
chmod +x /usr/local/bin/hadolint
RUN install -d -o netalertx -g netalertx -m 755 /services/php/modules && \
cp -a /usr/lib/php83/modules/. /services/php/modules/ && \
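A note on the headline change in this Dockerfile: busybox crond is swapped for supercronic (`nginx supercronic shadow` in the apk list, `init-cron.sh` replacing `init-crond.sh`). A minimal sketch of what the swap means at runtime, with an assumed crontab path rather than whatever the image's init scripts actually use:

```bash
# Hypothetical crontab location; the image's init-cron.sh decides the real one.
CRONTAB=/services/config/cron/crontab

# Previously: busybox crond, which reads /etc/crontabs and logs to its own
# file, e.g. `crond -f -l 8 -L /tmp/log/crond.log`.
# Now: supercronic takes an explicit crontab file, stays in the foreground,
# and streams each job's output to stdout/stderr -- container friendly, and
# consistent with the LOG_CROND -> LOG_CRON rename above:
exec supercronic "$CRONTAB"
```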

View File

@@ -75,7 +75,9 @@
"alexcvzz.vscode-sqlite",
"mkhl.shfmt",
"charliermarsh.ruff",
"ms-python.flake8"
"ms-python.flake8",
"exiasr.hadolint",
"timonwong.shellcheck"
],
"settings": {
"terminal.integrated.cwd": "${containerWorkspaceFolder}",

View File

@@ -7,6 +7,7 @@
# Open and wide to avoid permission issues during development allowing max
# flexibility.
# hadolint ignore=DL3006
FROM runner AS netalertx-devcontainer
ENV INSTALL_DIR=/app
@@ -20,9 +21,14 @@ ENV PYDEVD_DISABLE_FILE_VALIDATION=1
COPY .devcontainer/resources/devcontainer-overlay/ /
USER root
# Install common tools, create user, and set up sudo
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
docker-cli-compose
docker-cli-compose shellcheck
# Install hadolint (Dockerfile linter)
RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
chmod +x /usr/local/bin/hadolint
RUN install -d -o netalertx -g netalertx -m 755 /services/php/modules && \
cp -a /usr/lib/php83/modules/. /services/php/modules/ && \

View File

@@ -7,27 +7,28 @@
# the final .devcontainer/Dockerfile used by the devcontainer.
echo "Generating .devcontainer/Dockerfile"
SCRIPT_DIR="$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)"
SCRIPT_PATH=$(set -- "$0"; dirname -- "$1")
SCRIPT_DIR=$(cd "$SCRIPT_PATH" && pwd -P)
DEVCONTAINER_DIR="${SCRIPT_DIR%/scripts}"
ROOT_DIR="${DEVCONTAINER_DIR%/.devcontainer}"
OUT_FILE="${DEVCONTAINER_DIR}/Dockerfile"
echo "Adding base Dockerfile from $ROOT_DIR..."
echo "Adding base Dockerfile from $ROOT_DIR and merging to devcontainer-Dockerfile"
{
echo "# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh" > "$OUT_FILE"
echo "" >> "$OUT_FILE"
echo "# ---/Dockerfile---" >> "$OUT_FILE"
echo "# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh"
echo ""
echo "# ---/Dockerfile---"
cat "${ROOT_DIR}/Dockerfile" >> "$OUT_FILE"
cat "${ROOT_DIR}/Dockerfile"
echo "" >> "$OUT_FILE"
echo "# ---/resources/devcontainer-Dockerfile---" >> "$OUT_FILE"
echo "" >> "$OUT_FILE"
echo ""
echo "# ---/resources/devcontainer-Dockerfile---"
echo ""
cat "${DEVCONTAINER_DIR}/resources/devcontainer-Dockerfile"
} > "$OUT_FILE"
echo "Adding devcontainer-Dockerfile from $DEVCONTAINER_DIR/resources..."
cat "${DEVCONTAINER_DIR}/resources/devcontainer-Dockerfile" >> "$OUT_FILE"
echo "Generated $OUT_FILE using root dir $ROOT_DIR" >&2
echo "Generated $OUT_FILE using root dir $ROOT_DIR"
echo "Done."
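The rewrite above trades one `>> "$OUT_FILE"` append per line for a single command group redirected once, so the output file is opened and truncated exactly once. The pattern in isolation (file names are illustrative):

```bash
# Build an output file from several pieces with one redirect instead of
# re-opening the file for every echo/cat.
OUT_FILE=combined.txt

{
  echo "# header"
  cat part-one.txt
  echo ""
  cat part-two.txt
} > "$OUT_FILE"
```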

View File

@@ -16,7 +16,6 @@
SOURCE_DIR=${SOURCE_DIR:-/workspaces/NetAlertX}
PY_SITE_PACKAGES="${VIRTUAL_ENV:-/opt/venv}/lib/python3.12/site-packages"
SOURCE_SERVICES_DIR="${SOURCE_DIR}/install/production-filesystem/services"
LOG_FILES=(
LOG_APP
@@ -26,7 +25,7 @@ LOG_FILES=(
LOG_EXECUTION_QUEUE
LOG_APP_PHP_ERRORS
LOG_IP_CHANGES
LOG_CROND
LOG_CRON
LOG_REPORT_OUTPUT_TXT
LOG_REPORT_OUTPUT_HTML
LOG_REPORT_OUTPUT_JSON
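Note that `LOG_FILES` stores the *names* of `LOG_*` environment variables (set in the Dockerfile), not the paths themselves. A sketch of how such a list is typically consumed with bash indirect expansion; the script's actual loop may differ:

```bash
# Resolve each variable name in LOG_FILES to its value (a path) and act on it.
LOG_CRON=/tmp/log/cron.log         # example value, normally set via ENV

LOG_FILES=(LOG_APP LOG_CRON)
for name in "${LOG_FILES[@]}"; do
  path="${!name}"                  # bash indirect expansion: value of $name
  [ -n "$path" ] && touch "$path"  # e.g. pre-create the log file
done
```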

View File

@@ -83,3 +83,9 @@ Backend loop phases (see `server/__main__.py` and `server/plugin.py`): `once`, `
- Be sure to offer choices when appropriate.
- Always understand the intent of the user's request and undo/redo as needed.
- Above all, use the simplest possible code that meets the need so it can be easily audited and maintained.
- Always leave logging enabled. If there is a possibility it will be difficult to debug with current logging, add more logging.
- Always run the testFailure tool before executing any tests to gather current failure information and avoid redundant runs.
- Always prioritize using the appropriate tools in the environment first. As an example if a test is failing use `testFailure` then `runTests`. Never `runTests` first.
- Docker tests take an extremely long time to run. Avoid changes to Docker or tests until you've examined the existing testFailures and runTests results.
- Environment tools are designed specifically for your use in this project and running them in this order will give you the best results.

View File

@@ -84,7 +84,7 @@ jobs:
continue-on-error: true
run: |
echo "🔍 Linting Dockerfiles..."
/tmp/hadolint Dockerfile* || true
/tmp/hadolint --config .hadolint.yaml Dockerfile* || true
docker-tests:
runs-on: ubuntu-latest
@@ -95,5 +95,5 @@ jobs:
- name: Run Docker-based tests
run: |
echo "🐳 Running Docker-based tests..."
chmod +x ./run_docker_tests.sh
./run_docker_tests.sh
chmod +x ./test/docker_tests/run_docker_tests.sh
./test/docker_tests/run_docker_tests.sh

.hadolint.yaml (new file, +2)
View File

@@ -0,0 +1,2 @@
ignored:
- DL3018
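For context, DL3018 is hadolint's "pin versions in `apk add`" rule; this new config opts out of it repo-wide instead of sprinkling inline ignores. The workflow change above then points hadolint at the config explicitly, and the same invocation works locally once hadolint is on the PATH (as the devcontainer now arranges):

```bash
# CI (hadolint binary downloaded to /tmp by the workflow):
/tmp/hadolint --config .hadolint.yaml Dockerfile*
# Locally (the devcontainer installs it to /usr/local/bin):
hadolint --config .hadolint.yaml Dockerfile
```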

View File

@@ -32,7 +32,7 @@ RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev o
# Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy
# into hardened stage without worrying about permissions and keeps image size small. Keeping the commands
# together makes for a slightly smaller image size.
RUN pip install -r /tmp/requirements.txt && \
RUN pip install --no-cache-dir -r /tmp/requirements.txt && \
chmod -R u-rwx,g-rwx /opt
# second stage is the main runtime stage with just the minimum required to run the application
@@ -68,7 +68,7 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
ENV LOG_CROND=${NETALERTX_LOG}/crond.log
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log
# System Services configuration files
@@ -78,11 +78,11 @@ ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
ENV SYSTEM_NGINX_CONFIG_TEMPLATE=${SYSTEM_NGINX_CONFIG}/netalertx.conf.template
ENV SYSTEM_SERVICES_CONFIG_CRON=${SYSTEM_SERVICES_CONFIG}/cron
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
ENV SYSTEM_SERVICES_ACTIVE_CONFIG_FILE=${SYSTEM_SERVICES_ACTIVE_CONFIG}/nginx.conf
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
ENV SYSTEM_SERVICES_CROND=${SYSTEM_SERVICES_CONFIG}/crond
ENV SYSTEM_SERVICES_RUN=/tmp/run
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
@@ -116,7 +116,7 @@ ENV LANG=C.UTF-8
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
nginx shadow && \
nginx supercronic shadow && \
rm -Rf /var/cache/apk/* && \
rm -Rf /etc/nginx && \
addgroup -g 20211 ${NETALERTX_GROUP} && \
@@ -147,26 +147,26 @@ COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV}
# This is done after the copy of the venv to ensure the venv is in place
# although it may be quicker to do it before the copy, it keeps the image
# layers smaller to do it after.
RUN if [ -f .VERSION ]; then \
cp .VERSION ${NETALERTX_APP}/.VERSION; \
RUN if [ -f '.VERSION' ]; then \
cp '.VERSION' "${NETALERTX_APP}/.VERSION"; \
else \
echo "DEVELOPMENT 00000000" > ${NETALERTX_APP}/.VERSION; \
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/.VERSION"; \
fi && \
chown 20212:20212 ${NETALERTX_APP}/.VERSION && \
apk add libcap && \
chown 20212:20212 "${NETALERTX_APP}/.VERSION" && \
apk add --no-cache libcap && \
setcap cap_net_raw+ep /bin/busybox && \
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && \
setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
setcap cap_net_raw,cap_net_admin+eip "$(readlink -f ${VIRTUAL_ENV_BIN}/python)" && \
/bin/sh /build/init-nginx.sh && \
/bin/sh /build/init-php-fpm.sh && \
/bin/sh /build/init-crond.sh && \
/bin/sh /build/init-cron.sh && \
/bin/sh /build/init-backend.sh && \
rm -rf /build && \
apk del libcap && \
date +%s > ${NETALERTX_FRONT}/buildtimestamp.txt
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
ENTRYPOINT ["/bin/sh","/entrypoint.sh"]
@@ -183,13 +183,15 @@ ENV UMASK=0077
# AI may claim this is stupid, but it's actually least possible permissions as
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
RUN addgroup -g 20212 ${READ_ONLY_GROUP} && \
adduser -u 20212 -G ${READ_ONLY_GROUP} -D -h /app ${READ_ONLY_USER}
RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \
adduser -u 20212 -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"
# reduce permissions to minimum necessary for all NetAlertX files and folders
# Permissions 005 and 004 are not typos, they enable read-only. Everyone can
# read the read-only files, and nobody can write to them, even the readonly user.
# hadolint ignore=SC2114
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
chmod -R 004 ${READ_ONLY_FOLDERS} && \
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
@@ -208,7 +210,7 @@ RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
/srv /media && \
sed -i "/^\(${READ_ONLY_USER}\|${NETALERTX_USER}\):/!d" /etc/passwd && \
sed -i "/^\(${READ_ONLY_GROUP}\|${NETALERTX_GROUP}\):/!d" /etc/group && \
echo -ne '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
USER netalertx

View File

@@ -72,7 +72,7 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
ENV LOG_CROND=${NETALERTX_LOG}/crond.log
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log
# System Services configuration files
@@ -132,25 +132,29 @@ COPY --chmod=775 --chown=${USER_ID}:${USER_GID} . ${INSTALL_DIR}/
# ❗ IMPORTANT - if you modify this file modify the /install/install_dependecies.debian.sh file as well ❗
RUN apt update && apt-get install -y \
# hadolint ignore=DL3008,DL3027
RUN apt-get update && apt-get install -y --no-install-recommends \
tini snmp ca-certificates curl libwww-perl arp-scan sudo gettext-base \
nginx-light php php-cgi php-fpm php-sqlite3 php-curl sqlite3 dnsutils net-tools \
python3 python3-dev iproute2 nmap python3-pip zip git systemctl usbutils traceroute nbtscan openrc \
busybox nginx nginx-core mtr python3-venv
busybox nginx nginx-core mtr python3-venv && \
rm -rf /var/lib/apt/lists/*
# While php8.3 is in debian bookworm repos, php-fpm is not included so we need to add sury.org repo
# (Ondřej Surý maintains php packages for debian. This is temp until debian includes php-fpm in their
# repos. Likely it will be in Debian Trixie.). This keeps the image up-to-date with the alpine version.
# hadolint ignore=DL3008
RUN apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
lsb-release \
wget && \
wget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg && \
wget -q -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg && \
echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list && \
apt-get update && \
apt-get install -y php8.3-fpm php8.3-cli php8.3-sqlite3 php8.3-common php8.3-curl php8.3-cgi && \
ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 # make it compatible with alpine version
apt-get install -y --no-install-recommends php8.3-fpm php8.3-cli php8.3-sqlite3 php8.3-common php8.3-curl php8.3-cgi && \
ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 && \
rm -rf /var/lib/apt/lists/* # make it compatible with alpine version
# Setup virtual python environment and use pip3 to install packages
RUN python3 -m venv ${VIRTUAL_ENV} && \

View File

@@ -34,20 +34,22 @@ Get visibility of what's going on on your WIFI/LAN network and enable presence d
## 🚀 Quick Start
> [!WARNING]
> ⚠️ **Important:** The documentation has been recently updated and some instructions may have changed.
> If you are using the currently live production image, please follow the instructions on [Docker Hub](https://hub.docker.com/r/jokobsk/netalertx) for building and running the container.
> These docs reflect the latest development version and may differ from the production image.
Start NetAlertX in seconds with Docker:
```bash
docker run -d --rm --network=host \
docker run -d \
--network=host \
--restart unless-stopped \
-v /local_data_dir/config:/data/config \
-v /local_data_dir/db:/data/db \
-v /etc/localtime:/etc/localtime \
--mount type=tmpfs,target=/tmp/api \
-v /etc/localtime:/etc/localtime:ro \
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
-e PORT=20211 \
-e APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"} \
-e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
ghcr.io/jokob-sk/netalertx:latest
```
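The single quotes added around `APP_CONF_OVERRIDE` matter: unquoted, the shell's quote removal strips the inner double quotes and the container receives `{GRAPHQL_PORT:20214}`, which is not valid JSON. A quick way to confirm, using a throwaway image:

```bash
# Quoted: the JSON survives shell parsing intact.
docker run --rm -e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
  alpine sh -c 'echo "$APP_CONF_OVERRIDE"'
# prints: {"GRAPHQL_PORT":"20214"}
```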
@@ -67,9 +69,9 @@ For other install methods, check the [installation docs](#-documentation)
| [📑 Docker guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_INSTALLATION.md) | [🚀 Releases](https://github.com/jokob-sk/NetAlertX/releases) | [📚 Docs](https://jokob-sk.github.io/NetAlertX/) | [🔌 Plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md) | [🤖 Ask AI](https://gurubase.io/g/netalertx)
|----------------------| ----------------------| ----------------------| ----------------------| ----------------------|
![showcase][showcase]
<details>
<summary>📷 Click for more screenshots</summary>
@@ -87,15 +89,15 @@ For other install methods, check the [installation docs](#-documentation)
### Scanners
The app scans your network for **New devices**, **New connections** (re-connections), **Disconnections**, **"Always Connected" devices down**, Devices **IP changes** and **Internet IP address changes**. Discovery & scan methods include: **arp-scan**, **Pi-hole - DB import**, **Pi-hole - DHCP leases import**, **Generic DHCP leases import**, **UNIFI controller import**, **SNMP-enabled router import**. Check the [Plugins](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md#readme) docs for a full list of available plugins.
### Notification gateways
Send notifications to more than 80 services, including Telegram via [Apprise](https://hub.docker.com/r/caronc/apprise), or use native [Pushsafer](https://www.pushsafer.com/), [Pushover](https://www.pushover.net/), or [NTFY](https://ntfy.sh/) publishers.
### Integrations and Plugins
Feed your data and device changes into [Home Assistant](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HOME_ASSISTANT.md), read [API endpoints](https://github.com/jokob-sk/NetAlertX/blob/main/docs/API.md), or use [Webhooks](https://github.com/jokob-sk/NetAlertX/blob/main/docs/WEBHOOK_N8N.md) to set up custom automation flows. You can also
build your own scanners with the [Plugin system](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md#readme) in as little as [15 minutes](https://www.youtube.com/watch?v=cdbxlwiWhv8).
### Workflows
@@ -108,10 +110,10 @@ The [workflows module](https://github.com/jokob-sk/NetAlertX/blob/main/docs/WORK
Supported browsers: Chrome, Firefox
- [[Installation] Docker](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_INSTALLATION.md)
- [[Installation] Home Assistant](https://github.com/alexbelgium/hassio-addons/tree/master/netalertx)
- [[Installation] Bare metal](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HW_INSTALL.md)
- [[Installation] Unraid App](https://unraid.net/community/apps)
- [[Setup] Usage and Configuration](https://github.com/jokob-sk/NetAlertX/blob/main/docs/README.md)
- [[Development] API docs](https://github.com/jokob-sk/NetAlertX/blob/main/docs/API.md)
- [[Development] Custom Plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS_DEV.md)
@@ -132,19 +134,19 @@ See [Security Best Practices](https://github.com/jokob-sk/NetAlertX/security) fo
## ❓ FAQ
**Q: Why don't I see any devices?**
A: Ensure the container has proper network access (e.g., use `--network host` on Linux). Also check that your scan method is properly configured in the UI.
**Q: Does this work on Wi-Fi-only devices like Raspberry Pi?**
A: Yes, but some scanners (e.g. ARP) work best on Ethernet. For Wi-Fi, try SNMP, DHCP, or Pi-hole import.
**Q: Will this send any data to the internet?**
A: No. All scans and data remain local, unless you set up cloud-based notifications.
**Q: Can I use this without Docker?**
A: Yes! You can install it bare-metal. See the [bare metal installation guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HW_INSTALL.md).
**Q: Where is the data stored?**
A: In the `/data/config` and `/data/db` folders. Back up these folders regularly.
@@ -162,9 +164,9 @@ Check the [GitHub Issues](https://github.com/jokob-sk/NetAlertX/issues) for the
### 📧 Get notified what's new
Get notified about a new release, what new functionality you can use and about breaking changes.
![Follow and star][follow_star]
### 🔀 Other Alternative Apps
@@ -175,15 +177,15 @@ Get notified about a new release, what new functionality you can use and about b
### 💙 Donations
Thank you to everyone who appreciates this tool and donates.
<details>
<summary>Click for more ways to donate</summary>
<hr>
| [![GitHub](https://i.imgur.com/emsRCPh.png)](https://github.com/sponsors/jokob-sk) | [![Buy Me A Coffee](https://i.imgur.com/pIM6YXL.png)](https://www.buymeacoffee.com/jokobsk) | [![Patreon](https://i.imgur.com/MuYsrq1.png)](https://www.patreon.com/user?u=84385063) |
| --- | --- | --- |
- Bitcoin: `1N8tupjeCK12qRVU2XrV17WvKK7LCawyZM`
- Ethereum: `0x6e2749Cb42F4411bc98501406BdcD82244e3f9C7`
@@ -194,11 +196,11 @@ Thank you to everyone who appreciates this tool and donates.
### 🏗 Contributors
This project would be nothing without the amazing work of the community, with special thanks to:
> [pucherot/Pi.Alert](https://github.com/pucherot/Pi.Alert) (the original creator of PiAlert), [leiweibau](https://github.com/leiweibau/Pi.Alert): Dark mode (and much more), [Macleykun](https://github.com/Macleykun) (Help with Dockerfile clean-up), [vladaurosh](https://github.com/vladaurosh) for Alpine re-base help, [Final-Hawk](https://github.com/Final-Hawk) (Help with NTFY, styling and other fixes), [TeroRERO](https://github.com/terorero) (Spanish translations), [Data-Monkey](https://github.com/Data-Monkey) (Split-up of the python.py file and more), [cvc90](https://github.com/cvc90) (Spanish translation and various UI work) to name a few. Check out all the [amazing contributors](https://github.com/jokob-sk/NetAlertX/graphs/contributors).
### 🌍 Translations
Proudly using [Weblate](https://hosted.weblate.org/projects/pialert/). Help out and suggest languages in the [online portal of Weblate](https://hosted.weblate.org/projects/pialert/core/).

View File

@@ -1,14 +1,17 @@
#!/bin/bash
export INSTALL_DIR=/app
LOG_FILE="${INSTALL_DIR}/log/execution_queue.log"
# Check if there are any entries with cron_restart_backend
if grep -q "cron_restart_backend" "$LOG_FILE"; then
# Restart python application using s6
s6-svc -r /var/run/s6-rc/servicedirs/netalertx
echo 'done'
if [ -f "${LOG_EXECUTION_QUEUE}" ] && grep -q "cron_restart_backend" "${LOG_EXECUTION_QUEUE}"; then
echo "$(date): Restarting backend triggered by cron_restart_backend"
killall python3 || echo "killall python3 failed or no process found"
sleep 2
/services/start-backend.sh &
# Remove all lines containing cron_restart_backend from the log file
sed -i '/cron_restart_backend/d' "$LOG_FILE"
# Atomic replacement with temp file. grep returns 1 if no lines selected (file becomes empty), which is valid here.
grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp"
RC=$?
if [ $RC -eq 0 ] || [ $RC -eq 1 ]; then
mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
fi
fi
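The exit-code handling in the rewritten script is the standard trick for using `grep -v` as a file filter: it exits 0 when some lines remain, 1 when the result is empty (still success for this purpose), and 2 on a real error, in which case the original file should be left untouched. The same pattern, generalized as a sketch:

```bash
# Remove all lines matching PATTERN from FILE, atomically (illustrative helper).
filter_out() {
  pattern=$1 file=$2
  grep -v "$pattern" "$file" > "$file.tmp"
  rc=$?
  if [ "$rc" -le 1 ]; then   # 0 = lines left, 1 = file now empty: both fine
    mv "$file.tmp" "$file"   # atomic replace on the same filesystem
  else                       # 2 = real grep error: keep the original
    rm -f "$file.tmp"
    return "$rc"
  fi
}
```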

View File

@@ -1,66 +1,114 @@
### Loading...
# Troubleshooting Common Issues
Often if the application is misconfigured the `Loading...` dialog is continuously displayed. This is most likely caused by the backed failing to start. The **Maintenance -> Logs** section should give you more details on what's happening. If there is no exception, check the Portainer log, or start the container in the foreground (without the `-d` parameter) to observe any exceptions. It's advisable to enable `trace` or `debug`. Check the [Debug tips](./DEBUG_TIPS.md) on detailed instructions.
> [!TIP]
> Before troubleshooting, ensure you have set the correct [Debugging and LOG_LEVEL](./DEBUG_TIPS.md).
The issue might be related to the backend server, so please check [Debugging GraphQL issues](./DEBUG_API_SERVER.md).
---
Please also check the browser logs (usually accessible by pressing `F12`):
## Docker Container Doesn't Start
1. Switch to the Console tab and refresh the page
2. Switch to teh Network tab and refresh the page
If you are not sure how to resolve the errors yourself, please post screenshots of the above into the issue, or discord discussion, where your problem is being solved.
### Incorrect SCAN_SUBNETS
One of the most common issues is not configuring `SCAN_SUBNETS` correctly. If this setting is misconfigured you will only see one or two devices in your devices list after a scan. Please read the [subnets docs](./SUBNETS.md) carefully to resolve this.
### Duplicate devices and notifications
The app uses the MAC address as an unique identifier for devices. If a new MAC is detected a new device is added to the application and corresponding notifications are triggered. This means that if the MAC of an existing device changes, the device will be logged as a new device. You can usually prevent this from happening by changing the device configuration (in Android, iOS, or Windows) for your network. See the [Random Macs](./RANDOM_MAC.md) guide for details.
Initial setup issues are often caused by **missing permissions** or **incorrectly mapped volumes**. Always double-check your `docker run` or `docker-compose.yml` against the [official setup guide](./DOCKER_INSTALLATION.md) before proceeding.
### Permissions
Make sure you [File permissions](./FILE_PERMISSIONS.md) are set correctly.
Make sure your [file permissions](./FILE_PERMISSIONS.md) are correctly set:
* If facing issues (AJAX errors, can't write to DB, empty screen, etc,) make sure permissions are set correctly, and check the logs under `/tmp/log`.
* To solve permission issues you can try setting the owner and group of the `app.db` by executing the following on the host system: `docker exec netalertx chown -R www-data:www-data /data/db/app.db`.
* If still facing issues, try to map the app.db file (⚠ not folder) to `:/data/db/app.db` (see [docker-compose Examples](https://github.com/jokob-sk/NetAlertX/blob/main/dockerfiles/README.md#-docker-composeyml-examples) for details)
* If you encounter AJAX errors, cannot write to the database, or see an empty screen, check that permissions are correct and review the logs under `/tmp/log`.
* To fix permission issues with the database, update the owner and group of `app.db` as described in the [File Permissions guide](./FILE_PERMISSIONS.md).
### Container restarts / crashes
### Container Restarts / Crashes
* Check the logs for details. Often a required setting for a notification method is missing.
* Check the logs for details. Often, required settings are missing.
* For more detailed troubleshooting, see [Debug and Troubleshooting Tips](./DEBUG_TIPS.md).
* To observe errors directly, run the container in the foreground instead of `-d`:
### unable to resolve host
```bash
docker run --rm -it <your_image>
```
* Check that your `SCAN_SUBNETS` variable is using the correct mask and `--interface`. See the [subnets docs for details](./SUBNETS.md).
---
### Invalid JSON
## Docker Container Starts, But the Application Misbehaves
Check the [Invalid JSON errors debug help](./DEBUG_INVALID_JSON.md) docs on how to proceed.
If the container starts but the app shows unexpected behavior, the cause is often **data corruption**, **incorrect configuration**, or **unexpected input data**.
### sudo execution failing (e.g.: on arpscan) on a Raspberry Pi 4
### Continuous "Loading..." Screen
> sudo: unexpected child termination condition: 0
A misconfigured application may display a persistent `Loading...` dialog. This is usually caused by the backend failing to start.
Resolution based on [this issue](https://github.com/linuxserver/docker-papermerge/issues/4#issuecomment-1003657581)
**Steps to troubleshoot:**
1. Check **Maintenance → Logs** for exceptions.
2. If no exception is visible, check the Portainer logs.
3. Start the container in the foreground to observe exceptions.
4. Enable `trace` or `debug` logging for detailed output (see [Debug Tips](./DEBUG_TIPS.md)).
5. Verify that `GRAPHQL_PORT` is correctly configured.
6. Check browser logs (press `F12`):
* **Console tab** → refresh the page
* **Network tab** → refresh the page
If you are unsure how to resolve errors, provide screenshots or log excerpts in your issue report or Discord discussion.
---
### Common Configuration Issues
#### Incorrect `SCAN_SUBNETS`
If `SCAN_SUBNETS` is misconfigured, you may see only a few devices in your device list after a scan. See the [Subnets Documentation](./SUBNETS.md) for proper configuration.
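For reference, a valid value pairs a CIDR subnet with the interface it is reachable on; the exact list syntax is documented in [SUBNETS.md](./SUBNETS.md). An illustrative example (adjust subnet and interface to your network):

```bash
# app.conf list syntax, one entry per scanned network (illustrative values):
#   SCAN_SUBNETS=['192.168.1.0/24 --interface=eth0']
# Sanity-check from the host that the pair is scannable at all:
sudo arp-scan --interface=eth0 192.168.1.0/24
```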
#### Duplicate Devices and Notifications
* Devices are identified by their **MAC address**.
* If a device's MAC changes, it will be treated as a new device, triggering notifications.
* Prevent this by adjusting your device configuration for Android, iOS, or Windows. See the [Random MACs Guide](./RANDOM_MAC.md).
#### Unable to Resolve Host
* Ensure `SCAN_SUBNETS` uses the correct mask and `--interface`.
* Refer to the [Subnets Documentation](./SUBNETS.md) for detailed guidance.
#### Invalid JSON Errors
* Follow the steps in [Invalid JSON Errors Debug Help](./DEBUG_INVALID_JSON.md).
#### Sudo Execution Fails (e.g., on arpscan on Raspberry Pi 4)
Error:
```
sudo: unexpected child termination condition: 0
```
**Resolution**:
```bash
wget ftp.us.debian.org/debian/pool/main/libs/libseccomp/libseccomp2_2.5.3-2_armhf.deb
sudo dpkg -i libseccomp2_2.5.3-2_armhf.deb
```
The link above will probably break in time too. Go to https://packages.debian.org/sid/armhf/libseccomp2/download to find the new version number and put that in the url.
> ⚠️ The link may break over time. Check [Debian Packages](https://packages.debian.org/sid/armhf/libseccomp2/download) for the latest version.
### Only Router and own device show up
#### Only Router and Own Device Show Up
Make sure that the subnet and interface in `SCAN_SUBNETS` are correct. If your device/NAS has multiple ethernet ports, you probably need to change `eth0` to something else.
* Verify the subnet and interface in `SCAN_SUBNETS`.
* On devices with multiple Ethernet ports, you may need to change `eth0` to the correct interface.
### Losing my settings and devices after an update
#### Losing Settings or Devices After Update
If you lose your devices and/or settings after an update that means you don't have the `/data/db` and `/data/config` folders mapped to a permanent storage. That means every time you update these folders are re-created. Make sure you have the [volumes specified correctly](./DOCKER_COMPOSE.md) in your `docker-compose.yml` or run command.
* Ensure `/data/db` and `/data/config` are mapped to persistent storage.
* Without persistent volumes, these folders are recreated on every update.
* See [Docker Volumes Setup](./DOCKER_COMPOSE.md) for proper configuration.
#### Application Performance Issues
### The application is slow
Slowness can be caused by:
* Incorrect settings (causing app restarts) → check `app.log`.
* Too many background processes → disable unnecessary scanners.
* Long scans → limit the number of scanned devices.
* Excessive disk operations or failing maintenance plugins.
> See [Performance Tips](./PERFORMANCE.md) for detailed optimization steps.
Slowness is usually caused by incorrect settings (the app might restart, so check the `app.log`), too many background processes (disable unnecessary scanners), too long scans (limit the number of scanned devices), too many disk operations, or some maintenance plugins might have failed. See the [Performance tips](./PERFORMANCE.md) docs for details.

View File

@@ -1,12 +1,12 @@
# Debugging GraphQL server issues
The GraphQL server is an API middle layer, running on its own port specified by `GRAPHQL_PORT`, to retrieve and show the data in the UI. It can also be used to retrieve data for custom third-party integrations. Check the [API documentation](./API.md) for details.
The most common issue is that the GraphQL server doesn't start properly, usually due to a **port conflict**. If you are running multiple NetAlertX instances, make sure to use **unique ports** by changing the `GRAPHQL_PORT` setting. The default is `20212`.
## How to update the `GRAPHQL_PORT` in case of issues
As a first troubleshooting step try changing the default `GRAPHQL_PORT` setting. Please remember NetAlertX is running on the host, so any application using the same port will cause issues.
### Updating the setting via the Settings UI
@@ -14,7 +14,7 @@ Ideally use the Settings UI to update the setting under General -> Core -> Graph
![GrapQL settings](./img/DEBUG_API_SERVER/graphql_settings_port_token.png)
You might need to temporarily stop other applications or NetAlertX instances causing conflicts to update the setting. The `API_TOKEN` is used to authenticate any API calls, including GraphQL requests.
### Updating the `app.conf` file
@@ -24,7 +24,7 @@ If the UI is not accessible, you can directly edit the `app.conf` file in your `
### Using a docker variable
All application settings can also be initialized via the `APP_CONF_OVERRIDE` docker env variable.
```yaml
...

View File

@@ -3,13 +3,13 @@
Check the HTTP response of the failing backend call by following these steps:
- Open the developer console in your browser (usually, e.g. for Chrome, via the F12 key).
- Follow the steps in this screenshot:
![F12DeveloperConsole][F12DeveloperConsole]
- Copy the URL causing the error and enter it in the address bar of your browser directly and hit enter. The copied URLs could look something like this (notice the query strings at the end):
- `http://<NetAlertX URL>:20211/api/table_devices.json?nocache=1704141103121`
- `http://<NetAlertX URL>:20211/php/server/devices.php?action=getDevicesTotals`
- `http://<server>:20211/api/table_devices.json?nocache=1704141103121`
- `http://<server>:20211/php/server/devices.php?action=getDevicesTotals`
- Post the error response in the existing issue thread on GitHub or create a new issue and include the redacted response of the failing query.
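The same check works from a terminal; `<server>` is the placeholder used above, and `-i` includes the status line and headers so a truncated or HTML error body (the usual source of a failing query) is immediately visible:

```bash
curl -i 'http://<server>:20211/php/server/devices.php?action=getDevicesTotals'
```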

View File

@@ -1,5 +1,8 @@
# Troubleshooting plugins
> [!TIP]
> Before troubleshooting, please ensure you have the right [Debugging and LOG_LEVEL set](./DEBUG_TIPS.md).
## High-level overview
If a Plugin supplies data to the main app it's done either via a SQL query or via a script that updates the `last_result.log` file in the plugin log folder (`app/log/plugins/`).
@@ -9,7 +12,7 @@ For a more in-depth overview on how plugins work check the [Plugins development
### Prerequisites
- Make sure you read and followed the specific plugin setup instructions.
- Ensure you have [debug enabled (see More Logging)](./DEBUG_TIPS.md)
### Potential issues
@@ -47,9 +50,9 @@ Input data from the plugin might cause mapping issues in specific edge cases. Lo
17:31:05 [Plugins] history_to_insert count: 4
17:31:05 [Plugins] objects_to_insert count: 0
17:31:05 [Plugins] objects_to_update count: 4
17:31:05 [Plugin utils] In pluginEvents there are 2 events with the status "watched-not-changed"
17:31:05 [Plugin utils] In pluginObjects there are 2 events with the status "missing-in-last-scan"
17:31:05 [Plugin utils] In pluginObjects there are 2 events with the status "watched-not-changed"
17:31:05 [Plugins] Mapping objects to database table: CurrentScan
17:31:05 [Plugins] SQL query for mapping: INSERT into CurrentScan ( "cur_MAC", "cur_IP", "cur_LastQuery", "cur_Name", "cur_Vendor", "cur_ScanMethod") VALUES ( ?, ?, ?, ?, ?, ?)
17:31:05 [Plugins] SQL sqlParams for mapping: [('01:01:01:01:01:01', '172.30.0.1', 0, 'aaaa', 'vvvvvvvvv', 'PIHOLE'), ('02:42:ac:1e:00:02', '172.30.0.2', 0, 'dddd', 'vvvvv2222', 'PIHOLE')]
@@ -80,7 +83,7 @@ These values, if formatted correctly, will also show up in the UI:
### Sharing application state
Sometimes specific log sections are needed to debug issues. The Devices and CurrentScan table data is sometimes needed to figure out what's wrong.
1. Please set `LOG_LEVEL` to `trace` (Disable it once you have the info as this produces big log files).
2. Wait for the issue to occur.
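When a script plugin appears to hand over no data, inspecting the raw result files alongside the main log usually narrows the problem down. A sketch; the file names are assumed from the conventions above and may differ between versions:

```bash
ls -l /app/log/plugins/                       # plugin log folder named above
tail -n 20 /app/log/plugins/last_result.log   # raw rows the plugin handed over
grep -i "Plugins" /app/log/app.log            # plugin activity in the main log
```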

View File

@@ -1,30 +1,35 @@
# Debugging and troubleshooting
Please follow tips 1 - 4 to get a more detailed error.
## 1. More Logging
When debugging an issue always set the highest log level:
`LOG_LEVEL='trace'`
## 2. Surfacing errors when container restarts
Start the container via the **terminal** with a command similar to this one:
```bash
docker run --rm --network=host \
-v /local_data_dir/netalertx/config:/data/config \
-v /local_data_dir/netalertx/db:/data/db \
-v /etc/localtime:/etc/localtime \
docker run \
--network=host \
--restart unless-stopped \
-v /local_data_dir/config:/data/config \
-v /local_data_dir/db:/data/db \
-v /etc/localtime:/etc/localtime:ro \
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
-e PORT=20211 \
-e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
ghcr.io/jokob-sk/netalertx:latest
```
> ⚠ Please note, don't use the `-d` parameter so you see the error when the container crashes. Use this error in your issue description.
> [!NOTE]
> ⚠ The most important part is NOT to use the `-d` parameter so you see the error when the container crashes. Use this error in your issue description.
## 3. Check the _dev image and open issues
If possible, check if your issue got fixed in the `_dev` image before opening a new issue. The container is:
@@ -34,7 +39,7 @@ If possible, check if your issue got fixed in the `_dev` image before opening a
Please also search [open issues](https://github.com/jokob-sk/NetAlertX/issues).
## 4. Disable restart behavior
To prevent a Docker container from automatically restarting in a Docker Compose file, specify the restart policy as `no`:
@@ -48,9 +53,14 @@ services:
# Other service configurations...
```
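The plain `docker run` equivalent is to drop any restart policy (or pass `--restart=no`, the default) so the container stays down after a crash and the error remains on screen:

```bash
# Combine with the foreground run from tip 2 to capture the crash output.
docker run --restart=no --network=host ghcr.io/jokob-sk/netalertx:latest
```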
## 5. Sharing application state
## 5. TMP mount directories to rule out host permission issues
Sometimes specific log sections are needed to debug issues. The Devices and CurrentScan table data is sometimes needed to figure out what's wrong.
Try starting the container with all data in non-persistent volumes. If this works, the issue might be related to the permissions of your persistent data mount locations on your server. See the [Permissions guide](./FILE_PERMISSIONS.md) for details.
## 6. Sharing application state
Sometimes specific log sections are needed to debug issues. The Devices and CurrentScan table data is sometimes needed to figure out what's wrong.
1. Please set `LOG_LEVEL` to `trace` (Disable it once you have the info as this produces big log files).
2. Wait for the issue to occur.
@@ -61,4 +71,4 @@ Sometimes specific log sections are needed to debug issues. The Devices and Curr
## Common issues
See [Common issues](./COMMON_ISSUES.md) for details.
See [Common issues](./COMMON_ISSUES.md) for additional troubleshooting tips.

View File

@@ -4,8 +4,8 @@ NetAlertX allows you to mass-edit devices via a CSV export and import feature, o
## UI multi edit
> [!NOTE]
> Make sure you have your backups saved and restorable before doing any mass edits. Check [Backup strategies](./BACKUPS.md).
You can select devices in the _Devices_ view by selecting devices to edit and then clicking the _Multi-edit_ button or via the _Maintenance_ > _Multi-Edit_ section.
@@ -16,23 +16,23 @@ You can select devices in the _Devices_ view by selecting devices to edit and th
The database and device structure may change with new releases. When using the CSV import functionality, ensure the format matches what the application expects. To avoid issues, you can first export the devices and review the column formats before importing any custom data.
> [!NOTE]
> As always, backup everything, just in case.
1. In _Maintenance_ > _Backup / Restore_ click the _CSV Export_ button.
2. A `devices.csv` is generated in the `/config` folder
3. Edit the `devices.csv` file however you like.
![Maintenance > CSV Export](./img/DEVICES_BULK_EDITING/MAINTENANCE_CSV_EXPORT.png)
> [!NOTE]
> The file containing a list of Devices including the Network relationships between Network Nodes and connected devices. You can also trigger this by acessing this URL: `<your netalertx url>/php/server/devices.php?action=ExportCSV` or via the `CSV Backup` plugin. (💡 You can schedule this)
> [!NOTE]
> The file contains a list of Devices, including the Network relationships between Network Nodes and connected devices. You can also trigger this by accessing this URL: `<server>:20211/php/server/devices.php?action=ExportCSV` or via the `CSV Backup` plugin. (💡 You can schedule this)
![Settings > CSV Backup](./img/DEVICES_BULK_EDITING/CSV_BACKUP_SETTINGS.png)
### File encoding format
> [!NOTE]
> Keep Linux line endings (suggested editors: Nano, Notepad++)
![Notepad++ line endings](./img/DEVICES_BULK_EDITING/NOTEPAD++.png)

View File

@@ -7,7 +7,7 @@
# NetAlertX - Network scanner & notification framework
| [📑 Docker guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_INSTALLATION.md) | [🚀 Releases](https://github.com/jokob-sk/NetAlertX/releases) | [📚 Docs](https://jokob-sk.github.io/NetAlertX/) | [🔌 Plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md) | [🤖 Ask AI](https://gurubase.io/g/netalertx)
|----------------------| ----------------------| ----------------------| ----------------------| ----------------------|
<a href="https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/docs/img/GENERAL/github_social_image.jpg" target="_blank">
<img src="https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/docs/img/GENERAL/github_social_image.jpg" width="1000px" />
@@ -16,9 +16,9 @@
Head to [https://netalertx.com/](https://netalertx.com/) for more gifs and screenshots 📷.
> [!NOTE]
> There is also an experimental 🧪 [bare-metal install](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HW_INSTALL.md) method available.
## 📕 Basic Usage
> [!WARNING]
> You will have to run the container on the `host` network and specify `SCAN_SUBNETS` unless you use other [plugin scanners](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md). The initial scan can take a few minutes, so please wait 5-10 minutes for the initial discovery to finish.
@@ -28,7 +28,7 @@ docker run -d --rm --network=host \
-v /local_data_dir/config:/data/config \
-v /local_data_dir/db:/data/db \
-v /etc/localtime:/etc/localtime \
--mount type=tmpfs,target=/tmp/api \
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
-e PORT=20211 \
-e APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"} \
ghcr.io/jokob-sk/netalertx:latest
@@ -58,49 +58,49 @@ See alternative [docker-compose examples](https://github.com/jokob-sk/NetAlertX/
### Docker paths
> [!NOTE]
> See also [Backup strategies](https://github.com/jokob-sk/NetAlertX/blob/main/docs/BACKUPS.md).
| Required | Path | Description |
| :------------- | :------------- | :-------------|
| ✅ | `:/data/config` | Folder which will contain the `app.conf` & `devices.csv` ([read about devices.csv](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEVICES_BULK_EDITING.md)) files |
| ✅ | `:/data/db` | Folder which will contain the `app.db` database file |
| ✅ | `/etc/localtime:/etc/localtime:ro` | Ensures the timezone is the same as on the server. |
| | `:/tmp/log` | Logs folder useful for debugging if you have issues setting up the container |
| | `:/tmp/api` | The [API endpoint](https://github.com/jokob-sk/NetAlertX/blob/main/docs/API.md) containing static (but regularly updated) json and other files. Path configurable via `NETALERTX_API` environment variable. |
| | `:/app/front/plugins/<plugin>/ignore_plugin` | Map a file `ignore_plugin` to ignore a plugin. Plugins can be soft-disabled via settings. More in the [Plugin docs](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md). |
| | `:/etc/resolv.conf` | Use a custom `resolv.conf` file for [better name resolution](https://github.com/jokob-sk/NetAlertX/blob/main/docs/REVERSE_DNS.md). |
> Use separate `db` and `config` directories, do not nest them.
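For example, a simple non-nested layout on the host could look like this (paths are illustrative):

```bash
# Create sibling config and db folders (do not nest one inside the other)
mkdir -p /local_data_dir/config /local_data_dir/db
```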
### Initial setup
- If not present, the app generates default `app.conf` and `app.db` files on the first run.
- The preferred way is to manage the configuration via the Settings section in the UI. If the UI is inaccessible, you can modify [app.conf](https://github.com/jokob-sk/NetAlertX/tree/main/back) in the `/data/config/` folder directly.
#### Setting up scanners
You have to specify which network(s) should be scanned. This is done by entering subnets that are accessible from the host. If you use the default `ARPSCAN` plugin, you have to specify at least one valid subnet and interface in the `SCAN_SUBNETS` setting. See the documentation on [How to set up multiple SUBNETS, VLANs and what are limitations](https://github.com/jokob-sk/NetAlertX/blob/main/docs/SUBNETS.md) for troubleshooting and more advanced scenarios.
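As an illustration, a minimal `SCAN_SUBNETS` entry with a single subnet and interface pair could be appended to `app.conf` like this (the subnet and interface are example values; adjust them to your network):

```bash
# Illustrative only: one subnet + interface pair for the ARPSCAN plugin
# (see the SUBNETS docs linked above for the exact format and limitations)
echo "SCAN_SUBNETS=['192.168.1.0/24 --interface=eth0']" >> /local_data_dir/config/app.conf
```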
If you are running PiHole you can synchronize devices directly. Check the [PiHole configuration guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PIHOLE_GUIDE.md) for details.
> [!NOTE]
> You can bulk-import devices via the [CSV import method](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEVICES_BULK_EDITING.md).
#### Community guides
You can read or watch several [community configuration guides](https://github.com/jokob-sk/NetAlertX/blob/main/docs/COMMUNITY_GUIDES.md) in Chinese, Korean, German, or French.
> Please note these might be outdated. Rely on official documentation first.
#### Common issues
- Before creating a new issue, please check if a similar issue was [already resolved](https://github.com/jokob-sk/NetAlertX/issues?q=is%3Aissue+is%3Aclosed).
- Check also common issues and [debugging tips](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEBUG_TIPS.md).
## 💙 Support me
| [![GitHub](https://i.imgur.com/emsRCPh.png)](https://github.com/sponsors/jokob-sk) | [![Buy Me A Coffee](https://i.imgur.com/pIM6YXL.png)](https://www.buymeacoffee.com/jokobsk) | [![Patreon](https://i.imgur.com/MuYsrq1.png)](https://www.patreon.com/user?u=84385063) |
| --- | --- | --- |
- Bitcoin: `1N8tupjeCK12qRVU2XrV17WvKK7LCawyZM`
- Ethereum: `0x6e2749Cb42F4411bc98501406BdcD82244e3f9C7`

View File

@@ -34,30 +34,26 @@ Copy and paste the following YAML into the **Web editor**:
services:
netalertx:
container_name: netalertx
# Use this line for stable release
image: "ghcr.io/jokob-sk/netalertx:latest"
image: "ghcr.io/jokob-sk/netalertx:latest"
# Or, use this for the latest development build
# image: "ghcr.io/jokob-sk/netalertx-dev:latest"
network_mode: "host"
restart: unless-stopped
cap_drop: # Drop all capabilities for enhanced security
- ALL
cap_add: # Re-add necessary capabilities
- NET_RAW
- NET_ADMIN
- NET_BIND_SERVICE
volumes:
- ${APP_FOLDER}/netalertx/config:/data/config
- ${APP_FOLDER}/netalertx/db:/data/db
# Optional: logs (useful for debugging setup issues, comment out for performance)
- ${APP_FOLDER}/netalertx/log:/tmp/log
# to sync with system time
- /etc/localtime:/etc/localtime:ro
tmpfs:
# All writable runtime state resides under /tmp; comment out to persist logs between restarts
- "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
environment:
- PORT=${PORT}
- APP_CONF_OVERRIDE=${APP_CONF_OVERRIDE}
@@ -78,11 +74,12 @@ In the **Environment variables** section of Portainer, add the following:
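As a sketch, the values can mirror the `docker run` example earlier in this guide (example values only; adjust to your setup):

```bash
# Example variable values, matching the docker run example above
PORT=20211
APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"}
```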
## 5. Ensure permissions
> [!TIP]
> If you are facing permissions issues run the following commands on your server. This will change the owner and ensure sufficient access to the database and config files stored in the `/local_data_dir/db` and `/local_data_dir/config` folders (replace `local_data_dir` with the location where your `/db` and `/config` folders are located).
>
> `sudo chown -R 20211:20211 /local_data_dir`
>
> `sudo chmod -R a+rwx /local_data_dir`
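A quick way to confirm the change took effect (assuming the paths above):

```bash
# Both folders should now list uid/gid 20211 as the owner
ls -ln /local_data_dir/config /local_data_dir/db
```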
---
@@ -104,4 +101,4 @@ http://<your-docker-host-ip>:22022
* Check logs via Portainer → **Containers** → `netalertx` → **Logs**.
* Logs are stored under `${APP_FOLDER}/netalertx/log` if you enabled that volume.
Once the application is running, configure it by reading the [initial setup](INITIAL_SETUP.md) guide, or [troubleshoot common issues](COMMON_ISSUES.md).

View File

@@ -41,15 +41,7 @@ Use the following Compose snippet to deploy NetAlertX with a **static LAN IP** a
services:
netalertx:
image: ghcr.io/jokob-sk/netalertx:latest
ports:
- 20211:20211
volumes:
- /mnt/YOUR_SERVER/netalertx/config:/data/config:rw
- /mnt/YOUR_SERVER/netalertx/db:/data/db:rw
- /mnt/YOUR_SERVER/netalertx/logs:/tmp/log:rw
- /etc/localtime:/etc/localtime:ro
environment:
- PORT=20211
...
networks:
swarm-ipvlan:
ipv4_address: 192.168.1.240 # ⚠️ Choose a free IP from your LAN

View File

@@ -1,8 +1,23 @@
# Managing File Permissions for NetAlertX on a Read-Only Container
Sometimes, permission issues arise if your existing host directories were created by a previous container running as root or another UID. The container will fail to start with "Permission Denied" errors.
> [!TIP]
> NetAlertX runs in a **secure, read-only Alpine-based container** under a dedicated `netalertx` user (UID 20211, GID 20211). All writable paths are either mounted as **persistent volumes** or **`tmpfs` filesystems**. This ensures consistent file ownership and prevents privilege escalation.
Try starting the container with all data in non-persistent volumes. If this works, the issue is likely related to the permissions of your persistent data mount locations on the server.
```bash
docker run --rm --network=host \
-v /etc/localtime:/etc/localtime:ro \
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
-e PORT=20211 \
ghcr.io/jokob-sk/netalertx:latest
```
> [!WARNING]
> The above should only be used as a test: once the container restarts, all data is lost.
---
## Writable Paths
@@ -25,10 +40,6 @@ NetAlertX requires certain paths to be writable at runtime. These paths should b
---
## Fixing Permission Problems
### Solution
1. **Run the container once as root** (`--user "0"`) to allow it to correct permissions automatically:
@@ -37,6 +48,7 @@ Sometimes, permission issues arise if your existing host directories were create
docker run -it --rm --name netalertx --user "0" \
-v /local_data_dir/config:/data/config \
-v /local_data_dir/db:/data/db \
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
ghcr.io/jokob-sk/netalertx:latest
```
@@ -47,11 +59,12 @@ docker run -it --rm --name netalertx --user "0" \
> The container startup script detects `root` and runs `chown -R 20211:20211` on all volumes, fixing ownership for the secure `netalertx` user.
> [!TIP]
> If you are facing permissions issues run the following commands on your server. This will change the owner and ensure sufficient access to the database and config files stored in the `/local_data_dir/db` and `/local_data_dir/config` folders (replace `local_data_dir` with the location where your `/db` and `/config` folders are located).
>
> `sudo chown -R 20211:20211 /local_data_dir`
>
> `sudo chmod -R a+rwx /local_data_dir`
---
@@ -59,22 +72,22 @@ docker run -it --rm --name netalertx --user "0" \
```yaml
services:
netalertx:
container_name: netalertx
image: "ghcr.io/jokob-sk/netalertx"
network_mode: "host"
cap_drop: # Drop all capabilities for enhanced security
- ALL
cap_add: # Add only the necessary capabilities
- NET_ADMIN # Required for ARP scanning
- NET_RAW # Required for raw socket operations
- NET_BIND_SERVICE # Required to bind to privileged ports (nbtscan)
restart: unless-stopped
volumes:
- /local_data_dir/config:/data/config
- /local_data_dir/db:/data/db
- /etc/localtime:/etc/localtime
environment:
- PORT=20211
tmpfs:
- "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"

View File

@@ -1,8 +1,8 @@
# Migration
> [!WARNING]
> ⚠️ **Important:** The documentation has been recently updated and some instructions may have changed.
> If you are using the currently live production image, please follow the instructions on [Docker Hub](https://hub.docker.com/r/jokobsk/netalertx) for building and running the container.
> These docs reflect the latest development version and may differ from the production image.
@@ -13,13 +13,13 @@ When upgrading from older versions of NetAlertX (or PiAlert by jokob-sk), follow
## Migration scenarios
- You are running PiAlert (by jokob-sk)
→ [Read the 1.1 Migration from PiAlert to NetAlertX `v25.5.24`](#11-migration-from-pialert-to-netalertx-v25524)
- You are running NetAlertX (by jokob-sk) `25.5.24` or older
→ [Read the 1.2 Migration from NetAlertX `v25.5.24`](#12-migration-from-netalertx-v25524)
- You are running NetAlertX (by jokob-sk) (`v25.6.7` to `v25.10.1`)
→ [Read the 1.3 Migration from NetAlertX `v25.10.1`](#13-migration-from-netalertx-v25101)
@@ -30,40 +30,40 @@ You can migrate data manually, for example by exporting and importing devices us
### 1.1 Migration from PiAlert to NetAlertX `v25.5.24`
#### STEPS:
The application will automatically migrate the database, configuration, and all device information.
A banner message will appear at the top of the web UI reminding you to update your Docker mount points.
1. Stop the container
2. [Back up your setup](./BACKUPS.md)
3. Update the Docker file mount locations in your `docker-compose.yml` or docker run command (See below **New Docker mount locations**).
4. Rename the DB and conf files to `app.db` and `app.conf` and place them in the appropriate location.
5. Start the container
> [!TIP]
> If you have trouble accessing past backups, config or database files you can copy them into the newly mapped directories, for example by running this command in the container: `cp -r /data/config /home/pi/pialert/config/old_backup_files`. This should create a folder in the `config` directory called `old_backup_files` containing all the files in that location. Another approach is to map the old location and the new one at the same time to copy things over.
#### New Docker mount locations
The internal application path in the container has changed from `/home/pi/pialert` to `/app`. Update your volume mounts as follows:
| Old mount point | New mount point |
|----------------------|---------------|
| `/home/pi/pialert/config` | `/data/config` |
| `/home/pi/pialert/db` | `/data/db` |
If you were mounting files directly, please note the file names have changed:
| Old file name | New file name |
|----------------------|---------------|
| `pialert.conf` | `app.conf` |
| `pialert.db` | `app.db` |
> [!NOTE]
> The application automatically creates symlinks from the old database and config locations to the new ones, so data loss should not occur. Read the [backup strategies](./BACKUPS.md) guide to backup your setup.
@@ -80,17 +80,17 @@ services:
pialert:
container_name: pialert
# use the below line if you want to test the latest dev image
# image: "ghcr.io/jokob-sk/netalertx-dev:latest"
image: "jokobsk/pialert:latest"
network_mode: "host"
restart: unless-stopped
volumes:
- /local_data_dir/config:/home/pi/pialert/config
- /local_data_dir/db:/home/pi/pialert/db
# (optional) useful for debugging if you have issues setting up the container
- /local_data_dir/logs:/home/pi/pialert/front/log
environment:
- TZ=Europe/Berlin
- PORT=20211
```
@@ -98,26 +98,26 @@ services:
```yaml
services:
netalertx: # 🆕 This has changed
container_name: netalertx # 🆕 This has changed
image: "ghcr.io/jokob-sk/netalertx:25.5.24" # 🆕 This has changed
network_mode: "host"
restart: unless-stopped
volumes:
- /local_data_dir/config:/data/config # 🆕 This has changed
- /local_data_dir/db:/data/db # 🆕 This has changed
# (optional) useful for debugging if you have issues setting up the container
- /local_data_dir/logs:/tmp/log # 🆕 This has changed
environment:
- TZ=Europe/Berlin
- PORT=20211
```
##### Example 2: Mapping files
> [!NOTE]
> The recommendation is to map folders as in Example 1, map files directly only when needed.
###### Old docker-compose.yml
@@ -126,17 +126,17 @@ services:
pialert:
container_name: pialert
# use the below line if you want to test the latest dev image
# image: "ghcr.io/jokob-sk/netalertx-dev:latest"
image: "jokobsk/pialert:latest"
network_mode: "host"
restart: unless-stopped
volumes:
- /local_data_dir/config/pialert.conf:/home/pi/pialert/config/pialert.conf
- /local_data_dir/db/pialert.db:/home/pi/pialert/db/pialert.db
# (optional) useful for debugging if you have issues setting up the container
- /local_data_dir/logs:/home/pi/pialert/front/log
environment:
- TZ=Europe/Berlin
- PORT=20211
```
@@ -144,18 +144,18 @@ services:
```yaml
services:
netalertx: # 🆕 This has changed
container_name: netalertx # 🆕 This has changed
image: "ghcr.io/jokob-sk/netalertx:25.5.24" # 🆕 This has changed
network_mode: "host"
restart: unless-stopped
volumes:
- /local_data_dir/config/app.conf:/data/config/app.conf # 🆕 This has changed
- /local_data_dir/db/app.db:/data/db/app.db # 🆕 This has changed
# (optional) useful for debugging if you have issues setting up the container
- /local_data_dir/logs:/tmp/log # 🆕 This has changed
environment:
- TZ=Europe/Berlin
- PORT=20211
```
@@ -164,13 +164,13 @@ services:
Versions before `v25.10.1` require an intermediate migration through `v25.5.24` to ensure database compatibility. Skipping this step may cause compatibility issues due to database schema changes introduced after `v25.5.24`.
#### STEPS:
1. Stop the container
2. [Back up your setup](./BACKUPS.md)
3. Upgrade to `v25.5.24` by pinning the release version (See Examples below)
4. Start the container and verify everything works as expected.
5. Stop the container
6. Upgrade to `v25.10.1` by pinning the release version (See Examples below)
7. Start the container and verify everything works as expected.
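One way to stage this, assuming a plain Docker setup, is to pull each pinned image before recreating the container:

```bash
# Pull the intermediate release first, then the target release after verifying
docker pull ghcr.io/jokob-sk/netalertx:25.5.24
docker pull ghcr.io/jokob-sk/netalertx:25.10.1
```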
@@ -184,62 +184,62 @@ Examples of docker files with the tagged version.
```yaml
services:
netalertx:
container_name: netalertx
image: "ghcr.io/jokob-sk/netalertx:25.5.24" # 🆕 This is important
network_mode: "host"
restart: unless-stopped
volumes:
- /local_data_dir/config:/data/config
- /local_data_dir/db:/data/db
# (optional) useful for debugging if you have issues setting up the container
- /local_data_dir/logs:/tmp/log
environment:
- TZ=Europe/Berlin
- PORT=20211
```
```yaml
services:
netalertx:
container_name: netalertx
image: "ghcr.io/jokob-sk/netalertx:25.10.1" # 🆕 This is important
network_mode: "host"
restart: unless-stopped
volumes:
- /local_data_dir/config:/data/config
- /local_data_dir/db:/data/db
# (optional) useful for debugging if you have issues setting up the container
- /local_data_dir/logs:/tmp/log
environment:
- TZ=Europe/Berlin
- PORT=20211
```
### 1.3 Migration from NetAlertX `v25.10.1`
Starting from v25.10.1, the container uses a [more secure, read-only runtime environment](./SECURITY_FEATURES.md), which requires all writable paths (e.g., logs, API cache, temporary data) to be mounted as `tmpfs` or permanent writable volumes, with sufficient access [permissions](./FILE_PERMISSIONS.md).
#### STEPS:
1. Stop the container
2. [Back up your setup](./BACKUPS.md)
3. Upgrade to `v25.10.1` by pinning the release version (See the example below)
```yaml
services:
netalertx:
container_name: netalertx
image: "ghcr.io/jokob-sk/netalertx:25.10.1" # 🆕 This is important
network_mode: "host"
restart: unless-stopped
volumes:
- /local_data_dir/config:/data/config
- /local_data_dir/db:/data/db
# (optional) useful for debugging if you have issues setting up the container
- /local_data_dir/logs:/tmp/log
environment:
- TZ=Europe/Berlin
- PORT=20211
```
@@ -248,13 +248,14 @@ services:
6. Perform a one-off migration to the latest `netalertx` image and `20211` user:
> [!NOTE]
> The example below assumes your `/config` and `/db` folders are stored in `local_data_dir`.
> Replace this path with your actual configuration directory. `netalertx` is the container name, which might differ from your setup.
```sh
docker run -it --rm --name netalertx --user "0" \
-v /local_data_dir/config:/data/config \
-v /local_data_dir/db:/data/db \
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
ghcr.io/jokob-sk/netalertx:latest
```
@@ -271,22 +272,22 @@ sudo chmod -R a+rwx /local_data_dir/
```yaml
services:
netalertx:
container_name: netalertx
image: "ghcr.io/jokob-sk/netalertx" # 🆕 This has changed
network_mode: "host"
cap_drop: # 🆕 New line
- ALL # 🆕 New line
cap_add: # 🆕 New line
- NET_RAW # 🆕 New line
- NET_ADMIN # 🆕 New line
- NET_BIND_SERVICE # 🆕 New line
restart: unless-stopped
volumes:
- /local_data_dir/config:/data/config
- /local_data_dir/db:/data/db
# (optional) useful for debugging if you have issues setting up the container
#- /local_data_dir/logs:/tmp/log
# Ensuring the timezone is the same as on the server - make sure also the TIMEZONE setting is configured
- /etc/localtime:/etc/localtime:ro # 🆕 New line
environment:

View File

@@ -1,6 +1,6 @@
## How to Set Up Your Network Page
The **Network** page lets you map how devices connect — visually and logically.
It's especially useful for planning infrastructure, assigning parent-child relationships, and spotting gaps.
![Network tree details](./img/NETWORK_TREE/Network_Sample.png)
@@ -9,11 +9,11 @@ To get started, you'll need to define at least one root node and mark certain
---
Start by creating a root device with the MAC address `Internet`, if the application didn't create one already.
This special MAC address (`Internet`) is required for the root network node — no other value is currently supported.
Set its **Type** to a valid network type — such as `Router` or `Gateway`.
> [!TIP]
> If you don't have one, use the [Create new device](./DEVICE_MANAGEMENT.md#dummy-devices) button on the **Devices** page to add a root device.
---
@@ -21,15 +21,15 @@ Set its **Type** to a valid network type — such as `Router` or `Gateway`.
## ⚡ Quick Setup
1. Open the device you want to use as a network node (e.g. a Switch).
2. Set its **Type** to one of the following:
`AP`, `Firewall`, `Gateway`, `PLC`, `Powerline`, `Router`, `Switch`, `USB LAN Adapter`, `USB WIFI Adapter`, `WLAN`
*(Or add custom types under **Settings → General → `NETWORK_DEVICE_TYPES`**.)*
3. Save the device.
4. Go to the **Network** page — supported device types will appear as tabs.
5. Use the **Assign** button to connect unassigned devices to a network node.
6. If the **Port** is `0` or empty, a Wi-Fi icon is shown. Otherwise, an Ethernet icon appears.
> [!NOTE]
> Use [bulk editing](./DEVICES_BULK_EDITING.md) with _CSV Export_ to fix `Internet` root assignments or update many devices at once.
---
@@ -42,20 +42,22 @@ Let's walk through setting up a device named `raspberrypi` to act as a network
### 1. Set Device Type and Parent
- Go to the **Devices** page
- Open the device detail view for `raspberrypi`
- In the **Type** dropdown, select `Switch`
![Device details](./img/NETWORK_TREE/Network_Device_Details.png)
- Optionally assign a **Parent Node** (where this device connects to) and the **Relationship type** of the connection.
The `nic` relationship type can affect parent notifications — see the setting description and [Notifications documentation](./NOTIFICATIONS.md) for more.
- A device's parent MAC will be overwritten by plugins if its current value is any of the following: "null", "(unknown)", "(Unknown)".
- If you want plugins to be able to overwrite the parent value (for example, when mixing plugins that do not provide parent MACs like `ARPSCAN` with those that do, like `UNIFIAPI`), you must set the setting `NEWDEV_devParentMAC` to None.
![Device details](./img/NETWORK_TREE/Network_Device_Details_Parent.png)
> [!NOTE]
> Only certain device types can act as network nodes:
> `AP`, `Firewall`, `Gateway`, `Hypervisor`, `PLC`, `Powerline`, `Router`, `Switch`, `USB LAN Adapter`, `USB WIFI Adapter`, `WLAN`
> You can add custom types via the `NETWORK_DEVICE_TYPES` setting.
- Click **Save**
@@ -81,7 +83,7 @@ You can confirm that `raspberrypi` now acts as a network device in two places:
### 3. Assign Connected Devices
- Use the **Assign** button to link other devices (e.g. PCs) to `raspberrypi`.
- After assigning, connected devices will appear beneath the `raspberrypi` switch node.
![Assigned nodes](./img/NETWORK_TREE/Network_Assigned_Nodes.png)
@@ -92,9 +94,9 @@ You can confirm that `raspberrypi` now acts as a network device in two places:
> Hovering over devices in the tree reveals connection details and tooltips for quick inspection.
> [!NOTE]
> Selecting certain relationship types hides the device in the default device views.
> You can change this behavior by adjusting the `UI_hide_rel_types` setting, which by default is set to `["nic","virtual"]`.
> This means devices with `devParentRelType` set to `nic` or `virtual` will not be shown.
> All devices, regardless of relationship type, are always accessible in the **All devices** view.
---

View File

@@ -1,47 +1,50 @@
# Performance Optimization Guide
There are several ways to improve the application's performance. The application has been tested on a range of devices, from Raspberry Pi 4 units to NAS and NUC systems. If you are running the application on a lower-end device, fine-tuning the performance settings can significantly improve the user experience.
## Common Causes of Slowness
Performance issues are usually caused by:
* **Incorrect settings**: The app may restart unexpectedly. Check `app.log` under **Maintenance → Logs** for details.
* **Too many background processes**: Disable unnecessary scanners.
* **Long scan durations**: Limit the number of scanned devices.
* **Excessive disk operations**: Optimize scanning and logging settings.
* **Maintenance plugin failures**: If cleanup tasks fail, performance can degrade over time.
The application performs regular maintenance and database cleanup. If these tasks are failing, you will see slowdowns.
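One way to spot failing maintenance runs, assuming the default log location, is to grep `app.log` for the two maintenance plugin names:

```bash
# Show the most recent DBCLNP/MAINT log entries
# (path assumes the default /tmp/log mapping; plugin names are used as search terms)
grep -iE "DBCLNP|MAINT" /tmp/log/app.log | tail -n 20
```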
### Database and Log File Size
A large database or oversized log files can impact performance. You can check database and table sizes on the **Maintenance** page.
![DB size check](./img/PERFORMANCE/db_size_check.png)
> [!NOTE]
>
> * For **~100 devices**, the database should be around **50 MB**.
> * No table should exceed **10,000 rows** in a healthy system.
> * Actual values vary based on network activity and plugin settings.
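To check the size from the host rather than the UI, assuming the default mapping, something like this works:

```bash
# Database size on the host (replace local_data_dir with your mapped db folder)
du -h /local_data_dir/db/app.db
```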
---
## Maintenance Plugins
Two plugins help maintain the system's performance:
### **1. Database Cleanup (DBCLNP)**
* Handles database maintenance and cleanup.
* See the [DB Cleanup Plugin Docs](/front/plugins/db_cleanup/README.md).
* Ensure it's not failing by checking logs.
* Adjust the schedule (`DBCLNP_RUN_SCHD`) and timeout (`DBCLNP_RUN_TIMEOUT`) if necessary.
### **2. Maintenance (MAINT)**
* Cleans logs and performs general maintenance tasks.
* See the [Maintenance Plugin Docs](/front/plugins/maintenance/README.md).
* Verify proper operation via logs.
* Adjust the schedule (`MAINT_RUN_SCHD`) and timeout (`MAINT_RUN_TIMEOUT`) if needed.
---
@@ -50,48 +53,56 @@ Two plugins help maintain the applications performance:
Frequent scans increase resource usage, network traffic, and database read/write cycles.
### **Optimizations**
* **Increase scan intervals** (`<PLUGIN>_RUN_SCHD`) on busy networks or low-end hardware.
* **Increase timeouts** (`<PLUGIN>_RUN_TIMEOUT`) to avoid plugin failures.
* **Reduce subnet size** e.g., use `/24` instead of `/16` to reduce scan load.
Some plugins also include options to limit which devices are scanned. If certain plugins consistently run long, consider narrowing their scope.
For example, the **ICMP plugin** allows scanning only IPs that match a specific regular expression.
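As a sketch of the idea (the exact setting name lives in the ICMP plugin's settings page), a pattern like the following would restrict matching to `192.168.1.1` through `192.168.1.99`:

```bash
# Illustrative regex only: matches 192.168.1.1 through 192.168.1.99
# (only the first address below passes the filter)
printf '%s\n' 192.168.1.5 192.168.1.120 | grep -E '^192\.168\.1\.[1-9][0-9]?$'
```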
---
## Storing Temporary Files in Memory
On devices with slower I/O, you can improve performance by storing temporary files (and optionally the database) in memory using `tmpfs`.
> [!WARNING]
> Storing the **database** in `tmpfs` is generally discouraged. Use this only if device data and historical records are not required to persist. If needed, you can pair this setup with the `SYNC` plugin to store important persistent data on another node. See the [Plugins docs](./PLUGINS.md) for details.
Using `tmpfs` reduces disk writes and speeds up I/O, but **all data stored in memory will be lost on restart**.
Below is an optimized `docker-compose.yml` snippet using non-persistent logs, API data, and DB:
```yaml
version: "3"
services:
netalertx:
container_name: netalertx
# Use this line for the stable release
image: "ghcr.io/jokob-sk/netalertx:latest"
# Or use this line for the latest development build
# image: "ghcr.io/jokob-sk/netalertx-dev:latest"
image: "ghcr.io/jokob-sk/netalertx:latest"
network_mode: "host"
restart: unless-stopped
cap_drop: # Drop all capabilities for enhanced security
- ALL
cap_add: # Re-add necessary capabilities
- NET_RAW
- NET_ADMIN
- NET_BIND_SERVICE
volumes:
- ${APP_FOLDER}/netalertx/config:/data/config
- /etc/localtime:/etc/localtime:ro
tmpfs:
# All writable runtime state resides under /tmp; comment out to persist logs between restarts
- "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
- "/data/db:uid=20211,gid=20211,mode=1700" # ⚠ You will lose historical data on restart
environment:
- PORT=${PORT}
- APP_CONF_OVERRIDE=${APP_CONF_OVERRIDE}
```

View File

@@ -2,21 +2,21 @@
If you are running a DNS server, such as **AdGuard**, set up **Private reverse DNS servers** for better name resolution on your network. Enabling this setting will enable NetAlertX to execute dig and nslookup commands to automatically resolve device names based on their IP addresses.
> [!TIP]
> Before proceeding, ensure that [name resolution plugins](/local_data_dir/NAME_RESOLUTION.md) are enabled.
> You can customize how names are cleaned using the `NEWDEV_NAME_CLEANUP_REGEX` setting.
> To auto-update Fully Qualified Domain Names (FQDN), enable the `REFRESH_FQDN` setting.
> Example 1: Reverse DNS `disabled`
>
> ```
> jokob@Synology-NAS:/$ nslookup 192.168.1.58
> ** server can't find 58.1.168.192.in-addr.arpa: NXDOMAIN
> ```
> Example 2: Reverse DNS `enabled`
>
> ```
> jokob@Synology-NAS:/$ nslookup 192.168.1.58
> 58.1.168.192.in-addr.arpa name = jokob-NUC.localdomain.
@@ -33,23 +33,14 @@ If you are running a DNS server, such as **AdGuard**, set up **Private reverse D
### Specifying the DNS in the container
You can specify the DNS server in the docker-compose to improve name resolution on your network.
```yaml
services:
netalertx:
container_name: netalertx
image: "ghcr.io/jokob-sk/netalertx:latest"
restart: unless-stopped
volumes:
- /local_data_dir/config:/data/config
- /local_data_dir/db:/data/db
# - /local_data_dir/log:/tmp/log
# Ensuring the timezone is the same as on the server - make sure also the TIMEZONE setting is configured
- /etc/localtime:/etc/localtime:ro
environment:
- PORT=20211
network_mode: host
...
dns: # specifying the DNS servers used for the container
- 10.8.0.1
- 10.8.0.17
@@ -57,7 +48,7 @@ services:
### Using a custom resolv.conf file
You can configure a custom **/etc/resolv.conf** file in **docker-compose.yml** and set the nameserver to your LAN DNS server (e.g.: Pi-Hole). See the relevant [resolv.conf man](https://www.man7.org/linux/man-pages/man5/resolv.conf.5.html) entry for details.
#### docker-compose.yml:
@@ -66,18 +57,10 @@ version: "3"
services:
netalertx:
container_name: netalertx
image: "ghcr.io/jokob-sk/netalertx:latest"
restart: unless-stopped
volumes:
- /local_data_dir/config/app.conf:/data/config/app.conf
- /local_data_dir/db:/data/db
- /local_data_dir/log:/tmp/log
...
- /local_data_dir/config/resolv.conf:/etc/resolv.conf # ⚠ Mapping the /resolv.conf file for better name resolution
# Ensuring the timezone is the same as on the server - make sure also the TIMEZONE setting is configured
- /etc/localtime:/etc/localtime:ro
environment:
- PORT=20211
network_mode: host
...
```
#### /local_data_dir/config/resolv.conf:
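A minimal sketch, assuming your LAN DNS server (e.g. a Pi-Hole) listens on `192.168.1.2`:

```bash
# Illustrative only: create a one-line resolv.conf pointing at your LAN DNS server
echo "nameserver 192.168.1.2" > /local_data_dir/config/resolv.conf
```

You can then verify resolution from inside the container, for example with `docker exec -it netalertx nslookup 192.168.1.58` (the container name `netalertx` is assumed here).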

View File

@@ -2,9 +2,9 @@
> Submitted by amazing [cvc90](https://github.com/cvc90) 🙏
> [!NOTE]
> There are various NGINX config files for NetAlertX, some for the bare-metal install, currently Debian 12 and Ubuntu 24 (`netalertx.conf`), and one for the docker container (`netalertx.template.conf`).
>
> The first one you can find in the respective bare metal installer folder `/app/install/<system>/netalertx.conf`.
> The docker one can be found in the [install](https://github.com/jokob-sk/NetAlertX/tree/main/install) folder. Map, or use, the one appropriate for your setup.
@@ -17,14 +17,14 @@
2. In this file, paste the following code:
```
server {
listen 80;
server_name netalertx;
proxy_preserve_host on;
proxy_pass http://localhost:20211/;
proxy_pass_reverse http://localhost:20211/;
}
```
3. Activate the new website by running the following command:
@@ -43,18 +43,18 @@
2. In this file, paste the following code:
```
server {
listen 80;
server_name netalertx;
proxy_preserve_host on;
location ^~ /netalertx/ {
proxy_pass http://localhost:20211/;
proxy_pass_reverse http://localhost:20211/;
proxy_redirect ~^/(.*)$ /netalertx/$1;
rewrite ^/netalertx/?(.*)$ /$1 break;
}
}
```
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
@@ -73,13 +73,13 @@
2. In this file, paste the following code:
```
server {
listen 80;
server_name netalertx;
proxy_preserve_host on;
location ^~ /netalertx/ {
proxy_pass http://localhost:20211/;
proxy_pass_reverse http://localhost:20211/;
proxy_redirect ~^/(.*)$ /netalertx/$1;
rewrite ^/netalertx/?(.*)$ /$1 break;
sub_filter_once off;
@@ -89,13 +89,13 @@
sub_filter '(?>$host)/js' '/netalertx/js';
sub_filter '/img' '/netalertx/img';
sub_filter '/lib' '/netalertx/lib';
sub_filter '/php' '/netalertx/php';
}
}
```
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
4. Activate the new website by running the following command:
`nginx -s reload` or `systemctl restart nginx`
@@ -111,17 +111,17 @@
2. In this file, paste the following code:
```
server {
listen 443;
server_name netalertx;
SSLEngine On;
SSLCertificateFile /etc/ssl/certs/netalertx.pem;
SSLCertificateKeyFile /etc/ssl/private/netalertx.key;
proxy_preserve_host on;
proxy_pass http://localhost:20211/;
proxy_pass_reverse http://localhost:20211/;
}
```
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
@@ -140,23 +140,23 @@
2. In this file, paste the following code:
```
server {
listen 443;
server_name netalertx;
SSLEngine On;
SSLCertificateFile /etc/ssl/certs/netalertx.pem;
SSLCertificateKeyFile /etc/ssl/private/netalertx.key;
location ^~ /netalertx/ {
proxy_pass http://localhost:20211/;
proxy_pass_reverse http://localhost:20211/;
proxy_redirect ~^/(.*)$ /netalertx/$1;
rewrite ^/netalertx/?(.*)$ /$1 break;
}
}
```
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
4. Activate the new website by running the following command:
`nginx -s reload` or `systemctl restart nginx`
@@ -172,15 +172,15 @@
2. In this file, paste the following code:
```
server {
listen 443;
server_name netalertx;
SSLEngine On;
SSLCertificateFile /etc/ssl/certs/netalertx.pem;
SSLCertificateKeyFile /etc/ssl/private/netalertx.key;
location ^~ /netalertx/ {
proxy_pass http://localhost:20211/;
proxy_pass_reverse http://localhost:20211/;
proxy_redirect ~^/(.*)$ /netalertx/$1;
rewrite ^/netalertx/?(.*)$ /$1 break;
sub_filter_once off;
@@ -190,13 +190,13 @@
sub_filter '(?>$host)/js' '/netalertx/js';
sub_filter '/img' '/netalertx/img';
sub_filter '/lib' '/netalertx/lib';
sub_filter '/php' '/netalertx/php';
}
}
```
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
4. Activate the new website by running the following command:
`nginx -s reload` or `systemctl restart nginx`
@@ -218,10 +218,10 @@
ProxyPass / http://localhost:20211/
ProxyPassReverse / http://localhost:20211/
</VirtualHost>
```
3. Check your config with `httpd -t` (or `apache2ctl -t` on Debian/Ubuntu). If there are any issues, it will tell you.
4. Activate the new website by running the following command:
`a2ensite netalertx` or `service apache2 reload`
@@ -245,10 +245,10 @@
ProxyPassReverse / http://localhost:20211/
}
</VirtualHost>
```
3. Check your config with `httpd -t` (or `apache2ctl -t` on Debian/Ubuntu). If there are any issues, it will tell you.
4. Activate the new website by running the following command:
`a2ensite netalertx` or `service apache2 reload`
@@ -273,10 +273,10 @@
ProxyPass / http://localhost:20211/
ProxyPassReverse / http://localhost:20211/
</VirtualHost>
```
3. Check your config with `httpd -t` (or `apache2ctl -t` on Debian/Ubuntu). If there are any issues, it will tell you.
4. Activate the new website by running the following command:
`a2ensite netalertx` or `service apache2 reload`
@@ -290,11 +290,11 @@
1. On your Apache server, create a new file called /etc/apache2/sites-available/netalertx.conf.
2. In this file, paste the following code:
```
<VirtualHost *:443>
ServerName netalertx
SSLEngine On
SSLCertificateFile /etc/ssl/certs/netalertx.pem
SSLCertificateKeyFile /etc/ssl/private/netalertx.key
location ^~ /netalertx/ {
@@ -303,10 +303,10 @@
ProxyPassReverse / http://localhost:20211/
}
</VirtualHost>
```
3. Check your config with `httpd -t` (or `apache2ctl -t` on Debian/Ubuntu). If there are any issues, it will tell you.
4. Activate the new website by running the following command:
`a2ensite netalertx` or `service apache2 reload`
@@ -381,7 +381,7 @@ location ^~ /netalertx/ {
> Submitted by [Isegrimm](https://github.com/Isegrimm) 🙏 (based on this [discussion](https://github.com/jokob-sk/NetAlertX/discussions/449#discussioncomment-7281442))
Assuming the user already has a working Traefik setup, this is what's needed to make NetAlertX work at a URL like www.domain.com/netalertx/.
Note: Everything in these configs assumes '**www.domain.com**' as your domain name and '**section31**' as an arbitrary name for your certificate setup. You will have to substitute these with your own.
@@ -496,14 +496,9 @@ server {
Mapping the updated file (on the local filesystem at `/appl/docker/netalertx/default`) into the docker container:
```yaml
...
volumes:
- /appl/docker/netalertx/default:/etc/nginx/sites-available/default
...
```

View File

@@ -1,10 +1,10 @@
# Installation on a Synology NAS
There are different ways to install NetAlertX on a Synology, including SSH-ing into the machine and using the command line. For this guide, we will use the Project option in Container manager.
## Create the folder structure
The folders you are creating below will contain the configuration and the database. Back them up regularly.
1. Create a parent folder named `netalertx`
2. Create a `db` sub-folder
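If you prefer SSH, the same structure can be created in one step (the parent path will differ on your NAS):

```bash
# Example for a volume1-based share; adjust to your actual shared folder
mkdir -p /volume1/app_storage/netalertx/config /volume1/app_storage/netalertx/db
```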
@@ -29,23 +29,31 @@ The folders you are creating below will contain the configuration and the databa
- Path: `/app_storage/netalertx` (will differ from yours)
- Paste in the following template:
```yaml
version: "3"
services:
netalertx:
container_name: netalertx
# use the below line if you want to test the latest dev image
# image: "ghcr.io/jokob-sk/netalertx-dev:latest"
image: "ghcr.io/jokob-sk/netalertx:latest"
network_mode: "host"
restart: unless-stopped
cap_drop: # Drop all capabilities for enhanced security
- ALL
cap_add: # Re-add necessary capabilities
- NET_RAW
- NET_ADMIN
- NET_BIND_SERVICE
volumes:
- /app_storage/netalertx/config:/data/config
- /app_storage/netalertx/db:/data/db
# to sync with system time
- /etc/localtime:/etc/localtime:ro
tmpfs:
# All writable runtime state resides under /tmp; comment out to persist logs between restarts
- "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
environment:
- PORT=20211
```
@@ -59,7 +67,7 @@ services:
```yaml
volumes:
- /volume1/app_storage/netalertx/config:/data/config
- /volume1/app_storage/netalertx/db:/data/db
# (optional) useful for debugging if you have issues setting up the container
# - local/path/logs:/tmp/log <- commented out with # ⚠
```
@@ -72,4 +80,13 @@ services:
![Build](./img/SYNOLOGY/09_Run_and_build.png)
10. Navigate to `<Synology URL>:20211` (or your custom port).
11. Read the [Subnets](./SUBNETS.md) and [Plugins](/docs/PLUGINS.md) docs to complete your setup.
> [!TIP]
> If you are facing permissions issues run the following commands on your server. This will change the owner and ensure sufficient access to the database and config files stored in the `/local_data_dir/db` and `/local_data_dir/config` folders (replace `local_data_dir` with the location where your `/db` and `/config` folders are located).
>
> `sudo chown -R 20211:20211 /local_data_dir`
>
> `sudo chmod -R a+rwx /local_data_dir`

View File

@@ -2,7 +2,7 @@
The application uses the following default ports:
- **Web UI**: `20211`
- **GraphQL API**: `20212`
The **Web UI** is served by an **nginx** server, while the **API backend** runs on a **Flask (Python)** server.
@@ -25,8 +25,8 @@ Follow all of the below in order to disqualify potential causes of issues and to
When opening an issue or debugging:
1. Include a screenshot of what you see when accessing `HTTP://<your_server>:20211` (or your custom port)
1. [Follow steps 1, 2, 3, 4 on this page](./DEBUG_TIPS.md)
1. Execute the following in the container to see the processes and their ports and submit a screenshot of the result:
- `sudo apk add lsof`
- `sudo lsof -i`
@@ -36,21 +36,21 @@ When opening an issue or debugging:
![lsof ports](./img/WEB_UI_PORT_DEBUG/container_port.png)
### 2. JavaScript issues
Check for browser console (F12 browser dev console) errors + check different browsers.
### 3. Clear the app cache and cached JavaScript files
Refresh the browser cache (usually Shift + refresh), try a private window, or different browsers. Please also refresh the app cache by clicking the 🔃 (reload) button in the header of the application.
### 4. Disable proxies
If you have any reverse proxy or similar, try disabling it.
### 5. Disable your firewall
If you are using a firewall, try temporarily disabling it.
### 6. Post your docker start details
@@ -67,6 +67,6 @@ In the container execute and investigate:
### 8. Make sure permissions are correct
> [!TIP]
> You can try to start the container without mapping the `/data/config` and `/data/db` dirs and if the UI shows up then the issue is most likely related to your file system permissions or file ownership.
Please read the [Permissions troubleshooting guide](./FILE_PERMISSIONS.md) and provide a screenshot of the permissions and ownership of the `/data/db` and `/data/config` directories.

View File

@@ -1,22 +1,22 @@
# Workflows debugging and troubleshooting
> [!TIP]
> Before troubleshooting, please ensure you have the right [Debugging and LOG_LEVEL set](./DEBUG_TIPS.md).
Workflows are triggered by various events. These events are captured and listed in the _Integrations -> App Events_ section of the application.
## Troubleshooting triggers
> [!NOTE]
> Workflow events are processed once every 5 seconds. However, if a scan or other background tasks are running, this can cause a delay up to a few minutes.
If an event doesn't trigger a workflow as expected, check the _App Events_ section for the event. You can filter these by the ID of the device (`devMAC` or `devGUID`).
![App events search](./img/WORKFLOWS/workflows_app_events_search.png)
Once you find the _Event Guid_ and _Object GUID_, use them to find relevant debug entries.
Navigate to _Maintenance -> Logs_ where you can filter the logs based on the _Event or Object GUID_.
![Log events search](./img/WORKFLOWS/workflows_logs_search.png)
@@ -24,9 +24,9 @@ Below you can find some example `app.log` entries that will help you understand
```bash
16:27:03 [WF] Checking if '13f0ce26-1835-4c48-ae03-cdaf38f328fe' triggers the workflow 'Sample Device Update Workflow'
16:27:03 [WF] self.triggered 'False' for event '[[155], ['13f0ce26-1835-4c48-ae03-cdaf38f328fe'], [0], ['2025-04-02 05:26:56'], ['Devices'], ['050b6980-7af6-4409-950d-08e9786b7b33'], ['DEVICES'], ['00:11:32:ef:a5:6c'], ['192.168.1.82'], ['050b6980-7af6-4409-950d-08e9786b7b33'], [None], [0], [0], ['devPresentLastScan'], ['online'], ['update'], [None], [None], [None], [None]] and trigger {"object_type": "Devices", "event_type": "insert"}'
16:27:03 [WF] Checking if '13f0ce26-1835-4c48-ae03-cdaf38f328fe' triggers the workflow 'Location Change'
16:27:03 [WF] self.triggered 'True' for event '[[155], ['13f0ce26-1835-4c48-ae03-cdaf38f328fe'], [0], ['2025-04-02 05:26:56'], ['Devices'], ['050b6980-7af6-4409-950d-08e9786b7b33'], ['DEVICES'], ['00:11:32:ef:a5:6c'], ['192.168.1.82'], ['050b6980-7af6-4409-950d-08e9786b7b33'], [None], [0], [0], ['devPresentLastScan'], ['online'], ['update'], [None], [None], [None], [None]] and trigger {"object_type": "Devices", "event_type": "update"}'
16:27:03 [WF] Event with GUID '13f0ce26-1835-4c48-ae03-cdaf38f328fe' triggered the workflow 'Location Change'
```
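Once you have the GUIDs, you can also search the raw log directly inside the container (a sketch; `NETALERTX_LOG` is the log directory variable used by the service scripts, and the GUID is the one from the example above):

```bash
# Pull all workflow ([WF]) lines for a single event GUID out of app.log.
grep '\[WF\]' "${NETALERTX_LOG}/app.log" | grep '13f0ce26-1835-4c48-ae03-cdaf38f328fe'
```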

View File

(Image diffs: six screenshot assets updated; before/after sizes unchanged: 135 KiB, 11 KiB, 32 KiB, 36 KiB, 34 KiB, 35 KiB.)

View File

@@ -107,11 +107,11 @@
"buttons": [
{
"labelStringCode": "Maint_PurgeLog",
"event": "logManage('crond.log', 'cleanLog')"
"event": "logManage('cron.log', 'cleanLog')"
}
],
"fileName": "crond.log",
"filePath": "__NETALERTX_LOG__/crond.log",
"fileName": "cron.log",
"filePath": "__NETALERTX_LOG__/cron.log",
"textAreaCssClass": "logs logs-small"
}
]

View File

@@ -274,7 +274,7 @@ function cleanLog($logFile)
$path = "";
$allowedFiles = ['app.log', 'app_front.log', 'IP_changes.log', 'stdout.log', 'stderr.log', 'app.php_errors.log', 'execution_queue.log', 'db_is_locked.log', 'nginx-error.log', 'crond.log'];
$allowedFiles = ['app.log', 'app_front.log', 'IP_changes.log', 'stdout.log', 'stderr.log', 'app.php_errors.log', 'execution_queue.log', 'db_is_locked.log', 'nginx-error.log', 'cron.log'];
if(in_array($logFile, $allowedFiles))
{

View File

@@ -36,12 +36,7 @@ def main():
# Check if basic config settings supplied
if check_config() is False:
mylog(
"none",
[
f"[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. Check your {confFileName} {pluginName}_* variables."
],
)
mylog("none", f"[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. Check your {confFileName} {pluginName}_* variables.")
return
# Create a database connection

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python3
import conf
from const import confFileName, logPath
from const import logPath
from pytz import timezone
import os
@@ -36,11 +36,7 @@ def main():
# Check if basic config settings supplied
if not validate_config():
mylog(
"none",
f"[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. "
f"Check your {confFileName} {pluginName}_* variables.",
)
mylog("none", f"[{pluginName}] ⚠ ERROR: Publisher not set up correctly. Check your {pluginName}_* variables.",)
return
# Create a database connection

View File

@@ -138,10 +138,7 @@ def execute_arpscan(userSubnets):
mylog("verbose", [f"[{pluginName}] All devices List len:", len(devices_list)])
mylog("verbose", [f"[{pluginName}] Devices List:", devices_list])
mylog(
"verbose",
[f"[{pluginName}] Found: Devices without duplicates ", len(unique_devices)],
)
mylog("verbose", [f"[{pluginName}] Found: Devices without duplicates ", len(unique_devices)],)
return unique_devices
@@ -174,10 +171,7 @@ def execute_arpscan_on_interface(interface):
except subprocess.CalledProcessError:
result = ""
except subprocess.TimeoutExpired:
mylog(
"warning",
[f"[{pluginName}] arp-scan timed out after {timeout_seconds}s"],
)
mylog("warning", [f"[{pluginName}] arp-scan timed out after {timeout_seconds}s"],)
result = ""
# stop looping if duration not set or expired
if scan_duration == 0 or (time.time() - start_time) > scan_duration:

View File

@@ -33,10 +33,7 @@ def main():
device_data = get_device_data()
mylog(
"verbose",
[f"[{pluginName}] Found '{len(device_data)}' devices"],
)
mylog("verbose", f"[{pluginName}] Found '{len(device_data)}' devices")
filtered_devices = [
(key, device)
@@ -44,10 +41,7 @@ def main():
if device.state == ConnectionState.CONNECTED
]
mylog(
"verbose",
[f"[{pluginName}] Processing '{len(filtered_devices)}' connected devices"],
)
mylog("verbose", f"[{pluginName}] Processing '{len(filtered_devices)}' connected devices")
for mac, device in filtered_devices:
entry_mac = str(device.description.mac).lower()

View File

@@ -75,10 +75,7 @@ def cleanup_database(
# -----------------------------------------------------
# Cleanup Online History
mylog(
"verbose",
[f"[{pluginName}] Online_History: Delete all but keep latest 150 entries"],
)
mylog("verbose", [f"[{pluginName}] Online_History: Delete all but keep latest 150 entries"],)
cursor.execute(
"""DELETE from Online_History where "Index" not in (
SELECT "Index" from Online_History
@@ -87,24 +84,14 @@ def cleanup_database(
# -----------------------------------------------------
# Cleanup Events
mylog(
"verbose",
[
f"[{pluginName}] Events: Delete all older than {str(DAYS_TO_KEEP_EVENTS)} days (DAYS_TO_KEEP_EVENTS setting)"
],
)
mylog("verbose", f"[{pluginName}] Events: Delete all older than {str(DAYS_TO_KEEP_EVENTS)} days (DAYS_TO_KEEP_EVENTS setting)")
cursor.execute(
f"""DELETE FROM Events
WHERE eve_DateTime <= date('now', '-{str(DAYS_TO_KEEP_EVENTS)} day')"""
)
# -----------------------------------------------------
# Trim Plugins_History entries to less than PLUGINS_KEEP_HIST setting per unique "Plugin" column entry
mylog(
"verbose",
[
f"[{pluginName}] Plugins_History: Trim Plugins_History entries to less than {str(PLUGINS_KEEP_HIST)} per Plugin (PLUGINS_KEEP_HIST setting)"
],
)
mylog("verbose", f"[{pluginName}] Plugins_History: Trim Plugins_History entries to less than {str(PLUGINS_KEEP_HIST)} per Plugin (PLUGINS_KEEP_HIST setting)")
# Build the SQL query to delete entries that exceed the limit per unique "Plugin" column entry
delete_query = f"""DELETE FROM Plugins_History
@@ -125,12 +112,7 @@ def cleanup_database(
histCount = get_setting_value("DBCLNP_NOTIFI_HIST")
mylog(
"verbose",
[
f"[{pluginName}] Plugins_History: Trim Notifications entries to less than {histCount}"
],
)
mylog("verbose", f"[{pluginName}] Plugins_History: Trim Notifications entries to less than {histCount}")
# Build the SQL query to delete entries
delete_query = f"""DELETE FROM Notifications
@@ -170,12 +152,7 @@ def cleanup_database(
# -----------------------------------------------------
# Cleanup New Devices
if HRS_TO_KEEP_NEWDEV != 0:
mylog(
"verbose",
[
f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_NEWDEV)} hours (HRS_TO_KEEP_NEWDEV setting)"
],
)
mylog("verbose", f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_NEWDEV)} hours (HRS_TO_KEEP_NEWDEV setting)")
query = f"""DELETE FROM Devices WHERE devIsNew = 1 AND devFirstConnection < date('now', '-{str(HRS_TO_KEEP_NEWDEV)} hour')"""
mylog("verbose", [f"[{pluginName}] Query: {query} "])
cursor.execute(query)
@@ -183,12 +160,7 @@ def cleanup_database(
# -----------------------------------------------------
# Cleanup Offline Devices
if HRS_TO_KEEP_OFFDEV != 0:
mylog(
"verbose",
[
f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_OFFDEV)} hours (HRS_TO_KEEP_OFFDEV setting)"
],
)
mylog("verbose", f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_OFFDEV)} hours (HRS_TO_KEEP_OFFDEV setting)")
query = f"""DELETE FROM Devices WHERE devPresentLastScan = 0 AND devLastConnection < date('now', '-{str(HRS_TO_KEEP_OFFDEV)} hour')"""
mylog("verbose", [f"[{pluginName}] Query: {query} "])
cursor.execute(query)
@@ -196,12 +168,7 @@ def cleanup_database(
# -----------------------------------------------------
# Clear New Flag
if CLEAR_NEW_FLAG != 0:
mylog(
"verbose",
[
f'[{pluginName}] Devices: Clear "New Device" flag for all devices older than {str(CLEAR_NEW_FLAG)} hours (CLEAR_NEW_FLAG setting)'
],
)
mylog("verbose", f'[{pluginName}] Devices: Clear "New Device" flag for all devices older than {str(CLEAR_NEW_FLAG)} hours (CLEAR_NEW_FLAG setting)')
query = f"""UPDATE Devices SET devIsNew = 0 WHERE devIsNew = 1 AND date(devFirstConnection, '+{str(CLEAR_NEW_FLAG)} hour') < date('now')"""
# select * from Devices where devIsNew = 1 AND date(devFirstConnection, '+3 hour' ) < date('now')
mylog("verbose", [f"[{pluginName}] Query: {query} "])

View File

@@ -71,10 +71,7 @@ def get_entries(plugin_objects: Plugin_Objects) -> Plugin_Objects:
status = lease.get('status')
device_name = comment or host_name or "(unknown)"
mylog(
'verbose',
[f"ID: {lease_id}, Address: {address}, MAC Address: {mac_address}, Host Name: {host_name}, Comment: {comment}, Last Seen: {last_seen}, Status: {status}"]
)
mylog('verbose', f"ID: {lease_id}, Address: {address}, MAC: {mac_address}, Host Name: {host_name}, Comment: {comment}, Last Seen: {last_seen}, Status: {status}")
if (status == "bound"):
plugin_objects.add_object(

View File

@@ -24,7 +24,7 @@ apt-get install sudo -y
apt-get install -y git
# Clean the directory
rm -R $INSTALL_DIR/
rm -R ${INSTALL_DIR:?}/
# Clone the application repository
git clone https://github.com/jokob-sk/NetAlertX "$INSTALL_DIR/"
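The `:?` expansion added here is the standard guard against an unset variable turning the cleanup into `rm -R /`; a quick illustration of the behavior:

```bash
# With :? the shell aborts when the variable is unset or empty, instead of
# silently expanding to "/" and wiping the filesystem root.
unset INSTALL_DIR
rm -R "${INSTALL_DIR:?}/"   # bash: INSTALL_DIR: parameter null or not set
```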

View File

@@ -34,6 +34,8 @@ sudo phpenmod -v 8.2 sqlite3
# setup virtual python environment so we can use pip3 to install packages
apt-get install python3-venv -y
python3 -m venv /opt/venv
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source /opt/venv/bin/activate
update-alternatives --install /usr/bin/python python /usr/bin/python3 10

View File

@@ -175,6 +175,8 @@ nginx -t || { echo "[INSTALL] nginx config test failed"; exit 1; }
# sudo systemctl restart nginx
# Activate the virtual python environment
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source /opt/venv/bin/activate
echo "[INSTALL] 🚀 Starting app - navigate to your <server IP>:${PORT}"

View File

@@ -0,0 +1,5 @@
#!/bin/bash
echo "Initializing cron..."
# Placeholder for cron initialization commands
echo "cron initialized."

View File

@@ -1,4 +0,0 @@
#!/bin/bash
echo "Initializing crond..."
#Future crond initializations can go here.
echo "crond initialized."

View File

@@ -1,4 +1,4 @@
#!/bin/bash
echo "Initializing nginx..."
install -d -o netalertx -g netalertx -m 700 ${SYSTEM_SERVICES_RUN_TMP}/client_body;
install -d -o netalertx -g netalertx -m 700 "${SYSTEM_SERVICES_RUN_TMP}/client_body";
echo "nginx initialized."

View File

@@ -51,12 +51,13 @@ if [ "$(id -u)" -eq 0 ]; then
EOF
>&2 printf "%s" "${RESET}"
# Set ownership to netalertx user for all read-write paths
chown -R netalertx ${READ_WRITE_PATHS} 2>/dev/null || true
# Set directory and file permissions for all read-write paths
find ${READ_WRITE_PATHS} -type d -exec chmod u+rwx {} \;
find ${READ_WRITE_PATHS} -type f -exec chmod u+rw {} \;
# Set ownership and permissions for each read-write path individually
printf '%s\n' "${READ_WRITE_PATHS}" | while IFS= read -r path; do
[ -n "${path}" ] || continue
chown -R netalertx "${path}" 2>/dev/null || true
find "${path}" -type d -exec chmod u+rwx {} \;
find "${path}" -type f -exec chmod u+rw {} \;
done
echo Permissions fixed for read-write paths. Please restart the container as user 20211.
sleep infinity & wait $!
fi
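The loop quotes each entry, so paths containing spaces survive where the old unquoted `${READ_WRITE_PATHS}` relied on word splitting; a reduced sketch of the pattern (sample paths assumed):

```bash
# Newline-separated list; each line is processed as one literal path.
READ_WRITE_PATHS="/data/config
/data/db"
printf '%s\n' "${READ_WRITE_PATHS}" | while IFS= read -r path; do
    [ -n "${path}" ] || continue
    echo "fixing ownership on: ${path}"
done
```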

View File

@@ -16,11 +16,11 @@ LEGACY_DB=/app/db
MARKER_NAME=.migration
is_mounted() {
local path="$1"
if [ ! -d "${path}" ]; then
my_path="$1"
if [ ! -d "${my_path}" ]; then
return 1
fi
mountpoint -q "${path}" 2>/dev/null
mountpoint -q "${my_path}" 2>/dev/null
}
warn_unmount_legacy() {

View File

@@ -2,7 +2,7 @@
# first-run-check.sh - Checks and initializes configuration files on first run
# Check for app.conf and deploy if required
if [ ! -f ${NETALERTX_CONFIG}/app.conf ]; then
if [ ! -f "${NETALERTX_CONFIG}/app.conf" ]; then
mkdir -p "${NETALERTX_CONFIG}" || {
>&2 echo "ERROR: Failed to create config directory ${NETALERTX_CONFIG}"
exit 1

View File

@@ -441,7 +441,9 @@ CREATE TRIGGER "trg_delete_devices"
END;
end-of-database-schema
if [ $? -ne 0 ]; then
database_creation_status=$?
if [ $database_creation_status -ne 0 ]; then
RED=$(printf '\033[1;31m')
RESET=$(printf '\033[0m')
>&2 printf "%s" "${RED}"
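Capturing `$?` into a named variable immediately matters because any later command overwrites it; a minimal illustration of the failure mode this guards against:

```bash
false                       # sets $? to 1
RED=$(printf '\033[1;31m')  # the command substitution resets $? to 0
[ $? -ne 0 ] && echo "never runs: the failure status was already lost"
```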

View File

@@ -50,7 +50,7 @@ fi
RED='\033[1;31m'
GREY='\033[90m'
RESET='\033[0m'
printf "${RED}"
printf "%s" "${RED}"
echo '
_ _ _ ___ _ _ __ __
| \ | | | | / _ \| | | | \ \ / /
@@ -60,7 +60,7 @@ echo '
\_| \_/\___|\__\_| |_/_|\___|_| \__\/ \/
'
printf "\033[0m"
printf "%s" "${RESET}"
echo ' Network intruder and presence detector.
https://netalertx.com
@@ -69,7 +69,7 @@ set -u
FAILED_STATUS=""
echo "Startup pre-checks"
for script in ${ENTRYPOINT_CHECKS}/*; do
for script in "${ENTRYPOINT_CHECKS}"/*; do
if [ -n "${SKIP_TESTS:-}" ]; then
echo "Skipping startup checks as SKIP_TESTS is set."
break
@@ -77,7 +77,7 @@ for script in ${ENTRYPOINT_CHECKS}/*; do
script_name=$(basename "$script" | sed 's/^[0-9]*-//;s/\.(sh|py)$//;s/-/ /g')
echo "--> ${script_name} "
if [ -n "${SKIP_STARTUP_CHECKS:-}" ] && echo "${SKIP_STARTUP_CHECKS}" | grep -q "\b${script_name}\b"; then
printf "${GREY}skip${RESET}\n"
printf "%sskip%s\n" "${GREY}" "${RESET}"
continue
fi
@@ -134,7 +134,7 @@ fi
# Update vendor data (MAC address OUI database) in the background
# This happens concurrently with service startup to avoid blocking container readiness
bash ${SYSTEM_SERVICES_SCRIPTS}/update_vendors.sh &
bash "${SYSTEM_SERVICES_SCRIPTS}/update_vendors.sh" &
@@ -274,7 +274,7 @@ trap on_signal INT TERM
# Only start crond scheduler on Alpine (non-Debian) environments
# Debian typically uses systemd or other schedulers
if [ "${ENVIRONMENT:-}" ] && [ "${ENVIRONMENT:-}" != "debian" ]; then
add_service "/services/start-crond.sh" "crond"
add_service "/services/start-cron.sh" "supercronic"
fi
# Start core frontend and backend services
@@ -290,8 +290,6 @@ add_service "${SYSTEM_SERVICES}/start-backend.sh" "python3"
# Useful for devcontainer debugging where individual services need to be debugged
if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
echo "NETALERTX_DEBUG is set to 1, will not shut down other services if one fails."
wait
exit $?
fi
################################################################################
@@ -316,10 +314,25 @@ while [ -n "${SERVICES}" ]; do
if ! is_pid_active "${pid}"; then
wait "${pid}" 2>/dev/null
status=$?
# Handle intentional backend restart
if [ "${name}" = "python3" ] && [ -f "/tmp/backend_restart_pending" ]; then
echo "🔄 Backend restart requested via marker file."
rm -f "/tmp/backend_restart_pending"
remove_service "${pid}"
add_service "${SYSTEM_SERVICES}/start-backend.sh" "python3"
continue
fi
FAILED_STATUS=$status
FAILED_NAME="${name}"
remove_service "${pid}"
handle_exit
if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
echo "⚠️ Service ${name} exited with status ${status}. Debug mode active - continuing."
else
handle_exit
fi
fi
done

View File

@@ -1,4 +1,4 @@
# Every minute check for cron jobs
* * * * * /services/scripts/cron_script.sh
# Update vendors 4x/d
0 */6 * * * /services/scripts/update_vendors.sh
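Supercronic can validate a crontab without running any jobs, which is handy when editing this file (the `-test` flag is documented in the supercronic README; the path matches the start script below):

```bash
# Parse-check the crontab and exit non-zero on syntax errors.
supercronic -test "${SYSTEM_SERVICES_CONFIG_CRON}/crontab"
```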

View File

@@ -21,10 +21,10 @@ log_success() {
}
# 1. Check if crond is running
if pgrep -f "crond" > /dev/null; then
log_success "crond is running"
if pgrep -f "supercronic" > /dev/null; then
log_success "supercronic is running"
else
log_error "crond is not running"
log_error "supercronic is not running"
fi
# 2. Check if php-fpm is running

View File

@@ -5,12 +5,15 @@ export INSTALL_DIR=/app
# Check if there are any entries with cron_restart_backend
if grep -q "cron_restart_backend" "${LOG_EXECUTION_QUEUE}"; then
killall python3
sleep 2
/services/start-backend.sh &
echo "$(date): Restarting backend triggered by cron_restart_backend"
# Create marker for entrypoint.sh to restart the service instead of killing the container
touch /tmp/backend_restart_pending
killall python3 || echo "killall python3 failed or no process found"
# Remove all lines containing cron_restart_backend from the log file
# Atomic replacement with temp file
grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp" && \
mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp"
mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
fi
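Since the check is a plain substring grep, a restart can also be requested manually; a sketch (the exact queue-entry format is hypothetical, only the `cron_restart_backend` token matters):

```bash
# Ask the next cron_script.sh run to restart the backend gracefully.
echo "manual|cron_restart_backend" >> "${LOG_EXECUTION_QUEUE}"
```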

View File

@@ -3,7 +3,7 @@
cd "${NETALERTX_APP}" || exit 1
max_attempts=50 # 10 seconds total (50 * 0.2s)
attempt=0
while ps ax | grep -v grep | grep -q python3 && [ $attempt -lt $max_attempts ]; do
while pgrep -x python3 >/dev/null && [ $attempt -lt $max_attempts ]; do
killall -TERM python3 &>/dev/null
sleep 0.2
((attempt++))
@@ -12,4 +12,5 @@ done
killall -KILL python3 &>/dev/null
echo "Starting python3 $(cat /services/config/python/backend-extra-launch-parameters 2>/dev/null) -m server > ${NETALERTX_LOG}/stdout.log 2> >(tee ${NETALERTX_LOG}/stderr.log >&2)"
exec python3 $(cat /services/config/python/backend-extra-launch-parameters 2>/dev/null) -m server > ${NETALERTX_LOG}/stdout.log 2> >(tee ${NETALERTX_LOG}/stderr.log >&2)
read -ra EXTRA_PARAMS < <(cat /services/config/python/backend-extra-launch-parameters 2>/dev/null)
exec python3 "${EXTRA_PARAMS[@]}" -m server > "${NETALERTX_LOG}/stdout.log" 2> >(tee "${NETALERTX_LOG}/stderr.log" >&2)

View File

@@ -0,0 +1,42 @@
#!/bin/bash
set -euo pipefail
crond_pid=""
# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
cleanup() {
status=$?
echo "Supercronic stopped! (exit ${status})"
}
# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
forward_signal() {
if [[ -n "${crond_pid}" ]]; then
kill -TERM "${crond_pid}" 2>/dev/null || true
fi
}
while pgrep -x crond >/dev/null 2>&1; do
killall crond &>/dev/null
sleep 0.2
done
trap cleanup EXIT
trap forward_signal INT TERM
CRON_OPTS="--quiet"
if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
CRON_OPTS="--debug"
fi
echo "Starting supercronic ${CRON_OPTS} \"${SYSTEM_SERVICES_CONFIG_CRON}/crontab\" >>\"${LOG_CRON}\" 2>&1 &"
supercronic ${CRON_OPTS} "${SYSTEM_SERVICES_CONFIG_CRON}/crontab" >>"${LOG_CRON}" 2>&1 &
crond_pid=$!
wait "${crond_pid}"; status=$?
echo -ne " done"
exit ${status}

View File

@@ -1,33 +0,0 @@
#!/bin/bash
set -euo pipefail
crond_pid=""
cleanup() {
status=$?
echo "Crond stopped! (exit ${status})"
}
forward_signal() {
if [[ -n "${crond_pid}" ]]; then
kill -TERM "${crond_pid}" 2>/dev/null || true
fi
}
while ps ax | grep -v -e grep -e '.sh' | grep crond >/dev/null 2>&1; do
killall crond &>/dev/null
sleep 0.2
done
trap cleanup EXIT
trap forward_signal INT TERM
echo "Starting /usr/sbin/crond -c \"${SYSTEM_SERVICES_CROND}\" -f -L \"${LOG_CROND}\" >>\"${LOG_CROND}\" 2>&1 &"
/usr/sbin/crond -c "${SYSTEM_SERVICES_CROND}" -f -L "${LOG_CROND}" >>"${LOG_CROND}" 2>&1 &
crond_pid=$!
wait "${crond_pid}"; status=$?
echo -ne " done"
exit ${status}

View File

@@ -11,11 +11,15 @@ mkdir -p "${LOG_DIR}" "${RUN_DIR}" "${TMP_DIR}"
nginx_pid=""
# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
cleanup() {
status=$?
echo "nginx stopped! (exit ${status})"
}
# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
forward_signal() {
if [[ -n "${nginx_pid}" ]]; then
kill -TERM "${nginx_pid}" 2>/dev/null || true
@@ -24,12 +28,15 @@ forward_signal() {
# When in devcontainer we must kill any existing nginx processes
while ps ax | grep -v -e "grep" -e "nginx.sh" | grep nginx >/dev/null 2>&1; do
while pgrep -x nginx >/dev/null 2>&1; do
killall nginx &>/dev/null || true
sleep 0.2
done
TEMP_CONFIG_FILE=$(mktemp "${TMP_DIR}/netalertx.conf.XXXXXX")
# Shell check doesn't recognize envsubst variables
# shellcheck disable=SC2016
if envsubst '${LISTEN_ADDR} ${PORT}' < "${SYSTEM_NGINX_CONFIG_TEMPLATE}" > "${TEMP_CONFIG_FILE}" 2>/dev/null; then
mv "${TEMP_CONFIG_FILE}" "${SYSTEM_SERVICES_ACTIVE_CONFIG_FILE}"
else

View File

@@ -3,18 +3,22 @@ set -euo pipefail
php_fpm_pid=""
# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
cleanup() {
status=$?
echo "php-fpm stopped! (exit ${status})"
}
# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
forward_signal() {
if [[ -n "${php_fpm_pid}" ]]; then
kill -TERM "${php_fpm_pid}" 2>/dev/null || true
fi
}
while ps ax | grep -v grep | grep php-fpm83 >/dev/null; do
while pgrep -x php-fpm83 >/dev/null; do
killall php-fpm83 &>/dev/null
sleep 0.2
done
@@ -27,5 +31,6 @@ echo "Starting /usr/sbin/php-fpm83 -y \"${PHP_FPM_CONFIG_FILE}\" -F >>\"${LOG_AP
php_fpm_pid=$!
wait "${php_fpm_pid}"
exit_status=$?
echo -ne " done"
exit $?
exit $exit_status

View File

@@ -127,6 +127,8 @@ apt-get install -y --no-install-recommends \
ca-certificates lsb-release curl gnupg
# Detect OS
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
. /etc/os-release
OS_ID="${ID:-}"
OS_VER="${VERSION_ID:-}"
@@ -203,6 +205,8 @@ printf "%b\n" "-----------------------------------------------------------------
printf "%b\n" "${GREEN}[INSTALLING] ${RESET}Setting up Python environment"
printf "%b\n" "--------------------------------------------------------------------------"
python3 -m venv /opt/myenv
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source /opt/myenv/bin/activate
python -m pip install --upgrade pip
python -m pip install -r "${INSTALLER_DIR}/requirements.txt"

View File

@@ -22,7 +22,6 @@ NGINX_CONF_FILE=netalertx.conf
WEB_UI_DIR=/var/www/html/netalertx
NGINX_CONFIG_FILE=/etc/nginx/conf.d/$NGINX_CONF_FILE
OUI_FILE="/usr/share/arp-scan/ieee-oui.txt" # Define the path to ieee-oui.txt and ieee-iab.txt
SCRIPT_DIR="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
FILEDB=${INSTALL_DIR}/db/${DB_FILE}
PHPVERSION="8.3"
VENV_DIR="/opt/netalertx-python"
@@ -106,7 +105,7 @@ if [ -d "${INSTALL_DIR}" ]; then
if [ "$1" == "install" ] || [ "$1" == "update" ] || [ "$1" == "start" ]; then
confirmation=$1
else
read -p "Enter your choice: " confirmation
read -rp "Enter your choice: " confirmation
fi
if [ "$confirmation" == "install" ]; then
# Ensure INSTALL_DIR is safe to wipe
@@ -118,7 +117,7 @@ if [ -d "${INSTALL_DIR}" ]; then
mountpoint -q "${INSTALL_DIR}/front" && umount "${INSTALL_DIR}/front" 2>/dev/null
# Remove all contents safely
rm -rf -- "${INSTALL_DIR}"/* "${INSTALL_DIR}"/.[!.]* "${INSTALL_DIR}"/..?* 2>/dev/null
rm -rf -- "${INSTALL_DIR:?}"/* "${INSTALL_DIR}"/.[!.]* "${INSTALL_DIR}"/..?* 2>/dev/null
# Re-clone repository
git clone "${GITHUB_REPO}" "${INSTALL_DIR}/"
@@ -152,6 +151,8 @@ echo "---------------------------------------------------------"
echo
# update-alternatives --install /usr/bin/python python /usr/bin/python3 10
python3 -m venv "${VENV_DIR}"
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source "${VENV_DIR}/bin/activate"
if [[ ! -f "${REQUIREMENTS_FILE}" ]]; then

View File

@@ -9,11 +9,11 @@ site_description: >-
nav:
- Home: index.md
- Installation:
- Installation options: INSTALLATION.md
- Quick setup: INITIAL_SETUP.md
- Docker:
- Docker Guide: DOCKER_INSTALLATION.md
- Docker Compose: DOCKER_COMPOSE.md
- Docker File Permissions: FILE_PERMISSIONS.md
- Docker Updates: UPDATES.md
@@ -25,24 +25,24 @@ nav:
- Bare-metal (Experimental): HW_INSTALL.md
- Migration Guide: MIGRATION.md
- Help:
- Common issues: COMMON_ISSUES.md
- Setup:
- Getting started:
- Subnets: SUBNETS.md
- Enable Plugins: PLUGINS.md
- Pi-hole Guide: PIHOLE_GUIDE.md
- Home Assistant: HOME_ASSISTANT.md
- Emails: SMTP.md
- Backups: BACKUPS.md
- Security Features: SECURITY_FEATURES.md
- Security Considerations: SECURITY.md
- Advanced guides:
- Remote Networks: REMOTE_NETWORKS.md
- Notifications Guide: NOTIFICATIONS.md
- Name Resolution: NAME_RESOLUTION.md
- Authelia: AUTHELIA.md
- Performance: PERFORMANCE.md
- Reverse DNS: REVERSE_DNS.md
- Reverse Proxy: REVERSE_PROXY.md
- Webhooks (n8n): WEBHOOK_N8N.md
- Workflows: WORKFLOWS.md
@@ -63,14 +63,15 @@ nav:
- Icons: ICONS.md
- Network Topology: NETWORK_TREE.md
- Troubleshooting:
- General Tips: DEBUG_TIPS.md
- Common Issues: COMMON_ISSUES.md
- Inspecting Logs: LOGGING.md
- Debugging Tips: DEBUG_TIPS.md
- Debugging GraphQL: DEBUG_GRAPHQL.md
- Debugging Invalid JSON: DEBUG_INVALID_JSON.md
- Debugging PHP: DEBUG_PHP.md
- Debugging Plugins: DEBUG_PLUGINS.md
- Debugging Web UI Port: WEB_UI_PORT_DEBUG.md
- Debugging Workflows: WORKFLOWS_DEBUGGING.md
- API Server Issues: DEBUG_API_SERVER.md
- Invalid JSON Issues: DEBUG_INVALID_JSON.md
- PHP Issues: DEBUG_PHP.md
- Plugin Issues: DEBUG_PLUGINS.md
- Web UI Port Issues: WEB_UI_PORT_DEBUG.md
- Workflows Issues: WORKFLOWS_DEBUGGING.md
- Development:
- Plugin and app development:
- Environment Setup: DEV_ENV_SETUP.md
@@ -83,8 +84,8 @@ nav:
- Settings: SETTINGS_SYSTEM.md
- Versions: VERSIONS.md
- Icon and Type guessing: DEVICE_HEURISTICS.md
- API:
- Overview: API.md
- Devices Collection: API_DEVICES.md
- Device: API_DEVICE.md
- Sessions: API_SESSIONS.md
@@ -98,9 +99,9 @@ nav:
- GraphQL: API_GRAPHQL.md
- DB query: API_DBQUERY.md
- Tests: API_TESTS.md
- SUPERSEDED OLD API Overview: API_OLD.md
- Integrations:
- Webhook Secret: WEBHOOK_SECRET.md
- Helper scripts: HELPER_SCRIPTS.md

View File

@@ -4,10 +4,10 @@
NETALERTX_DB_FILE=${NETALERTX_DB:-/data/db}/app.db
#remove the old database
rm ${NETALERTX_DB_FILE}
rm "${NETALERTX_DB_FILE}"
# Write schema to text to app.db file until we see "end-of-database-schema"
cat << end-of-database-schema > ${NETALERTX_DB_FILE}.sql
cat << end-of-database-schema > "${NETALERTX_DB_FILE}.sql"
CREATE TABLE sqlite_stat1(tbl,idx,stat);
CREATE TABLE Events (eve_MAC STRING (50) NOT NULL COLLATE NOCASE, eve_IP STRING (50) NOT NULL COLLATE NOCASE, eve_DateTime DATETIME NOT NULL, eve_EventType STRING (30) NOT NULL COLLATE NOCASE, eve_AdditionalInfo STRING (250) DEFAULT (''), eve_PendingAlertEmail BOOLEAN NOT NULL CHECK (eve_PendingAlertEmail IN (0, 1)) DEFAULT (1), eve_PairEventRowid INTEGER);
CREATE TABLE Sessions (ses_MAC STRING (50) COLLATE NOCASE, ses_IP STRING (50) COLLATE NOCASE, ses_EventTypeConnection STRING (30) COLLATE NOCASE, ses_DateTimeConnection DATETIME, ses_EventTypeDisconnection STRING (30) COLLATE NOCASE, ses_DateTimeDisconnection DATETIME, ses_StillConnected BOOLEAN, ses_AdditionalInfo STRING (250));
@@ -421,4 +421,4 @@ CREATE TRIGGER "trg_delete_devices"
end-of-database-schema
# Import the database schema into the new database file
sqlite3 ${NETALERTX_DB_FILE} < ${NETALERTX_DB_FILE}.sql
sqlite3 "${NETALERTX_DB_FILE}" < "${NETALERTX_DB_FILE}.sql"

View File

@@ -16,4 +16,4 @@ for p in $PORTS; do
done
# Show any other NetAlertX-related listeners (nginx, php-fpm, python backend)
ss -ltnp 2>/dev/null | egrep 'nginx|php-fpm|python' || true
ss -ltnp 2>/dev/null | grep -e 'nginx\|php-fpm\|python' || true

View File

@@ -63,9 +63,7 @@ main structure of NetAlertX
def main():
mylog(
"none", ["[MAIN] Setting up ..."]
) # has to be level 'none' as user config not loaded yet
mylog("none", ["[MAIN] Setting up ..."]) # has to be level 'none' as user config not loaded yet
mylog("none", [f"[conf.tz] Setting up ...{conf.tz}"])
@@ -221,22 +219,14 @@ def main():
# Fetch new unprocessed events
new_events = workflow_manager.get_new_app_events()
mylog(
"debug",
[
f"[MAIN] Processing WORKFLOW new_events from get_new_app_events: {len(new_events)}"
],
)
mylog("debug", [f"[MAIN] Processing WORKFLOW new_events from get_new_app_events: {len(new_events)}"],)
# Process each new event and check triggers
if len(new_events) > 0:
updateState("Workflows: Start")
update_api_flag = False
for event in new_events:
mylog(
"debug",
[f"[MAIN] Processing WORKFLOW app event with GUID {event['GUID']}"],
)
mylog("debug", [f"[MAIN] Processing WORKFLOW app event with GUID {event['GUID']}"],)
# proceed to process events
workflow_manager.process_event(event)
@@ -253,12 +243,7 @@ def main():
# check if devices list needs updating
userUpdatedDevices = UserEventsQueueInstance().has_update_devices()
mylog(
"debug",
[
f"[Plugins] Should I update API (userUpdatedDevices): {userUpdatedDevices}"
],
)
mylog("debug", [f"[Plugins] Should I update API (userUpdatedDevices): {userUpdatedDevices}"],)
if userUpdatedDevices:
update_api(db, all_plugins, True, ["devices"], userUpdatedDevices)

View File

@@ -96,16 +96,9 @@ def update_api(
) # Ensure port is an integer
start_server(graphql_port_value, app_state) # Start the server
except ValueError:
mylog(
"none",
[
f"[API] Invalid GRAPHQL_PORT value, must be an integer: {graphql_port_value}"
],
)
mylog("none", [f"[API] Invalid GRAPHQL_PORT value, must be an integer: {graphql_port_value}"],)
else:
mylog(
"none", ["[API] GRAPHQL_PORT or API_TOKEN is not set, will try later."]
)
mylog("none", ["[API] GRAPHQL_PORT or API_TOKEN is not set, will try later."])
# -------------------------------------------------------------------------------
@@ -135,12 +128,7 @@ class api_endpoint_class:
# Match SQL and API endpoint path
if endpoint.query == self.query and endpoint.path == self.path:
found = True
mylog(
"trace",
[
f"[API] api_endpoint_class: Hashes (file|old|new): ({self.fileName}|{endpoint.hash}|{self.hash})"
],
)
mylog("trace", [f"[API] api_endpoint_class: Hashes (file|old|new): ({self.fileName}|{endpoint.hash}|{self.hash})"],)
if endpoint.hash != self.hash:
self.needsUpdate = True
# Only update changeDetectedWhen if it hasn't been set recently
@@ -190,10 +178,7 @@ class api_endpoint_class:
)
)
):
mylog(
"debug",
[f"[API] api_endpoint_class: Writing {self.fileName} after debounce."],
)
mylog("debug", [f"[API] api_endpoint_class: Writing {self.fileName} after debounce."],)
write_file(self.path, json.dumps(self.jsonData))

View File

@@ -173,13 +173,8 @@ class Query(ObjectType):
network_dev_types = get_setting_value("NETWORK_DEVICE_TYPES")
mylog("trace", f"[graphql_schema] allowed_statuses: {allowed_statuses}")
mylog(
"trace",
f"[graphql_schema] hidden_relationships: {hidden_relationships}",
)
mylog(
"trace", f"[graphql_schema] network_dev_types: {network_dev_types}"
)
mylog("trace", f"[graphql_schema] hidden_relationships: {hidden_relationships}",)
mylog("trace", f"[graphql_schema] network_dev_types: {network_dev_types}")
# Filtering based on the "status"
if status == "my_devices":

View File

@@ -71,9 +71,7 @@ class app_state_class:
with open(stateFile, "r") as json_file:
previousState = json.load(json_file)
except json.decoder.JSONDecodeError as e:
mylog(
"none", [f"[app_state_class] Failed to handle app_state.json: {e}"]
)
mylog("none", [f"[app_state_class] Failed to handle app_state.json: {e}"])
# Check if the file exists and recover previous values
if previousState != "":
@@ -151,10 +149,7 @@ class app_state_class:
with open(stateFile, "w") as json_file:
json_file.write(json_data)
except (TypeError, ValueError) as e:
mylog(
"none",
[f"[app_state_class] Failed to serialize object to JSON: {e}"],
)
mylog("none", [f"[app_state_class] Failed to serialize object to JSON: {e}"],)
return

View File

@@ -233,15 +233,7 @@ class DB:
rows = self.sql.fetchall()
return rows
except AssertionError:
mylog(
"minimal",
[
"[Database] - ERROR: inconsistent query and/or arguments.",
query,
" params: ",
args,
],
)
mylog("minimal", ["[Database] - ERROR: inconsistent query and/or arguments.", query, " params: ", args,],)
except sqlite3.Error as e:
mylog("minimal", ["[Database] - SQL ERROR: ", e])
return None
@@ -258,15 +250,7 @@ class DB:
if len(rows) == 1:
return rows[0]
if len(rows) > 1:
mylog(
"verbose",
[
"[Database] - Warning!: query returns multiple rows, only first row is passed on!",
query,
" params: ",
args,
],
)
mylog("verbose", ["[Database] - Warning!: query returns multiple rows, only first row is passed on!", query, " params: ", args,],)
return rows[0]
# empty result set
return None

View File

@@ -88,10 +88,7 @@ def ensure_column(sql, table: str, column_name: str, column_type: str) -> bool:
mylog("none", [msg])
# Add missing column
mylog(
"verbose",
[f"[db_upgrade] Adding '{column_name}' ({column_type}) to {table} table"],
)
mylog("verbose", [f"[db_upgrade] Adding '{column_name}' ({column_type}) to {table} table"],)
sql.execute(f'ALTER TABLE "{table}" ADD "{column_name}" {column_type}')
return True

View File

@@ -586,16 +586,11 @@ class SafeConditionBuilder:
# Validate each component
if not self._validate_column_name(column):
mylog(
"verbose", [f"[SafeConditionBuilder] Invalid column: {column}"]
)
mylog("verbose", [f"[SafeConditionBuilder] Invalid column: {column}"])
return "", {}
if not self._validate_operator(operator):
mylog(
"verbose",
[f"[SafeConditionBuilder] Invalid operator: {operator}"],
)
mylog("verbose", [f"[SafeConditionBuilder] Invalid operator: {operator}"])
return "", {}
# Create parameter binding
@@ -607,10 +602,7 @@ class SafeConditionBuilder:
condition_parts.append(condition_part)
except Exception as e:
mylog(
"verbose",
[f"[SafeConditionBuilder] Error processing condition: {e}"],
)
mylog("verbose", [f"[SafeConditionBuilder] Error processing condition: {e}"],)
return "", {}
if not condition_parts:
@@ -644,10 +636,7 @@ class SafeConditionBuilder:
if event_type in self.ALLOWED_EVENT_TYPES:
valid_types.append(event_type)
else:
mylog(
"verbose",
f"[SafeConditionBuilder] Invalid event type filtered out: {event_type}",
)
mylog("verbose", f"[SafeConditionBuilder] Invalid event type filtered out: {event_type}",)
if not valid_types:
return "", {}
@@ -682,10 +671,7 @@ class SafeConditionBuilder:
return self.build_safe_condition(condition_setting)
except ValueError as e:
# Log the error and return empty condition for safety
mylog(
"verbose",
f"[SafeConditionBuilder] Unsafe condition rejected: {condition_setting}, Error: {e}",
)
mylog("verbose", f"[SafeConditionBuilder] Unsafe condition rejected: {condition_setting}, Error: {e}",)
return "", {}

View File

@@ -36,12 +36,7 @@ def checkPermissionsOK():
dbW_access = os.access(fullDbPath, os.W_OK)
mylog("none", ["\n"])
mylog(
"none",
[
"The backend restarted (started). If this is unexpected check https://bit.ly/NetAlertX_debug for troubleshooting tips."
],
)
mylog("none", "The backend restarted (started). If this is unexpected check https://bit.ly/NetAlertX_debug for troubleshooting tips.")
mylog("none", ["\n"])
mylog("none", ["Permissions check (All should be True)"])
mylog("none", ["------------------------------------------------"])
@@ -59,12 +54,7 @@ def checkPermissionsOK():
def initialiseFile(pathToCheck, defaultFile):
# if file not readable (missing?) try to copy over the backed-up (default) one
if str(os.access(pathToCheck, os.R_OK)) == "False":
mylog(
"none",
[
"[Setup] (" + pathToCheck + ") file is not readable or missing. Trying to copy over the default one."
],
)
mylog("none", ["[Setup] (" + pathToCheck + ") file is not readable or missing. Trying to copy over the default one."],)
try:
# try running a subprocess
p = subprocess.Popen(
@@ -75,31 +65,16 @@ def initialiseFile(pathToCheck, defaultFile):
stdout, stderr = p.communicate()
if str(os.access(pathToCheck, os.R_OK)) == "False":
mylog(
"none",
[
"[Setup] ⚠ ERROR copying (" + defaultFile + ") to (" + pathToCheck + "). Make sure the app has Read & Write access to the parent directory."
],
)
mylog("none", "[Setup] ⚠ ERROR copying (" + defaultFile + ") to (" + pathToCheck + "). Ensure Read & Write access to the parent directory.")
else:
mylog(
"none",
[
"[Setup] (" + defaultFile + ") copied over successfully to (" + pathToCheck + ")."
],
)
mylog("none", ["[Setup] (" + defaultFile + ") copied over successfully to (" + pathToCheck + ")."],)
# write stdout and stderr into .log files for debugging if needed
logResult(stdout, stderr) # TO-DO should be changed to mylog
except subprocess.CalledProcessError as e:
# An error occurred, handle it
mylog(
"none",
[
"[Setup] ⚠ ERROR copying (" + defaultFile + "). Make sure the app has Read & Write access to " + pathToCheck
],
)
mylog("none", ["[Setup] ⚠ ERROR copying (" + defaultFile + "). Make sure the app has Read & Write access to " + pathToCheck],)
mylog("none", [e.output])
@@ -187,14 +162,7 @@ def get_setting(key):
mylog("none", [f"[Settings] ⚠ File not found: {settingsFile}"])
return None
mylog(
"trace",
[
"[Import table_settings.json] checking table_settings.json file",
f"SETTINGS_LASTCACHEDATE: {SETTINGS_LASTCACHEDATE}",
f"fileModifiedTime: {fileModifiedTime}",
],
)
mylog("trace", f"[Import table_settings.json] checking table_settings.json file SETTINGS_LASTCACHEDATE: {SETTINGS_LASTCACHEDATE} fileModifiedTime: {fileModifiedTime}")
# Use cache if file hasn't changed
if fileModifiedTime == SETTINGS_LASTCACHEDATE and SETTINGS_CACHE:
@@ -221,10 +189,7 @@ def get_setting(key):
SETTINGS_LASTCACHEDATE = fileModifiedTime
if key not in SETTINGS_CACHE:
mylog(
"none",
[f"[Settings] ⚠ ERROR - setting_missing - {key} not in {settingsFile}"],
)
mylog("none", [f"[Settings] ⚠ ERROR - setting_missing - {key} not in {settingsFile}"],)
return None
return SETTINGS_CACHE[key]
@@ -357,10 +322,7 @@ def setting_value_to_python_type(set_type, set_value):
value = json.loads(set_value.replace("'", "\""))
except json.JSONDecodeError as e:
mylog(
"none",
[f"[setting_value_to_python_type] Error decoding JSON object: {e}"],
)
mylog("none", [f"[setting_value_to_python_type] Error decoding JSON object: {e}"],)
mylog("none", [set_value])
value = []
@@ -375,10 +337,7 @@ def setting_value_to_python_type(set_type, set_value):
try:
value = reverseTransformers(json.loads(set_value), transformers)
except json.JSONDecodeError as e:
mylog(
"none",
[f"[setting_value_to_python_type] Error decoding JSON object: {e}"],
)
mylog("none", [f"[setting_value_to_python_type] Error decoding JSON object: {e}"],)
mylog("none", [{set_value}])
value = {}
@@ -766,9 +725,7 @@ def checkNewVersion():
try:
data = json.loads(text)
except json.JSONDecodeError:
mylog(
"minimal", ["[Version check] ⚠ ERROR: Invalid JSON response from GitHub."]
)
mylog("minimal", ["[Version check] ⚠ ERROR: Invalid JSON response from GitHub."])
return False
# make sure we received a valid response and not an API rate limit exceeded message
@@ -784,10 +741,7 @@ def checkNewVersion():
else:
mylog("none", ["[Version check] Running the latest version."])
else:
mylog(
"minimal",
["[Version check] ⚠ ERROR: Received unexpected response from GitHub."],
)
mylog("minimal", ["[Version check] ⚠ ERROR: Received unexpected response from GitHub."],)
return False

View File

@@ -180,10 +180,7 @@ def importConfigs(pm, db, all_plugins):
fileModifiedTime = os.path.getmtime(config_file)
mylog("debug", ["[Import Config] checking config file "])
mylog(
"debug",
["[Import Config] lastImportedConfFile :", conf.lastImportedConfFile],
)
mylog("debug", ["[Import Config] lastImportedConfFile :", conf.lastImportedConfFile],)
mylog("debug", ["[Import Config] fileModifiedTime :", fileModifiedTime])
if (fileModifiedTime == conf.lastImportedConfFile) and all_plugins is not None:
@@ -399,12 +396,7 @@ def importConfigs(pm, db, all_plugins):
conf.TIMEZONE = ccd(
"TIMEZONE", conf.tz, c_d, "_KEEP_", "_KEEP_", "[]", "General"
)
mylog(
"none",
[
f"[Config] Invalid timezone '{conf.TIMEZONE}', defaulting to {default_tz}."
],
)
mylog("none", [f"[Config] Invalid timezone '{conf.TIMEZONE}', defaulting to {default_tz}."],)
# TODO cleanup later ----------------------------------------------------------------------------------
# init all time values as we have timezone - all this should be moved into plugin/plugin settings
@@ -450,13 +442,7 @@ def importConfigs(pm, db, all_plugins):
all_plugins = get_plugins_configs(conf.DISCOVER_PLUGINS)
mylog(
"none",
[
"[Config] Plugins: Number of all plugins (including not loaded): ",
len(all_plugins),
],
)
mylog("none", ["[Config] Plugins: Number of all plugins (including not loaded): ", len(all_plugins),],)
plugin_indexes_to_remove = []
all_plugins_prefixes = [] # to init the LOADED_PLUGINS setting with correct options
@@ -580,9 +566,7 @@ def importConfigs(pm, db, all_plugins):
"General",
)
mylog(
"none", ["[Config] Number of Plugins to load: ", len(loaded_plugins_prefixes)]
)
mylog("none", ["[Config] Number of Plugins to load: ", len(loaded_plugins_prefixes)])
mylog("none", ["[Config] Plugins to load: ", loaded_plugins_prefixes])
conf.plugins_once_run = False
@@ -606,12 +590,7 @@ def importConfigs(pm, db, all_plugins):
# Log the value being passed
# ccd(key, default, config_dir, name, inputtype, options, group, events=None, desc="", setJsonMetadata=None, overrideTemplate=None, forceDefault=False)
mylog(
"verbose",
[
f"[Config] Setting override {setting_name} with value: {value}"
],
)
mylog("verbose", [f"[Config] Setting override {setting_name} with value: {value}"],)
ccd(
setting_name,
value,
@@ -630,12 +609,7 @@ def importConfigs(pm, db, all_plugins):
)
except json.JSONDecodeError:
mylog(
"none",
[
f"[Config] [ERROR] Setting override decoding JSON from {app_conf_override_path}"
],
)
mylog("none", [f"[Config] [ERROR] Setting override decoding JSON from {app_conf_override_path}"],)
else:
mylog("debug", [f"[Config] File {app_conf_override_path} does not exist."])
@@ -777,10 +751,7 @@ def renameSettings(config_file):
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
backup_file = f"{config_file}_old_setting_names_{timestamp}.bak"
mylog(
"debug",
f"[Config] Old setting names will be replaced and a backup ({backup_file}) of the config created.",
)
mylog("debug", f"[Config] Old setting names will be replaced and a backup ({backup_file}) of the config created.",)
shutil.copy(str(config_file), backup_file) # Convert config_file to a string
@@ -807,6 +778,4 @@ def renameSettings(config_file):
) # Convert config_file to a string
else:
mylog(
"debug", "[Config] No old setting names found in the file. No changes made."
)
mylog("debug", "[Config] No old setting names found in the file. No changes made.")

View File

@@ -119,10 +119,7 @@ def remove_old(keepNumberOfEntries):
try:
with open(NOTIFICATION_API_FILE, "w") as file:
json.dump(trimmed, file, indent=4)
mylog(
"verbose",
f"[Notification] Trimmed notifications to latest {keepNumberOfEntries}",
)
mylog("verbose", f"[Notification] Trimmed notifications to latest {keepNumberOfEntries}",)
except Exception as e:
mylog("none", f"Error writing trimmed notifications file: {e}")

View File

@@ -295,9 +295,7 @@ class NotificationInstance:
(f"-{minutes} minutes", tz_offset),
)
mylog(
"minimal", ["[Notification] Notifications changes: ", self.db.sql.rowcount]
)
mylog("minimal", ["[Notification] Notifications changes: ", self.db.sql.rowcount])
# clear plugin events
self.clearPluginEvents()

View File

@@ -31,10 +31,7 @@ class UserEventsQueueInstance:
Returns an empty list if the file doesn't exist.
"""
if not os.path.exists(self.log_file):
mylog(
"none",
["[UserEventsQueueInstance] Log file not found: ", self.log_file],
)
mylog("none", ["[UserEventsQueueInstance] Log file not found: ", self.log_file],)
return [] # No log file, return empty list
with open(self.log_file, "r") as file:
return file.readlines()

View File

@@ -123,9 +123,7 @@ def update_devices_data_from_scan(db):
)""")
# Update only devices with empty or NULL devParentMAC
mylog(
"debug", "[Update Devices] - (if not empty) cur_NetworkNodeMAC -> devParentMAC"
)
mylog("debug", "[Update Devices] - (if not empty) cur_NetworkNodeMAC -> devParentMAC")
sql.execute("""UPDATE Devices
SET devParentMAC = (
SELECT cur_NetworkNodeMAC
@@ -144,10 +142,7 @@ def update_devices_data_from_scan(db):
""")
# Update only devices with empty or NULL devSite
mylog(
"debug",
"[Update Devices] - (if not empty) cur_NetworkSite -> (if empty) devSite",
)
mylog("debug", "[Update Devices] - (if not empty) cur_NetworkSite -> (if empty) devSite",)
sql.execute("""UPDATE Devices
SET devSite = (
SELECT cur_NetworkSite
@@ -325,9 +320,7 @@ def save_scanned_devices(db):
.strip()
)
mylog(
"debug", ["[Save Devices] Saving this IP into the CurrentScan table:", local_ip]
)
mylog("debug", ["[Save Devices] Saving this IP into the CurrentScan table:", local_ip])
if check_IP_format(local_ip) == "":
local_ip = "0.0.0.0"
@@ -361,23 +354,12 @@ def print_scan_stats(db):
sql.execute(query)
stats = sql.fetchall()
mylog(
"verbose",
f"[Scan Stats] Devices Detected.......: {stats[0]['devices_detected']}",
)
mylog("verbose", f"[Scan Stats] Devices Detected.......: {stats[0]['devices_detected']}",)
mylog("verbose", f"[Scan Stats] New Devices............: {stats[0]['new_devices']}")
mylog("verbose", f"[Scan Stats] Down Alerts............: {stats[0]['down_alerts']}")
mylog(
"verbose",
f"[Scan Stats] New Down Alerts........: {stats[0]['new_down_alerts']}",
)
mylog(
"verbose",
f"[Scan Stats] New Connections........: {stats[0]['new_connections']}",
)
mylog(
"verbose", f"[Scan Stats] Disconnections.........: {stats[0]['disconnections']}"
)
mylog("verbose", f"[Scan Stats] New Down Alerts........: {stats[0]['new_down_alerts']}",)
mylog("verbose", f"[Scan Stats] New Connections........: {stats[0]['new_connections']}",)
mylog("verbose", f"[Scan Stats] Disconnections.........: {stats[0]['disconnections']}")
mylog("verbose", f"[Scan Stats] IP Changes.............: {stats[0]['ip_changes']}")
# if str(stats[0]["new_devices"]) != '0':
@@ -395,10 +377,7 @@ def print_scan_stats(db):
row_dict = dict(row)
mylog("trace", f" {row_dict}")
mylog(
"trace",
" ================ Events table content where eve_PendingAlertEmail = 1 ================",
)
mylog("trace", " ================ Events table content where eve_PendingAlertEmail = 1 ================",)
sql.execute("select * from Events where eve_PendingAlertEmail = 1")
rows = sql.fetchall()
for row in rows:
@@ -654,10 +633,7 @@ def check_plugin_data_changed(pm, plugins_to_check):
# Continue if changes detected
for p in plugins_changed:
mylog(
'debug',
f'[check_plugin_data_changed] {p} changed (last_data_change|last_data_check): ({pm.plugin_states.get(p, {}).get("lastDataChange")}|{pm.plugin_checks.get(p)})'
)
mylog('debug', f'[check_plugin_data_changed] {p} changed (last_change|last_check): ({pm.plugin_states.get(p, {}).get("lastDataChange")}|{pm.plugin_checks.get(p)})')
return True
@@ -741,10 +717,7 @@ def update_devices_names(pm):
# --- Step 1: Update device names for unknown devices ---
unknownDevices = device_handler.getUnknown()
if unknownDevices:
mylog(
"verbose",
f"[Update Device Name] Trying to resolve devices without name. Unknown devices count: {len(unknownDevices)}",
)
mylog("verbose", f"[Update Device Name] Trying to resolve devices without name. Unknown devices count: {len(unknownDevices)}",)
# Try resolving both name and FQDN
recordsToUpdate, recordsNotFound, fs, notFound = resolve_devices(
@@ -752,10 +725,8 @@ def update_devices_names(pm):
)
# Log summary
mylog(
"verbose",
f"[Update Device Name] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)} ({fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']})",
)
res_string = f"{fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']}"
mylog("verbose", f"[Update Device Name] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)} ({res_string})",)
mylog("verbose", f"[Update Device Name] Names Not Found : {notFound}")
# Apply updates to database
@@ -771,10 +742,7 @@ def update_devices_names(pm):
if get_setting_value("REFRESH_FQDN"):
allDevices = device_handler.getAll()
if allDevices:
mylog(
"verbose",
f"[Update FQDN] Trying to resolve FQDN. Devices count: {len(allDevices)}",
)
mylog("verbose", f"[Update FQDN] Trying to resolve FQDN. Devices count: {len(allDevices)}",)
# Try resolving only FQDN
recordsToUpdate, _, fs, notFound = resolve_devices(
@@ -782,10 +750,8 @@ def update_devices_names(pm):
)
# Log summary
mylog(
"verbose",
f"[Update FQDN] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)}({fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']})",
)
res_string = f"{fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']}"
mylog("verbose", f"[Update FQDN] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)}({res_string})",)
mylog("verbose", f"[Update FQDN] Names Not Found : {notFound}")
# Apply FQDN-only updates
@@ -907,25 +873,13 @@ def query_MAC_vendor(pMAC):
parts = line.split("\t", 1)
if len(parts) > 1:
vendor = parts[1].strip()
mylog(
"debug",
[
f"[Vendor Check] Found '{vendor}' for '{pMAC}' in {vendorsPath}"
],
)
mylog("debug", [f"[Vendor Check] Found '{vendor}' for '{pMAC}' in {vendorsPath}"], )
return vendor
else:
mylog(
"debug",
[
f'[Vendor Check] ⚠ ERROR: Match found, but line could not be processed: "{line_lower}"'
],
)
mylog("debug", [f'[Vendor Check] ⚠ ERROR: Match found, but line could not be processed: "{line_lower}"'],)
return -1
return -1 # MAC address not found in the database
except FileNotFoundError:
mylog(
"none", [f"[Vendor Check] ⚠ ERROR: Vendors file {vendorsPath} not found."]
)
mylog("none", [f"[Vendor Check] ⚠ ERROR: Vendors file {vendorsPath} not found."])
return -1

View File

@@ -25,10 +25,7 @@ try:
rule["icon_base64"] = ""
except Exception as e:
MAC_TYPE_ICON_RULES = []
mylog(
"none",
f"[guess_device_attributes] Failed to load device_heuristics_rules.json: {e}",
)
mylog("none", f"[guess_device_attributes] Failed to load device_heuristics_rules.json: {e}",)
# -----------------------------------------
@@ -169,10 +166,8 @@ def guess_device_attributes(
default_icon: str,
default_type: str,
) -> Tuple[str, str]:
mylog(
"debug",
f"[guess_device_attributes] Guessing attributes for (vendor|mac|ip|name): ('{vendor}'|'{mac}'|'{ip}'|'{name}')",
)
mylog("debug", f"[guess_device_attributes] Guessing attributes for (vendor|mac|ip|name): ('{vendor}'|'{mac}'|'{ip}'|'{name}')",)
# --- Normalize inputs ---
vendor = str(vendor).lower().strip() if vendor else "unknown"
@@ -207,10 +202,7 @@ def guess_device_attributes(
type_ = type_ or default_type
icon = icon or default_icon
mylog(
"debug",
f"[guess_device_attributes] Guessed attributes (icon|type_): ('{icon}'|'{type_}')",
)
mylog("debug", f"[guess_device_attributes] Guessed attributes (icon|type_): ('{icon}'|'{type_}')",)
return icon, type_

View File

@@ -50,9 +50,7 @@ def process_scan(db):
update_devices_data_from_scan(db)
# Pair session events (Connection / Disconnection)
mylog(
"verbose", "[Process Scan] Pairing session events (connection / disconnection) "
)
mylog("verbose", "[Process Scan] Pairing session events (connection / disconnection) ")
pair_sessions_events(db)
# Sessions snapshot
@@ -221,10 +219,7 @@ def insertOnlineHistory(db):
VALUES (?, ?, ?, ?, ?, ?)
"""
mylog(
"debug",
f"[Presence graph] Sql query: {insert_query} with values: {scanTimestamp}, {onlineDevices}, {downDevices}, {allDevices}, {archivedDevices}, {offlineDevices}",
)
mylog("debug", f"[Presence graph] Sql query: {insert_query} with values: {scanTimestamp}, {onlineDevices}, {downDevices}, {allDevices}, {archivedDevices}, {offlineDevices}",)
# Debug output
print_table_schema(db, "Online_History")

View File

@@ -26,12 +26,7 @@ def logEventStatusCounts(objName, pluginEvents):
status_counts[status] = 1
for status, count in status_counts.items():
mylog(
"debug",
[
f'[{module_name}] In {objName} there are {count} events with the status "{status}" '
],
)
mylog("debug", [f'[{module_name}] In {objName} there are {count} events with the status "{status}" '],)
# -------------------------------------------------------------------------------
@@ -100,10 +95,7 @@ def list_to_csv(arr):
mylog("debug", f"[{module_name}] Flattening the below array")
mylog("debug", arr)
mylog(
"debug",
f"[{module_name}] isinstance(arr, list) : {isinstance(arr, list)} | isinstance(arr, str) : {isinstance(arr, str)}",
)
mylog("debug", f"[{module_name}] isinstance(arr, list) : {isinstance(arr, list)} | isinstance(arr, str) : {isinstance(arr, str)}",)
if isinstance(arr, str):
tmpStr = (
@@ -227,19 +219,9 @@ def get_plugins_configs(loadAll):
except (FileNotFoundError, json.JSONDecodeError):
# Handle the case when the file is not found or JSON decoding fails
mylog(
"none",
[
f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {config_path}"
],
)
mylog("none", f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {config_path}")
except Exception as e:
mylog(
"none",
[
f"[{module_name}] ⚠ ERROR - Exception for file {config_path}: {str(e)}"
],
)
mylog("none", f"[{module_name}] ⚠ ERROR - Exception for file {config_path}: {str(e)}")
# Sort pluginsList based on "execution_order"
pluginsListSorted = sorted(pluginsList, key=get_layer)
@@ -285,23 +267,13 @@ def getPluginObject(keyValues):
if all_match:
return item
mylog(
"verbose",
[
f"[{module_name}] 💬 INFO - Object not found {json.dumps(keyValues)} "
],
)
mylog("verbose", f"[{module_name}] 💬 INFO - Object not found {json.dumps(keyValues)} ")
return {}
except (FileNotFoundError, json.JSONDecodeError, ValueError):
# Handle the case when the file is not found, JSON decoding fails, or data is not in the expected format
mylog(
"verbose",
[
f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {plugins_objects}"
],
)
mylog("verbose", f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {plugins_objects}")
return {}

View File

@@ -29,10 +29,7 @@ class UpdateFieldAction(Action):
self.db = db
def execute(self):
mylog(
"verbose",
f"[WF] Updating field '{self.field}' to '{self.value}' for event object {self.trigger.object_type}",
)
mylog("verbose", f"[WF] Updating field '{self.field}' to '{self.value}' for event object {self.trigger.object_type}")
obj = self.trigger.object
@@ -109,12 +106,7 @@ class RunPluginAction(Action):
def execute(self):
obj = self.trigger.object
mylog(
"verbose",
[
f"Executing plugin '{self.plugin_name}' with parameters {self.params} for object {obj}"
],
)
mylog("verbose", f"Executing plugin '{self.plugin_name}' with parameters {self.params} for object {obj}")
# PluginManager.run(self.plugin_name, self.parameters)
return obj
@@ -129,12 +121,7 @@ class SendNotificationAction(Action):
def execute(self):
obj = self.trigger.object
mylog(
"verbose",
[
f"Sending notification via '{self.method}': {self.message} for object {obj}"
],
)
mylog("verbose", f"Sending notification via '{self.method}': {self.message} for object {obj}")
# NotificationManager.send(self.method, self.message)
return obj

View File

@@ -52,10 +52,7 @@ class ConditionGroup:
"""Handles condition groups with AND, OR logic, supporting nested groups."""
def __init__(self, group_json):
mylog(
"verbose",
[f"[WF] ConditionGroup json.dumps(group_json): {json.dumps(group_json)}"],
)
mylog("verbose", f"[WF] ConditionGroup json.dumps(group_json): {json.dumps(group_json)}")
self.logic = group_json.get("logic", "AND").upper()
self.conditions = []
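
The constructor above reads a "logic" key and a list of conditions that may themselves be nested groups. A self-contained sketch of how such AND/OR groups can evaluate recursively; the leaf-condition shape with "field"/"equals" is hypothetical, not taken from the diff:

class ConditionGroup:
    def __init__(self, group_json):
        self.logic = group_json.get("logic", "AND").upper()
        # Anything with its own "logic" key is a nested group.
        self.conditions = [
            ConditionGroup(c) if "logic" in c else c
            for c in group_json.get("conditions", [])
        ]

    def evaluate(self, obj):
        results = [
            c.evaluate(obj) if isinstance(c, ConditionGroup)
            else obj.get(c["field"]) == c["equals"]  # hypothetical leaf test
            for c in self.conditions
        ]
        return all(results) if self.logic == "AND" else any(results)

group = {"logic": "OR", "conditions": [
    {"field": "status", "equals": "online"},
    {"logic": "AND", "conditions": [{"field": "new", "equals": True}]},
]}
print(ConditionGroup(group).evaluate({"status": "offline", "new": True}))  # True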

View File

@@ -53,21 +53,13 @@ class WorkflowManager:
# Ensure workflow is enabled before proceeding
if workflow.get("enabled", "No").lower() == "yes":
wfName = workflow["name"]
-mylog(
-    "debug",
-    [f"[WF] Checking if '{evGuid}' triggers the workflow '{wfName}'"],
-)
+mylog("debug", f"[WF] Checking if '{evGuid}' triggers the workflow '{wfName}'")
# construct trigger object which also evaluates if the current event triggers it
trigger = Trigger(workflow["trigger"], event, self.db)
if trigger.triggered:
-mylog(
-    "verbose",
-    [
-        f"[WF] Event with GUID '{evGuid}' triggered the workflow '{wfName}'"
-    ],
-)
+mylog("verbose", f"[WF] Event with GUID '{evGuid}' triggered the workflow '{wfName}'")
self.execute_workflow(workflow, trigger)
@@ -98,12 +90,7 @@ class WorkflowManager:
evaluator = ConditionGroup(condition_group)
if evaluator.evaluate(trigger): # If any group evaluates to True
-mylog(
-    "none",
-    [
-        f"[WF] Workflow {wfName} will be executed - conditions were evaluated as TRUE"
-    ],
-)
+mylog("none", f"[WF] Workflow {wfName} will be executed - conditions were evaluated as TRUE")
mylog("debug", [f"[WF] Workflow condition_group: {condition_group}"])
self.execute_actions(workflow["actions"], trigger)

View File

@@ -24,12 +24,7 @@ class Trigger:
self.object_type == event["ObjectType"] and self.event_type == event["AppEventType"]
)
-mylog(
-    "debug",
-    [
-        f"""[WF] self.triggered '{self.triggered}' for event '{get_array_from_sql_rows(event)} and trigger {json.dumps(triggerJson)}' """
-    ],
-)
+mylog("debug", f"""[WF] self.triggered '{self.triggered}' for event '{get_array_from_sql_rows(event)} and trigger {json.dumps(triggerJson)}' """)
if self.triggered:
# object type corresponds with the DB table name
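
Taken together, the WorkflowManager and Trigger hunks implement one dispatch flow: skip disabled workflows, compute a type match for the incoming event, and execute actions only on a match. A compact, self-contained sketch of that flow; the function names are illustrative, only the JSON keys mirror the diff:

def is_triggered(trigger_json, event):
    # An event fires a workflow when both type fields match.
    return (trigger_json["object_type"] == event["ObjectType"]
            and trigger_json["event_type"] == event["AppEventType"])

def check_event(workflows, event):
    for workflow in workflows:
        # "enabled" is stored as a Yes/No string in the workflow JSON
        if workflow.get("enabled", "No").lower() != "yes":
            continue
        if is_triggered(workflow["trigger"], event):
            print(f"[WF] executing workflow '{workflow['name']}'")

check_event(
    [{"name": "notify", "enabled": "Yes",
      "trigger": {"object_type": "Devices", "event_type": "update"}}],
    {"ObjectType": "Devices", "AppEventType": "update"},
)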

View File

@@ -11,26 +11,29 @@ echo "==========================================" >> "$LOG_FILE"
# Function to extract comments from docker-compose file
extract_comments() {
local file="$1"
echo "File: $(basename "$file")" >> "$LOG_FILE"
echo "----------------------------------------" >> "$LOG_FILE"
{
# Extract lines starting with # until we hit a non-comment line
awk '
/^#/ {
# Remove the # and any leading/trailing whitespace
comment = substr($0, 2)
sub(/^ */, "", comment)
sub(/ *$/, "", comment)
if (comment != "") {
print comment
}
}
/^[^#]/ && !/^$/ {
exit
}
' "$file" >> "$LOG_FILE"
echo "File: $(basename "$file")"
echo "----------------------------------------"
echo "" >> "$LOG_FILE"
# Extract lines starting with # until we hit a non-comment line
awk '
/^#/ {
# Remove the # and any leading/trailing whitespace
comment = substr($0, 2)
sub(/^ */, "", comment)
sub(/ *$/, "", comment)
if (comment != "") {
print comment
}
}
/^[^#]/ && !/^$/ {
exit
}
' "$file"
echo ""
} >> "$LOG_FILE"
}
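
For readers who don't speak awk: the program above prints the leading block of '#' comments and stops at the first real content line, while empty lines pass through (they match neither /^#/ nor /^[^#]/). An equivalent Python sketch, offered for illustration only; the script itself uses awk:

def leading_comments(path):
    comments = []
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.startswith("#"):
                text = line[1:].strip()  # drop '#' plus surrounding whitespace
                if text:
                    comments.append(text)
            elif line.rstrip("\n"):      # first non-comment, non-empty line
                break
    return comments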
# Function to run docker-compose test
@@ -40,16 +43,17 @@ run_test() {
dirname=$(dirname "$file")
local basename
basename=$(basename "$file")
echo "Testing: $basename" >> "$LOG_FILE"
echo "Directory: $dirname" >> "$LOG_FILE"
echo "" >> "$LOG_FILE"
echo "Running docker-compose up..." >> "$LOG_FILE"
timeout 10s docker-compose -f "$file" up 2>&1 >> "$LOG_FILE"
{
echo "Testing: $basename"
echo "Directory: $dirname"
echo ""
echo "Running docker-compose up..."
timeout 10s docker-compose -f "$file" up 2>&1
} >> "$LOG_FILE"
# Clean up
docker-compose -f "$file" down -v 2>/dev/null || true
docker volume prune -f 2>/dev/null || true
}
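
Both functions now funnel their output through a single { ...; } >> "$LOG_FILE" group instead of redirecting every command individually, the pattern ShellCheck suggests as SC2129. This opens the log once per block and, as a side effect, fixes the stream order of timeout 10s docker-compose ... up 2>&1 >> "$LOG_FILE": in the old form 2>&1 ran before the append, so stderr went to the terminal; inside the group, stdout already points at the log when 2>&1 is applied, so stderr is captured too. The same open-once idea in Python, purely as an illustration (the log file name is hypothetical):

from contextlib import redirect_stdout

def run_test(compose_file):
    # One handle for the whole block, mirroring { ...; } >> "$LOG_FILE"
    with open("compose-test.log", "a", encoding="utf-8") as log, redirect_stdout(log):
        print(f"Testing: {compose_file}")
        print("Running docker-compose up...")

run_test("docker-compose.yml")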
find "$SCRIPT_DIR" -name "docker-compose*.yml" -type f -print0 | sort -z | while IFS= read -r -d '' file; do
extract_comments "$file"

File diff suppressed because it is too large

View File

@@ -57,7 +57,7 @@ for i in $(seq 1 $WAIT_SECONDS); do
echo "--- Services are healthy! ---"
break
fi
-if [ $i -eq $WAIT_SECONDS ]; then
+if [ "$i" -eq "$WAIT_SECONDS" ]; then
echo "--- Timeout: Services did not become healthy after $WAIT_SECONDS seconds. ---"
docker logs netalertx-test-container
exit 1

View File

@@ -271,7 +271,7 @@ def create_test_scenarios() -> List[TestScenario]:
compose_file = f"docker-compose.mount-test.{path_name}_{scenario_name}.yml"
# Determine expected exit code
-expected_exit_code = 1 if scenario_name == "unwritable" else 0
+expected_exit_code = 1 if expected_issues else 0
scenarios.append(
TestScenario(