Compare commits

22 Commits: linting-fi ... 067336dcc1

067336dcc1
8acb0a876a
d1be41eca4
00e953a7ce
b9ef9ad041
e90fbf17d3
139447b253
fa9fc2c8e3
30071c6848
b0bd3c8191
c753da9e15
4770ee5942
5cd53bc8f9
5e47ccc9ef
f5d7c0f9a0
35b7e80be4
07eeac0a0b
240d86bf1e
274fd50a92
bbf49c3686
e3458630ba
2f6f1e49e9
@@ -35,7 +35,7 @@ RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev o
 # Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy
 # into hardened stage without worrying about permissions and keeps image size small. Keeping the commands
 # together makes for a slightly smaller image size.
-RUN pip install -r /tmp/requirements.txt && \
+RUN pip install --no-cache-dir -r /tmp/requirements.txt && \
     chmod -R u-rwx,g-rwx /opt

 # second stage is the main runtime stage with just the minimum required to run the application
@@ -71,7 +71,7 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
 ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
 ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
 ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
-ENV LOG_CROND=${NETALERTX_LOG}/crond.log
+ENV LOG_CRON=${NETALERTX_LOG}/cron.log
 ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log

 # System Services configuration files
@@ -81,11 +81,11 @@ ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
 ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
 ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
 ENV SYSTEM_NGINX_CONFIG_TEMPLATE=${SYSTEM_NGINX_CONFIG}/netalertx.conf.template
+ENV SYSTEM_SERVICES_CONFIG_CRON=${SYSTEM_SERVICES_CONFIG}/cron
 ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
 ENV SYSTEM_SERVICES_ACTIVE_CONFIG_FILE=${SYSTEM_SERVICES_ACTIVE_CONFIG}/nginx.conf
 ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
 ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
-ENV SYSTEM_SERVICES_CROND=${SYSTEM_SERVICES_CONFIG}/crond
 ENV SYSTEM_SERVICES_RUN=/tmp/run
 ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
 ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
@@ -119,7 +119,7 @@ ENV LANG=C.UTF-8
 RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \
     nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
     sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
-    nginx shadow && \
+    nginx supercronic shadow && \
     rm -Rf /var/cache/apk/* && \
     rm -Rf /etc/nginx && \
     addgroup -g 20211 ${NETALERTX_GROUP} && \
@@ -150,26 +150,26 @@ COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV}
 # This is done after the copy of the venv to ensure the venv is in place
 # although it may be quicker to do it before the copy, it keeps the image
 # layers smaller to do it after.
-RUN if [ -f .VERSION ]; then \
-    cp .VERSION ${NETALERTX_APP}/.VERSION; \
+RUN if [ -f '.VERSION' ]; then \
+    cp '.VERSION' "${NETALERTX_APP}/.VERSION"; \
     else \
-    echo "DEVELOPMENT 00000000" > ${NETALERTX_APP}/.VERSION; \
+    echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/.VERSION"; \
     fi && \
-    chown 20212:20212 ${NETALERTX_APP}/.VERSION && \
-    apk add libcap && \
+    chown 20212:20212 "${NETALERTX_APP}/.VERSION" && \
+    apk add --no-cache libcap && \
     setcap cap_net_raw+ep /bin/busybox && \
     setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
     setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
     setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
     setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && \
-    setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
+    setcap cap_net_raw,cap_net_admin+eip "$(readlink -f ${VIRTUAL_ENV_BIN}/python)" && \
     /bin/sh /build/init-nginx.sh && \
     /bin/sh /build/init-php-fpm.sh && \
-    /bin/sh /build/init-crond.sh && \
+    /bin/sh /build/init-cron.sh && \
     /bin/sh /build/init-backend.sh && \
     rm -rf /build && \
     apk del libcap && \
-    date +%s > ${NETALERTX_FRONT}/buildtimestamp.txt
+    date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"


 ENTRYPOINT ["/bin/sh","/entrypoint.sh"]
@@ -186,13 +186,15 @@ ENV UMASK=0077
 # AI may claim this is stupid, but it's actually least possible permissions as
 # read-only user cannot login, cannot sudo, has no write permission, and cannot even
 # read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
-RUN addgroup -g 20212 ${READ_ONLY_GROUP} && \
-    adduser -u 20212 -G ${READ_ONLY_GROUP} -D -h /app ${READ_ONLY_USER}
+RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \
+    adduser -u 20212 -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"


 # reduce permissions to minimum necessary for all NetAlertX files and folders
 # Permissions 005 and 004 are not typos, they enable read-only. Everyone can
 # read the read-only files, and nobody can write to them, even the readonly user.
+
+# hadolint ignore=SC2114
 RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
     chmod -R 004 ${READ_ONLY_FOLDERS} && \
     find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
@@ -211,7 +213,7 @@ RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
     /srv /media && \
     sed -i "/^\(${READ_ONLY_USER}\|${NETALERTX_USER}\):/!d" /etc/passwd && \
     sed -i "/^\(${READ_ONLY_GROUP}\|${NETALERTX_GROUP}\):/!d" /etc/group && \
-    echo -ne '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
+    printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo

 USER netalertx

@@ -230,6 +232,7 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
 # Open and wide to avoid permission issues during development allowing max
 # flexibility.

+# hadolint ignore=DL3006
 FROM runner AS netalertx-devcontainer
 ENV INSTALL_DIR=/app

@@ -243,9 +246,14 @@ ENV PYDEVD_DISABLE_FILE_VALIDATION=1
 COPY .devcontainer/resources/devcontainer-overlay/ /
 USER root
 # Install common tools, create user, and set up sudo

 RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
     pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
-    docker-cli-compose
+    docker-cli-compose shellcheck
+
+# Install hadolint (Dockerfile linter)
+RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
+    chmod +x /usr/local/bin/hadolint

 RUN install -d -o netalertx -g netalertx -m 755 /services/php/modules && \
     cp -a /usr/lib/php83/modules/. /services/php/modules/ && \
@@ -75,7 +75,9 @@
         "alexcvzz.vscode-sqlite",
         "mkhl.shfmt",
         "charliermarsh.ruff",
-        "ms-python.flake8"
+        "ms-python.flake8",
+        "exiasr.hadolint",
+        "timonwong.shellcheck"
       ],
       "settings": {
         "terminal.integrated.cwd": "${containerWorkspaceFolder}",
@@ -7,6 +7,7 @@
 # Open and wide to avoid permission issues during development allowing max
 # flexibility.

+# hadolint ignore=DL3006
 FROM runner AS netalertx-devcontainer
 ENV INSTALL_DIR=/app

@@ -20,9 +21,14 @@ ENV PYDEVD_DISABLE_FILE_VALIDATION=1
 COPY .devcontainer/resources/devcontainer-overlay/ /
 USER root
 # Install common tools, create user, and set up sudo

 RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
     pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
-    docker-cli-compose
+    docker-cli-compose shellcheck
+
+# Install hadolint (Dockerfile linter)
+RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
+    chmod +x /usr/local/bin/hadolint

 RUN install -d -o netalertx -g netalertx -m 755 /services/php/modules && \
     cp -a /usr/lib/php83/modules/. /services/php/modules/ && \
@@ -7,27 +7,28 @@
 # the final .devcontainer/Dockerfile used by the devcontainer.

 echo "Generating .devcontainer/Dockerfile"
-SCRIPT_DIR="$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)"
+SCRIPT_PATH=$(set -- "$0"; dirname -- "$1")
+SCRIPT_DIR=$(cd "$SCRIPT_PATH" && pwd -P)
 DEVCONTAINER_DIR="${SCRIPT_DIR%/scripts}"
 ROOT_DIR="${DEVCONTAINER_DIR%/.devcontainer}"

 OUT_FILE="${DEVCONTAINER_DIR}/Dockerfile"

-echo "Adding base Dockerfile from $ROOT_DIR..."
+echo "Adding base Dockerfile from $ROOT_DIR and merging to devcontainer-Dockerfile"
+{

-echo "# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh" > "$OUT_FILE"
-echo "" >> "$OUT_FILE"
-echo "# ---/Dockerfile---" >> "$OUT_FILE"
+echo "# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh"
+echo ""
+echo "# ---/Dockerfile---"

-cat "${ROOT_DIR}/Dockerfile" >> "$OUT_FILE"
+cat "${ROOT_DIR}/Dockerfile"

-echo "" >> "$OUT_FILE"
-echo "# ---/resources/devcontainer-Dockerfile---" >> "$OUT_FILE"
-echo "" >> "$OUT_FILE"
+echo ""
+echo "# ---/resources/devcontainer-Dockerfile---"
+echo ""
+cat "${DEVCONTAINER_DIR}/resources/devcontainer-Dockerfile"
+} > "$OUT_FILE"

-echo "Adding devcontainer-Dockerfile from $DEVCONTAINER_DIR/resources..."
-cat "${DEVCONTAINER_DIR}/resources/devcontainer-Dockerfile" >> "$OUT_FILE"

-echo "Generated $OUT_FILE using root dir $ROOT_DIR" >&2
+echo "Generated $OUT_FILE using root dir $ROOT_DIR"

 echo "Done."
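The rewritten generator resolves its own location with a plain POSIX idiom instead of the `CDPATH=` one-liner. A minimal, self-contained sketch of that idiom (the final echo is illustrative only):

```bash
#!/bin/sh
# Derive the directory of the running script, then canonicalize it with
# pwd -P so a symlinked checkout resolves to its physical path.
SCRIPT_PATH=$(set -- "$0"; dirname -- "$1")
SCRIPT_DIR=$(cd "$SCRIPT_PATH" && pwd -P)
echo "script lives in: $SCRIPT_DIR"
```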
@@ -16,7 +16,6 @@

 SOURCE_DIR=${SOURCE_DIR:-/workspaces/NetAlertX}
-PY_SITE_PACKAGES="${VIRTUAL_ENV:-/opt/venv}/lib/python3.12/site-packages"
 SOURCE_SERVICES_DIR="${SOURCE_DIR}/install/production-filesystem/services"

 LOG_FILES=(
     LOG_APP
@@ -26,7 +25,7 @@ LOG_FILES=(
     LOG_EXECUTION_QUEUE
     LOG_APP_PHP_ERRORS
     LOG_IP_CHANGES
-    LOG_CROND
+    LOG_CRON
     LOG_REPORT_OUTPUT_TXT
     LOG_REPORT_OUTPUT_HTML
     LOG_REPORT_OUTPUT_JSON
.github/copilot-instructions.md (6 changes, vendored)

@@ -83,3 +83,9 @@ Backend loop phases (see `server/__main__.py` and `server/plugin.py`): `once`, `
 - Be sure to offer choices when appropriate.
 - Always understand the intent of the user's request and undo/redo as needed.
 - Above all, use the simplest possible code that meets the need so it can be easily audited and maintained.
+- Always leave logging enabled. If there is a possibility it will be difficult to debug with current logging, add more logging.
+- Always run the testFailure tool before executing any tests to gather current failure information and avoid redundant runs.
+- Always prioritize using the appropriate tools in the environment first. As an example, if a test is failing use `testFailure` then `runTests`. Never `runTests` first.
+- Docker tests take an extremely long time to run. Avoid changes to docker or tests until you've examined the existing testFailures and runTests results.
+- Environment tools are designed specifically for your use in this project and running them in this order will give you the best results.
.github/workflows/code_checks.yml (6 changes, vendored)

@@ -84,7 +84,7 @@ jobs:
       continue-on-error: true
       run: |
         echo "🔍 Linting Dockerfiles..."
-        /tmp/hadolint Dockerfile* || true
+        /tmp/hadolint --config .hadolint.yaml Dockerfile* || true

   docker-tests:
     runs-on: ubuntu-latest
@@ -95,5 +95,5 @@ jobs:
     - name: Run Docker-based tests
       run: |
         echo "🐳 Running Docker-based tests..."
-        chmod +x ./run_docker_tests.sh
-        ./run_docker_tests.sh
+        chmod +x ./test/docker_tests/run_docker_tests.sh
+        ./test/docker_tests/run_docker_tests.sh
.hadolint.yaml (2 changes, new file)

@@ -0,0 +1,2 @@
+ignored:
+  - DL3018
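DL3018 is hadolint's warning about unpinned `apk add` package versions, so the new config suppresses it repo-wide. A sketch of invoking the linter the same way the updated CI step does, assuming `hadolint` is on the PATH:

```bash
# Lint all Dockerfiles with the shared ignore list.
hadolint --config .hadolint.yaml Dockerfile*
```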
Dockerfile (32 changes)

@@ -32,7 +32,7 @@ RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev o
 # Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy
 # into hardened stage without worrying about permissions and keeps image size small. Keeping the commands
 # together makes for a slightly smaller image size.
-RUN pip install -r /tmp/requirements.txt && \
+RUN pip install --no-cache-dir -r /tmp/requirements.txt && \
     chmod -R u-rwx,g-rwx /opt

 # second stage is the main runtime stage with just the minimum required to run the application
@@ -68,7 +68,7 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
 ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
 ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
 ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
-ENV LOG_CROND=${NETALERTX_LOG}/crond.log
+ENV LOG_CRON=${NETALERTX_LOG}/cron.log
 ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log

 # System Services configuration files
@@ -78,11 +78,11 @@ ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
 ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
 ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
 ENV SYSTEM_NGINX_CONFIG_TEMPLATE=${SYSTEM_NGINX_CONFIG}/netalertx.conf.template
+ENV SYSTEM_SERVICES_CONFIG_CRON=${SYSTEM_SERVICES_CONFIG}/cron
 ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
 ENV SYSTEM_SERVICES_ACTIVE_CONFIG_FILE=${SYSTEM_SERVICES_ACTIVE_CONFIG}/nginx.conf
 ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
 ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
-ENV SYSTEM_SERVICES_CROND=${SYSTEM_SERVICES_CONFIG}/crond
 ENV SYSTEM_SERVICES_RUN=/tmp/run
 ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
 ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
@@ -116,7 +116,7 @@ ENV LANG=C.UTF-8
 RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \
     nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
     sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
-    nginx shadow && \
+    nginx supercronic shadow && \
     rm -Rf /var/cache/apk/* && \
     rm -Rf /etc/nginx && \
     addgroup -g 20211 ${NETALERTX_GROUP} && \
@@ -147,26 +147,26 @@ COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV}
 # This is done after the copy of the venv to ensure the venv is in place
 # although it may be quicker to do it before the copy, it keeps the image
 # layers smaller to do it after.
-RUN if [ -f .VERSION ]; then \
-    cp .VERSION ${NETALERTX_APP}/.VERSION; \
+RUN if [ -f '.VERSION' ]; then \
+    cp '.VERSION' "${NETALERTX_APP}/.VERSION"; \
     else \
-    echo "DEVELOPMENT 00000000" > ${NETALERTX_APP}/.VERSION; \
+    echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/.VERSION"; \
     fi && \
-    chown 20212:20212 ${NETALERTX_APP}/.VERSION && \
-    apk add libcap && \
+    chown 20212:20212 "${NETALERTX_APP}/.VERSION" && \
+    apk add --no-cache libcap && \
     setcap cap_net_raw+ep /bin/busybox && \
     setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
     setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
     setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
     setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && \
-    setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
+    setcap cap_net_raw,cap_net_admin+eip "$(readlink -f ${VIRTUAL_ENV_BIN}/python)" && \
     /bin/sh /build/init-nginx.sh && \
     /bin/sh /build/init-php-fpm.sh && \
-    /bin/sh /build/init-crond.sh && \
+    /bin/sh /build/init-cron.sh && \
     /bin/sh /build/init-backend.sh && \
     rm -rf /build && \
     apk del libcap && \
-    date +%s > ${NETALERTX_FRONT}/buildtimestamp.txt
+    date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"


 ENTRYPOINT ["/bin/sh","/entrypoint.sh"]
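The `setcap` calls grant the scanners raw-socket privileges without running the container as root, and `libcap` is removed once the build finishes. A hedged way to verify the resulting file capabilities in a built image (`getcap` is not in the final image, so it is reinstalled in a throwaway container):

```bash
docker run --rm --user 0 ghcr.io/jokob-sk/netalertx:latest \
  sh -c 'apk add --no-cache libcap >/dev/null && getcap /usr/bin/nmap /usr/bin/arp-scan'
```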
@@ -183,13 +183,15 @@ ENV UMASK=0077
 # AI may claim this is stupid, but it's actually least possible permissions as
 # read-only user cannot login, cannot sudo, has no write permission, and cannot even
 # read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
-RUN addgroup -g 20212 ${READ_ONLY_GROUP} && \
-    adduser -u 20212 -G ${READ_ONLY_GROUP} -D -h /app ${READ_ONLY_USER}
+RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \
+    adduser -u 20212 -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"


 # reduce permissions to minimum necessary for all NetAlertX files and folders
 # Permissions 005 and 004 are not typos, they enable read-only. Everyone can
 # read the read-only files, and nobody can write to them, even the readonly user.
+
+# hadolint ignore=SC2114
 RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
     chmod -R 004 ${READ_ONLY_FOLDERS} && \
     find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
@@ -208,7 +210,7 @@ RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
     /srv /media && \
     sed -i "/^\(${READ_ONLY_USER}\|${NETALERTX_USER}\):/!d" /etc/passwd && \
     sed -i "/^\(${READ_ONLY_GROUP}\|${NETALERTX_GROUP}\):/!d" /etc/group && \
-    echo -ne '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
+    printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo

 USER netalertx
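The switch from `echo -ne` to `printf` writes the same two-line no-op `sudo` shim, but portably, since `echo -ne` is not POSIX and shells disagree on its flags. What the shim does, illustrated outside the image:

```bash
# The shim simply execs its arguments: `sudo CMD` degrades to plain `CMD`.
printf '#!/bin/sh\n"$@"\n' > /tmp/fake-sudo && chmod +x /tmp/fake-sudo
/tmp/fake-sudo id -u   # prints the caller's own UID; no privilege change occurs
```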
@@ -72,7 +72,7 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
 ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
 ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
 ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
-ENV LOG_CROND=${NETALERTX_LOG}/crond.log
+ENV LOG_CRON=${NETALERTX_LOG}/cron.log
 ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log

 # System Services configuration files
@@ -132,25 +132,29 @@ COPY --chmod=775 --chown=${USER_ID}:${USER_GID} . ${INSTALL_DIR}/


 # ❗ IMPORTANT - if you modify this file modify the /install/install_dependecies.debian.sh file as well ❗
-RUN apt update && apt-get install -y \
+# hadolint ignore=DL3008,DL3027
+RUN apt-get update && apt-get install -y --no-install-recommends \
     tini snmp ca-certificates curl libwww-perl arp-scan sudo gettext-base \
     nginx-light php php-cgi php-fpm php-sqlite3 php-curl sqlite3 dnsutils net-tools \
     python3 python3-dev iproute2 nmap python3-pip zip git systemctl usbutils traceroute nbtscan openrc \
-    busybox nginx nginx-core mtr python3-venv
+    busybox nginx nginx-core mtr python3-venv && \
+    rm -rf /var/lib/apt/lists/*

 # While php8.3 is in debian bookworm repos, php-fpm is not included so we need to add sury.org repo
 # (Ondřej Surý maintains php packages for debian. This is temp until debian includes php-fpm in their
 # repos. Likely it will be in Debian Trixie.). This keeps the image up-to-date with the alpine version.
+# hadolint ignore=DL3008
 RUN apt-get install -y --no-install-recommends \
     apt-transport-https \
     ca-certificates \
     lsb-release \
     wget && \
-    wget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg && \
+    wget -q -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg && \
     echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list && \
     apt-get update && \
-    apt-get install -y php8.3-fpm php8.3-cli php8.3-sqlite3 php8.3-common php8.3-curl php8.3-cgi && \
-    ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 # make it compatible with alpine version
+    apt-get install -y --no-install-recommends php8.3-fpm php8.3-cli php8.3-sqlite3 php8.3-common php8.3-curl php8.3-cgi && \
+    ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 && \
+    rm -rf /var/lib/apt/lists/* # make it compatible with alpine version

 # Setup virtual python environment and use pip3 to install packages
 RUN python3 -m venv ${VIRTUAL_ENV} && \
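A quick sanity check that the Alpine-compatibility symlink resolves in a built Debian image (the image tag below is a placeholder, not a published name):

```bash
docker run --rm netalertx-debian:local sh -c 'ls -l /usr/sbin/php-fpm83 && php-fpm83 -v'
```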
README.md (10 changes)

@@ -41,13 +41,15 @@ Get visibility of what's going on on your WIFI/LAN network and enable presence d
 Start NetAlertX in seconds with Docker:

 ```bash
-docker run -d --rm --network=host \
+docker run -d \
+  --network=host \
+  --restart unless-stopped \
   -v /local_data_dir/config:/data/config \
   -v /local_data_dir/db:/data/db \
-  -v /etc/localtime:/etc/localtime \
-  --mount type=tmpfs,target=/tmp/api \
+  -v /etc/localtime:/etc/localtime:ro \
+  --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
   -e PORT=20211 \
-  -e APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"} \
+  -e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
   ghcr.io/jokob-sk/netalertx:latest
 ```
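The added quotes around `APP_CONF_OVERRIDE` matter because, without them, the shell strips the inner double quotes before Docker ever sees the value, handing the app broken JSON. A quick illustration:

```bash
echo APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"}     # -> APP_CONF_OVERRIDE={GRAPHQL_PORT:20214}
echo APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}'   # -> APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"}
```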
@@ -1,14 +1,17 @@
 #!/bin/bash
-export INSTALL_DIR=/app

-LOG_FILE="${INSTALL_DIR}/log/execution_queue.log"

 # Check if there are any entries with cron_restart_backend
-if grep -q "cron_restart_backend" "$LOG_FILE"; then
-    # Restart python application using s6
-    s6-svc -r /var/run/s6-rc/servicedirs/netalertx
-    echo 'done'
+if [ -f "${LOG_EXECUTION_QUEUE}" ] && grep -q "cron_restart_backend" "${LOG_EXECUTION_QUEUE}"; then
+    echo "$(date): Restarting backend triggered by cron_restart_backend"
+    killall python3 || echo "killall python3 failed or no process found"
+    sleep 2
+    /services/start-backend.sh &

-    # Remove all lines containing cron_restart_backend from the log file
-    sed -i '/cron_restart_backend/d' "$LOG_FILE"
+    # Atomic replacement with temp file. grep returns 1 if no lines selected (file becomes empty), which is valid here.
+    grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp"
+    RC=$?
+    if [ $RC -eq 0 ] || [ $RC -eq 1 ]; then
+        mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
+    fi
 fi
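The `RC` check exists because `grep -v` exits with 1 when no lines survive the filter, which is a legitimate outcome here rather than an error; only exit codes of 2 and above signal a real failure. The pattern in isolation:

```bash
# Filter a file via a temp copy, tolerating an empty result (grep exit code 1).
grep -v "pattern" file.log > file.log.tmp
rc=$?
if [ "$rc" -le 1 ]; then
    mv file.log.tmp file.log   # swap in the filtered copy
fi
```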
@@ -1,66 +1,114 @@
-### Loading...
+# Troubleshooting Common Issues

-Often if the application is misconfigured the `Loading...` dialog is continuously displayed. This is most likely caused by the backed failing to start. The **Maintenance -> Logs** section should give you more details on what's happening. If there is no exception, check the Portainer log, or start the container in the foreground (without the `-d` parameter) to observe any exceptions. It's advisable to enable `trace` or `debug`. Check the [Debug tips](./DEBUG_TIPS.md) on detailed instructions.
+> [!TIP]
+> Before troubleshooting, ensure you have set the correct [Debugging and LOG_LEVEL](./DEBUG_TIPS.md).

-The issue might be related to the backend server, so please check [Debugging GraphQL issues](./DEBUG_API_SERVER.md).
+---

-Please also check the browser logs (usually accessible by pressing `F12`):
+## Docker Container Doesn't Start

-1. Switch to the Console tab and refresh the page
-2. Switch to teh Network tab and refresh the page

-If you are not sure how to resolve the errors yourself, please post screenshots of the above into the issue, or discord discussion, where your problem is being solved.

-### Incorrect SCAN_SUBNETS

-One of the most common issues is not configuring `SCAN_SUBNETS` correctly. If this setting is misconfigured you will only see one or two devices in your devices list after a scan. Please read the [subnets docs](./SUBNETS.md) carefully to resolve this.

-### Duplicate devices and notifications

-The app uses the MAC address as an unique identifier for devices. If a new MAC is detected a new device is added to the application and corresponding notifications are triggered. This means that if the MAC of an existing device changes, the device will be logged as a new device. You can usually prevent this from happening by changing the device configuration (in Android, iOS, or Windows) for your network. See the [Random Macs](./RANDOM_MAC.md) guide for details.
+Initial setup issues are often caused by **missing permissions** or **incorrectly mapped volumes**. Always double-check your `docker run` or `docker-compose.yml` against the [official setup guide](./DOCKER_INSTALLATION.md) before proceeding.

 ### Permissions

-Make sure you [File permissions](./FILE_PERMISSIONS.md) are set correctly.
+Make sure your [file permissions](./FILE_PERMISSIONS.md) are correctly set:

-* If facing issues (AJAX errors, can't write to DB, empty screen, etc,) make sure permissions are set correctly, and check the logs under `/tmp/log`.
-* To solve permission issues you can try setting the owner and group of the `app.db` by executing the following on the host system: `docker exec netalertx chown -R www-data:www-data /data/db/app.db`.
-* If still facing issues, try to map the app.db file (⚠ not folder) to `:/data/db/app.db` (see [docker-compose Examples](https://github.com/jokob-sk/NetAlertX/blob/main/dockerfiles/README.md#-docker-composeyml-examples) for details)
+* If you encounter AJAX errors, cannot write to the database, or see an empty screen, check that permissions are correct and review the logs under `/tmp/log`.
+* To fix permission issues with the database, update the owner and group of `app.db` as described in the [File Permissions guide](./FILE_PERMISSIONS.md).

-### Container restarts / crashes
+### Container Restarts / Crashes

-* Check the logs for details. Often a required setting for a notification method is missing.
+* Check the logs for details. Often, required settings are missing.
+* For more detailed troubleshooting, see [Debug and Troubleshooting Tips](./DEBUG_TIPS.md).
+* To observe errors directly, run the container in the foreground instead of `-d`:

-### unable to resolve host
+```bash
+docker run --rm -it <your_image>
+```

-* Check that your `SCAN_SUBNETS` variable is using the correct mask and `--interface`. See the [subnets docs for details](./SUBNETS.md).
+---

-### Invalid JSON
+## Docker Container Starts, But the Application Misbehaves

-Check the [Invalid JSON errors debug help](./DEBUG_INVALID_JSON.md) docs on how to proceed.
+If the container starts but the app shows unexpected behavior, the cause is often **data corruption**, **incorrect configuration**, or **unexpected input data**.

-### sudo execution failing (e.g.: on arpscan) on a Raspberry Pi 4
+### Continuous "Loading..." Screen

-> sudo: unexpected child termination condition: 0
+A misconfigured application may display a persistent `Loading...` dialog. This is usually caused by the backend failing to start.

-Resolution based on [this issue](https://github.com/linuxserver/docker-papermerge/issues/4#issuecomment-1003657581)
+**Steps to troubleshoot:**

+1. Check **Maintenance → Logs** for exceptions.
+2. If no exception is visible, check the Portainer logs.
+3. Start the container in the foreground to observe exceptions.
+4. Enable `trace` or `debug` logging for detailed output (see [Debug Tips](./DEBUG_TIPS.md)).
+5. Verify that `GRAPHQL_PORT` is correctly configured.
+6. Check browser logs (press `F12`):
+
+   * **Console tab** → refresh the page
+   * **Network tab** → refresh the page
+
+If you are unsure how to resolve errors, provide screenshots or log excerpts in your issue report or Discord discussion.
+
+---
+
+### Common Configuration Issues
+
+#### Incorrect `SCAN_SUBNETS`
+
+If `SCAN_SUBNETS` is misconfigured, you may see only a few devices in your device list after a scan. See the [Subnets Documentation](./SUBNETS.md) for proper configuration.
+
+#### Duplicate Devices and Notifications
+
+* Devices are identified by their **MAC address**.
+* If a device's MAC changes, it will be treated as a new device, triggering notifications.
+* Prevent this by adjusting your device configuration for Android, iOS, or Windows. See the [Random MACs Guide](./RANDOM_MAC.md).
+
+#### Unable to Resolve Host
+
+* Ensure `SCAN_SUBNETS` uses the correct mask and `--interface`.
+* Refer to the [Subnets Documentation](./SUBNETS.md) for detailed guidance.
+
+#### Invalid JSON Errors
+
+* Follow the steps in [Invalid JSON Errors Debug Help](./DEBUG_INVALID_JSON.md).
+
+#### Sudo Execution Fails (e.g., on arpscan on Raspberry Pi 4)
+
+Error:
+
+```
+sudo: unexpected child termination condition: 0
+```
+
+**Resolution**:

 ```bash
 wget ftp.us.debian.org/debian/pool/main/libs/libseccomp/libseccomp2_2.5.3-2_armhf.deb
 sudo dpkg -i libseccomp2_2.5.3-2_armhf.deb
 ```

-The link above will probably break in time too. Go to https://packages.debian.org/sid/armhf/libseccomp2/download to find the new version number and put that in the url.
+> ⚠️ The link may break over time. Check [Debian Packages](https://packages.debian.org/sid/armhf/libseccomp2/download) for the latest version.

-### Only Router and own device show up
+#### Only Router and Own Device Show Up

-Make sure that the subnet and interface in `SCAN_SUBNETS` are correct. If your device/NAS has multiple ethernet ports, you probably need to change `eth0` to something else.
+* Verify the subnet and interface in `SCAN_SUBNETS`.
+* On devices with multiple Ethernet ports, you may need to change `eth0` to the correct interface.

-### Losing my settings and devices after an update
+#### Losing Settings or Devices After Update

-If you lose your devices and/or settings after an update that means you don't have the `/data/db` and `/data/config` folders mapped to a permanent storage. That means every time you update these folders are re-created. Make sure you have the [volumes specified correctly](./DOCKER_COMPOSE.md) in your `docker-compose.yml` or run command.
+* Ensure `/data/db` and `/data/config` are mapped to persistent storage.
+* Without persistent volumes, these folders are recreated on every update.
+* See [Docker Volumes Setup](./DOCKER_COMPOSE.md) for proper configuration.

+#### Application Performance Issues

-### The application is slow
+Slowness can be caused by:
+
+* Incorrect settings (causing app restarts) → check `app.log`.
+* Too many background processes → disable unnecessary scanners.
+* Long scans → limit the number of scanned devices.
+* Excessive disk operations or failing maintenance plugins.
+
+> See [Performance Tips](./PERFORMANCE.md) for detailed optimization steps.

-Slowness is usually caused by incorrect settings (the app might restart, so check the `app.log`), too many background processes (disable unnecessary scanners), too long scans (limit the number of scanned devices), too many disk operations, or some maintenance plugins might have failed. See the [Performance tips](./PERFORMANCE.md) docs for details.
@@ -8,8 +8,8 @@ Check the HTTP response of the failing backend call by following these steps
 ![F12DeveloperConsole][F12DeveloperConsole]

 - Copy the URL causing the error and enter it in the address bar of your browser directly and hit enter. The copied URLs could look something like this (notice the query strings at the end):
-  - `http://<NetAlertX URL>:20211/api/table_devices.json?nocache=1704141103121`
-  - `http://<NetAlertX URL>:20211/php/server/devices.php?action=getDevicesTotals`
+  - `http://<server>:20211/api/table_devices.json?nocache=1704141103121`
+  - `http://<server>:20211/php/server/devices.php?action=getDevicesTotals`

 - Post the error response in the existing issue thread on GitHub or create a new issue and include the redacted response of the failing query.

@@ -1,5 +1,8 @@
 # Troubleshooting plugins

+> [!TIP]
+> Before troubleshooting, please ensure you have the right [Debugging and LOG_LEVEL set](./DEBUG_TIPS.md).
+
 ## High-level overview

 If a Plugin supplies data to the main app it's done either via a SQL query or via a script that updates the `last_result.log` file in the plugin log folder (`app/log/plugins/`).
|
||||
Start the container via the **terminal** with a command similar to this one:
|
||||
|
||||
```bash
|
||||
docker run --rm --network=host \
|
||||
-v /local_data_dir/netalertx/config:/data/config \
|
||||
-v /local_data_dir/netalertx/db:/data/db \
|
||||
-v /etc/localtime:/etc/localtime \
|
||||
docker run \
|
||||
--network=host \
|
||||
--restart unless-stopped \
|
||||
-v /local_data_dir/config:/data/config \
|
||||
-v /local_data_dir/db:/data/db \
|
||||
-v /etc/localtime:/etc/localtime:ro \
|
||||
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
|
||||
-e PORT=20211 \
|
||||
-e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
|
||||
ghcr.io/jokob-sk/netalertx:latest
|
||||
|
||||
```
|
||||
|
||||
> ⚠ Please note, don't use the `-d` parameter so you see the error when the container crashes. Use this error in your issue description.
|
||||
> [!NOTE]
|
||||
> ⚠ The most important part is NOT to use the `-d` parameter so you see the error when the container crashes. Use this error in your issue description.
|
||||
|
||||
## 3. Check the _dev image and open issues
|
||||
|
||||
@@ -48,7 +53,12 @@ services:
|
||||
# Other service configurations...
|
||||
```
|
||||
|
||||
## 5. Sharing application state
|
||||
## 5. TMP mount directories to rule host out permission issues
|
||||
|
||||
Try starting the container with all data to be in non-persistent volumes. If this works, the issue might be related to the permissions of your persistent data mount locations on your server. See teh [Permissions guide](./FILE_PERMISSIONS.md) for details.
|
||||
|
||||
|
||||
## 6. Sharing application state
|
||||
|
||||
Sometimes specific log sections are needed to debug issues. The Devices and CurrentScan table data is sometimes needed to figure out what's wrong.
|
||||
|
||||
@@ -61,4 +71,4 @@ Sometimes specific log sections are needed to debug issues. The Devices and Curr
|
||||
|
||||
## Common issues
|
||||
|
||||
See [Common issues](./COMMON_ISSUES.md) for details.
|
||||
See [Common issues](./COMMON_ISSUES.md) for additional troubleshooting tips.
|
||||
|
||||
@@ -26,7 +26,7 @@ The database and device structure may change with new releases. When using the C
|
||||

|
||||
|
||||
> [!NOTE]
|
||||
> The file containing a list of Devices including the Network relationships between Network Nodes and connected devices. You can also trigger this by acessing this URL: `<your netalertx url>/php/server/devices.php?action=ExportCSV` or via the `CSV Backup` plugin. (💡 You can schedule this)
|
||||
> The file containing a list of Devices including the Network relationships between Network Nodes and connected devices. You can also trigger this by acessing this URL: `<server>:20211/php/server/devices.php?action=ExportCSV` or via the `CSV Backup` plugin. (💡 You can schedule this)
|
||||
|
||||

|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ docker run -d --rm --network=host \
|
||||
-v /local_data_dir/config:/data/config \
|
||||
-v /local_data_dir/db:/data/db \
|
||||
-v /etc/localtime:/etc/localtime \
|
||||
--mount type=tmpfs,target=/tmp/api \
|
||||
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
|
||||
-e PORT=20211 \
|
||||
-e APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"} \
|
||||
ghcr.io/jokob-sk/netalertx:latest
|
||||
|
||||
@@ -34,30 +34,26 @@ Copy and paste the following YAML into the **Web editor**:
 services:
   netalertx:
     container_name: netalertx

     # Use this line for stable release
     image: "ghcr.io/jokob-sk/netalertx:latest"

     # Or, use this for the latest development build
     # image: "ghcr.io/jokob-sk/netalertx-dev:latest"

     network_mode: "host"
     restart: unless-stopped

     cap_drop: # Drop all capabilities for enhanced security
       - ALL
     cap_add: # Re-add necessary capabilities
       - NET_RAW
       - NET_ADMIN
       - NET_BIND_SERVICE
     volumes:
       - ${APP_FOLDER}/netalertx/config:/data/config
       - ${APP_FOLDER}/netalertx/db:/data/db
       # Optional: logs (useful for debugging setup issues, comment out for performance)
       - ${APP_FOLDER}/netalertx/log:/tmp/log

-      # API storage options:
-      # (Option 1) tmpfs (default, best performance)
-      - type: tmpfs
-        target: /tmp/api
-
-      # (Option 2) bind mount (useful for debugging)
-      # - ${APP_FOLDER}/netalertx/api:/tmp/api
-
       # to sync with system time
       - /etc/localtime:/etc/localtime:ro
+    tmpfs:
+      # All writable runtime state resides under /tmp; comment out to persist logs between restarts
+      - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
     environment:
       - PORT=${PORT}
       - APP_CONF_OVERRIDE=${APP_CONF_OVERRIDE}
@@ -79,10 +75,11 @@ In the **Environment variables** section of Portainer, add the following:

 > [!TIP]
 > If you are facing permissions issues run the following commands on your server. This will change the owner and assure sufficient access to the database and config files that are stored in the `/local_data_dir/db` and `/local_data_dir/config` folders (replace `local_data_dir` with the location where your `/db` and `/config` folders are located).
+> ```bash
+> sudo chown -R 20211:20211 /local_data_dir
+> sudo chmod -R a+rwx /local_data_dir
+> ```
->
-> `sudo chown -R 20211:20211 /local_data_dir`
->
-> `sudo chmod -R a+rwx /local_data_dir1`
->

 ---
@@ -41,15 +41,7 @@ Use the following Compose snippet to deploy NetAlertX with a **static LAN IP** a
 services:
   netalertx:
     image: ghcr.io/jokob-sk/netalertx:latest
     ports:
       - 20211:20211
     volumes:
       - /mnt/YOUR_SERVER/netalertx/config:/data/config:rw
       - /mnt/YOUR_SERVER/netalertx/db:/netalertx/data/db:rw
       - /mnt/YOUR_SERVER/netalertx/logs:/netalertx/tmp/log:rw
       - /etc/localtime:/etc/localtime:ro
     environment:
       - PORT=20211
     ...
     networks:
       swarm-ipvlan:
         ipv4_address: 192.168.1.240 # ⚠️ Choose a free IP from your LAN
@@ -1,8 +1,23 @@
 # Managing File Permissions for NetAlertX on a Read-Only Container

+Sometimes, permission issues arise if your existing host directories were created by a previous container running as root or another UID. The container will fail to start with "Permission Denied" errors.
+
 > [!TIP]
 > NetAlertX runs in a **secure, read-only Alpine-based container** under a dedicated `netalertx` user (UID 20211, GID 20211). All writable paths are either mounted as **persistent volumes** or **`tmpfs` filesystems**. This ensures consistent file ownership and prevents privilege escalation.

+Try starting the container with all data in non-persistent volumes. If this works, the issue might be related to the permissions of your persistent data mount locations on your server.
+
+```bash
+docker run --rm --network=host \
+  -v /etc/localtime:/etc/localtime:ro \
+  --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
+  -e PORT=20211 \
+  ghcr.io/jokob-sk/netalertx:latest
+```
+
+> [!WARNING]
+> The above should only be used as a test - once the container restarts, all data is lost.
+
 ---

 ## Writable Paths
@@ -25,10 +40,6 @@ NetAlertX requires certain paths to be writable at runtime. These paths should b

 ---

 ## Fixing Permission Problems

-Sometimes, permission issues arise if your existing host directories were created by a previous container running as root or another UID. The container will fail to start with "Permission Denied" errors.
-
 ### Solution

 1. **Run the container once as root** (`--user "0"`) to allow it to correct permissions automatically:

@@ -37,6 +48,7 @@ Sometimes, permission issues arise if your existing host directories were create
 docker run -it --rm --name netalertx --user "0" \
   -v /local_data_dir/config:/data/config \
   -v /local_data_dir/db:/data/db \
+  --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
   ghcr.io/jokob-sk/netalertx:latest
 ```

@@ -48,10 +60,11 @@ docker run -it --rm --name netalertx --user "0" \

 > [!TIP]
 > If you are facing permissions issues run the following commands on your server. This will change the owner and assure sufficient access to the database and config files that are stored in the `/local_data_dir/db` and `/local_data_dir/config` folders (replace `local_data_dir` with the location where your `/db` and `/config` folders are located).
+> ```bash
+> sudo chown -R 20211:20211 /local_data_dir
+> sudo chmod -R a+rwx /local_data_dir
+> ```
->
-> `sudo chown -R 20211:20211 /local_data_dir`
->
-> `sudo chmod -R a+rwx /local_data_dir1`
->

 ---
@@ -255,6 +255,7 @@ services:
 docker run -it --rm --name netalertx --user "0" \
   -v /local_data_dir/config:/data/config \
   -v /local_data_dir/db:/data/db \
+  --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
   ghcr.io/jokob-sk/netalertx:latest
 ```

@@ -273,7 +274,7 @@ sudo chmod -R a+rwx /local_data_dir/
 services:
   netalertx:
     container_name: netalertx
-    image: "ghcr.io/jokob-sk/netalertx" # 🆕 This is important
+    image: "ghcr.io/jokob-sk/netalertx" # 🆕 This has changed
     network_mode: "host"
     cap_drop: # 🆕 New line
       - ALL # 🆕 New line
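With `cap_drop: ALL` plus the re-added network capabilities, you can check what a running container actually retains by reading the kernel's capability sets for PID 1 (illustrative, using the container name from these examples):

```bash
docker exec netalertx grep -E 'Cap(Prm|Eff|Bnd)' /proc/1/status
```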
@@ -50,6 +50,8 @@ Let’s walk through setting up a device named `raspberrypi` to act as a network

 - Optionally assign a **Parent Node** (where this device connects to) and the **Relationship type** of the connection.
   The `nic` relationship type can affect parent notifications — see the setting description and [Notifications documentation](./NOTIFICATIONS.md) for more.
+- A device’s parent MAC will be overwritten by plugins if its current value is any of the following: "null", "(unknown)", "(Unknown)".
+- If you want plugins to be able to overwrite the parent value (for example, when mixing plugins that do not provide parent MACs like `ARPSCAN` with those that do, like `UNIFIAPI`), you must set the setting `NEWDEV_devParentMAC` to None.

 ![Setting up a network device](/docs/img/NETWORK_TREE/Network_Assign_parent_node.png)
@@ -1,47 +1,50 @@
 # Performance Optimization Guide

-There are several ways to improve the application's performance. The application has been tested on a range of devices, from a Raspberry Pi 4 to NAS and NUC systems. If you are running the application on a lower-end device, carefully fine-tune the performance settings to ensure an optimal user experience.
+There are several ways to improve the application's performance. The application has been tested on a range of devices, from Raspberry Pi 4 units to NAS and NUC systems. If you are running the application on a lower-end device, fine-tuning the performance settings can significantly improve the user experience.

 ## Common Causes of Slowness

 Performance issues are usually caused by:

-- **Incorrect settings** – The app may restart unexpectedly. Check `app.log` under **Maintenance → Logs** for details.
-- **Too many background processes** – Disable unnecessary scanners.
-- **Long scan durations** – Limit the number of scanned devices.
-- **Excessive disk operations** – Optimize scanning and logging settings.
-- **Failed maintenance plugins** – Ensure maintenance tasks are running properly.
+* **Incorrect settings** – The app may restart unexpectedly. Check `app.log` under **Maintenance → Logs** for details.
+* **Too many background processes** – Disable unnecessary scanners.
+* **Long scan durations** – Limit the number of scanned devices.
+* **Excessive disk operations** – Optimize scanning and logging settings.
+* **Maintenance plugin failures** – If cleanup tasks fail, performance can degrade over time.

-The application performs regular maintenance and database cleanup. If these tasks fail, performance may degrade.
+The application performs regular maintenance and database cleanup. If these tasks are failing, you will see slowdowns.

 ### Database and Log File Size

-A large database or oversized log files can slow down performance. You can check database and table sizes on the **Maintenance** page.
+A large database or oversized log files can impact performance. You can check database and table sizes on the **Maintenance** page.

 ![Maintenance](/docs/img/PERFORMANCE/maintenance.png)

 > [!NOTE]
-> - For **~100 devices**, the database should be around **50MB**.
-> - No table should exceed **10,000 rows** in a healthy system.
-> - These numbers vary based on network activity and settings.
+>
+> * For **~100 devices**, the database should be around **50 MB**.
+> * No table should exceed **10,000 rows** in a healthy system.
+> * Actual values vary based on network activity and plugin settings.

 ---

 ## Maintenance Plugins

-Two plugins help maintain the application’s performance:
+Two plugins help maintain the system’s performance:

 ### **1. Database Cleanup (DBCLNP)**
-- Responsible for database maintenance.
-- Check settings in the [DB Cleanup Plugin Docs](/front/plugins/db_cleanup/README.md).
-- Ensure it’s not failing by checking logs.
-- Adjust the schedule (`DBCLNP_RUN_SCHD`) and timeout (`DBCLNP_RUN_TIMEOUT`) if needed.
+
+* Handles database maintenance and cleanup.
+* See the [DB Cleanup Plugin Docs](/front/plugins/db_cleanup/README.md).
+* Ensure it’s not failing by checking logs.
+* Adjust the schedule (`DBCLNP_RUN_SCHD`) and timeout (`DBCLNP_RUN_TIMEOUT`) if necessary.

 ### **2. Maintenance (MAINT)**
-- Handles log cleanup and other maintenance tasks.
-- Check settings in the [Maintenance Plugin Docs](/front/plugins/maintenance/README.md).
-- Ensure it’s running correctly by checking logs.
-- Adjust the schedule (`MAINT_RUN_SCHD`) and timeout (`MAINT_RUN_TIMEOUT`) if needed.
+
+* Cleans logs and performs general maintenance tasks.
+* See the [Maintenance Plugin Docs](/front/plugins/maintenance/README.md).
+* Verify proper operation via logs.
+* Adjust the schedule (`MAINT_RUN_SCHD`) and timeout (`MAINT_RUN_TIMEOUT`) if needed.

 ---
@@ -50,48 +53,56 @@ Two plugins help maintain the application’s performance:
 Frequent scans increase resource usage, network traffic, and database read/write cycles.

 ### **Optimizations**
-- **Increase scan intervals** (`<PLUGIN>_RUN_SCHD`) on busy networks or low-end hardware.
-- **Extend scan timeouts** (`<PLUGIN>_RUN_TIMEOUT`) to prevent failures.
-- **Reduce the subnet size** – e.g., from `/16` to `/24` to lower scan loads.
-
-Some plugins have additional options to limit the number of scanned devices. If certain plugins take too long to complete, check if you can optimize scan times by selecting a scan range.
+
+* **Increase scan intervals** (`<PLUGIN>_RUN_SCHD`) on busy networks or low-end hardware.
+* **Increase timeouts** (`<PLUGIN>_RUN_TIMEOUT`) to avoid plugin failures.
+* **Reduce subnet size** – e.g., use `/24` instead of `/16` to reduce scan load.

-For example, the **ICMP plugin** allows you to specify a regular expression to scan only IPs that match a specific pattern.
+Some plugins also include options to limit which devices are scanned. If certain plugins consistently run long, consider narrowing their scope.
+
+For example, the **ICMP plugin** allows scanning only IPs that match a specific regular expression.

 ---

 ## Storing Temporary Files in Memory

-On systems with slower I/O speeds, you can optimize performance by storing temporary files in memory. This primarily applies to the API directory (default: `/tmp/api`, configurable via `NETALERTX_API`) and `/tmp/log` folders.
+On devices with slower I/O, you can improve performance by storing temporary files (and optionally the database) in memory using `tmpfs`.

-Using `tmpfs` reduces disk writes and improves performance. However, it should be **disabled** if persistent logs or API data storage are required.
+> [!WARNING]
+> Storing the **database** in `tmpfs` is generally discouraged. Use this only if device data and historical records are not required to persist. If needed, you can pair this setup with the `SYNC` plugin to store important persistent data on another node. See the [Plugins docs](./PLUGINS.md) for details.

-Below is an optimized `docker-compose.yml` snippet:
+Using `tmpfs` reduces disk writes and speeds up I/O, but **all data stored in memory will be lost on restart**.
+
+Below is an optimized `docker-compose.yml` snippet using non-persistent logs, API data, and DB:

 ```yaml
 version: "3"
 services:
   netalertx:
     container_name: netalertx
-    # Uncomment the line below to test the latest dev image
-    # image: "ghcr.io/jokob-sk/netalertx-dev:latest"
+    # Use this line for the stable release
     image: "ghcr.io/jokob-sk/netalertx:latest"
+    # Or use this line for the latest development build
+    # image: "ghcr.io/jokob-sk/netalertx-dev:latest"
     network_mode: "host"
     restart: unless-stopped
-    volumes:
-      - /local_data_dir/config:/data/config
-      - /local_data_dir/db:/data/db
-      # (Optional) Useful for debugging setup issues
-      - /local_data_dir/logs:/tmp/log
-      # (API: OPTION 1) Store temporary files in memory (recommended for performance)
-      - type: tmpfs # ◀ 🔺
-        target: /tmp/api # ◀ 🔺
-      # (API: OPTION 2) Store API data on disk (useful for debugging)
-      # - /local_data_dir/api:/tmp/api
-      # Ensuring the timezone is the same as on the server - make sure also the TIMEZONE setting is configured
-      - /etc/localtime:/etc/localtime:ro
-    environment:
-      - PORT=20211
+
+    cap_drop: # Drop all capabilities for enhanced security
+      - ALL
+    cap_add: # Re-add necessary capabilities
+      - NET_RAW
+      - NET_ADMIN
+      - NET_BIND_SERVICE
+
+    volumes:
+      - ${APP_FOLDER}/netalertx/config:/data/config
+      - /etc/localtime:/etc/localtime:ro
+
+    tmpfs:
+      # All writable runtime state resides under /tmp; comment out to persist logs between restarts
+      - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+      - "/data/db:uid=20211,gid=20211,mode=1700" # ⚠ You will lose historical data on restart
+
+    environment:
+      - PORT=${PORT}
+      - APP_CONF_OVERRIDE=${APP_CONF_OVERRIDE}
 ```
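To confirm the `tmpfs` mounts from the snippet above actually took effect, a quick check against the running container's mount table (container name as configured here):

```bash
# Both paths should be listed with filesystem type tmpfs.
docker exec netalertx sh -c 'mount | grep -E " /(tmp|data/db) "'
```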
@@ -40,16 +40,7 @@ services:
   netalertx:
     container_name: netalertx
     image: "ghcr.io/jokob-sk/netalertx:latest"
     restart: unless-stopped
     volumes:
       - /local_data_dir/config:/data/config
       - /local_data_dir/db:/data/db
       # - /local_data_dir/log:/tmp/log
       # Ensuring the timezone is the same as on the server - make sure also the TIMEZONE setting is configured
       - /etc/localtime:/etc/localtime:ro
     environment:
       - PORT=20211
     network_mode: host
     ...
     dns: # specifying the DNS servers used for the container
       - 10.8.0.1
       - 10.8.0.17
@@ -66,18 +57,10 @@ version: "3"
 services:
   netalertx:
     container_name: netalertx
     image: "ghcr.io/jokob-sk/netalertx:latest"
     restart: unless-stopped
     volumes:
       - /local_data_dir/config/app.conf:/data/config/app.conf
       - /local_data_dir/db:/data/db
       - /local_data_dir/log:/tmp/log
       ...
       - /local_data_dir/config/resolv.conf:/etc/resolv.conf # ⚠ Mapping the /resolv.conf file for better name resolution
       # Ensuring the timezone is the same as on the server - make sure also the TIMEZONE setting is configured
       - /etc/localtime:/etc/localtime:ro
     environment:
       - PORT=20211
     network_mode: host
     ...
 ```

 #### /local_data_dir/config/resolv.conf:
@@ -496,14 +496,9 @@ server {
 Mapping the updated file (on the local filesystem at `/appl/docker/netalertx/default`) into the docker container:

-```bash
-docker run -d --rm --network=host \
-  --name=netalertx \
-  -v /appl/docker/netalertx/config:/data/config \
-  -v /appl/docker/netalertx/db:/data/db \
-  -v /etc/localtime:/etc/localtime \
-  -v /appl/docker/netalertx/default:/etc/nginx/sites-available/default \
-  -e PORT=20211 \
-  ghcr.io/jokob-sk/netalertx:latest
-
+```yaml
+...
+volumes:
+  - /appl/docker/netalertx/default:/etc/nginx/sites-available/default
+...
 ```

@@ -29,6 +29,7 @@ The folders you are creating below will contain the configuration and the databa

- Path: `/app_storage/netalertx` (will differ from yours)
- Paste in the following template:

```yaml
version: "3"
services:
@@ -39,13 +40,20 @@ services:
    image: "ghcr.io/jokob-sk/netalertx:latest"
    network_mode: "host"
    restart: unless-stopped
    cap_drop: # Drop all capabilities for enhanced security
      - ALL
    cap_add: # Re-add necessary capabilities
      - NET_RAW
      - NET_ADMIN
      - NET_BIND_SERVICE
    volumes:
      - local/path/config:/data/config
      - local/path/db:/data/db
      # (optional) useful for debugging if you have issues setting up the container
      - local/path/logs:/tmp/log
      # Ensuring the timezone is the same as on the server - make sure also the TIMEZONE setting is configured
      - /app_storage/netalertx/config:/data/config
      - /app_storage/netalertx/db:/data/db
      # to sync with system time
      - /etc/localtime:/etc/localtime:ro
    tmpfs:
      # All writable runtime state resides under /tmp; comment out to persist logs between restarts
      - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
    environment:
      - PORT=20211
```
@@ -73,3 +81,12 @@ services:

10. Navigate to `<Synology URL>:20211` (or your custom port).
11. Read the [Subnets](./SUBNETS.md) and [Plugins](/docs/PLUGINS.md) docs to complete your setup.

> [!TIP]
> If you are facing permissions issues, run the following commands on your server. This will change the owner and ensure sufficient access to the database and config files stored in the `/local_data_dir/db` and `/local_data_dir/config` folders (replace `local_data_dir` with the location where your `/db` and `/config` folders are located).
>
> `sudo chown -R 20211:20211 /local_data_dir`
>
> `sudo chmod -R a+rwx /local_data_dir`
>
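
To verify the result, a quick check (a minimal sketch; adjust the path to your own data directory):

```bash
# Both folders should report numeric owner/group 20211 after the chown above
ls -ln /local_data_dir/config /local_data_dir/db
```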

@@ -25,7 +25,7 @@ Follow all of the below in order to disqualify potential causes of issues and to

When opening an issue or debugging:

1. Include a screenshot of what you see when accessing `HTTP://<your rpi IP>/20211` (or your custom port)
1. Include a screenshot of what you see when accessing `HTTP://<your_server>:20211` (or your custom port)
1. [Follow steps 1, 2, 3, 4 on this page](./DEBUG_TIPS.md)
1. Execute the following in the container to see the processes and their ports and submit a screenshot of the result:
   - `sudo apk add lsof`

@@ -1,7 +1,7 @@
# Workflows debugging and troubleshooting

> [!TIP]
> Before troubleshooting, please ensure you have [Debugging enabled](./DEBUG_TIPS.md).
> Before troubleshooting, please ensure you have the right [Debugging and LOG_LEVEL set](./DEBUG_TIPS.md).

Workflows are triggered by various events. These events are captured and listed in the _Integrations -> App Events_ section of the application.

0 docs/img/DEBUG_GRAPHQL/Init_check.png → docs/img/DEBUG_API_SERVER/Init_check.png
Executable file → Normal file
0 docs/img/DEBUG_GRAPHQL/app_conf_graphql_port.png → docs/img/DEBUG_API_SERVER/app_conf_graphql_port.png
Executable file → Normal file
0 docs/img/DEBUG_GRAPHQL/dev_console_graphql_json.png → docs/img/DEBUG_API_SERVER/dev_console_graphql_json.png
Executable file → Normal file
0 docs/img/DEBUG_GRAPHQL/graphql_running_logs.png → docs/img/DEBUG_API_SERVER/graphql_running_logs.png
Executable file → Normal file
0 docs/img/DEBUG_GRAPHQL/graphql_settings_port_token.png → docs/img/DEBUG_API_SERVER/graphql_settings_port_token.png
Executable file → Normal file
0 docs/img/DEBUG_GRAPHQL/network_graphql.png → docs/img/DEBUG_API_SERVER/network_graphql.png
Executable file → Normal file

@@ -107,11 +107,11 @@
        "buttons": [
          {
            "labelStringCode": "Maint_PurgeLog",
            "event": "logManage('crond.log', 'cleanLog')"
            "event": "logManage('cron.log', 'cleanLog')"
          }
        ],
        "fileName": "crond.log",
        "filePath": "__NETALERTX_LOG__/crond.log",
        "fileName": "cron.log",
        "filePath": "__NETALERTX_LOG__/cron.log",
        "textAreaCssClass": "logs logs-small"
      }
    ]

@@ -274,7 +274,7 @@ function cleanLog($logFile)

    $path = "";

    $allowedFiles = ['app.log', 'app_front.log', 'IP_changes.log', 'stdout.log', 'stderr.log', 'app.php_errors.log', 'execution_queue.log', 'db_is_locked.log', 'nginx-error.log', 'crond.log'];
    $allowedFiles = ['app.log', 'app_front.log', 'IP_changes.log', 'stdout.log', 'stderr.log', 'app.php_errors.log', 'execution_queue.log', 'db_is_locked.log', 'nginx-error.log', 'cron.log'];

    if(in_array($logFile, $allowedFiles))
    {

210 front/php/templates/language/pt_pt.json
Executable file → Normal file
@@ -60,7 +60,7 @@
"BackDevices_darkmode_disabled": "Modo Noturno Desativado",
"BackDevices_darkmode_enabled": "Modo Noturno Ativado",
"CLEAR_NEW_FLAG_description": "Se ativado (<code>0</code> está desativado), dispositivos marcados como<b>Novo Dispositivo</b> serão desmarcados se o limite (especificado em horas) exceder o tempo da <b>Primeira Sessão </b>.",
"CLEAR_NEW_FLAG_name": "",
"CLEAR_NEW_FLAG_name": "Limpar a flag nova",
"CustProps_cant_remove": "Não é possível remover, é necessária pelo menos uma propriedade.",
"DAYS_TO_KEEP_EVENTS_description": "Esta é uma definição de manutenção. Especifica o número de dias de entradas de eventos que serão mantidas. Todos os eventos mais antigos serão apagados periodicamente. Também se aplica ao Histórico de eventos do plug-in.",
"DAYS_TO_KEEP_EVENTS_name": "Apagar eventos mais antigos que",
@@ -73,10 +73,10 @@
"DevDetail_CustomProps_reset_info": "Isto irá remover as suas propriedades personalizadas neste dispositivo e repô-las para o valor predefinido.",
"DevDetail_DisplayFields_Title": "Visualização",
"DevDetail_EveandAl_AlertAllEvents": "Eventos de alerta",
"DevDetail_EveandAl_AlertDown": "",
"DevDetail_EveandAl_AlertDown": "Alerta apagado",
"DevDetail_EveandAl_Archived": "Arquivado",
"DevDetail_EveandAl_NewDevice": "Novo dispositivo",
"DevDetail_EveandAl_NewDevice_Tooltip": "",
"DevDetail_EveandAl_NewDevice_Tooltip": "Mostrará o estado “Novo” para o dispositivo e irá incluí-lo nas listas quando o filtro de “Novos dispositivos” estiver ativo. Não afeta as notificações.",
"DevDetail_EveandAl_RandomMAC": "MAC Aleatório",
"DevDetail_EveandAl_ScanCycle": "Rastrear dispositivo",
"DevDetail_EveandAl_ScanCycle_a": "Rastear dispositivo",
@@ -103,11 +103,11 @@
"DevDetail_MainInfo_Type": "Tipo",
"DevDetail_MainInfo_Vendor": "Fornecedor",
"DevDetail_MainInfo_mac": "MAC",
"DevDetail_NavToChildNode": "",
"DevDetail_NavToChildNode": "Expandir subelemento",
"DevDetail_Network_Node_hover": "Selecione o dispositivo de rede principal ao qual o dispositivo atual está conectado, para preencher a árvore Rede.",
"DevDetail_Network_Port_hover": "A porta a que este dispositivo está ligado no dispositivo de rede principal. Se for deixado vazio, é apresentado um ícone wifi na árvore Rede.",
"DevDetail_Nmap_Scans": "Varreduras manuais do Nmap",
"DevDetail_Nmap_Scans_desc": "",
"DevDetail_Nmap_Scans_desc": "Aqui pode executar análises NMAP manuais. Também pode agendar análises NMAP automáticas regulares através do plugin Serviços & Portos (NMAP). Aceda à https://github.com/jokob-sk/NetAlertX/tree/main/front/plugins/nmap_scan para saber mais",
"DevDetail_Nmap_buttonDefault": "Verificação predefinida",
"DevDetail_Nmap_buttonDefault_text": "Scan padrão: Nmap verifica as 1.000 portas superiores para cada protocolo de digitalização solicitado. Isto atinge cerca de 93% das portas TCP e 49% das portas UDP. (cerca de 5 segundos)",
"DevDetail_Nmap_buttonDetail": "Verificação Detalhada",
@@ -155,34 +155,34 @@
"DevDetail_Tab_NmapTablePort": "Porta",
"DevDetail_Tab_NmapTableService": "Serviço",
"DevDetail_Tab_NmapTableState": "Estado",
"DevDetail_Tab_NmapTableText": "",
"DevDetail_Tab_NmapTableText": "Configurar uma programação em <a href=\"/settings.php#NMAP_ACTIVE\">Definições</a>",
"DevDetail_Tab_NmapTableTime": "Tempo",
"DevDetail_Tab_Plugins": "Plugins",
"DevDetail_Tab_Presence": "Presença",
"DevDetail_Tab_Sessions": "Sessões",
"DevDetail_Tab_Tools": "Ferramentas",
"DevDetail_Tab_Tools_Internet_Info_Description": "",
"DevDetail_Tab_Tools_Internet_Info_Description": "A ferramenta de informações da Internet apresenta dados sobre a ligação à Internet, como endereço IP, cidade, país, código de área e fuso horário.",
"DevDetail_Tab_Tools_Internet_Info_Error": "Ocorreu um erro",
"DevDetail_Tab_Tools_Internet_Info_Start": "",
"DevDetail_Tab_Tools_Internet_Info_Title": "",
"DevDetail_Tab_Tools_Nslookup_Description": "",
"DevDetail_Tab_Tools_Nslookup_Error": "",
"DevDetail_Tab_Tools_Nslookup_Start": "",
"DevDetail_Tab_Tools_Nslookup_Title": "",
"DevDetail_Tab_Tools_Speedtest_Description": "",
"DevDetail_Tab_Tools_Speedtest_Start": "",
"DevDetail_Tab_Tools_Speedtest_Title": "",
"DevDetail_Tab_Tools_Traceroute_Description": "",
"DevDetail_Tab_Tools_Traceroute_Error": "",
"DevDetail_Tab_Tools_Traceroute_Start": "",
"DevDetail_Tab_Tools_Traceroute_Title": "",
"DevDetail_Tools_WOL": "",
"DevDetail_Tools_WOL_noti": "",
"DevDetail_Tools_WOL_noti_text": "",
"DevDetail_Type_hover": "",
"DevDetail_Vendor_hover": "",
"DevDetail_WOL_Title": "",
"DevDetail_button_AddIcon": "",
"DevDetail_Tab_Tools_Internet_Info_Start": "Start Internet Info",
"DevDetail_Tab_Tools_Internet_Info_Title": "Internet Info",
"DevDetail_Tab_Tools_Nslookup_Description": "Nslookup é uma ferramenta de linha de comandos usada para consultar o Sistema de Nomes de Domínio (DNS). O DNS é um sistema que traduz nomes de domínio, como www.google.com, em endereços IP, como 172.217.0.142.",
"DevDetail_Tab_Tools_Nslookup_Error": "Erro: O endereço IP não é válido",
"DevDetail_Tab_Tools_Nslookup_Start": "Inicia Nslookup",
"DevDetail_Tab_Tools_Nslookup_Title": "Nslookup",
"DevDetail_Tab_Tools_Speedtest_Description": "A ferramenta Speedtest mede a velocidade de download, a velocidade de upload e a latência da ligação à Internet.",
"DevDetail_Tab_Tools_Speedtest_Start": "Iniciar Speedtest",
"DevDetail_Tab_Tools_Speedtest_Title": "Speedtest Online",
"DevDetail_Tab_Tools_Traceroute_Description": "Traceroute é um comando de diagnóstico de rede usado para rastrear o caminho que os pacotes de dados percorrem de um anfitrião para outro.<br><br>O comando utiliza o Protocolo de Mensagens de Controlo da Internet (ICMP) para enviar pacotes aos nós intermédios na rota, cada node intermédio responde com um pacote ICMP de tempo limite (TTL expirado).<br><br>O comando utiliza o Protocolo de Mensagens de Controlo da Internet (ICMP) para enviar pacotes aos nodes intermédios na rota, cada node intermédio responde com um pacote ICMP de tempo limite (TTL expirado).<br><br>A saída do comando traceroute apresenta o endereço IP de cada node intermédio na rota.<br><br>O comando traceroute pode ser usado para diagnosticar problemas de rede, como atrasos, perda de pacotes e rotas bloqueadas.",
"DevDetail_Tab_Tools_Traceroute_Error": "Erro: O endereço IP não é válido",
"DevDetail_Tab_Tools_Traceroute_Start": "Iniciar Traceroute",
"DevDetail_Tab_Tools_Traceroute_Title": "Traceroute",
"DevDetail_Tools_WOL": "Enviar comando WoL para ",
"DevDetail_Tools_WOL_noti": "Wake-on-LAN",
"DevDetail_Tools_WOL_noti_text": "O comando Wake-on-LAN é enviado para o endereço de broadcast. Se o destino não estiver na sub-rede/VLAN do NetAlertX, o dispositivo de destino não irá responder.",
"DevDetail_Type_hover": "O tipo do dispositivo. Se selecionar um dos dispositivos de rede predefinidos (por exemplo: AP, Firewall, Router, Switch…), eles aparecerão na configuração da árvore de rede como possíveis nós de rede principais.",
"DevDetail_Vendor_hover": "O fabricante deve ser detetado automaticamente. Pode substituir ou adicionar um valor personalizado.",
"DevDetail_WOL_Title": "<i class=\"fa fa-power-off\"></i> Wake-on-LAN",
"DevDetail_button_AddIcon": "Adicionar novo ícone",
"DevDetail_button_AddIcon_Help": "Cole uma tag HTML SVG ou um ícone de tag HTML Font Awesome. Leia a <a href=\"https://github.com/jokob-sk/NetAlertX/blob/main/docs/ICONS.md\" target=\"_blank\">documentação sobre ícones</a> para obter pormenores.",
"DevDetail_button_AddIcon_Tooltip": "Adicione um novo ícone a este dispositivo que ainda não esteja disponível no menu suspenso.",
"DevDetail_button_Delete": "Apagar dispositivo",
@@ -199,23 +199,23 @@
"Device_MultiEdit_Backup": "",
"Device_MultiEdit_Fields": "Editar campos:",
"Device_MultiEdit_MassActions": "Ações em massa:",
"Device_MultiEdit_No_Devices": "",
"Device_MultiEdit_No_Devices": "Nenhum dispositivo selecionado.",
"Device_MultiEdit_Tooltip": "Cuidadoso. Clicar aqui aplicará o valor à esquerda a todos os dispositivos selecionados acima.",
"Device_Searchbox": "Procurar",
"Device_Shortcut_AllDevices": "",
"Device_Shortcut_AllNodes": "",
"Device_Shortcut_AllDevices": "Os meus dispositivos",
"Device_Shortcut_AllNodes": "Todos os Nodes",
"Device_Shortcut_Archived": "Arquivado",
"Device_Shortcut_Connected": "Conectado",
"Device_Shortcut_Devices": "Dispositivos",
"Device_Shortcut_DownAlerts": "Inativo e off-line",
"Device_Shortcut_DownOnly": "Inativo",
"Device_Shortcut_Favorites": "Favoritos",
"Device_Shortcut_NewDevices": "",
"Device_Shortcut_NewDevices": "Novo dispostivo",
"Device_Shortcut_OnlineChart": "Presença do dispositivo",
"Device_TableHead_AlertDown": "Alerta em baixo",
"Device_TableHead_Connected_Devices": "Conexões",
"Device_TableHead_CustomProps": "",
"Device_TableHead_FQDN": "",
"Device_TableHead_CustomProps": "Propriedades / Ações",
"Device_TableHead_FQDN": "FQDN",
"Device_TableHead_Favorite": "Favorito",
"Device_TableHead_FirstSession": "Primeira sessão",
"Device_TableHead_GUID": "GUID",
@@ -230,11 +230,11 @@
"Device_TableHead_Name": "Nome",
"Device_TableHead_NetworkSite": "Site da rede",
"Device_TableHead_Owner": "Proprietário",
"Device_TableHead_ParentRelType": "",
"Device_TableHead_Parent_MAC": "",
"Device_TableHead_ParentRelType": "Tipo de relação",
"Device_TableHead_Parent_MAC": "Node de rede anterior",
"Device_TableHead_Port": "Porta",
"Device_TableHead_PresentLastScan": "Presença",
"Device_TableHead_ReqNicsOnline": "",
"Device_TableHead_ReqNicsOnline": "Exigir NICs online",
"Device_TableHead_RowID": "ID da linha",
"Device_TableHead_Rowid": "ID da linha",
"Device_TableHead_SSID": "SSID",
@@ -257,7 +257,7 @@
"ENCRYPTION_KEY_name": "Chave de encriptação",
"Email_display_name": "Email",
"Email_icon": "<i class=\"fa fa-at\"></i>",
"Events_Loading": "",
"Events_Loading": "A carregar…",
"Events_Periodselect_All": "Todas as informações",
"Events_Periodselect_LastMonth": "Mês passado",
"Events_Periodselect_LastWeek": "Semana passada",
@@ -268,7 +268,7 @@
"Events_Shortcut_DownAlerts": "Alertas de queda",
"Events_Shortcut_Events": "Eventos",
"Events_Shortcut_MissSessions": "Sessões ausentes",
"Events_Shortcut_NewDevices": "",
"Events_Shortcut_NewDevices": "Novos dispositivos",
"Events_Shortcut_Sessions": "Sessões",
"Events_Shortcut_VoidSessions": "Sessões anuladas",
"Events_TableHead_AdditionalInfo": "Informação adicional",
@@ -278,7 +278,7 @@
"Events_TableHead_Disconnection": "Desconexão",
"Events_TableHead_Duration": "Duração",
"Events_TableHead_DurationOrder": "Duração do pedido",
"Events_TableHead_EventType": "",
"Events_TableHead_EventType": "Tipos de eventos",
"Events_TableHead_IP": "IP",
"Events_TableHead_IPOrder": "Pedido de IP",
"Events_TableHead_Order": "Ordem",
@@ -294,15 +294,15 @@
"GRAPHQL_PORT_name": "Porta GraphQL",
"Gen_Action": "Ação",
"Gen_Add": "Adicionar",
"Gen_AddDevice": "",
"Gen_AddDevice": "Adicionar dispositivo",
"Gen_Add_All": "Adicionar todos",
"Gen_All_Devices": "",
"Gen_All_Devices": "Todos os dispostivos",
"Gen_AreYouSure": "Tem certeza?",
"Gen_Backup": "Executar backup",
"Gen_Cancel": "Cancelar",
"Gen_Change": "Alterar",
"Gen_Copy": "Executar",
"Gen_CopyToClipboard": "",
"Gen_CopyToClipboard": "Copiar para a área de transferência",
"Gen_DataUpdatedUITakesTime": "OK - Pode levar um tempo para a interface do utilizador ser atualizada se uma verificação estiver em execução.",
"Gen_Delete": "Apagar",
"Gen_DeleteAll": "Apagar todos",
@@ -310,9 +310,9 @@
"Gen_Error": "Erro",
"Gen_Filter": "Filtro",
"Gen_Generate": "Gerar",
"Gen_InvalidMac": "",
"Gen_InvalidMac": "Endereço MAC Inválido.",
"Gen_LockedDB": "ERRO - A base de dados pode estar bloqueada - Verifique F12 Ferramentas de desenvolvimento -> Console ou tente mais tarde.",
"Gen_NetworkMask": "",
"Gen_NetworkMask": "Máscara de Rede",
"Gen_Offline": "Offline",
"Gen_Okay": "Ok",
"Gen_Online": "Online",
@@ -329,8 +329,8 @@
"Gen_Select": "Selecionar",
"Gen_SelectIcon": "<i class=\"fa-solid fa-chevron-down fa-fade\"></i>",
"Gen_SelectToPreview": "Selecionar para pré-visualizar",
"Gen_Selected_Devices": "",
"Gen_Subnet": "",
"Gen_Selected_Devices": "Seleciona dispostivos:",
"Gen_Subnet": "Sub-rede",
"Gen_Switch": "Trocar",
"Gen_Upd": "Atualizado com sucesso",
"Gen_Upd_Fail": "A atualização falhou",
@@ -344,14 +344,14 @@
"General_display_name": "Geral",
"General_icon": "<i class=\"fa fa-gears\"></i>",
"HRS_TO_KEEP_NEWDEV_description": "",
"HRS_TO_KEEP_NEWDEV_name": "",
"HRS_TO_KEEP_NEWDEV_name": "Remover novos dispostivos depois",
"HRS_TO_KEEP_OFFDEV_description": "",
"HRS_TO_KEEP_OFFDEV_name": "Apagar dispositivos offline após",
"LOADED_PLUGINS_description": "Quais plugins carregar. Adicionar plugins pode deixar a aplicação lenta. Leia mais sobre quais plugins precisam ser ativados, tipos ou opções de escaneamento na <a target=\"_blank\" href=\"https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md\">documentação de plugins</a>. Plugins descarregados perderão as suas configurações. Somente plugins <code>desativados</code> podem ser descarregados.",
"LOADED_PLUGINS_name": "Plugins carregados",
"LOG_LEVEL_description": "Esta definição permite um registo mais detalhado. Útil para depurar eventos gravados na base de dados.",
"LOG_LEVEL_name": "Imprimir registo adicional",
"Loading": "",
"Loading": "A carregar…",
"Login_Box": "Introduza a sua palavra-passe",
"Login_Default_PWD": "A palavra-passe predefinida “123456” ainda está ativa.",
"Login_Info": "As palavra-passes são definidas por meio do plugin Definir palavra-passe. Verifique a <a target=\"_blank\" href=\"https://github.com/jokob-sk/NetAlertX/tree/main/front/plugins/set_password\">documentação do SETPWD</a> se tiver problemas para fazer login.",
@@ -369,20 +369,20 @@
"Maint_PurgeLog": "Limpar o registo",
"Maint_RestartServer": "Reiniciar o servidor",
"Maint_Restart_Server_noti_text": "Tem certeza de que deseja reiniciar o servidor backend? Isto pode causar inconsistência na app. Faça primeiro um backup da sua configuração. <br/> <br/> Nota: Isto pode levar alguns minutos.",
"Maintenance_InitCheck": "",
"Maintenance_InitCheck_Checking": "",
"Maintenance_InitCheck_QuickSetupGuide": "",
"Maintenance_InitCheck_Success": "",
"Maintenance_ReCheck": "",
"Maintenance_InitCheck": "Verificação inicial",
"Maintenance_InitCheck_Checking": "A verificar…",
"Maintenance_InitCheck_QuickSetupGuide": "Certifique-se de que seguiu o <a href=\"https://jokob-sk.github.io/NetAlertX/INITIAL_SETUP/\" target=\"_blank\">guia de configuração rápida</a>.",
"Maintenance_InitCheck_Success": "Aplicação inicializada com sucesso!",
"Maintenance_ReCheck": "Verificar novamente",
"Maintenance_Running_Version": "Versão instalada",
"Maintenance_Status": "Situação",
"Maintenance_Title": "Ferramentas de manutenção",
"Maintenance_Tool_DownloadConfig": "",
"Maintenance_Tool_DownloadConfig": "Exportar Definições",
"Maintenance_Tool_DownloadConfig_text": "Descarregue um backup completo da configuração das Configurações armazenada no ficheiro <code>app.conf</code>.",
"Maintenance_Tool_DownloadWorkflows": "",
"Maintenance_Tool_DownloadWorkflows_text": "",
"Maintenance_Tool_ExportCSV": "",
"Maintenance_Tool_ExportCSV_noti": "",
"Maintenance_Tool_DownloadWorkflows": "Exportar Workflows",
"Maintenance_Tool_DownloadWorkflows_text": "Descarregue uma cópia completa de segurança dos seus Workflows armazenados no ficheiro <code>workflows.json</code> .",
"Maintenance_Tool_ExportCSV": "Exportar dispostivos (csv)",
"Maintenance_Tool_ExportCSV_noti": "Exportar dispostivos (csv)",
"Maintenance_Tool_ExportCSV_noti_text": "Tem a certeza de que pretende gerar um ficheiro CSV?",
"Maintenance_Tool_ExportCSV_text": "Gere um ficheiro CSV (valor separado por vírgula) contendo a lista de dispositivos, incluindo os relacionamentos de rede entre os nós de rede e os dispositivos conectados. Também pode acionar isto a aceder esta URL <code>your_NetAlertX_url/php/server/devices.php?action=ExportCSV</code> ou ativando o plugin <a href=\"settings.php#CSVBCKP_header\">CSV Backup</a>.",
"Maintenance_Tool_ImportCSV": "Importação de dispositivos (csv)",
@@ -413,31 +413,31 @@
"Maintenance_Tool_del_ActHistory_noti": "Apagar atividade de rede",
"Maintenance_Tool_del_ActHistory_noti_text": "Tem certeza de que deseja redefinir a atividade da rede?",
"Maintenance_Tool_del_ActHistory_text": "O gráfico de atividade da rede é redefinido. Isto não afeta os eventos.",
"Maintenance_Tool_del_alldev": "",
"Maintenance_Tool_del_alldev_noti": "",
"Maintenance_Tool_del_alldev": "Remover todos os dispositivo",
"Maintenance_Tool_del_alldev_noti": "Remover dispositivos",
"Maintenance_Tool_del_alldev_noti_text": "Tem certeza de que deseja apagar todos os dispositivos?",
"Maintenance_Tool_del_alldev_text": "Antes de usar esta função, faça um backup. Apagar não pode ser desfeito. Todos os dispositivos serão apagados da base de dados.",
"Maintenance_Tool_del_allevents": "Apagar eventos (Repor presença)",
"Maintenance_Tool_del_allevents30": "Apagar todos os eventos com mais que 30 dias",
"Maintenance_Tool_del_allevents30_noti": "Apagar eventos",
"Maintenance_Tool_del_allevents30_noti_text": "",
"Maintenance_Tool_del_allevents30_noti_text": "Tem a certeza de que pretende eliminar todos os Eventos com mais de 30 dias? Isto repõe a presença de todos os dispositivos.",
"Maintenance_Tool_del_allevents30_text": "Antes de utilizar esta função, faça uma cópia de segurança. Apagar não pode ser anulado. Todos os eventos com mais que 30 dias na base de dados serão eliminados. Nesse momento, a presença de todos os dispositivos será reiniciada. Este facto pode dar origem a sessões inválidas. Isto significa que os dispositivos são apresentados como “presentes” apesar de estarem offline. Uma verificação enquanto o dispositivo em questão está online resolve o problema.",
"Maintenance_Tool_del_allevents_noti": "Apagar eventos",
"Maintenance_Tool_del_allevents_noti_text": "",
"Maintenance_Tool_del_allevents_noti_text": "Tem a certeza de que pretende eliminar todos os Eventos? Isto repõe a presença de todos os dispositivos.",
"Maintenance_Tool_del_allevents_text": "Antes de usar esta função, faça um backup. Apagar não pode ser desfeito. Todos os eventos na base de dados serão apagados. Nesse momento, a presença de todos os dispositivos será redefinida. Isto pode levar a sessões inválidas. Isto significa que os dispositivos são exibidos como \"presente\" embora estejam offline. Uma varredura enquanto o dispositivo em questão é on-line resolve o problema.",
"Maintenance_Tool_del_empty_macs": "",
"Maintenance_Tool_del_empty_macs_noti": "",
"Maintenance_Tool_del_empty_macs": "Eliminar dispositivos com endereços MACs vazios",
"Maintenance_Tool_del_empty_macs_noti": "Elimitar dispositivos",
"Maintenance_Tool_del_empty_macs_noti_text": "Tem certeza que deseja apagar todos os dispositivos com endereços MAC vazios?<br>(talvez prefira arquivá-los)",
"Maintenance_Tool_del_empty_macs_text": "Antes de usar esta função, faça um backup. Apagar não pode ser desfeito. Todos os dispositivos sem MAC serão apagados da base de dados.",
"Maintenance_Tool_del_selecteddev": "Apagar dispositivos selecionados",
"Maintenance_Tool_del_selecteddev_text": "Antes de usar esta função, faça um backup. Apagar não pode ser desfeito. Dispositivos selecionados serão apagados da base de dados.",
"Maintenance_Tool_del_unknowndev": "",
"Maintenance_Tool_del_unknowndev_noti": "",
"Maintenance_Tool_del_unknowndev": "Eliminar dispositivos desconhecidos",
"Maintenance_Tool_del_unknowndev_noti": "Eliminar dispositivos desconhecidos",
"Maintenance_Tool_del_unknowndev_noti_text": "Tem certeza que deseja apagar todos (desconhecidos) e (nome não encontrados) dispositivos?",
"Maintenance_Tool_del_unknowndev_text": "Antes de usar esta função, faça um backup. Apagar não pode ser desfeito. Todos os dispositivos nomeados (não conhecidos) serão apagados da base de dados.",
"Maintenance_Tool_displayed_columns_text": "Altere a visibilidade e a ordem das colunas na página <a href=\"devices.php\"><b> <i class=\"fa fa-portátil\"></i> Dispositivos</b></a>.",
"Maintenance_Tool_drag_me": "Arraste-me para reordenar colunas.",
"Maintenance_Tool_order_columns_text": "",
"Maintenance_Tool_order_columns_text": "Maintenance_Tool_order_columns_text",
"Maintenance_Tool_purgebackup": "Limpar cópias de segurança",
"Maintenance_Tool_purgebackup_noti": "Limpar cópias de segurança",
"Maintenance_Tool_purgebackup_noti_text": "Tem certeza que deseja apagar todos os backups exceto os últimos 3?",
@@ -450,13 +450,13 @@
"Maintenance_Tool_upgrade_database_noti_text": "Tem certeza de que deseja atualizar a base de dados?<br>(talvez prefira arquivá-la)",
"Maintenance_Tool_upgrade_database_text": "Este botão atualizará a base de dados para ativar o gráfico Atividade de rede nas últimas 12 horas. Faça uma cópia de segurança da sua base de dados em caso de problemas.",
"Maintenance_Tools_Tab_BackupRestore": "Backup / Restauração",
"Maintenance_Tools_Tab_Logging": "",
"Maintenance_Tools_Tab_Logging": "Logs",
"Maintenance_Tools_Tab_Settings": "Configurações",
"Maintenance_Tools_Tab_Tools": "Ferramentas",
"Maintenance_Tools_Tab_UISettings": "Configurações de interface",
"Maintenance_arp_status": "Estado de digitalização",
"Maintenance_arp_status_off": "está atualmente desativado",
"Maintenance_arp_status_on": "",
"Maintenance_arp_status_on": "Scan em curso",
"Maintenance_built_on": "Construído em",
"Maintenance_current_version": "Você está atualizado. Confira o que <a href=\"https://github.com/jokob-sk/NetAlertX/issues/138\" target=\"_blank\"> estou a trabalhar em</a>.",
"Maintenance_database_backup": "Backups DB",
@@ -467,8 +467,8 @@
"Maintenance_database_rows": "Tabela (linhas)",
"Maintenance_database_size": "Tamanho da base de dados",
"Maintenance_lang_selector_apply": "Aplicar",
"Maintenance_lang_selector_empty": "",
"Maintenance_lang_selector_lable": "",
"Maintenance_lang_selector_empty": "Escolha a lingua",
"Maintenance_lang_selector_lable": "Escolha a lingua",
"Maintenance_lang_selector_text": "A mudança ocorre no lado do cliente, por isso afeta apenas o navegador atual.",
"Maintenance_new_version": "Uma nova versão está disponível. Confira as <a href=\"https://github.com/jokob-sk/NetAlertX/releases\" target=\"_blank\">notas de lançamento</a>.",
"Maintenance_themeselector_apply": "Aplicar",
@@ -476,10 +476,10 @@
"Maintenance_themeselector_lable": "Selecionar Skin",
"Maintenance_themeselector_text": "A mudança ocorre no lado do servidor, por isso afeta todos os dispositivos em uso.",
"Maintenance_version": "Atualizações de apps",
"NETWORK_DEVICE_TYPES_description": "",
"NETWORK_DEVICE_TYPES_description": "Quais os tipos de dispositivos que podem ser usados como dispositivos de rede na vista de Rede. O tipo de dispositivo tem de corresponder exatamente à definição <code>Type</code> um dispositivo específico em Detalhes do dispositivo. Adicione-o ao dispositivo através do botão <code>+</code>. Não remova tipos existentes, apenas adicione novos.",
"NETWORK_DEVICE_TYPES_name": "Tipos de dispositivo de rede",
"Navigation_About": "Sobre a",
"Navigation_AppEvents": "",
"Navigation_AppEvents": "Eventos de aplicações",
"Navigation_Devices": "Dispositivos",
"Navigation_Donations": "Doações",
"Navigation_Events": "Eventos",
@@ -489,38 +489,38 @@
"Navigation_Network": "Rede",
"Navigation_Notifications": "Notificações",
"Navigation_Plugins": "Plugins",
"Navigation_Presence": "",
"Navigation_Report": "",
"Navigation_Settings": "",
"Navigation_SystemInfo": "",
"Navigation_Workflows": "",
"Network_Assign": "",
"Network_Cant_Assign": "",
"Network_Cant_Assign_No_Node_Selected": "",
"Network_Configuration_Error": "",
"Network_Connected": "",
"Network_Devices": "",
"Network_ManageAdd": "",
"Network_ManageAdd_Name": "",
"Network_ManageAdd_Name_text": "",
"Network_ManageAdd_Port": "",
"Network_ManageAdd_Port_text": "",
"Network_ManageAdd_Submit": "",
"Network_ManageAdd_Type": "",
"Network_ManageAdd_Type_text": "",
"Network_ManageAssign": "",
"Network_ManageDel": "",
"Network_ManageDel_Name": "",
"Network_ManageDel_Name_text": "",
"Network_ManageDel_Submit": "",
"Network_ManageDevices": "",
"Network_ManageEdit": "",
"Network_ManageEdit_ID": "",
"Network_ManageEdit_ID_text": "",
"Network_ManageEdit_Name": "",
"Network_ManageEdit_Name_text": "",
"Network_ManageEdit_Port": "",
"Network_ManageEdit_Port_text": "",
"Navigation_Presence": "Presença",
"Navigation_Report": "Reports enviados",
"Navigation_Settings": "Definições",
"Navigation_SystemInfo": "Informação de sistema",
"Navigation_Workflows": "Workflows",
"Network_Assign": "Conectar ao nodo de network <i class=\"fa fa-server\"></i> em cima",
"Network_Cant_Assign": "Não é possível atribuir o node raiz da Internet como um node folha filho.",
"Network_Cant_Assign_No_Node_Selected": "Não é possível atribuir, nenhum node pai selecionado.",
"Network_Configuration_Error": "Erro de configuração",
"Network_Connected": "Dispositivos conectados",
"Network_Devices": "Dispositivos de rede",
"Network_ManageAdd": "Adicionar dispositivo",
"Network_ManageAdd_Name": "Nome do dispositivo",
"Network_ManageAdd_Name_text": "Nome sem caracteres especiais",
"Network_ManageAdd_Port": "Contagem de portas",
"Network_ManageAdd_Port_text": "Deixe em branco para Wi-Fi e Powerline",
"Network_ManageAdd_Submit": "Adicionar dispositivo",
"Network_ManageAdd_Type": "Tipo de dispositivo",
"Network_ManageAdd_Type_text": "-- Selecionar Tipo --",
"Network_ManageAssign": "Asignar",
"Network_ManageDel": "Eliminar dispositivo",
"Network_ManageDel_Name": "Dispositivo a eliminar",
"Network_ManageDel_Name_text": "-- Seleciona dispositivo --",
"Network_ManageDel_Submit": "Eliminar",
"Network_ManageDevices": "Gerir dispositivos",
"Network_ManageEdit": "Actualizar dispositivos",
"Network_ManageEdit_ID": "Dispositivos a actualizar",
"Network_ManageEdit_ID_text": "-- Selecionar dispositivo para edição --",
"Network_ManageEdit_Name": "Novo nome de dispositivo",
"Network_ManageEdit_Name_text": "Nome sem caracteres especiais",
"Network_ManageEdit_Port": " Nova contagem de portas",
"Network_ManageEdit_Port_text": "Deixe em branco para Wi-Fi e Powerline.",
"Network_ManageEdit_Submit": "",
"Network_ManageEdit_Type": "",
"Network_ManageEdit_Type_text": "",

@@ -36,12 +36,7 @@ def main():

    # Check if basic config settings supplied
    if check_config() is False:
        mylog(
            "none",
            [
                f"[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. Check your {confFileName} {pluginName}_* variables."
            ],
        )
        mylog("none", f"[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. Check your {confFileName} {pluginName}_* variables.")
        return

    # Create a database connection

@@ -1,6 +1,6 @@
#!/usr/bin/env python3
import conf
from const import confFileName, logPath
from const import logPath
from pytz import timezone

import os

@@ -36,11 +36,7 @@ def main():

    # Check if basic config settings supplied
    if not validate_config():
        mylog(
            "none",
            f"[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. "
            f"Check your {confFileName} {pluginName}_* variables.",
        )
        mylog("none", f"[{pluginName}] ⚠ ERROR: Publisher not set up correctly. Check your {pluginName}_* variables.",)
        return

    # Create a database connection

@@ -138,10 +138,7 @@ def execute_arpscan(userSubnets):
    mylog("verbose", [f"[{pluginName}] All devices List len:", len(devices_list)])
    mylog("verbose", [f"[{pluginName}] Devices List:", devices_list])

    mylog(
        "verbose",
        [f"[{pluginName}] Found: Devices without duplicates ", len(unique_devices)],
    )
    mylog("verbose", [f"[{pluginName}] Found: Devices without duplicates ", len(unique_devices)],)

    return unique_devices

@@ -174,10 +171,7 @@ def execute_arpscan_on_interface(interface):
    except subprocess.CalledProcessError:
        result = ""
    except subprocess.TimeoutExpired:
        mylog(
            "warning",
            [f"[{pluginName}] arp-scan timed out after {timeout_seconds}s"],
        )
        mylog("warning", [f"[{pluginName}] arp-scan timed out after {timeout_seconds}s"],)
        result = ""
    # stop looping if duration not set or expired
    if scan_duration == 0 or (time.time() - start_time) > scan_duration:

@@ -33,10 +33,7 @@ def main():

    device_data = get_device_data()

    mylog(
        "verbose",
        [f"[{pluginName}] Found '{len(device_data)}' devices"],
    )
    mylog("verbose", f"[{pluginName}] Found '{len(device_data)}' devices")

    filtered_devices = [
        (key, device)

@@ -44,10 +41,7 @@ def main():
        if device.state == ConnectionState.CONNECTED
    ]

    mylog(
        "verbose",
        [f"[{pluginName}] Processing '{len(filtered_devices)}' connected devices"],
    )
    mylog("verbose", f"[{pluginName}] Processing '{len(filtered_devices)}' connected devices")

    for mac, device in filtered_devices:
        entry_mac = str(device.description.mac).lower()

@@ -75,10 +75,7 @@ def cleanup_database(

    # -----------------------------------------------------
    # Cleanup Online History
    mylog(
        "verbose",
        [f"[{pluginName}] Online_History: Delete all but keep latest 150 entries"],
    )
    mylog("verbose", [f"[{pluginName}] Online_History: Delete all but keep latest 150 entries"],)
    cursor.execute(
        """DELETE from Online_History where "Index" not in (
            SELECT "Index" from Online_History

@@ -87,24 +84,14 @@ def cleanup_database(

    # -----------------------------------------------------
    # Cleanup Events
    mylog(
        "verbose",
        [
            f"[{pluginName}] Events: Delete all older than {str(DAYS_TO_KEEP_EVENTS)} days (DAYS_TO_KEEP_EVENTS setting)"
        ],
    )
    mylog("verbose", f"[{pluginName}] Events: Delete all older than {str(DAYS_TO_KEEP_EVENTS)} days (DAYS_TO_KEEP_EVENTS setting)")
    cursor.execute(
        f"""DELETE FROM Events
                WHERE eve_DateTime <= date('now', '-{str(DAYS_TO_KEEP_EVENTS)} day')"""
    )
    # -----------------------------------------------------
    # Trim Plugins_History entries to less than PLUGINS_KEEP_HIST setting per unique "Plugin" column entry
    mylog(
        "verbose",
        [
            f"[{pluginName}] Plugins_History: Trim Plugins_History entries to less than {str(PLUGINS_KEEP_HIST)} per Plugin (PLUGINS_KEEP_HIST setting)"
        ],
    )
    mylog("verbose", f"[{pluginName}] Plugins_History: Trim Plugins_History entries to less than {str(PLUGINS_KEEP_HIST)} per Plugin (PLUGINS_KEEP_HIST setting)")

    # Build the SQL query to delete entries that exceed the limit per unique "Plugin" column entry
    delete_query = f"""DELETE FROM Plugins_History

@@ -125,12 +112,7 @@ def cleanup_database(

    histCount = get_setting_value("DBCLNP_NOTIFI_HIST")

    mylog(
        "verbose",
        [
            f"[{pluginName}] Plugins_History: Trim Notifications entries to less than {histCount}"
        ],
    )
    mylog("verbose", f"[{pluginName}] Plugins_History: Trim Notifications entries to less than {histCount}")

    # Build the SQL query to delete entries
    delete_query = f"""DELETE FROM Notifications

@@ -170,12 +152,7 @@ def cleanup_database(
    # -----------------------------------------------------
    # Cleanup New Devices
    if HRS_TO_KEEP_NEWDEV != 0:
        mylog(
            "verbose",
            [
                f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_NEWDEV)} hours (HRS_TO_KEEP_NEWDEV setting)"
            ],
        )
        mylog("verbose", f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_NEWDEV)} hours (HRS_TO_KEEP_NEWDEV setting)")
        query = f"""DELETE FROM Devices WHERE devIsNew = 1 AND devFirstConnection < date('now', '-{str(HRS_TO_KEEP_NEWDEV)} hour')"""
        mylog("verbose", [f"[{pluginName}] Query: {query} "])
        cursor.execute(query)

@@ -183,12 +160,7 @@ def cleanup_database(
    # -----------------------------------------------------
    # Cleanup Offline Devices
    if HRS_TO_KEEP_OFFDEV != 0:
        mylog(
            "verbose",
            [
                f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_OFFDEV)} hours (HRS_TO_KEEP_OFFDEV setting)"
            ],
        )
        mylog("verbose", f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_OFFDEV)} hours (HRS_TO_KEEP_OFFDEV setting)")
        query = f"""DELETE FROM Devices WHERE devPresentLastScan = 0 AND devLastConnection < date('now', '-{str(HRS_TO_KEEP_OFFDEV)} hour')"""
        mylog("verbose", [f"[{pluginName}] Query: {query} "])
        cursor.execute(query)

@@ -196,12 +168,7 @@ def cleanup_database(
    # -----------------------------------------------------
    # Clear New Flag
    if CLEAR_NEW_FLAG != 0:
        mylog(
            "verbose",
            [
                f'[{pluginName}] Devices: Clear "New Device" flag for all devices older than {str(CLEAR_NEW_FLAG)} hours (CLEAR_NEW_FLAG setting)'
            ],
        )
        mylog("verbose", f'[{pluginName}] Devices: Clear "New Device" flag for all devices older than {str(CLEAR_NEW_FLAG)} hours (CLEAR_NEW_FLAG setting)')
        query = f"""UPDATE Devices SET devIsNew = 0 WHERE devIsNew = 1 AND date(devFirstConnection, '+{str(CLEAR_NEW_FLAG)} hour') < date('now')"""
        # select * from Devices where devIsNew = 1 AND date(devFirstConnection, '+3 hour' ) < date('now')
        mylog("verbose", [f"[{pluginName}] Query: {query} "])

@@ -71,10 +71,7 @@ def get_entries(plugin_objects: Plugin_Objects) -> Plugin_Objects:
        status = lease.get('status')
        device_name = comment or host_name or "(unknown)"

        mylog(
            'verbose',
            [f"ID: {lease_id}, Address: {address}, MAC Address: {mac_address}, Host Name: {host_name}, Comment: {comment}, Last Seen: {last_seen}, Status: {status}"]
        )
        mylog('verbose', f"ID: {lease_id}, Address: {address}, MAC: {mac_address}, Host Name: {host_name}, Comment: {comment}, Last Seen: {last_seen}, Status: {status}")

        if (status == "bound"):
            plugin_objects.add_object(

@@ -24,7 +24,7 @@ apt-get install sudo -y
apt-get install -y git

# Clean the directory
rm -R $INSTALL_DIR/
rm -R ${INSTALL_DIR:?}/

# Clone the application repository
git clone https://github.com/jokob-sk/NetAlertX "$INSTALL_DIR/"
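
Worth noting: the `${INSTALL_DIR:?}` expansion is what makes the cleanup safe. If the variable is unset or empty, the shell aborts the command instead of expanding to `rm -R /`. A minimal sketch of the behavior:

```bash
#!/bin/bash
unset INSTALL_DIR
# The failed expansion aborts the script with the given message;
# rm itself never runs, so there is no chance of wiping "/".
rm -R "${INSTALL_DIR:?INSTALL_DIR is not set}/"
echo "never reached"
```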

@@ -34,6 +34,8 @@ sudo phpenmod -v 8.2 sqlite3
# setup virtual python environment so we can use pip3 to install packages
apt-get install python3-venv -y
python3 -m venv /opt/venv
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source /opt/venv/bin/activate

update-alternatives --install /usr/bin/python python /usr/bin/python3 10

@@ -175,6 +175,8 @@ nginx -t || { echo "[INSTALL] nginx config test failed"; exit 1; }
# sudo systemctl restart nginx

# Activate the virtual python environment
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source /opt/venv/bin/activate

echo "[INSTALL] 🚀 Starting app - navigate to your <server IP>:${PORT}"

5 install/production-filesystem/build/init-cron.sh
Normal file
@@ -0,0 +1,5 @@
#!/bin/bash

echo "Initializing cron..."
# Placeholder for cron initialization commands
echo "cron initialized."
@@ -1,4 +0,0 @@
#!/bin/bash
echo "Initializing crond..."
#Future crond initializations can go here.
echo "crond initialized."
@@ -1,4 +1,4 @@
#!/bin/bash
echo "Initializing nginx..."
install -d -o netalertx -g netalertx -m 700 ${SYSTEM_SERVICES_RUN_TMP}/client_body;
install -d -o netalertx -g netalertx -m 700 "${SYSTEM_SERVICES_RUN_TMP}/client_body";
echo "nginx initialized."

@@ -51,12 +51,13 @@ if [ "$(id -u)" -eq 0 ]; then
EOF
    >&2 printf "%s" "${RESET}"

    # Set ownership to netalertx user for all read-write paths
    chown -R netalertx ${READ_WRITE_PATHS} 2>/dev/null || true

    # Set directory and file permissions for all read-write paths
    find ${READ_WRITE_PATHS} -type d -exec chmod u+rwx {} \;
    find ${READ_WRITE_PATHS} -type f -exec chmod u+rw {} \;
    # Set ownership and permissions for each read-write path individually
    printf '%s\n' "${READ_WRITE_PATHS}" | while IFS= read -r path; do
        [ -n "${path}" ] || continue
        chown -R netalertx "${path}" 2>/dev/null || true
        find "${path}" -type d -exec chmod u+rwx {} \;
        find "${path}" -type f -exec chmod u+rw {} \;
    done
    echo Permissions fixed for read-write paths. Please restart the container as user 20211.
    sleep infinity & wait $!
fi
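
The switch to a `while IFS= read -r` loop handles each read-write path on its own line, so entries containing spaces are no longer split into bogus arguments by unquoted expansion. A standalone sketch of the pattern, using a hypothetical path list:

```bash
#!/bin/bash
# Hypothetical newline-separated list; the entry with a space stays intact.
READ_WRITE_PATHS=$'/data/config\n/data/db\n/tmp/my logs'
printf '%s\n' "${READ_WRITE_PATHS}" | while IFS= read -r path; do
    [ -n "${path}" ] || continue
    echo "would fix ownership of: ${path}"
done
```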

@@ -16,11 +16,11 @@ LEGACY_DB=/app/db
MARKER_NAME=.migration

is_mounted() {
    local path="$1"
    if [ ! -d "${path}" ]; then
    my_path="$1"
    if [ ! -d "${my_path}" ]; then
        return 1
    fi
    mountpoint -q "${path}" 2>/dev/null
    mountpoint -q "${my_path}" 2>/dev/null
}

warn_unmount_legacy() {

@@ -2,7 +2,7 @@
# first-run-check.sh - Checks and initializes configuration files on first run

# Check for app.conf and deploy if required
if [ ! -f ${NETALERTX_CONFIG}/app.conf ]; then
if [ ! -f "${NETALERTX_CONFIG}/app.conf" ]; then
    mkdir -p "${NETALERTX_CONFIG}" || {
        >&2 echo "ERROR: Failed to create config directory ${NETALERTX_CONFIG}"
        exit 1

@@ -441,7 +441,9 @@ CREATE TRIGGER "trg_delete_devices"
END;
end-of-database-schema

if [ $? -ne 0 ]; then
database_creation_status=$?

if [ $database_creation_status -ne 0 ]; then
    RED=$(printf '\033[1;31m')
    RESET=$(printf '\033[0m')
>&2 printf "%s" "${RED}"
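
Capturing `$?` into a variable immediately is the point of this change: any command that runs in between, even a logging `echo`, overwrites the status. A minimal illustration:

```bash
#!/bin/bash
false                  # sets $? to 1
echo "some logging"    # ...and this resets $? to 0
[ $? -ne 0 ] && echo "never printed: the failure was masked"

false
status=$?              # capture immediately, before anything else runs
echo "some logging"
[ "${status}" -ne 0 ] && echo "failure correctly detected"
```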

@@ -50,7 +50,7 @@ fi
RED='\033[1;31m'
GREY='\033[90m'
RESET='\033[0m'
printf "${RED}"
printf "%s" "${RED}"
echo '
_ _ _ ___ _ _ __ __
| \ | | | | / _ \| | | | \ \ / /
@@ -60,7 +60,7 @@ echo '
\_| \_/\___|\__\_| |_/_|\___|_| \__\/ \/
'

printf "\033[0m"
printf "%s" "${RESET}"
echo ' Network intruder and presence detector.
https://netalertx.com
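
The `printf "%s" "${VAR}"` form used above is the standard fix for passing data through `printf`: a variable used directly as the format string would have any `%` sequences in it interpreted as directives. A quick demonstration:

```bash
#!/bin/bash
msg='100% done'
printf "${msg}\n"       # risky: the "%" in msg is parsed as a format directive
printf '%s\n' "${msg}"  # safe: msg is printed verbatim as data
```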

@@ -69,7 +69,7 @@ set -u

FAILED_STATUS=""
echo "Startup pre-checks"
for script in ${ENTRYPOINT_CHECKS}/*; do
for script in "${ENTRYPOINT_CHECKS}"/*; do
    if [ -n "${SKIP_TESTS:-}" ]; then
        echo "Skipping startup checks as SKIP_TESTS is set."
        break

@@ -77,7 +77,7 @@ for script in ${ENTRYPOINT_CHECKS}/*; do
    script_name=$(basename "$script" | sed 's/^[0-9]*-//;s/\.(sh|py)$//;s/-/ /g')
    echo "--> ${script_name} "
    if [ -n "${SKIP_STARTUP_CHECKS:-}" ] && echo "${SKIP_STARTUP_CHECKS}" | grep -q "\b${script_name}\b"; then
        printf "${GREY}skip${RESET}\n"
        printf "%sskip%s\n" "${GREY}" "${RESET}"
        continue
    fi

@@ -134,7 +134,7 @@ fi

# Update vendor data (MAC address OUI database) in the background
# This happens concurrently with service startup to avoid blocking container readiness
bash ${SYSTEM_SERVICES_SCRIPTS}/update_vendors.sh &
bash "${SYSTEM_SERVICES_SCRIPTS}/update_vendors.sh" &

@@ -274,7 +274,7 @@ trap on_signal INT TERM
# Only start crond scheduler on Alpine (non-Debian) environments
# Debian typically uses systemd or other schedulers
if [ "${ENVIRONMENT:-}" ] && [ "${ENVIRONMENT:-}" != "debian" ]; then
    add_service "/services/start-crond.sh" "crond"
    add_service "/services/start-cron.sh" "supercronic"
fi

# Start core frontend and backend services

@@ -290,8 +290,6 @@ add_service "${SYSTEM_SERVICES}/start-backend.sh" "python3"
# Useful for devcontainer debugging where individual services need to be debugged
if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
    echo "NETALERTX_DEBUG is set to 1, will not shut down other services if one fails."
    wait
    exit $?
fi

################################################################################

@@ -316,10 +314,25 @@ while [ -n "${SERVICES}" ]; do
    if ! is_pid_active "${pid}"; then
        wait "${pid}" 2>/dev/null
        status=$?

        # Handle intentional backend restart
        if [ "${name}" = "python3" ] && [ -f "/tmp/backend_restart_pending" ]; then
            echo "🔄 Backend restart requested via marker file."
            rm -f "/tmp/backend_restart_pending"
            remove_service "${pid}"
            add_service "${SYSTEM_SERVICES}/start-backend.sh" "python3"
            continue
        fi

        FAILED_STATUS=$status
        FAILED_NAME="${name}"
        remove_service "${pid}"
        handle_exit

        if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
            echo "⚠️ Service ${name} exited with status ${status}. Debug mode active - continuing."
        else
            handle_exit
        fi
    fi
done
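
Together with the `touch /tmp/backend_restart_pending` added to the cron-side restart script further down, this forms a small marker-file protocol: the supervisor can tell an intentional backend restart apart from a crash. A minimal standalone sketch of the idea (hypothetical marker path and stand-in process):

```bash
#!/bin/bash
MARKER=/tmp/backend_restart_pending

sleep 300 & backend_pid=$!        # stand-in for the real backend process

# Elsewhere (e.g. a cron job) requests a restart:
touch "${MARKER}"; kill "${backend_pid}"

wait "${backend_pid}" 2>/dev/null
if [ -f "${MARKER}" ]; then
    rm -f "${MARKER}"
    echo "intentional restart: relaunch the backend"
else
    echo "unexpected exit: treat as a crash"
fi
```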
0 install/production-filesystem/services/config/crond/netalertx → install/production-filesystem/services/config/cron/crontab
Executable file → Normal file
@@ -21,10 +21,10 @@ log_success() {
}

# 1. Check if crond is running
if pgrep -f "crond" > /dev/null; then
    log_success "crond is running"
if pgrep -f "supercronic" > /dev/null; then
    log_success "supercronic is running"
else
    log_error "crond is not running"
    log_error "supercronic is not running"
fi

# 2. Check if php-fpm is running

@@ -5,12 +5,15 @@ export INSTALL_DIR=/app

# Check if there are any entries with cron_restart_backend
if grep -q "cron_restart_backend" "${LOG_EXECUTION_QUEUE}"; then
    killall python3
    sleep 2
    /services/start-backend.sh &
    echo "$(date): Restarting backend triggered by cron_restart_backend"

    # Create marker for entrypoint.sh to restart the service instead of killing the container
    touch /tmp/backend_restart_pending

    killall python3 || echo "killall python3 failed or no process found"

    # Remove all lines containing cron_restart_backend from the log file
    # Atomic replacement with temp file
    grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp" && \
        mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
    grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp"
    mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
fi

@@ -3,7 +3,7 @@
cd "${NETALERTX_APP}" || exit 1
max_attempts=50  # 10 seconds total (50 * 0.2s)
attempt=0
while ps ax | grep -v grep | grep -q python3 && [ $attempt -lt $max_attempts ]; do
while pgrep -x python3 >/dev/null && [ $attempt -lt $max_attempts ]; do
    killall -TERM python3 &>/dev/null
    sleep 0.2
((attempt++))
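
`pgrep -x` is the sturdier replacement for the `ps | grep -v grep | grep` pipeline: `-x` matches the process name exactly, so the check neither matches itself nor unrelated names that merely contain `python3`. For example:

```bash
#!/bin/bash
if pgrep -x python3 >/dev/null; then
    echo "a python3 process is still running"
else
    echo "no python3 process found"
fi
```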
@@ -12,4 +12,5 @@ done
killall -KILL python3 &>/dev/null

echo "Starting python3 $(cat /services/config/python/backend-extra-launch-parameters 2>/dev/null) -m server > ${NETALERTX_LOG}/stdout.log 2> >(tee ${NETALERTX_LOG}/stderr.log >&2)"
exec python3 $(cat /services/config/python/backend-extra-launch-parameters 2>/dev/null) -m server > ${NETALERTX_LOG}/stdout.log 2> >(tee ${NETALERTX_LOG}/stderr.log >&2)
read -ra EXTRA_PARAMS < <(cat /services/config/python/backend-extra-launch-parameters 2>/dev/null)
exec python3 "${EXTRA_PARAMS[@]}" -m server > "${NETALERTX_LOG}/stdout.log" 2> >(tee "${NETALERTX_LOG}/stderr.log" >&2)
42 install/production-filesystem/services/start-cron.sh
Executable file
@@ -0,0 +1,42 @@
#!/bin/bash
set -euo pipefail


crond_pid=""

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
cleanup() {
    status=$?
    echo "Supercronic stopped! (exit ${status})"
}

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
forward_signal() {
    if [[ -n "${crond_pid}" ]]; then
        kill -TERM "${crond_pid}" 2>/dev/null || true
    fi
}

while pgrep -x crond >/dev/null 2>&1; do
    killall crond &>/dev/null
    sleep 0.2
done

trap cleanup EXIT
trap forward_signal INT TERM

CRON_OPTS="--quiet"
if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
    CRON_OPTS="--debug"
fi

echo "Starting supercronic ${CRON_OPTS} \"${SYSTEM_SERVICES_CONFIG_CRON}/crontab\" >>\"${LOG_CRON}\" 2>&1 &"

supercronic ${CRON_OPTS} "${SYSTEM_SERVICES_CONFIG_CRON}/crontab" >>"${LOG_CRON}" 2>&1 &
crond_pid=$!

wait "${crond_pid}"; status=$?
echo -ne " done"
exit ${status}
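
supercronic reads a standard crontab file and stays in the foreground, which is what makes it fit this supervised-service model. A hypothetical entry (not the project's actual schedule) would look like:

```bash
# Hypothetical crontab line for supercronic; standard five-field syntax.
*/5 * * * * echo "heartbeat $(date)" >> /tmp/log/cron.log
```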
@@ -1,33 +0,0 @@
#!/bin/bash
set -euo pipefail


crond_pid=""

cleanup() {
    status=$?
    echo "Crond stopped! (exit ${status})"
}

forward_signal() {
    if [[ -n "${crond_pid}" ]]; then
        kill -TERM "${crond_pid}" 2>/dev/null || true
    fi
}

while ps ax | grep -v -e grep -e '.sh' | grep crond >/dev/null 2>&1; do
    killall crond &>/dev/null
    sleep 0.2
done

trap cleanup EXIT
trap forward_signal INT TERM

echo "Starting /usr/sbin/crond -c \"${SYSTEM_SERVICES_CROND}\" -f -L \"${LOG_CROND}\" >>\"${LOG_CROND}\" 2>&1 &"

/usr/sbin/crond -c "${SYSTEM_SERVICES_CROND}" -f -L "${LOG_CROND}" >>"${LOG_CROND}" 2>&1 &
crond_pid=$!

wait "${crond_pid}"; status=$?
echo -ne " done"
exit ${status}
@@ -11,11 +11,15 @@ mkdir -p "${LOG_DIR}" "${RUN_DIR}" "${TMP_DIR}"

nginx_pid=""

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
cleanup() {
    status=$?
    echo "nginx stopped! (exit ${status})"
}

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
forward_signal() {
    if [[ -n "${nginx_pid}" ]]; then
        kill -TERM "${nginx_pid}" 2>/dev/null || true
@@ -24,12 +28,15 @@ forward_signal() {


# When in devcontainer we must kill any existing nginx processes
while ps ax | grep -v -e "grep" -e "nginx.sh" | grep nginx >/dev/null 2>&1; do
while pgrep -x nginx >/dev/null 2>&1; do
    killall nginx &>/dev/null || true
    sleep 0.2
done

TEMP_CONFIG_FILE=$(mktemp "${TMP_DIR}/netalertx.conf.XXXXXX")

# Shell check doesn't recognize envsubst variables
# shellcheck disable=SC2016
if envsubst '${LISTEN_ADDR} ${PORT}' < "${SYSTEM_NGINX_CONFIG_TEMPLATE}" > "${TEMP_CONFIG_FILE}" 2>/dev/null; then
    mv "${TEMP_CONFIG_FILE}" "${SYSTEM_SERVICES_ACTIVE_CONFIG_FILE}"
else

@@ -3,18 +3,22 @@ set -euo pipefail

php_fpm_pid=""

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
cleanup() {
    status=$?
    echo "php-fpm stopped! (exit ${status})"
}

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
forward_signal() {
    if [[ -n "${php_fpm_pid}" ]]; then
        kill -TERM "${php_fpm_pid}" 2>/dev/null || true
    fi
}

while ps ax | grep -v grep | grep php-fpm83 >/dev/null; do
while pgrep -x php-fpm83 >/dev/null; do
    killall php-fpm83 &>/dev/null
    sleep 0.2
done
@@ -27,5 +31,6 @@ echo "Starting /usr/sbin/php-fpm83 -y \"${PHP_FPM_CONFIG_FILE}\" -F >>\"${LOG_AP
php_fpm_pid=$!

wait "${php_fpm_pid}"
exit_status=$?
echo -ne " done"
exit $?
exit $exit_status

@@ -127,6 +127,8 @@ apt-get install -y --no-install-recommends \
    ca-certificates lsb-release curl gnupg

# Detect OS
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
. /etc/os-release
OS_ID="${ID:-}"
OS_VER="${VERSION_ID:-}"
@@ -203,6 +205,8 @@ printf "%b\n" "-----------------------------------------------------------------
|
||||
printf "%b\n" "${GREEN}[INSTALLING] ${RESET}Setting up Python environment"
|
||||
printf "%b\n" "--------------------------------------------------------------------------"
|
||||
python3 -m venv /opt/myenv
|
||||
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
|
||||
# shellcheck disable=SC1091
|
||||
source /opt/myenv/bin/activate
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install -r "${INSTALLER_DIR}/requirements.txt"
|
||||
|
||||
@@ -22,7 +22,6 @@ NGINX_CONF_FILE=netalertx.conf
WEB_UI_DIR=/var/www/html/netalertx
NGINX_CONFIG_FILE=/etc/nginx/conf.d/$NGINX_CONF_FILE
OUI_FILE="/usr/share/arp-scan/ieee-oui.txt" # Define the path to ieee-oui.txt and ieee-iab.txt
SCRIPT_DIR="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
FILEDB=${INSTALL_DIR}/db/${DB_FILE}
PHPVERSION="8.3"
VENV_DIR="/opt/netalertx-python"
@@ -106,7 +105,7 @@ if [ -d "${INSTALL_DIR}" ]; then
    if [ "$1" == "install" ] || [ "$1" == "update" ] || [ "$1" == "start" ]; then
        confirmation=$1
    else
        read -p "Enter your choice: " confirmation
        read -rp "Enter your choice: " confirmation
    fi
    if [ "$confirmation" == "install" ]; then
        # Ensure INSTALL_DIR is safe to wipe
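The added -r flag stops read from treating backslashes in the typed input as escape characters. Illustration:

    printf 'C:\\temp\\new\n' | { read -p  'Enter your choice: ' c; echo "$c"; }   # -> C:tempnew
    printf 'C:\\temp\\new\n' | { read -rp 'Enter your choice: ' c; echo "$c"; }   # -> C:\temp\new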
@@ -118,7 +117,7 @@ if [ -d "${INSTALL_DIR}" ]; then
        mountpoint -q "${INSTALL_DIR}/front" && umount "${INSTALL_DIR}/front" 2>/dev/null

        # Remove all contents safely
        rm -rf -- "${INSTALL_DIR}"/* "${INSTALL_DIR}"/.[!.]* "${INSTALL_DIR}"/..?* 2>/dev/null
        rm -rf -- "${INSTALL_DIR:?}"/* "${INSTALL_DIR}"/.[!.]* "${INSTALL_DIR}"/..?* 2>/dev/null

        # Re-clone repository
        git clone "${GITHUB_REPO}" "${INSTALL_DIR}/"
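The :? expansion added to the first glob is a safety interlock: if INSTALL_DIR is ever unset or empty, the shell aborts with an error instead of expanding the command to rm -rf -- /*. Behaviour sketch, with a hypothetical directory value:

    unset INSTALL_DIR
    rm -rf -- "${INSTALL_DIR:?}"/*   # bash: INSTALL_DIR: parameter null or not set -- rm never runs
    INSTALL_DIR=/tmp/demo
    rm -rf -- "${INSTALL_DIR:?}"/*   # expands to /tmp/demo/* as normal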
@@ -152,6 +151,8 @@ echo "---------------------------------------------------------"
echo
# update-alternatives --install /usr/bin/python python /usr/bin/python3 10
python3 -m venv "${VENV_DIR}"
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source "${VENV_DIR}/bin/activate"

if [[ ! -f "${REQUIREMENTS_FILE}" ]]; then
15
mkdocs.yml
@@ -63,14 +63,15 @@ nav:
      - Icons: ICONS.md
      - Network Topology: NETWORK_TREE.md
  - Troubleshooting:
      - General Tips: DEBUG_TIPS.md
      - Common Issues: COMMON_ISSUES.md
      - Inspecting Logs: LOGGING.md
      - Debugging Tips: DEBUG_TIPS.md
      - Debugging GraphQL: DEBUG_GRAPHQL.md
      - Debugging Invalid JSON: DEBUG_INVALID_JSON.md
      - Debugging PHP: DEBUG_PHP.md
      - Debugging Plugins: DEBUG_PLUGINS.md
      - Debugging Web UI Port: WEB_UI_PORT_DEBUG.md
      - Debugging Workflows: WORKFLOWS_DEBUGGING.md
      - API Server Issues: DEBUG_API_SERVER.md
      - Invalid JSON Issues: DEBUG_INVALID_JSON.md
      - PHP Issues: DEBUG_PHP.md
      - Plugin Issues: DEBUG_PLUGINS.md
      - Web UI Port Issues: WEB_UI_PORT_DEBUG.md
      - Workflows Issues: WORKFLOWS_DEBUGGING.md
  - Development:
      - Plugin and app development:
          - Environment Setup: DEV_ENV_SETUP.md
@@ -4,10 +4,10 @@
NETALERTX_DB_FILE=${NETALERTX_DB:-/data/db}/app.db

#remove the old database
rm ${NETALERTX_DB_FILE}
rm "${NETALERTX_DB_FILE}"

# Write schema to text to app.db file until we see "end-of-database-schema"
cat << end-of-database-schema > ${NETALERTX_DB_FILE}.sql
cat << end-of-database-schema > "${NETALERTX_DB_FILE}.sql"
CREATE TABLE sqlite_stat1(tbl,idx,stat);
CREATE TABLE Events (eve_MAC STRING (50) NOT NULL COLLATE NOCASE, eve_IP STRING (50) NOT NULL COLLATE NOCASE, eve_DateTime DATETIME NOT NULL, eve_EventType STRING (30) NOT NULL COLLATE NOCASE, eve_AdditionalInfo STRING (250) DEFAULT (''), eve_PendingAlertEmail BOOLEAN NOT NULL CHECK (eve_PendingAlertEmail IN (0, 1)) DEFAULT (1), eve_PairEventRowid INTEGER);
CREATE TABLE Sessions (ses_MAC STRING (50) COLLATE NOCASE, ses_IP STRING (50) COLLATE NOCASE, ses_EventTypeConnection STRING (30) COLLATE NOCASE, ses_DateTimeConnection DATETIME, ses_EventTypeDisconnection STRING (30) COLLATE NOCASE, ses_DateTimeDisconnection DATETIME, ses_StillConnected BOOLEAN, ses_AdditionalInfo STRING (250));
@@ -421,4 +421,4 @@ CREATE TRIGGER "trg_delete_devices"
end-of-database-schema

# Import the database schema into the new database file
sqlite3 ${NETALERTX_DB_FILE} < ${NETALERTX_DB_FILE}.sql
sqlite3 "${NETALERTX_DB_FILE}" < "${NETALERTX_DB_FILE}.sql"
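All three changes here are quoting fixes: an unquoted ${NETALERTX_DB_FILE} undergoes word splitting and globbing, so a path containing a space silently becomes two arguments. Sketch with a hypothetical path:

    NETALERTX_DB_FILE='/data/my db/app.db'
    rm ${NETALERTX_DB_FILE}       # runs: rm '/data/my' 'db/app.db'   (two wrong arguments)
    rm "${NETALERTX_DB_FILE}"     # runs: rm '/data/my db/app.db'     (one correct argument)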
@@ -16,4 +16,4 @@ for p in $PORTS; do
done

# Show any other NetAlertX-related listeners (nginx, php-fpm, python backend)
ss -ltnp 2>/dev/null | egrep 'nginx|php-fpm|python' || true
ss -ltnp 2>/dev/null | grep -e 'nginx\|php-fpm\|python' || true
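egrep has been deprecated by GNU grep for years and now emits an obsolescence warning, hence the switch. Note that the escaped \| alternation is a GNU basic-regex extension; grep -E with unescaped | is the portable equivalent. Both forms match the same listeners:

    ss -ltnp 2>/dev/null | grep -e 'nginx\|php-fpm\|python' || true   # as patched (GNU BRE)
    ss -ltnp 2>/dev/null | grep -E 'nginx|php-fpm|python'   || true   # equivalent ERE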
@@ -63,9 +63,7 @@ main structure of NetAlertX


def main():
    mylog(
        "none", ["[MAIN] Setting up ..."]
    ) # has to be level 'none' as user config not loaded yet
    mylog("none", ["[MAIN] Setting up ..."]) # has to be level 'none' as user config not loaded yet

    mylog("none", [f"[conf.tz] Setting up ...{conf.tz}"])

@@ -221,22 +219,14 @@ def main():
        # Fetch new unprocessed events
        new_events = workflow_manager.get_new_app_events()

        mylog(
            "debug",
            [
                f"[MAIN] Processing WORKFLOW new_events from get_new_app_events: {len(new_events)}"
            ],
        )
        mylog("debug", [f"[MAIN] Processing WORKFLOW new_events from get_new_app_events: {len(new_events)}"],)

        # Process each new event and check triggers
        if len(new_events) > 0:
            updateState("Workflows: Start")
            update_api_flag = False
            for event in new_events:
                mylog(
                    "debug",
                    [f"[MAIN] Processing WORKFLOW app event with GUID {event['GUID']}"],
                )
                mylog("debug", [f"[MAIN] Processing WORKFLOW app event with GUID {event['GUID']}"],)

                # proceed to process events
                workflow_manager.process_event(event)
@@ -253,12 +243,7 @@ def main():
        # check if devices list needs updating
        userUpdatedDevices = UserEventsQueueInstance().has_update_devices()

        mylog(
            "debug",
            [
                f"[Plugins] Should I update API (userUpdatedDevices): {userUpdatedDevices}"
            ],
        )
        mylog("debug", [f"[Plugins] Should I update API (userUpdatedDevices): {userUpdatedDevices}"],)

        if userUpdatedDevices:
            update_api(db, all_plugins, True, ["devices"], userUpdatedDevices)
@@ -96,16 +96,9 @@ def update_api(
            ) # Ensure port is an integer
            start_server(graphql_port_value, app_state) # Start the server
        except ValueError:
            mylog(
                "none",
                [
                    f"[API] Invalid GRAPHQL_PORT value, must be an integer: {graphql_port_value}"
                ],
            )
            mylog("none", [f"[API] Invalid GRAPHQL_PORT value, must be an integer: {graphql_port_value}"],)
    else:
        mylog(
            "none", ["[API] GRAPHQL_PORT or API_TOKEN is not set, will try later."]
        )
        mylog("none", ["[API] GRAPHQL_PORT or API_TOKEN is not set, will try later."])


# -------------------------------------------------------------------------------
@@ -135,12 +128,7 @@ class api_endpoint_class:
            # Match SQL and API endpoint path
            if endpoint.query == self.query and endpoint.path == self.path:
                found = True
                mylog(
                    "trace",
                    [
                        f"[API] api_endpoint_class: Hashes (file|old|new): ({self.fileName}|{endpoint.hash}|{self.hash})"
                    ],
                )
                mylog("trace", [f"[API] api_endpoint_class: Hashes (file|old|new): ({self.fileName}|{endpoint.hash}|{self.hash})"],)
                if endpoint.hash != self.hash:
                    self.needsUpdate = True
                    # Only update changeDetectedWhen if it hasn't been set recently
@@ -190,10 +178,7 @@ class api_endpoint_class:
                )
            )
        ):
            mylog(
                "debug",
                [f"[API] api_endpoint_class: Writing {self.fileName} after debounce."],
            )
            mylog("debug", [f"[API] api_endpoint_class: Writing {self.fileName} after debounce."],)

            write_file(self.path, json.dumps(self.jsonData))

@@ -173,13 +173,8 @@ class Query(ObjectType):
        network_dev_types = get_setting_value("NETWORK_DEVICE_TYPES")

        mylog("trace", f"[graphql_schema] allowed_statuses: {allowed_statuses}")
        mylog(
            "trace",
            f"[graphql_schema] hidden_relationships: {hidden_relationships}",
        )
        mylog(
            "trace", f"[graphql_schema] network_dev_types: {network_dev_types}"
        )
        mylog("trace", f"[graphql_schema] hidden_relationships: {hidden_relationships}",)
        mylog("trace", f"[graphql_schema] network_dev_types: {network_dev_types}")

        # Filtering based on the "status"
        if status == "my_devices":
@@ -71,9 +71,7 @@ class app_state_class:
                with open(stateFile, "r") as json_file:
                    previousState = json.load(json_file)
            except json.decoder.JSONDecodeError as e:
                mylog(
                    "none", [f"[app_state_class] Failed to handle app_state.json: {e}"]
                )
                mylog("none", [f"[app_state_class] Failed to handle app_state.json: {e}"])

        # Check if the file exists and recover previous values
        if previousState != "":
@@ -151,10 +149,7 @@ class app_state_class:
            with open(stateFile, "w") as json_file:
                json_file.write(json_data)
        except (TypeError, ValueError) as e:
            mylog(
                "none",
                [f"[app_state_class] Failed to serialize object to JSON: {e}"],
            )
            mylog("none", [f"[app_state_class] Failed to serialize object to JSON: {e}"],)

        return

@@ -233,15 +233,7 @@ class DB:
            rows = self.sql.fetchall()
            return rows
        except AssertionError:
            mylog(
                "minimal",
                [
                    "[Database] - ERROR: inconsistent query and/or arguments.",
                    query,
                    " params: ",
                    args,
                ],
            )
            mylog("minimal", ["[Database] - ERROR: inconsistent query and/or arguments.", query, " params: ", args,],)
        except sqlite3.Error as e:
            mylog("minimal", ["[Database] - SQL ERROR: ", e])
        return None
@@ -258,15 +250,7 @@ class DB:
        if len(rows) == 1:
            return rows[0]
        if len(rows) > 1:
            mylog(
                "verbose",
                [
                    "[Database] - Warning!: query returns multiple rows, only first row is passed on!",
                    query,
                    " params: ",
                    args,
                ],
            )
            mylog("verbose", ["[Database] - Warning!: query returns multiple rows, only first row is passed on!", query, " params: ", args,],)
            return rows[0]
        # empty result set
        return None
@@ -88,10 +88,7 @@ def ensure_column(sql, table: str, column_name: str, column_type: str) -> bool:
        mylog("none", [msg])

    # Add missing column
    mylog(
        "verbose",
        [f"[db_upgrade] Adding '{column_name}' ({column_type}) to {table} table"],
    )
    mylog("verbose", [f"[db_upgrade] Adding '{column_name}' ({column_type}) to {table} table"],)
    sql.execute(f'ALTER TABLE "{table}" ADD "{column_name}" {column_type}')
    return True

@@ -586,16 +586,11 @@ class SafeConditionBuilder:

                # Validate each component
                if not self._validate_column_name(column):
                    mylog(
                        "verbose", [f"[SafeConditionBuilder] Invalid column: {column}"]
                    )
                    mylog("verbose", [f"[SafeConditionBuilder] Invalid column: {column}"])
                    return "", {}

                if not self._validate_operator(operator):
                    mylog(
                        "verbose",
                        [f"[SafeConditionBuilder] Invalid operator: {operator}"],
                    )
                    mylog("verbose", [f"[SafeConditionBuilder] Invalid operator: {operator}"])
                    return "", {}

                # Create parameter binding
@@ -607,10 +602,7 @@ class SafeConditionBuilder:
                condition_parts.append(condition_part)

            except Exception as e:
                mylog(
                    "verbose",
                    [f"[SafeConditionBuilder] Error processing condition: {e}"],
                )
                mylog("verbose", [f"[SafeConditionBuilder] Error processing condition: {e}"],)
                return "", {}

        if not condition_parts:
@@ -644,10 +636,7 @@ class SafeConditionBuilder:
            if event_type in self.ALLOWED_EVENT_TYPES:
                valid_types.append(event_type)
            else:
                mylog(
                    "verbose",
                    f"[SafeConditionBuilder] Invalid event type filtered out: {event_type}",
                )
                mylog("verbose", f"[SafeConditionBuilder] Invalid event type filtered out: {event_type}",)

        if not valid_types:
            return "", {}
@@ -682,10 +671,7 @@ class SafeConditionBuilder:
            return self.build_safe_condition(condition_setting)
        except ValueError as e:
            # Log the error and return empty condition for safety
            mylog(
                "verbose",
                f"[SafeConditionBuilder] Unsafe condition rejected: {condition_setting}, Error: {e}",
            )
            mylog("verbose", f"[SafeConditionBuilder] Unsafe condition rejected: {condition_setting}, Error: {e}",)
            return "", {}

@@ -36,12 +36,7 @@ def checkPermissionsOK():
    dbW_access = os.access(fullDbPath, os.W_OK)

    mylog("none", ["\n"])
    mylog(
        "none",
        [
            "The backend restarted (started). If this is unexpected check https://bit.ly/NetAlertX_debug for troubleshooting tips."
        ],
    )
    mylog("none", "The backend restarted (started). If this is unexpected check https://bit.ly/NetAlertX_debug for troubleshooting tips.")
    mylog("none", ["\n"])
    mylog("none", ["Permissions check (All should be True)"])
    mylog("none", ["------------------------------------------------"])
@@ -59,12 +54,7 @@ def checkPermissionsOK():
def initialiseFile(pathToCheck, defaultFile):
    # if file not readable (missing?) try to copy over the backed-up (default) one
    if str(os.access(pathToCheck, os.R_OK)) == "False":
        mylog(
            "none",
            [
                "[Setup] (" + pathToCheck + ") file is not readable or missing. Trying to copy over the default one."
            ],
        )
        mylog("none", ["[Setup] (" + pathToCheck + ") file is not readable or missing. Trying to copy over the default one."],)
        try:
            # try runnning a subprocess
            p = subprocess.Popen(
@@ -75,31 +65,16 @@ def initialiseFile(pathToCheck, defaultFile):
            stdout, stderr = p.communicate()

            if str(os.access(pathToCheck, os.R_OK)) == "False":
                mylog(
                    "none",
                    [
                        "[Setup] ⚠ ERROR copying (" + defaultFile + ") to (" + pathToCheck + "). Make sure the app has Read & Write access to the parent directory."
                    ],
                )
                mylog("none", "[Setup] ⚠ ERROR copying (" + defaultFile + ") to (" + pathToCheck + "). Ensure Read & Write access to the parent directory.")
            else:
                mylog(
                    "none",
                    [
                        "[Setup] (" + defaultFile + ") copied over successfully to (" + pathToCheck + ")."
                    ],
                )
                mylog("none", ["[Setup] (" + defaultFile + ") copied over successfully to (" + pathToCheck + ")."],)

            # write stdout and stderr into .log files for debugging if needed
            logResult(stdout, stderr) # TO-DO should be changed to mylog

        except subprocess.CalledProcessError as e:
            # An error occured, handle it
            mylog(
                "none",
                [
                    "[Setup] ⚠ ERROR copying (" + defaultFile + "). Make sure the app has Read & Write access to " + pathToCheck
                ],
            )
            mylog("none", ["[Setup] ⚠ ERROR copying (" + defaultFile + "). Make sure the app has Read & Write access to " + pathToCheck],)
            mylog("none", [e.output])

@@ -187,14 +162,7 @@ def get_setting(key):
        mylog("none", [f"[Settings] ⚠ File not found: {settingsFile}"])
        return None

    mylog(
        "trace",
        [
            "[Import table_settings.json] checking table_settings.json file",
            f"SETTINGS_LASTCACHEDATE: {SETTINGS_LASTCACHEDATE}",
            f"fileModifiedTime: {fileModifiedTime}",
        ],
    )
    mylog("trace", f"[Import table_settings.json] checking table_settings.json file SETTINGS_LASTCACHEDATE: {SETTINGS_LASTCACHEDATE} fileModifiedTime: {fileModifiedTime}")

    # Use cache if file hasn't changed
    if fileModifiedTime == SETTINGS_LASTCACHEDATE and SETTINGS_CACHE:
@@ -221,10 +189,7 @@ def get_setting(key):
    SETTINGS_LASTCACHEDATE = fileModifiedTime

    if key not in SETTINGS_CACHE:
        mylog(
            "none",
            [f"[Settings] ⚠ ERROR - setting_missing - {key} not in {settingsFile}"],
        )
        mylog("none", [f"[Settings] ⚠ ERROR - setting_missing - {key} not in {settingsFile}"],)
        return None

    return SETTINGS_CACHE[key]
@@ -357,10 +322,7 @@ def setting_value_to_python_type(set_type, set_value):
            value = json.loads(set_value.replace("'", "\""))

        except json.JSONDecodeError as e:
            mylog(
                "none",
                [f"[setting_value_to_python_type] Error decoding JSON object: {e}"],
            )
            mylog("none", [f"[setting_value_to_python_type] Error decoding JSON object: {e}"],)
            mylog("none", [set_value])
            value = []

@@ -375,10 +337,7 @@ def setting_value_to_python_type(set_type, set_value):
        try:
            value = reverseTransformers(json.loads(set_value), transformers)
        except json.JSONDecodeError as e:
            mylog(
                "none",
                [f"[setting_value_to_python_type] Error decoding JSON object: {e}"],
            )
            mylog("none", [f"[setting_value_to_python_type] Error decoding JSON object: {e}"],)
            mylog("none", [{set_value}])
            value = {}
@@ -766,9 +725,7 @@ def checkNewVersion():
    try:
        data = json.loads(text)
    except json.JSONDecodeError:
        mylog(
            "minimal", ["[Version check] ⚠ ERROR: Invalid JSON response from GitHub."]
        )
        mylog("minimal", ["[Version check] ⚠ ERROR: Invalid JSON response from GitHub."])
        return False

    # make sure we received a valid response and not an API rate limit exceeded message
@@ -784,10 +741,7 @@ def checkNewVersion():
        else:
            mylog("none", ["[Version check] Running the latest version."])
    else:
        mylog(
            "minimal",
            ["[Version check] ⚠ ERROR: Received unexpected response from GitHub."],
        )
        mylog("minimal", ["[Version check] ⚠ ERROR: Received unexpected response from GitHub."],)

    return False

@@ -180,10 +180,7 @@ def importConfigs(pm, db, all_plugins):
    fileModifiedTime = os.path.getmtime(config_file)

    mylog("debug", ["[Import Config] checking config file "])
    mylog(
        "debug",
        ["[Import Config] lastImportedConfFile :", conf.lastImportedConfFile],
    )
    mylog("debug", ["[Import Config] lastImportedConfFile :", conf.lastImportedConfFile],)
    mylog("debug", ["[Import Config] fileModifiedTime :", fileModifiedTime])

    if (fileModifiedTime == conf.lastImportedConfFile) and all_plugins is not None:
@@ -399,12 +396,7 @@ def importConfigs(pm, db, all_plugins):
        conf.TIMEZONE = ccd(
            "TIMEZONE", conf.tz, c_d, "_KEEP_", "_KEEP_", "[]", "General"
        )
        mylog(
            "none",
            [
                f"[Config] Invalid timezone '{conf.TIMEZONE}', defaulting to {default_tz}."
            ],
        )
        mylog("none", [f"[Config] Invalid timezone '{conf.TIMEZONE}', defaulting to {default_tz}."],)

    # TODO cleanup later ----------------------------------------------------------------------------------
    # init all time values as we have timezone - all this shoudl be moved into plugin/plugin settings
@@ -450,13 +442,7 @@ def importConfigs(pm, db, all_plugins):

    all_plugins = get_plugins_configs(conf.DISCOVER_PLUGINS)

    mylog(
        "none",
        [
            "[Config] Plugins: Number of all plugins (including not loaded): ",
            len(all_plugins),
        ],
    )
    mylog("none", ["[Config] Plugins: Number of all plugins (including not loaded): ", len(all_plugins),],)

    plugin_indexes_to_remove = []
    all_plugins_prefixes = [] # to init the LOADED_PLUGINS setting with correct options
@@ -580,9 +566,7 @@ def importConfigs(pm, db, all_plugins):
        "General",
    )

    mylog(
        "none", ["[Config] Number of Plugins to load: ", len(loaded_plugins_prefixes)]
    )
    mylog("none", ["[Config] Number of Plugins to load: ", len(loaded_plugins_prefixes)])
    mylog("none", ["[Config] Plugins to load: ", loaded_plugins_prefixes])

    conf.plugins_once_run = False
@@ -606,12 +590,7 @@ def importConfigs(pm, db, all_plugins):

                # Log the value being passed
                # ccd(key, default, config_dir, name, inputtype, options, group, events=None, desc="", setJsonMetadata=None, overrideTemplate=None, forceDefault=False)
                mylog(
                    "verbose",
                    [
                        f"[Config] Setting override {setting_name} with value: {value}"
                    ],
                )
                mylog("verbose", [f"[Config] Setting override {setting_name} with value: {value}"],)
                ccd(
                    setting_name,
                    value,
@@ -630,12 +609,7 @@ def importConfigs(pm, db, all_plugins):
                )

        except json.JSONDecodeError:
            mylog(
                "none",
                [
                    f"[Config] [ERROR] Setting override decoding JSON from {app_conf_override_path}"
                ],
            )
            mylog("none", [f"[Config] [ERROR] Setting override decoding JSON from {app_conf_override_path}"],)
    else:
        mylog("debug", [f"[Config] File {app_conf_override_path} does not exist."])

@@ -777,10 +751,7 @@ def renameSettings(config_file):
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    backup_file = f"{config_file}_old_setting_names_{timestamp}.bak"

    mylog(
        "debug",
        f"[Config] Old setting names will be replaced and a backup ({backup_file}) of the config created.",
    )
    mylog("debug", f"[Config] Old setting names will be replaced and a backup ({backup_file}) of the config created.",)

    shutil.copy(str(config_file), backup_file) # Convert config_file to a string

@@ -807,6 +778,4 @@ def renameSettings(config_file):
        ) # Convert config_file to a string

    else:
        mylog(
            "debug", "[Config] No old setting names found in the file. No changes made."
        )
        mylog("debug", "[Config] No old setting names found in the file. No changes made.")

@@ -119,10 +119,7 @@ def remove_old(keepNumberOfEntries):
    try:
        with open(NOTIFICATION_API_FILE, "w") as file:
            json.dump(trimmed, file, indent=4)
        mylog(
            "verbose",
            f"[Notification] Trimmed notifications to latest {keepNumberOfEntries}",
        )
        mylog("verbose", f"[Notification] Trimmed notifications to latest {keepNumberOfEntries}",)
    except Exception as e:
        mylog("none", f"Error writing trimmed notifications file: {e}")


@@ -295,9 +295,7 @@ class NotificationInstance:
            (f"-{minutes} minutes", tz_offset),
        )

        mylog(
            "minimal", ["[Notification] Notifications changes: ", self.db.sql.rowcount]
        )
        mylog("minimal", ["[Notification] Notifications changes: ", self.db.sql.rowcount])

        # clear plugin events
        self.clearPluginEvents()
@@ -31,10 +31,7 @@ class UserEventsQueueInstance:
        Returns an empty list if the file doesn't exist.
        """
        if not os.path.exists(self.log_file):
            mylog(
                "none",
                ["[UserEventsQueueInstance] Log file not found: ", self.log_file],
            )
            mylog("none", ["[UserEventsQueueInstance] Log file not found: ", self.log_file],)
            return [] # No log file, return empty list
        with open(self.log_file, "r") as file:
            return file.readlines()
@@ -123,9 +123,7 @@ def update_devices_data_from_scan(db):
    )""")

    # Update only devices with empty or NULL devParentMAC
    mylog(
        "debug", "[Update Devices] - (if not empty) cur_NetworkNodeMAC -> devParentMAC"
    )
    mylog("debug", "[Update Devices] - (if not empty) cur_NetworkNodeMAC -> devParentMAC")
    sql.execute("""UPDATE Devices
        SET devParentMAC = (
            SELECT cur_NetworkNodeMAC
@@ -144,10 +142,7 @@ def update_devices_data_from_scan(db):
    """)

    # Update only devices with empty or NULL devSite
    mylog(
        "debug",
        "[Update Devices] - (if not empty) cur_NetworkSite -> (if empty) devSite",
    )
    mylog("debug", "[Update Devices] - (if not empty) cur_NetworkSite -> (if empty) devSite",)
    sql.execute("""UPDATE Devices
        SET devSite = (
            SELECT cur_NetworkSite
@@ -325,9 +320,7 @@ def save_scanned_devices(db):
        .strip()
    )

    mylog(
        "debug", ["[Save Devices] Saving this IP into the CurrentScan table:", local_ip]
    )
    mylog("debug", ["[Save Devices] Saving this IP into the CurrentScan table:", local_ip])

    if check_IP_format(local_ip) == "":
        local_ip = "0.0.0.0"
@@ -361,23 +354,12 @@ def print_scan_stats(db):
    sql.execute(query)
    stats = sql.fetchall()

    mylog(
        "verbose",
        f"[Scan Stats] Devices Detected.......: {stats[0]['devices_detected']}",
    )
    mylog("verbose", f"[Scan Stats] Devices Detected.......: {stats[0]['devices_detected']}",)
    mylog("verbose", f"[Scan Stats] New Devices............: {stats[0]['new_devices']}")
    mylog("verbose", f"[Scan Stats] Down Alerts............: {stats[0]['down_alerts']}")
    mylog(
        "verbose",
        f"[Scan Stats] New Down Alerts........: {stats[0]['new_down_alerts']}",
    )
    mylog(
        "verbose",
        f"[Scan Stats] New Connections........: {stats[0]['new_connections']}",
    )
    mylog(
        "verbose", f"[Scan Stats] Disconnections.........: {stats[0]['disconnections']}"
    )
    mylog("verbose", f"[Scan Stats] New Down Alerts........: {stats[0]['new_down_alerts']}",)
    mylog("verbose", f"[Scan Stats] New Connections........: {stats[0]['new_connections']}",)
    mylog("verbose", f"[Scan Stats] Disconnections.........: {stats[0]['disconnections']}")
    mylog("verbose", f"[Scan Stats] IP Changes.............: {stats[0]['ip_changes']}")

    # if str(stats[0]["new_devices"]) != '0':
@@ -395,10 +377,7 @@ def print_scan_stats(db):
        row_dict = dict(row)
        mylog("trace", f" {row_dict}")

    mylog(
        "trace",
        " ================ Events table content where eve_PendingAlertEmail = 1 ================",
    )
    mylog("trace", " ================ Events table content where eve_PendingAlertEmail = 1 ================",)
    sql.execute("select * from Events where eve_PendingAlertEmail = 1")
    rows = sql.fetchall()
    for row in rows:
@@ -654,10 +633,7 @@ def check_plugin_data_changed(pm, plugins_to_check):

        # Continue if changes detected
        for p in plugins_changed:
            mylog(
                'debug',
                f'[check_plugin_data_changed] {p} changed (last_data_change|last_data_check): ({pm.plugin_states.get(p, {}).get("lastDataChange")}|{pm.plugin_checks.get(p)})'
            )
            mylog('debug', f'[check_plugin_data_changed] {p} changed (last_change|last_check): ({pm.plugin_states.get(p, {}).get("lastDataChange")}|{pm.plugin_checks.get(p)})')

        return True

@@ -741,10 +717,7 @@ def update_devices_names(pm):
    # --- Step 1: Update device names for unknown devices ---
    unknownDevices = device_handler.getUnknown()
    if unknownDevices:
        mylog(
            "verbose",
            f"[Update Device Name] Trying to resolve devices without name. Unknown devices count: {len(unknownDevices)}",
        )
        mylog("verbose", f"[Update Device Name] Trying to resolve devices without name. Unknown devices count: {len(unknownDevices)}",)

        # Try resolving both name and FQDN
        recordsToUpdate, recordsNotFound, fs, notFound = resolve_devices(
@@ -752,10 +725,8 @@ def update_devices_names(pm):
        )

        # Log summary
        mylog(
            "verbose",
            f"[Update Device Name] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)} ({fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']})",
        )
        res_string = f"{fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']}"
        mylog("verbose", f"[Update Device Name] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)} ({res_string})",)
        mylog("verbose", f"[Update Device Name] Names Not Found : {notFound}")

        # Apply updates to database
@@ -771,10 +742,7 @@ def update_devices_names(pm):
    if get_setting_value("REFRESH_FQDN"):
        allDevices = device_handler.getAll()
        if allDevices:
            mylog(
                "verbose",
                f"[Update FQDN] Trying to resolve FQDN. Devices count: {len(allDevices)}",
            )
            mylog("verbose", f"[Update FQDN] Trying to resolve FQDN. Devices count: {len(allDevices)}",)

            # Try resolving only FQDN
            recordsToUpdate, _, fs, notFound = resolve_devices(
@@ -782,10 +750,8 @@ def update_devices_names(pm):
            )

            # Log summary
            mylog(
                "verbose",
                f"[Update FQDN] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)}({fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']})",
            )
            res_string = f"{fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']}"
            mylog("verbose", f"[Update FQDN] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)}({res_string})",)
            mylog("verbose", f"[Update FQDN] Names Not Found : {notFound}")

            # Apply FQDN-only updates
@@ -907,25 +873,13 @@ def query_MAC_vendor(pMAC):
                    parts = line.split("\t", 1)
                    if len(parts) > 1:
                        vendor = parts[1].strip()
                        mylog(
                            "debug",
                            [
                                f"[Vendor Check] Found '{vendor}' for '{pMAC}' in {vendorsPath}"
                            ],
                        )
                        mylog("debug", [f"[Vendor Check] Found '{vendor}' for '{pMAC}' in {vendorsPath}"], )
                        return vendor
                    else:
                        mylog(
                            "debug",
                            [
                                f'[Vendor Check] ⚠ ERROR: Match found, but line could not be processed: "{line_lower}"'
                            ],
                        )
                        mylog("debug", [f'[Vendor Check] ⚠ ERROR: Match found, but line could not be processed: "{line_lower}"'],)
                        return -1

        return -1 # MAC address not found in the database
    except FileNotFoundError:
        mylog(
            "none", [f"[Vendor Check] ⚠ ERROR: Vendors file {vendorsPath} not found."]
        )
        mylog("none", [f"[Vendor Check] ⚠ ERROR: Vendors file {vendorsPath} not found."])
        return -1
@@ -25,10 +25,7 @@ try:
            rule["icon_base64"] = ""
except Exception as e:
    MAC_TYPE_ICON_RULES = []
    mylog(
        "none",
        f"[guess_device_attributes] Failed to load device_heuristics_rules.json: {e}",
    )
    mylog("none", f"[guess_device_attributes] Failed to load device_heuristics_rules.json: {e}",)


# -----------------------------------------
@@ -169,10 +166,8 @@ def guess_device_attributes(
    default_icon: str,
    default_type: str,
) -> Tuple[str, str]:
    mylog(
        "debug",
        f"[guess_device_attributes] Guessing attributes for (vendor|mac|ip|name): ('{vendor}'|'{mac}'|'{ip}'|'{name}')",
    )

    mylog("debug", f"[guess_device_attributes] Guessing attributes for (vendor|mac|ip|name): ('{vendor}'|'{mac}'|'{ip}'|'{name}')",)

    # --- Normalize inputs ---
    vendor = str(vendor).lower().strip() if vendor else "unknown"
@@ -207,10 +202,7 @@ def guess_device_attributes(
    type_ = type_ or default_type
    icon = icon or default_icon

    mylog(
        "debug",
        f"[guess_device_attributes] Guessed attributes (icon|type_): ('{icon}'|'{type_}')",
    )
    mylog("debug", f"[guess_device_attributes] Guessed attributes (icon|type_): ('{icon}'|'{type_}')",)
    return icon, type_

@@ -50,9 +50,7 @@ def process_scan(db):
    update_devices_data_from_scan(db)

    # Pair session events (Connection / Disconnection)
    mylog(
        "verbose", "[Process Scan] Pairing session events (connection / disconnection) "
    )
    mylog("verbose", "[Process Scan] Pairing session events (connection / disconnection) ")
    pair_sessions_events(db)

    # Sessions snapshot
@@ -221,10 +219,7 @@ def insertOnlineHistory(db):
        VALUES (?, ?, ?, ?, ?, ?)
    """

    mylog(
        "debug",
        f"[Presence graph] Sql query: {insert_query} with values: {scanTimestamp}, {onlineDevices}, {downDevices}, {allDevices}, {archivedDevices}, {offlineDevices}",
    )
    mylog("debug", f"[Presence graph] Sql query: {insert_query} with values: {scanTimestamp}, {onlineDevices}, {downDevices}, {allDevices}, {archivedDevices}, {offlineDevices}",)

    # Debug output
    print_table_schema(db, "Online_History")
@@ -26,12 +26,7 @@ def logEventStatusCounts(objName, pluginEvents):
            status_counts[status] = 1

    for status, count in status_counts.items():
        mylog(
            "debug",
            [
                f'[{module_name}] In {objName} there are {count} events with the status "{status}" '
            ],
        )
        mylog("debug", [f'[{module_name}] In {objName} there are {count} events with the status "{status}" '],)


# -------------------------------------------------------------------------------
@@ -100,10 +95,7 @@ def list_to_csv(arr):

    mylog("debug", f"[{module_name}] Flattening the below array")
    mylog("debug", arr)
    mylog(
        "debug",
        f"[{module_name}] isinstance(arr, list) : {isinstance(arr, list)} | isinstance(arr, str) : {isinstance(arr, str)}",
    )
    mylog("debug", f"[{module_name}] isinstance(arr, list) : {isinstance(arr, list)} | isinstance(arr, str) : {isinstance(arr, str)}",)

    if isinstance(arr, str):
        tmpStr = (
@@ -227,19 +219,9 @@ def get_plugins_configs(loadAll):

            except (FileNotFoundError, json.JSONDecodeError):
                # Handle the case when the file is not found or JSON decoding fails
                mylog(
                    "none",
                    [
                        f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {config_path}"
                    ],
                )
                mylog("none", f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {config_path}")
            except Exception as e:
                mylog(
                    "none",
                    [
                        f"[{module_name}] ⚠ ERROR - Exception for file {config_path}: {str(e)}"
                    ],
                )
                mylog("none", f"[{module_name}] ⚠ ERROR - Exception for file {config_path}: {str(e)}")

    # Sort pluginsList based on "execution_order"
    pluginsListSorted = sorted(pluginsList, key=get_layer)
@@ -285,23 +267,13 @@ def getPluginObject(keyValues):
            if all_match:
                return item

        mylog(
            "verbose",
            [
                f"[{module_name}] 💬 INFO - Object not found {json.dumps(keyValues)} "
            ],
        )
        mylog("verbose", f"[{module_name}] 💬 INFO - Object not found {json.dumps(keyValues)} ")

        return {}

    except (FileNotFoundError, json.JSONDecodeError, ValueError):
        # Handle the case when the file is not found, JSON decoding fails, or data is not in the expected format
        mylog(
            "verbose",
            [
                f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {plugins_objects}"
            ],
        )
        mylog("verbose", f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {plugins_objects}")

        return {}

@@ -29,10 +29,7 @@ class UpdateFieldAction(Action):
        self.db = db

    def execute(self):
        mylog(
            "verbose",
            f"[WF] Updating field '{self.field}' to '{self.value}' for event object {self.trigger.object_type}",
        )
        mylog("verbose", f"[WF] Updating field '{self.field}' to '{self.value}' for event object {self.trigger.object_type}")

        obj = self.trigger.object

@@ -109,12 +106,7 @@ class RunPluginAction(Action):
    def execute(self):
        obj = self.trigger.object

        mylog(
            "verbose",
            [
                f"Executing plugin '{self.plugin_name}' with parameters {self.params} for object {obj}"
            ],
        )
        mylog("verbose", f"Executing plugin '{self.plugin_name}' with parameters {self.params} for object {obj}")
        # PluginManager.run(self.plugin_name, self.parameters)
        return obj

@@ -129,12 +121,7 @@ class SendNotificationAction(Action):

    def execute(self):
        obj = self.trigger.object
        mylog(
            "verbose",
            [
                f"Sending notification via '{self.method}': {self.message} for object {obj}"
            ],
        )
        mylog("verbose", f"Sending notification via '{self.method}': {self.message} for object {obj}")
        # NotificationManager.send(self.method, self.message)
        return obj

@@ -52,10 +52,7 @@ class ConditionGroup:
    """Handles condition groups with AND, OR logic, supporting nested groups."""

    def __init__(self, group_json):
        mylog(
            "verbose",
            [f"[WF] ConditionGroup json.dumps(group_json): {json.dumps(group_json)}"],
        )
        mylog("verbose", f"[WF] ConditionGroup json.dumps(group_json): {json.dumps(group_json)}")

        self.logic = group_json.get("logic", "AND").upper()
        self.conditions = []
@@ -53,21 +53,13 @@ class WorkflowManager:
            # Ensure workflow is enabled before proceeding
            if workflow.get("enabled", "No").lower() == "yes":
                wfName = workflow["name"]
                mylog(
                    "debug",
                    [f"[WF] Checking if '{evGuid}' triggers the workflow '{wfName}'"],
                )
                mylog("debug", f"[WF] Checking if '{evGuid}' triggers the workflow '{wfName}'")

                # construct trigger object which also evaluates if the current event triggers it
                trigger = Trigger(workflow["trigger"], event, self.db)

                if trigger.triggered:
                    mylog(
                        "verbose",
                        [
                            f"[WF] Event with GUID '{evGuid}' triggered the workflow '{wfName}'"
                        ],
                    )
                    mylog("verbose", f"[WF] Event with GUID '{evGuid}' triggered the workflow '{wfName}'")

                    self.execute_workflow(workflow, trigger)

@@ -98,12 +90,7 @@ class WorkflowManager:
            evaluator = ConditionGroup(condition_group)

            if evaluator.evaluate(trigger): # If any group evaluates to True
                mylog(
                    "none",
                    [
                        f"[WF] Workflow {wfName} will be executed - conditions were evaluated as TRUE"
                    ],
                )
                mylog("none", f"[WF] Workflow {wfName} will be executed - conditions were evaluated as TRUE")
                mylog("debug", [f"[WF] Workflow condition_group: {condition_group}"])

                self.execute_actions(workflow["actions"], trigger)

@@ -24,12 +24,7 @@ class Trigger:
            self.object_type == event["ObjectType"] and self.event_type == event["AppEventType"]
        )

        mylog(
            "debug",
            [
                f"""[WF] self.triggered '{self.triggered}' for event '{get_array_from_sql_rows(event)} and trigger {json.dumps(triggerJson)}' """
            ],
        )
        mylog("debug", f"""[WF] self.triggered '{self.triggered}' for event '{get_array_from_sql_rows(event)} and trigger {json.dumps(triggerJson)}' """)

        if self.triggered:
            # object type corresponds with the DB table name
@@ -11,26 +11,29 @@ echo "==========================================" >> "$LOG_FILE"
# Function to extract comments from docker-compose file
extract_comments() {
    local file="$1"
    echo "File: $(basename "$file")" >> "$LOG_FILE"
    echo "----------------------------------------" >> "$LOG_FILE"
    {

    # Extract lines starting with # until we hit a non-comment line
    awk '
        /^#/ {
            # Remove the # and any leading/trailing whitespace
            comment = substr($0, 2)
            sub(/^ */, "", comment)
            sub(/ *$/, "", comment)
            if (comment != "") {
                print comment
            }
        }
        /^[^#]/ && !/^$/ {
            exit
        }
    ' "$file" >> "$LOG_FILE"
        echo "File: $(basename "$file")"
        echo "----------------------------------------"

    echo "" >> "$LOG_FILE"
        # Extract lines starting with # until we hit a non-comment line
        awk '
            /^#/ {
                # Remove the # and any leading/trailing whitespace
                comment = substr($0, 2)
                sub(/^ */, "", comment)
                sub(/ *$/, "", comment)
                if (comment != "") {
                    print comment
                }
            }
            /^[^#]/ && !/^$/ {
                exit
            }
        ' "$file"

        echo ""
    } >> "$LOG_FILE"
}
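A usage sketch of the reworked function against a hypothetical compose file (the file name and LOG_FILE value are made up for illustration):

    LOG_FILE=/tmp/compose-test.log
    cat > /tmp/docker-compose.demo.yml <<'EOF'
    # Demo stack for the comment extractor
    # Requires: docker compose v2
    services:
      app:
        image: alpine:3
    EOF
    extract_comments /tmp/docker-compose.demo.yml
    # $LOG_FILE now holds the file header plus the two leading comment lines.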
# Function to run docker-compose test
@@ -40,16 +43,17 @@ run_test() {
    dirname=$(dirname "$file")
    local basename
    basename=$(basename "$file")

    echo "Testing: $basename" >> "$LOG_FILE"
    echo "Directory: $dirname" >> "$LOG_FILE"
    echo "" >> "$LOG_FILE"
    echo "Running docker-compose up..." >> "$LOG_FILE"
    timeout 10s docker-compose -f "$file" up 2>&1 >> "$LOG_FILE"

    {
        echo "Testing: $basename"
        echo "Directory: $dirname"
        echo ""
        echo "Running docker-compose up..."
        timeout 10s docker-compose -f "$file" up 2>&1
    } >> "$LOG_FILE"
    # Clean up
    docker-compose -f "$file" down -v 2>/dev/null || true
    docker volume prune -f 2>/dev/null || true
}

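Besides satisfying shellcheck, the grouped form fixes redirection order: in the old timeout ... 2>&1 >> "$LOG_FILE", stderr was duplicated onto the terminal's stdout before stdout moved to the log, so error output never reached the file. Sketch (some_cmd is a stand-in):

    some_cmd 2>&1 >>log           # stderr -> tty, then stdout -> log: errors miss the log
    some_cmd >>log 2>&1           # stdout -> log, then stderr -> log: both captured
    { echo hi; some_cmd; } >>log  # as patched: one redirect covers the whole block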
find "$SCRIPT_DIR" -name "docker-compose*.yml" -type f -print0 | sort -z | while IFS= read -r -d '' file; do
|
||||
extract_comments "$file"
|
||||
|
||||
@@ -57,7 +57,7 @@ for i in $(seq 1 $WAIT_SECONDS); do
        echo "--- Services are healthy! ---"
        break
    fi
    if [ $i -eq $WAIT_SECONDS ]; then
    if [ "$i" -eq "$WAIT_SECONDS" ]; then
        echo "--- Timeout: Services did not become healthy after $WAIT_SECONDS seconds. ---"
        docker logs netalertx-test-container
        exit 1
@@ -271,7 +271,7 @@ def create_test_scenarios() -> List[TestScenario]:
        compose_file = f"docker-compose.mount-test.{path_name}_{scenario_name}.yml"

        # Determine expected exit code
        expected_exit_code = 1 if scenario_name == "unwritable" else 0
        expected_exit_code = 1 if expected_issues else 0

        scenarios.append(
            TestScenario(