Mirror of https://github.com/jokob-sk/NetAlertX.git (synced 2025-12-07 09:36:05 -08:00)

Compare commits: 17 commits, linting-fi ... e90fbf17d3
Commits: e90fbf17d3, 139447b253, fa9fc2c8e3, 30071c6848, b0bd3c8191, c753da9e15, 4770ee5942, 5cd53bc8f9, 5e47ccc9ef, f5d7c0f9a0, 35b7e80be4, 07eeac0a0b, 240d86bf1e, 274fd50a92, bbf49c3686, e3458630ba, 2f6f1e49e9
@@ -75,7 +75,9 @@
"alexcvzz.vscode-sqlite",
"mkhl.shfmt",
"charliermarsh.ruff",
"ms-python.flake8"
"ms-python.flake8",
"exiasr.hadolint",
"timonwong.shellcheck"
],
"settings": {
"terminal.integrated.cwd": "${containerWorkspaceFolder}",
@@ -7,6 +7,7 @@
# Open and wide to avoid permission issues during development allowing max
# flexibility.
# hadolint ignore=DL3006
FROM runner AS netalertx-devcontainer
ENV INSTALL_DIR=/app
@@ -20,9 +21,14 @@ ENV PYDEVD_DISABLE_FILE_VALIDATION=1
COPY .devcontainer/resources/devcontainer-overlay/ /
USER root
# Install common tools, create user, and set up sudo
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
docker-cli-compose
docker-cli-compose shellcheck

# Install hadolint (Dockerfile linter)
RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
chmod +x /usr/local/bin/hadolint

RUN install -d -o netalertx -g netalertx -m 755 /services/php/modules && \
cp -a /usr/lib/php83/modules/. /services/php/modules/ && \
@@ -7,27 +7,28 @@
# the final .devcontainer/Dockerfile used by the devcontainer.

echo "Generating .devcontainer/Dockerfile"
SCRIPT_DIR="$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)"
SCRIPT_PATH=$(set -- "$0"; dirname -- "$1")
SCRIPT_DIR=$(cd "$SCRIPT_PATH" && pwd -P)
DEVCONTAINER_DIR="${SCRIPT_DIR%/scripts}"
ROOT_DIR="${DEVCONTAINER_DIR%/.devcontainer}"

OUT_FILE="${DEVCONTAINER_DIR}/Dockerfile"

echo "Adding base Dockerfile from $ROOT_DIR..."
echo "Adding base Dockerfile from $ROOT_DIR and merging to devcontainer-Dockerfile"
{
echo "# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh" > "$OUT_FILE"
echo "" >> "$OUT_FILE"
echo "# ---/Dockerfile---" >> "$OUT_FILE"
echo "# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh"
echo ""
echo "# ---/Dockerfile---"
cat "${ROOT_DIR}/Dockerfile" >> "$OUT_FILE"
cat "${ROOT_DIR}/Dockerfile"
echo "" >> "$OUT_FILE"
echo "# ---/resources/devcontainer-Dockerfile---" >> "$OUT_FILE"
echo "" >> "$OUT_FILE"
echo ""
echo "# ---/resources/devcontainer-Dockerfile---"
echo ""
cat "${DEVCONTAINER_DIR}/resources/devcontainer-Dockerfile"
} > "$OUT_FILE"

echo "Adding devcontainer-Dockerfile from $DEVCONTAINER_DIR/resources..."
cat "${DEVCONTAINER_DIR}/resources/devcontainer-Dockerfile" >> "$OUT_FILE"

echo "Generated $OUT_FILE using root dir $ROOT_DIR" >&2
echo "Generated $OUT_FILE using root dir $ROOT_DIR"

echo "Done."
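The rewritten generator collects every echo and cat inside one command group and redirects the group's combined output once, instead of appending to the output file line by line. A minimal sketch of the same pattern, with placeholder file names that are not the repo's real paths:

```bash
#!/bin/sh
# Build one output file from several sources with a single redirection.
{
    echo "# generated file - do not edit"
    cat part1.txt
    echo "# --- second part ---"
    cat part2.txt
} > out.txt   # the whole group writes once, so no per-line >> appends are needed
```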
@@ -16,7 +16,6 @@

SOURCE_DIR=${SOURCE_DIR:-/workspaces/NetAlertX}
PY_SITE_PACKAGES="${VIRTUAL_ENV:-/opt/venv}/lib/python3.12/site-packages"
SOURCE_SERVICES_DIR="${SOURCE_DIR}/install/production-filesystem/services"

LOG_FILES=(
LOG_APP
@@ -26,7 +25,7 @@ LOG_FILES=(
LOG_EXECUTION_QUEUE
LOG_APP_PHP_ERRORS
LOG_IP_CHANGES
LOG_CROND
LOG_CRON
LOG_REPORT_OUTPUT_TXT
LOG_REPORT_OUTPUT_HTML
LOG_REPORT_OUTPUT_JSON
6 .github/copilot-instructions.md (vendored)
@@ -83,3 +83,9 @@ Backend loop phases (see `server/__main__.py` and `server/plugin.py`): `once`, `
- Be sure to offer choices when appropriate.
- Always understand the intent of the user's request and undo/redo as needed.
- Above all, use the simplest possible code that meets the need so it can be easily audited and maintained.
- Always leave logging enabled. If there is a possibility it will be difficult to debug with current logging, add more logging.
- Always run the testFailure tool before executing any tests to gather current failure information and avoid redundant runs.
- Always prioritize using the appropriate tools in the environment first. For example, if a test is failing use `testFailure` then `runTests`. Never `runTests` first.
- Docker tests take an extremely long time to run. Avoid changes to docker or tests until you've examined the existing testFailure and runTests results.
- Environment tools are designed specifically for your use in this project and running them in this order will give you the best results.
6 .github/workflows/code_checks.yml (vendored)
@@ -84,7 +84,7 @@ jobs:
continue-on-error: true
run: |
echo "🔍 Linting Dockerfiles..."
/tmp/hadolint Dockerfile* || true
/tmp/hadolint --config .hadolint.yaml Dockerfile* || true

docker-tests:
runs-on: ubuntu-latest
@@ -95,5 +95,5 @@ jobs:
- name: Run Docker-based tests
run: |
echo "🐳 Running Docker-based tests..."
chmod +x ./run_docker_tests.sh
./run_docker_tests.sh
chmod +x ./test/docker_tests/run_docker_tests.sh
./test/docker_tests/run_docker_tests.sh
2 .hadolint.yaml (new file)
@@ -0,0 +1,2 @@
ignored:
- DL3018
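With this config checked in, the workflow's hadolint step (shown above) picks up the ignore list, and the same check can be run locally. A hedged sketch, assuming the hadolint binary is on PATH:

```bash
# Lint all Dockerfiles using the repo's ignore list (DL3018: unpinned apk packages).
hadolint --config .hadolint.yaml Dockerfile*
```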
32 Dockerfile
@@ -32,7 +32,7 @@ RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev o
# Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy
# into hardened stage without worrying about permissions and keeps image size small. Keeping the commands
# together makes for a slightly smaller image size.
RUN pip install -r /tmp/requirements.txt && \
RUN pip install --no-cache-dir -r /tmp/requirements.txt && \
chmod -R u-rwx,g-rwx /opt

# second stage is the main runtime stage with just the minimum required to run the application
@@ -68,7 +68,7 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
ENV LOG_CROND=${NETALERTX_LOG}/crond.log
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log

# System Services configuration files
@@ -78,11 +78,11 @@ ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
ENV SYSTEM_NGINX_CONFIG_TEMPLATE=${SYSTEM_NGINX_CONFIG}/netalertx.conf.template
ENV SYSTEM_SERVICES_CONFIG_CRON=${SYSTEM_SERVICES_CONFIG}/cron
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
ENV SYSTEM_SERVICES_ACTIVE_CONFIG_FILE=${SYSTEM_SERVICES_ACTIVE_CONFIG}/nginx.conf
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
ENV SYSTEM_SERVICES_CROND=${SYSTEM_SERVICES_CONFIG}/crond
ENV SYSTEM_SERVICES_RUN=/tmp/run
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
@@ -116,7 +116,7 @@ ENV LANG=C.UTF-8
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
nginx shadow && \
nginx supercronic shadow && \
rm -Rf /var/cache/apk/* && \
rm -Rf /etc/nginx && \
addgroup -g 20211 ${NETALERTX_GROUP} && \
@@ -147,26 +147,26 @@ COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV}
# This is done after the copy of the venv to ensure the venv is in place
# although it may be quicker to do it before the copy, it keeps the image
# layers smaller to do it after.
RUN if [ -f .VERSION ]; then \
cp .VERSION ${NETALERTX_APP}/.VERSION; \
RUN if [ -f '.VERSION' ]; then \
cp '.VERSION' "${NETALERTX_APP}/.VERSION"; \
else \
echo "DEVELOPMENT 00000000" > ${NETALERTX_APP}/.VERSION; \
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/.VERSION"; \
fi && \
chown 20212:20212 ${NETALERTX_APP}/.VERSION && \
apk add libcap && \
chown 20212:20212 "${NETALERTX_APP}/.VERSION" && \
apk add --no-cache libcap && \
setcap cap_net_raw+ep /bin/busybox && \
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && \
setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
setcap cap_net_raw,cap_net_admin+eip "$(readlink -f ${VIRTUAL_ENV_BIN}/python)" && \
/bin/sh /build/init-nginx.sh && \
/bin/sh /build/init-php-fpm.sh && \
/bin/sh /build/init-crond.sh && \
/bin/sh /build/init-cron.sh && \
/bin/sh /build/init-backend.sh && \
rm -rf /build && \
apk del libcap && \
date +%s > ${NETALERTX_FRONT}/buildtimestamp.txt
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"

ENTRYPOINT ["/bin/sh","/entrypoint.sh"]
@@ -183,13 +183,15 @@ ENV UMASK=0077
# AI may claim this is stupid, but it's actually least possible permissions as
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
RUN addgroup -g 20212 ${READ_ONLY_GROUP} && \
adduser -u 20212 -G ${READ_ONLY_GROUP} -D -h /app ${READ_ONLY_USER}
RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \
adduser -u 20212 -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"

# reduce permissions to minimum necessary for all NetAlertX files and folders
# Permissions 005 and 004 are not typos, they enable read-only. Everyone can
# read the read-only files, and nobody can write to them, even the readonly user.

# hadolint ignore=SC2114
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
chmod -R 004 ${READ_ONLY_FOLDERS} && \
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
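The 004/005 modes grant read (and directory traversal) only through the "others" bits, so even the owning read-only user has no rights via the user bits. A small sketch to illustrate the effect on a scratch directory (paths are examples, not the image's real folders):

```bash
# Demonstrate the ownership-as-a-lock idea.
mkdir -p /tmp/rodemo && echo hello > /tmp/rodemo/file
chmod 005 /tmp/rodemo          # d------r-x: only "others" may enter/list the directory
chmod 004 /tmp/rodemo/file     # -------r--: only "others" may read the file
ls -l /tmp/rodemo/file         # owner and group bits are empty, world is read-only
# Any write attempt fails for every non-root account, including the file's owner.
```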
@@ -208,7 +210,7 @@ RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
/srv /media && \
sed -i "/^\(${READ_ONLY_USER}\|${NETALERTX_USER}\):/!d" /etc/passwd && \
sed -i "/^\(${READ_ONLY_GROUP}\|${NETALERTX_GROUP}\):/!d" /etc/group && \
echo -ne '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo

USER netalertx
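The printf line replaces the real sudo with a pass-through shim: scripts that prefix commands with sudo keep working inside the unprivileged container, but nothing is ever elevated. A quick illustration of what the shim does, assuming the two-line shim shown above is installed:

```bash
# /usr/bin/sudo now contains:
#   #!/bin/sh
#   "$@"
sudo id -un   # simply runs `id -un` as the current user, e.g. prints "netalertx"
```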
@@ -72,7 +72,7 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
ENV LOG_CROND=${NETALERTX_LOG}/crond.log
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log

# System Services configuration files
@@ -132,25 +132,29 @@ COPY --chmod=775 --chown=${USER_ID}:${USER_GID} . ${INSTALL_DIR}/

# ❗ IMPORTANT - if you modify this file modify the /install/install_dependecies.debian.sh file as well ❗
RUN apt update && apt-get install -y \
# hadolint ignore=DL3008,DL3027
RUN apt-get update && apt-get install -y --no-install-recommends \
tini snmp ca-certificates curl libwww-perl arp-scan sudo gettext-base \
nginx-light php php-cgi php-fpm php-sqlite3 php-curl sqlite3 dnsutils net-tools \
python3 python3-dev iproute2 nmap python3-pip zip git systemctl usbutils traceroute nbtscan openrc \
busybox nginx nginx-core mtr python3-venv
busybox nginx nginx-core mtr python3-venv && \
rm -rf /var/lib/apt/lists/*

# While php8.3 is in debian bookworm repos, php-fpm is not included so we need to add sury.org repo
# (Ondřej Surý maintains php packages for debian. This is temp until debian includes php-fpm in their
# repos. Likely it will be in Debian Trixie.). This keeps the image up-to-date with the alpine version.
# hadolint ignore=DL3008
RUN apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
lsb-release \
wget && \
wget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg && \
wget -q -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg && \
echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list && \
apt-get update && \
apt-get install -y php8.3-fpm php8.3-cli php8.3-sqlite3 php8.3-common php8.3-curl php8.3-cgi && \
ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 # make it compatible with alpine version
apt-get install -y --no-install-recommends php8.3-fpm php8.3-cli php8.3-sqlite3 php8.3-common php8.3-curl php8.3-cgi && \
ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 && \
rm -rf /var/lib/apt/lists/* # make it compatible with alpine version

# Setup virtual python environment and use pip3 to install packages
RUN python3 -m venv ${VIRTUAL_ENV} && \
@@ -1,14 +1,17 @@
#!/bin/bash
export INSTALL_DIR=/app

LOG_FILE="${INSTALL_DIR}/log/execution_queue.log"

# Check if there are any entries with cron_restart_backend
if grep -q "cron_restart_backend" "$LOG_FILE"; then
# Restart python application using s6
s6-svc -r /var/run/s6-rc/servicedirs/netalertx
echo 'done'
if [ -f "${LOG_EXECUTION_QUEUE}" ] && grep -q "cron_restart_backend" "${LOG_EXECUTION_QUEUE}"; then
echo "$(date): Restarting backend triggered by cron_restart_backend"
killall python3 || echo "killall python3 failed or no process found"
sleep 2
/services/start-backend.sh &

# Remove all lines containing cron_restart_backend from the log file
sed -i '/cron_restart_backend/d' "$LOG_FILE"
# Atomic replacement with temp file. grep returns 1 if no lines selected (file becomes empty), which is valid here.
grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp"
RC=$?
if [ $RC -eq 0 ] || [ $RC -eq 1 ]; then
mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
fi
fi
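The RC check exists because grep exits with 1 when it selects no lines, which here only means the queue file ends up empty rather than signalling an error; exit codes of 2 and above indicate a real failure. A minimal sketch of the same idiom, with an illustrative file name:

```bash
grep -v "some-marker" queue.log > queue.log.tmp
rc=$?
if [ "$rc" -le 1 ]; then   # 0 = lines kept, 1 = nothing matched (empty result) - both fine
    mv queue.log.tmp queue.log
else                       # 2+ = real error (unreadable file, bad pattern, ...)
    rm -f queue.log.tmp
fi
```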
@@ -1,6 +1,6 @@
## How to Set Up Your Network Page

The **Network** page lets you map how devices connect — visually and logically.
The **Network** page lets you map how devices connect — visually and logically.
It’s especially useful for planning infrastructure, assigning parent-child relationships, and spotting gaps.

@@ -9,11 +9,11 @@ To get started, you’ll need to define at least one root node and mark certain

---

Start by creating a root device with the MAC address `Internet`, if the application didn’t create one already.
This special MAC address (`Internet`) is required for the root network node — no other value is currently supported.
Start by creating a root device with the MAC address `Internet`, if the application didn’t create one already.
This special MAC address (`Internet`) is required for the root network node — no other value is currently supported.
Set its **Type** to a valid network type — such as `Router` or `Gateway`.

> [!TIP]
> [!TIP]
> If you don’t have one, use the [Create new device](./DEVICE_MANAGEMENT.md#dummy-devices) button on the **Devices** page to add a root device.

---
@@ -21,15 +21,15 @@ Set its **Type** to a valid network type — such as `Router` or `Gateway`.
## ⚡ Quick Setup

1. Open the device you want to use as a network node (e.g. a Switch).
2. Set its **Type** to one of the following:
`AP`, `Firewall`, `Gateway`, `PLC`, `Powerline`, `Router`, `Switch`, `USB LAN Adapter`, `USB WIFI Adapter`, `WLAN`
2. Set its **Type** to one of the following:
`AP`, `Firewall`, `Gateway`, `PLC`, `Powerline`, `Router`, `Switch`, `USB LAN Adapter`, `USB WIFI Adapter`, `WLAN`
*(Or add custom types under **Settings → General → `NETWORK_DEVICE_TYPES`**.)*
3. Save the device.
4. Go to the **Network** page — supported device types will appear as tabs.
5. Use the **Assign** button to connect unassigned devices to a network node.
6. If the **Port** is `0` or empty, a Wi-Fi icon is shown. Otherwise, an Ethernet icon appears.

> [!NOTE]
> [!NOTE]
> Use [bulk editing](./DEVICES_BULK_EDITING.md) with _CSV Export_ to fix `Internet` root assignments or update many devices at once.

---
@@ -42,20 +42,22 @@ Let’s walk through setting up a device named `raspberrypi` to act as a network

### 1. Set Device Type and Parent

- Go to the **Devices** page
- Go to the **Devices** page
- Open the device detail view for `raspberrypi`
- In the **Type** dropdown, select `Switch`

- Optionally assign a **Parent Node** (where this device connects to) and the **Relationship type** of the connection.
- Optionally assign a **Parent Node** (where this device connects to) and the **Relationship type** of the connection.
The `nic` relationship type can affect parent notifications — see the setting description and [Notifications documentation](./NOTIFICATIONS.md) for more.
- A device’s parent MAC will be overwritten by plugins if its current value is any of the following: "null", "(unknown)", "(Unknown)".
- If you want plugins to be able to overwrite the parent value (for example, when mixing plugins that do not provide parent MACs like `ARPSCAN` with those that do, like `UNIFIAPI`), you must set the setting `NEWDEV_devParentMAC` to None.

> [!NOTE]
> Only certain device types can act as network nodes:
> `AP`, `Firewall`, `Gateway`, `Hypervisor`, `PLC`, `Powerline`, `Router`, `Switch`, `USB LAN Adapter`, `USB WIFI Adapter`, `WLAN`
> [!NOTE]
> Only certain device types can act as network nodes:
> `AP`, `Firewall`, `Gateway`, `Hypervisor`, `PLC`, `Powerline`, `Router`, `Switch`, `USB LAN Adapter`, `USB WIFI Adapter`, `WLAN`
> You can add custom types via the `NETWORK_DEVICE_TYPES` setting.

- Click **Save**
@@ -81,7 +83,7 @@ You can confirm that `raspberrypi` now acts as a network device in two places:
### 3. Assign Connected Devices

- Use the **Assign** button to link other devices (e.g. PCs) to `raspberrypi`.
- After assigning, connected devices will appear beneath the `raspberrypi` switch node.
- After assigning, connected devices will appear beneath the `raspberrypi` switch node.

@@ -92,9 +94,9 @@ You can confirm that `raspberrypi` now acts as a network device in two places:
> Hovering over devices in the tree reveals connection details and tooltips for quick inspection.

> [!NOTE]
> Selecting certain relationship types hides the device in the default device views.
> You can change this behavior by adjusting the `UI_hide_rel_types` setting, which by default is set to `["nic","virtual"]`.
> This means devices with `devParentRelType` set to `nic` or `virtual` will not be shown.
> Selecting certain relationship types hides the device in the default device views.
> You can change this behavior by adjusting the `UI_hide_rel_types` setting, which by default is set to `["nic","virtual"]`.
> This means devices with `devParentRelType` set to `nic` or `virtual` will not be shown.
> All devices, regardless of relationship type, are always accessible in the **All devices** view.

---
@@ -107,11 +107,11 @@
"buttons": [
{
"labelStringCode": "Maint_PurgeLog",
"event": "logManage('crond.log', 'cleanLog')"
"event": "logManage('cron.log', 'cleanLog')"
}
],
"fileName": "crond.log",
"filePath": "__NETALERTX_LOG__/crond.log",
"fileName": "cron.log",
"filePath": "__NETALERTX_LOG__/cron.log",
"textAreaCssClass": "logs logs-small"
}
]
@@ -274,7 +274,7 @@ function cleanLog($logFile)

$path = "";

$allowedFiles = ['app.log', 'app_front.log', 'IP_changes.log', 'stdout.log', 'stderr.log', 'app.php_errors.log', 'execution_queue.log', 'db_is_locked.log', 'nginx-error.log', 'crond.log'];
$allowedFiles = ['app.log', 'app_front.log', 'IP_changes.log', 'stdout.log', 'stderr.log', 'app.php_errors.log', 'execution_queue.log', 'db_is_locked.log', 'nginx-error.log', 'cron.log'];

if(in_array($logFile, $allowedFiles))
{
@@ -36,12 +36,7 @@ def main():

# Check if basic config settings supplied
if check_config() is False:
mylog(
"none",
[
f"[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. Check your {confFileName} {pluginName}_* variables."
],
)
mylog("none", f"[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. Check your {confFileName} {pluginName}_* variables.")
return

# Create a database connection
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
import conf
from const import confFileName, logPath
from const import logPath
from pytz import timezone

import os
@@ -36,11 +36,7 @@ def main():

# Check if basic config settings supplied
if not validate_config():
mylog(
"none",
f"[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. "
f"Check your {confFileName} {pluginName}_* variables.",
)
mylog("none", f"[{pluginName}] ⚠ ERROR: Publisher not set up correctly. Check your {pluginName}_* variables.",)
return

# Create a database connection
@@ -138,10 +138,7 @@ def execute_arpscan(userSubnets):
mylog("verbose", [f"[{pluginName}] All devices List len:", len(devices_list)])
mylog("verbose", [f"[{pluginName}] Devices List:", devices_list])

mylog(
"verbose",
[f"[{pluginName}] Found: Devices without duplicates ", len(unique_devices)],
)
mylog("verbose", [f"[{pluginName}] Found: Devices without duplicates ", len(unique_devices)],)

return unique_devices

@@ -174,10 +171,7 @@ def execute_arpscan_on_interface(interface):
except subprocess.CalledProcessError:
result = ""
except subprocess.TimeoutExpired:
mylog(
"warning",
[f"[{pluginName}] arp-scan timed out after {timeout_seconds}s"],
)
mylog("warning", [f"[{pluginName}] arp-scan timed out after {timeout_seconds}s"],)
result = ""
# stop looping if duration not set or expired
if scan_duration == 0 or (time.time() - start_time) > scan_duration:
@@ -33,10 +33,7 @@ def main():

device_data = get_device_data()

mylog(
"verbose",
[f"[{pluginName}] Found '{len(device_data)}' devices"],
)
mylog("verbose", f"[{pluginName}] Found '{len(device_data)}' devices")

filtered_devices = [
(key, device)
@@ -44,10 +41,7 @@ def main():
if device.state == ConnectionState.CONNECTED
]

mylog(
"verbose",
[f"[{pluginName}] Processing '{len(filtered_devices)}' connected devices"],
)
mylog("verbose", f"[{pluginName}] Processing '{len(filtered_devices)}' connected devices")

for mac, device in filtered_devices:
entry_mac = str(device.description.mac).lower()
@@ -75,10 +75,7 @@ def cleanup_database(

# -----------------------------------------------------
# Cleanup Online History
mylog(
"verbose",
[f"[{pluginName}] Online_History: Delete all but keep latest 150 entries"],
)
mylog("verbose", [f"[{pluginName}] Online_History: Delete all but keep latest 150 entries"],)
cursor.execute(
"""DELETE from Online_History where "Index" not in (
SELECT "Index" from Online_History
@@ -87,24 +84,14 @@ def cleanup_database(

# -----------------------------------------------------
# Cleanup Events
mylog(
"verbose",
[
f"[{pluginName}] Events: Delete all older than {str(DAYS_TO_KEEP_EVENTS)} days (DAYS_TO_KEEP_EVENTS setting)"
],
)
mylog("verbose", f"[{pluginName}] Events: Delete all older than {str(DAYS_TO_KEEP_EVENTS)} days (DAYS_TO_KEEP_EVENTS setting)")
cursor.execute(
f"""DELETE FROM Events
WHERE eve_DateTime <= date('now', '-{str(DAYS_TO_KEEP_EVENTS)} day')"""
)
# -----------------------------------------------------
# Trim Plugins_History entries to less than PLUGINS_KEEP_HIST setting per unique "Plugin" column entry
mylog(
"verbose",
[
f"[{pluginName}] Plugins_History: Trim Plugins_History entries to less than {str(PLUGINS_KEEP_HIST)} per Plugin (PLUGINS_KEEP_HIST setting)"
],
)
mylog("verbose", f"[{pluginName}] Plugins_History: Trim Plugins_History entries to less than {str(PLUGINS_KEEP_HIST)} per Plugin (PLUGINS_KEEP_HIST setting)")

# Build the SQL query to delete entries that exceed the limit per unique "Plugin" column entry
delete_query = f"""DELETE FROM Plugins_History
@@ -125,12 +112,7 @@ def cleanup_database(

histCount = get_setting_value("DBCLNP_NOTIFI_HIST")

mylog(
"verbose",
[
f"[{pluginName}] Plugins_History: Trim Notifications entries to less than {histCount}"
],
)
mylog("verbose", f"[{pluginName}] Plugins_History: Trim Notifications entries to less than {histCount}")

# Build the SQL query to delete entries
delete_query = f"""DELETE FROM Notifications
@@ -170,12 +152,7 @@ def cleanup_database(
# -----------------------------------------------------
# Cleanup New Devices
if HRS_TO_KEEP_NEWDEV != 0:
mylog(
"verbose",
[
f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_NEWDEV)} hours (HRS_TO_KEEP_NEWDEV setting)"
],
)
mylog("verbose", f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_NEWDEV)} hours (HRS_TO_KEEP_NEWDEV setting)")
query = f"""DELETE FROM Devices WHERE devIsNew = 1 AND devFirstConnection < date('now', '-{str(HRS_TO_KEEP_NEWDEV)} hour')"""
mylog("verbose", [f"[{pluginName}] Query: {query} "])
cursor.execute(query)
@@ -183,12 +160,7 @@ def cleanup_database(
# -----------------------------------------------------
# Cleanup Offline Devices
if HRS_TO_KEEP_OFFDEV != 0:
mylog(
"verbose",
[
f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_OFFDEV)} hours (HRS_TO_KEEP_OFFDEV setting)"
],
)
mylog("verbose", f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_OFFDEV)} hours (HRS_TO_KEEP_OFFDEV setting)")
query = f"""DELETE FROM Devices WHERE devPresentLastScan = 0 AND devLastConnection < date('now', '-{str(HRS_TO_KEEP_OFFDEV)} hour')"""
mylog("verbose", [f"[{pluginName}] Query: {query} "])
cursor.execute(query)
@@ -196,12 +168,7 @@ def cleanup_database(
# -----------------------------------------------------
# Clear New Flag
if CLEAR_NEW_FLAG != 0:
mylog(
"verbose",
[
f'[{pluginName}] Devices: Clear "New Device" flag for all devices older than {str(CLEAR_NEW_FLAG)} hours (CLEAR_NEW_FLAG setting)'
],
)
mylog("verbose", f'[{pluginName}] Devices: Clear "New Device" flag for all devices older than {str(CLEAR_NEW_FLAG)} hours (CLEAR_NEW_FLAG setting)')
query = f"""UPDATE Devices SET devIsNew = 0 WHERE devIsNew = 1 AND date(devFirstConnection, '+{str(CLEAR_NEW_FLAG)} hour') < date('now')"""
# select * from Devices where devIsNew = 1 AND date(devFirstConnection, '+3 hour' ) < date('now')
mylog("verbose", [f"[{pluginName}] Query: {query} "])
@@ -71,10 +71,7 @@ def get_entries(plugin_objects: Plugin_Objects) -> Plugin_Objects:
status = lease.get('status')
device_name = comment or host_name or "(unknown)"

mylog(
'verbose',
[f"ID: {lease_id}, Address: {address}, MAC Address: {mac_address}, Host Name: {host_name}, Comment: {comment}, Last Seen: {last_seen}, Status: {status}"]
)
mylog('verbose', f"ID: {lease_id}, Address: {address}, MAC: {mac_address}, Host Name: {host_name}, Comment: {comment}, Last Seen: {last_seen}, Status: {status}")

if (status == "bound"):
plugin_objects.add_object(
@@ -24,7 +24,7 @@ apt-get install sudo -y
apt-get install -y git

# Clean the directory
rm -R $INSTALL_DIR/
rm -R ${INSTALL_DIR:?}/

# Clone the application repository
git clone https://github.com/jokob-sk/NetAlertX "$INSTALL_DIR/"
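The `${INSTALL_DIR:?}` form is a guard: if the variable is unset or empty the shell aborts with an error instead of expanding to nothing, which would otherwise turn the command into `rm -R /`. A small sketch with a placeholder variable name:

```bash
#!/bin/sh
unset TARGET_DIR
rm -R "${TARGET_DIR:?TARGET_DIR is not set}"/   # aborts here with the message; nothing is deleted
echo "never reached when TARGET_DIR is empty"
```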
@@ -34,6 +34,8 @@ sudo phpenmod -v 8.2 sqlite3
# setup virtual python environment so we can use pip3 to install packages
apt-get install python3-venv -y
python3 -m venv /opt/venv
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source /opt/venv/bin/activate

update-alternatives --install /usr/bin/python python /usr/bin/python3 10

@@ -175,6 +175,8 @@ nginx -t || { echo "[INSTALL] nginx config test failed"; exit 1; }
# sudo systemctl restart nginx

# Activate the virtual python environment
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source /opt/venv/bin/activate

echo "[INSTALL] 🚀 Starting app - navigate to your <server IP>:${PORT}"
5 install/production-filesystem/build/init-cron.sh (new file)
@@ -0,0 +1,5 @@
#!/bin/bash

echo "Initializing cron..."
# Placeholder for cron initialization commands
echo "cron initialized."
@@ -1,4 +0,0 @@
#!/bin/bash
echo "Initializing crond..."
#Future crond initializations can go here.
echo "crond initialized."
@@ -1,4 +1,4 @@
#!/bin/bash
echo "Initializing nginx..."
install -d -o netalertx -g netalertx -m 700 ${SYSTEM_SERVICES_RUN_TMP}/client_body;
install -d -o netalertx -g netalertx -m 700 "${SYSTEM_SERVICES_RUN_TMP}/client_body";
echo "nginx initialized."
@@ -51,12 +51,13 @@ if [ "$(id -u)" -eq 0 ]; then
EOF
>&2 printf "%s" "${RESET}"

# Set ownership to netalertx user for all read-write paths
chown -R netalertx ${READ_WRITE_PATHS} 2>/dev/null || true

# Set directory and file permissions for all read-write paths
find ${READ_WRITE_PATHS} -type d -exec chmod u+rwx {} \;
find ${READ_WRITE_PATHS} -type f -exec chmod u+rw {} \;
# Set ownership and permissions for each read-write path individually
printf '%s\n' "${READ_WRITE_PATHS}" | while IFS= read -r path; do
[ -n "${path}" ] || continue
chown -R netalertx "${path}" 2>/dev/null || true
find "${path}" -type d -exec chmod u+rwx {} \;
find "${path}" -type f -exec chmod u+rw {} \;
done
echo Permissions fixed for read-write paths. Please restart the container as user 20211.
sleep infinity & wait $!
fi
@@ -16,11 +16,11 @@ LEGACY_DB=/app/db
MARKER_NAME=.migration

is_mounted() {
local path="$1"
if [ ! -d "${path}" ]; then
my_path="$1"
if [ ! -d "${my_path}" ]; then
return 1
fi
mountpoint -q "${path}" 2>/dev/null
mountpoint -q "${my_path}" 2>/dev/null
}

warn_unmount_legacy() {
@@ -2,7 +2,7 @@
# first-run-check.sh - Checks and initializes configuration files on first run

# Check for app.conf and deploy if required
if [ ! -f ${NETALERTX_CONFIG}/app.conf ]; then
if [ ! -f "${NETALERTX_CONFIG}/app.conf" ]; then
mkdir -p "${NETALERTX_CONFIG}" || {
>&2 echo "ERROR: Failed to create config directory ${NETALERTX_CONFIG}"
exit 1
@@ -441,7 +441,9 @@ CREATE TRIGGER "trg_delete_devices"
END;
end-of-database-schema

if [ $? -ne 0 ]; then
database_creation_status=$?

if [ $database_creation_status -ne 0 ]; then
RED=$(printf '\033[1;31m')
RESET=$(printf '\033[0m')
>&2 printf "%s" "${RED}"
@@ -50,7 +50,7 @@ fi
RED='\033[1;31m'
GREY='\033[90m'
RESET='\033[0m'
printf "${RED}"
printf "%s" "${RED}"
echo '
_ _ _ ___ _ _ __ __
| \ | | | | / _ \| | | | \ \ / /
@@ -60,7 +60,7 @@ echo '
\_| \_/\___|\__\_| |_/_|\___|_| \__\/ \/
'

printf "\033[0m"
printf "%s" "${RESET}"
echo ' Network intruder and presence detector.
https://netalertx.com
@@ -69,7 +69,7 @@ set -u

FAILED_STATUS=""
echo "Startup pre-checks"
for script in ${ENTRYPOINT_CHECKS}/*; do
for script in "${ENTRYPOINT_CHECKS}"/*; do
if [ -n "${SKIP_TESTS:-}" ]; then
echo "Skipping startup checks as SKIP_TESTS is set."
break
@@ -77,7 +77,7 @@ for script in ${ENTRYPOINT_CHECKS}/*; do
script_name=$(basename "$script" | sed 's/^[0-9]*-//;s/\.(sh|py)$//;s/-/ /g')
echo "--> ${script_name} "
if [ -n "${SKIP_STARTUP_CHECKS:-}" ] && echo "${SKIP_STARTUP_CHECKS}" | grep -q "\b${script_name}\b"; then
printf "${GREY}skip${RESET}\n"
printf "%sskip%s\n" "${GREY}" "${RESET}"
continue
fi
@@ -134,7 +134,7 @@ fi

# Update vendor data (MAC address OUI database) in the background
# This happens concurrently with service startup to avoid blocking container readiness
bash ${SYSTEM_SERVICES_SCRIPTS}/update_vendors.sh &
bash "${SYSTEM_SERVICES_SCRIPTS}/update_vendors.sh" &

@@ -274,7 +274,7 @@ trap on_signal INT TERM
# Only start crond scheduler on Alpine (non-Debian) environments
# Debian typically uses systemd or other schedulers
if [ "${ENVIRONMENT:-}" ] && [ "${ENVIRONMENT:-}" != "debian" ]; then
add_service "/services/start-crond.sh" "crond"
add_service "/services/start-cron.sh" "supercronic"
fi

# Start core frontend and backend services
@@ -290,8 +290,6 @@ add_service "${SYSTEM_SERVICES}/start-backend.sh" "python3"
# Useful for devcontainer debugging where individual services need to be debugged
if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
echo "NETALERTX_DEBUG is set to 1, will not shut down other services if one fails."
wait
exit $?
fi

################################################################################
@@ -316,10 +314,25 @@ while [ -n "${SERVICES}" ]; do
if ! is_pid_active "${pid}"; then
wait "${pid}" 2>/dev/null
status=$?

# Handle intentional backend restart
if [ "${name}" = "python3" ] && [ -f "/tmp/backend_restart_pending" ]; then
echo "🔄 Backend restart requested via marker file."
rm -f "/tmp/backend_restart_pending"
remove_service "${pid}"
add_service "${SYSTEM_SERVICES}/start-backend.sh" "python3"
continue
fi

FAILED_STATUS=$status
FAILED_NAME="${name}"
remove_service "${pid}"
handle_exit

if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
echo "⚠️ Service ${name} exited with status ${status}. Debug mode active - continuing."
else
handle_exit
fi
fi

done
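The supervisor loop and the cron job now cooperate through a marker file: the cron side only creates `/tmp/backend_restart_pending` and kills the backend, and the loop above treats that exit as a planned restart instead of a fatal failure. A condensed sketch of the pattern, with simplified service names rather than the script's real helpers:

```bash
# Supervisor side: decide whether a dead child was a planned restart.
if [ "${name}" = "backend" ] && [ -f /tmp/backend_restart_pending ]; then
    rm -f /tmp/backend_restart_pending
    start_backend &                        # relaunch instead of tearing the container down
else
    echo "backend exited unexpectedly (status ${status})"
    exit 1
fi
```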
2 install/production-filesystem/services/config/crond/netalertx → install/production-filesystem/services/config/cron/crontab (Executable file → Normal file)
@@ -1,4 +1,4 @@
# Every minute check for cron jobs
* * * * * /services/scripts/cron_script.sh
# Update vendors 4x/d
0 */6 * * * /services/scripts/update_vendors.sh
0 */6 * * * /services/scripts/update_vendors.sh
@@ -21,10 +21,10 @@ log_success() {
}

# 1. Check if crond is running
if pgrep -f "crond" > /dev/null; then
log_success "crond is running"
if pgrep -f "supercronic" > /dev/null; then
log_success "supercronic is running"
else
log_error "crond is not running"
log_error "supercronic is not running"
fi

# 2. Check if php-fpm is running
@@ -5,12 +5,15 @@ export INSTALL_DIR=/app

# Check if there are any entries with cron_restart_backend
if grep -q "cron_restart_backend" "${LOG_EXECUTION_QUEUE}"; then
killall python3
sleep 2
/services/start-backend.sh &
echo "$(date): Restarting backend triggered by cron_restart_backend"

# Create marker for entrypoint.sh to restart the service instead of killing the container
touch /tmp/backend_restart_pending

killall python3 || echo "killall python3 failed or no process found"

# Remove all lines containing cron_restart_backend from the log file
# Atomic replacement with temp file
grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp" && \
mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp"
mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
fi
@@ -3,7 +3,7 @@
cd "${NETALERTX_APP}" || exit 1
max_attempts=50 # 10 seconds total (50 * 0.2s)
attempt=0
while ps ax | grep -v grep | grep -q python3 && [ $attempt -lt $max_attempts ]; do
while pgrep -x python3 >/dev/null && [ $attempt -lt $max_attempts ]; do
killall -TERM python3 &>/dev/null
sleep 0.2
((attempt++))
@@ -12,4 +12,5 @@ done
killall -KILL python3 &>/dev/null

echo "Starting python3 $(cat /services/config/python/backend-extra-launch-parameters 2>/dev/null) -m server > ${NETALERTX_LOG}/stdout.log 2> >(tee ${NETALERTX_LOG}/stderr.log >&2)"
exec python3 $(cat /services/config/python/backend-extra-launch-parameters 2>/dev/null) -m server > ${NETALERTX_LOG}/stdout.log 2> >(tee ${NETALERTX_LOG}/stderr.log >&2)
read -ra EXTRA_PARAMS < <(cat /services/config/python/backend-extra-launch-parameters 2>/dev/null)
exec python3 "${EXTRA_PARAMS[@]}" -m server > "${NETALERTX_LOG}/stdout.log" 2> >(tee "${NETALERTX_LOG}/stderr.log" >&2)
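Reading the extra launch parameters into an array keeps word splitting predictable and lets the exec line quote every argument, unlike the previous unquoted `$(cat ...)` expansion. A minimal sketch of the idiom, with an example parameters file and module rather than the service's real ones:

```bash
#!/bin/bash
# params.txt might contain: -X dev -O
read -ra EXTRA < <(cat params.txt 2>/dev/null)   # missing or empty file -> empty array
exec python3 "${EXTRA[@]}" -m http.server        # each parameter stays exactly one argument
```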
42 install/production-filesystem/services/start-cron.sh (new executable file)
@@ -0,0 +1,42 @@
#!/bin/bash
set -euo pipefail

crond_pid=""

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
cleanup() {
status=$?
echo "Supercronic stopped! (exit ${status})"
}

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
forward_signal() {
if [[ -n "${crond_pid}" ]]; then
kill -TERM "${crond_pid}" 2>/dev/null || true
fi
}

while pgrep -x crond >/dev/null 2>&1; do
killall crond &>/dev/null
sleep 0.2
done

trap cleanup EXIT
trap forward_signal INT TERM

CRON_OPTS="--quiet"
if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then
CRON_OPTS="--debug"
fi

echo "Starting supercronic ${CRON_OPTS} \"${SYSTEM_SERVICES_CONFIG_CRON}/crontab\" >>\"${LOG_CRON}\" 2>&1 &"

supercronic ${CRON_OPTS} "${SYSTEM_SERVICES_CONFIG_CRON}/crontab" >>"${LOG_CRON}" 2>&1 &
crond_pid=$!

wait "${crond_pid}"; status=$?
echo -ne " done"
exit ${status}
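Supercronic runs an ordinary crontab in the foreground as an unprivileged user, which is what lets the wrapper above supervise it directly instead of relying on a system crond. A hedged sketch of the invocation shape used above, with placeholder paths:

```bash
# crontab uses normal cron syntax, e.g.:
#   * * * * * /services/scripts/cron_script.sh
supercronic --quiet /path/to/crontab >> /var/log/cron.log 2>&1 &
wait $!    # the wrapper forwards TERM to this PID via its trap
```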
@@ -1,33 +0,0 @@
#!/bin/bash
set -euo pipefail

crond_pid=""

cleanup() {
status=$?
echo "Crond stopped! (exit ${status})"
}

forward_signal() {
if [[ -n "${crond_pid}" ]]; then
kill -TERM "${crond_pid}" 2>/dev/null || true
fi
}

while ps ax | grep -v -e grep -e '.sh' | grep crond >/dev/null 2>&1; do
killall crond &>/dev/null
sleep 0.2
done

trap cleanup EXIT
trap forward_signal INT TERM

echo "Starting /usr/sbin/crond -c \"${SYSTEM_SERVICES_CROND}\" -f -L \"${LOG_CROND}\" >>\"${LOG_CROND}\" 2>&1 &"

/usr/sbin/crond -c "${SYSTEM_SERVICES_CROND}" -f -L "${LOG_CROND}" >>"${LOG_CROND}" 2>&1 &
crond_pid=$!

wait "${crond_pid}"; status=$?
echo -ne " done"
exit ${status}
@@ -11,11 +11,15 @@ mkdir -p "${LOG_DIR}" "${RUN_DIR}" "${TMP_DIR}"

nginx_pid=""

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
cleanup() {
status=$?
echo "nginx stopped! (exit ${status})"
}

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
forward_signal() {
if [[ -n "${nginx_pid}" ]]; then
kill -TERM "${nginx_pid}" 2>/dev/null || true
@@ -24,12 +28,15 @@ forward_signal() {

# When in devcontainer we must kill any existing nginx processes
while ps ax | grep -v -e "grep" -e "nginx.sh" | grep nginx >/dev/null 2>&1; do
while pgrep -x nginx >/dev/null 2>&1; do
killall nginx &>/dev/null || true
sleep 0.2
done

TEMP_CONFIG_FILE=$(mktemp "${TMP_DIR}/netalertx.conf.XXXXXX")

# Shell check doesn't recognize envsubst variables
# shellcheck disable=SC2016
if envsubst '${LISTEN_ADDR} ${PORT}' < "${SYSTEM_NGINX_CONFIG_TEMPLATE}" > "${TEMP_CONFIG_FILE}" 2>/dev/null; then
mv "${TEMP_CONFIG_FILE}" "${SYSTEM_SERVICES_ACTIVE_CONFIG_FILE}"
else
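The switch to `pgrep -x` avoids the classic `ps | grep` pitfall where the grep matches itself or the wrapper script's own name; `-x` matches the exact process name only. A small sketch of the wait-for-exit loop used by these service scripts, with an example process name and a bounded retry count:

```bash
# Wait (up to ~10s) for every process named exactly "nginx" to exit.
i=0
while pgrep -x nginx >/dev/null 2>&1 && [ "$i" -lt 50 ]; do
    killall nginx 2>/dev/null || true
    sleep 0.2
    i=$((i + 1))
done
```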
@@ -3,18 +3,22 @@ set -euo pipefail

php_fpm_pid=""

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
cleanup() {
status=$?
echo "php-fpm stopped! (exit ${status})"
}

# Called externally, but shellcheck does not see that and claims it is unused.
# shellcheck disable=SC2329,SC2317
forward_signal() {
if [[ -n "${php_fpm_pid}" ]]; then
kill -TERM "${php_fpm_pid}" 2>/dev/null || true
fi
}

while ps ax | grep -v grep | grep php-fpm83 >/dev/null; do
while pgrep -x php-fpm83 >/dev/null; do
killall php-fpm83 &>/dev/null
sleep 0.2
done
@@ -27,5 +31,6 @@ echo "Starting /usr/sbin/php-fpm83 -y \"${PHP_FPM_CONFIG_FILE}\" -F >>\"${LOG_AP
php_fpm_pid=$!

wait "${php_fpm_pid}"
exit_status=$?
echo -ne " done"
exit $?
exit $exit_status
@@ -127,6 +127,8 @@ apt-get install -y --no-install-recommends \
ca-certificates lsb-release curl gnupg

# Detect OS
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
. /etc/os-release
OS_ID="${ID:-}"
OS_VER="${VERSION_ID:-}"
@@ -203,6 +205,8 @@ printf "%b\n" "-----------------------------------------------------------------
printf "%b\n" "${GREEN}[INSTALLING] ${RESET}Setting up Python environment"
printf "%b\n" "--------------------------------------------------------------------------"
python3 -m venv /opt/myenv
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source /opt/myenv/bin/activate
python -m pip install --upgrade pip
python -m pip install -r "${INSTALLER_DIR}/requirements.txt"
@@ -22,7 +22,6 @@ NGINX_CONF_FILE=netalertx.conf
WEB_UI_DIR=/var/www/html/netalertx
NGINX_CONFIG_FILE=/etc/nginx/conf.d/$NGINX_CONF_FILE
OUI_FILE="/usr/share/arp-scan/ieee-oui.txt" # Define the path to ieee-oui.txt and ieee-iab.txt
SCRIPT_DIR="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
FILEDB=${INSTALL_DIR}/db/${DB_FILE}
PHPVERSION="8.3"
VENV_DIR="/opt/netalertx-python"
@@ -106,7 +105,7 @@ if [ -d "${INSTALL_DIR}" ]; then
if [ "$1" == "install" ] || [ "$1" == "update" ] || [ "$1" == "start" ]; then
confirmation=$1
else
read -p "Enter your choice: " confirmation
read -rp "Enter your choice: " confirmation
fi
if [ "$confirmation" == "install" ]; then
# Ensure INSTALL_DIR is safe to wipe
@@ -118,7 +117,7 @@ if [ -d "${INSTALL_DIR}" ]; then
mountpoint -q "${INSTALL_DIR}/front" && umount "${INSTALL_DIR}/front" 2>/dev/null

# Remove all contents safely
rm -rf -- "${INSTALL_DIR}"/* "${INSTALL_DIR}"/.[!.]* "${INSTALL_DIR}"/..?* 2>/dev/null
rm -rf -- "${INSTALL_DIR:?}"/* "${INSTALL_DIR}"/.[!.]* "${INSTALL_DIR}"/..?* 2>/dev/null

# Re-clone repository
git clone "${GITHUB_REPO}" "${INSTALL_DIR}/"
@@ -152,6 +151,8 @@ echo "---------------------------------------------------------"
echo
# update-alternatives --install /usr/bin/python python /usr/bin/python3 10
python3 -m venv "${VENV_DIR}"
# Shell check doesn't recognize source command because it's not in the repo, it is in the system at runtime
# shellcheck disable=SC1091
source "${VENV_DIR}/bin/activate"

if [[ ! -f "${REQUIREMENTS_FILE}" ]]; then
@@ -4,10 +4,10 @@
NETALERTX_DB_FILE=${NETALERTX_DB:-/data/db}/app.db

#remove the old database
rm ${NETALERTX_DB_FILE}
rm "${NETALERTX_DB_FILE}"

# Write schema to text to app.db file until we see "end-of-database-schema"
cat << end-of-database-schema > ${NETALERTX_DB_FILE}.sql
cat << end-of-database-schema > "${NETALERTX_DB_FILE}.sql"
CREATE TABLE sqlite_stat1(tbl,idx,stat);
CREATE TABLE Events (eve_MAC STRING (50) NOT NULL COLLATE NOCASE, eve_IP STRING (50) NOT NULL COLLATE NOCASE, eve_DateTime DATETIME NOT NULL, eve_EventType STRING (30) NOT NULL COLLATE NOCASE, eve_AdditionalInfo STRING (250) DEFAULT (''), eve_PendingAlertEmail BOOLEAN NOT NULL CHECK (eve_PendingAlertEmail IN (0, 1)) DEFAULT (1), eve_PairEventRowid INTEGER);
CREATE TABLE Sessions (ses_MAC STRING (50) COLLATE NOCASE, ses_IP STRING (50) COLLATE NOCASE, ses_EventTypeConnection STRING (30) COLLATE NOCASE, ses_DateTimeConnection DATETIME, ses_EventTypeDisconnection STRING (30) COLLATE NOCASE, ses_DateTimeDisconnection DATETIME, ses_StillConnected BOOLEAN, ses_AdditionalInfo STRING (250));

@@ -421,4 +421,4 @@ CREATE TRIGGER "trg_delete_devices"
end-of-database-schema

# Import the database schema into the new database file
sqlite3 ${NETALERTX_DB_FILE} < ${NETALERTX_DB_FILE}.sql
sqlite3 "${NETALERTX_DB_FILE}" < "${NETALERTX_DB_FILE}.sql"

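For context, the reset script above recreates app.db by piping the schema dump into the sqlite3 CLI. A rough Python equivalent, using only the standard-library sqlite3 module and with paths assumed for illustration rather than taken from the repo, would be:

import sqlite3
from pathlib import Path

# Hypothetical paths for illustration; the real script derives them from NETALERTX_DB.
DB_FILE = Path("/data/db/app.db")
SCHEMA_FILE = Path(str(DB_FILE) + ".sql")

def rebuild_database(db_file, schema_file):
    """Drop the old database file and recreate it from a schema dump."""
    db_file.unlink(missing_ok=True)        # same effect as: rm "${NETALERTX_DB_FILE}"
    schema_sql = schema_file.read_text()
    with sqlite3.connect(db_file) as conn:
        conn.executescript(schema_sql)     # same effect as: sqlite3 app.db < app.db.sql

if __name__ == "__main__":
    rebuild_database(DB_FILE, SCHEMA_FILE)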
@@ -16,4 +16,4 @@ for p in $PORTS; do
done

# Show any other NetAlertX-related listeners (nginx, php-fpm, python backend)
ss -ltnp 2>/dev/null | egrep 'nginx|php-fpm|python' || true
ss -ltnp 2>/dev/null | grep -e 'nginx\|php-fpm\|python' || true

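The pipeline above filters ss output for NetAlertX-related listeners. A small sketch of the same check in Python, assuming ss is installed on the host, could look like:

import re
import subprocess

def netalertx_listeners():
    # List listening TCP sockets and keep only nginx / php-fpm / python entries,
    # mirroring: ss -ltnp | grep -e 'nginx\|php-fpm\|python'
    out = subprocess.run(["ss", "-ltnp"], capture_output=True, text=True, check=False).stdout
    pattern = re.compile(r"nginx|php-fpm|python")
    return [line for line in out.splitlines() if pattern.search(line)]

if __name__ == "__main__":
    print("\n".join(netalertx_listeners()))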
@@ -63,9 +63,7 @@ main structure of NetAlertX
|
||||
|
||||
|
||||
def main():
|
||||
mylog(
|
||||
"none", ["[MAIN] Setting up ..."]
|
||||
) # has to be level 'none' as user config not loaded yet
|
||||
mylog("none", ["[MAIN] Setting up ..."]) # has to be level 'none' as user config not loaded yet
|
||||
|
||||
mylog("none", [f"[conf.tz] Setting up ...{conf.tz}"])
|
||||
|
||||
@@ -221,22 +219,14 @@ def main():
|
||||
# Fetch new unprocessed events
|
||||
new_events = workflow_manager.get_new_app_events()
|
||||
|
||||
mylog(
|
||||
"debug",
|
||||
[
|
||||
f"[MAIN] Processing WORKFLOW new_events from get_new_app_events: {len(new_events)}"
|
||||
],
|
||||
)
|
||||
mylog("debug", [f"[MAIN] Processing WORKFLOW new_events from get_new_app_events: {len(new_events)}"],)
|
||||
|
||||
# Process each new event and check triggers
|
||||
if len(new_events) > 0:
|
||||
updateState("Workflows: Start")
|
||||
update_api_flag = False
|
||||
for event in new_events:
|
||||
mylog(
|
||||
"debug",
|
||||
[f"[MAIN] Processing WORKFLOW app event with GUID {event['GUID']}"],
|
||||
)
|
||||
mylog("debug", [f"[MAIN] Processing WORKFLOW app event with GUID {event['GUID']}"],)
|
||||
|
||||
# proceed to process events
|
||||
workflow_manager.process_event(event)
|
||||
@@ -253,12 +243,7 @@ def main():
|
||||
# check if devices list needs updating
|
||||
userUpdatedDevices = UserEventsQueueInstance().has_update_devices()
|
||||
|
||||
mylog(
|
||||
"debug",
|
||||
[
|
||||
f"[Plugins] Should I update API (userUpdatedDevices): {userUpdatedDevices}"
|
||||
],
|
||||
)
|
||||
mylog("debug", [f"[Plugins] Should I update API (userUpdatedDevices): {userUpdatedDevices}"],)
|
||||
|
||||
if userUpdatedDevices:
|
||||
update_api(db, all_plugins, True, ["devices"], userUpdatedDevices)
|
||||
|
||||
@@ -96,16 +96,9 @@ def update_api(
|
||||
) # Ensure port is an integer
|
||||
start_server(graphql_port_value, app_state) # Start the server
|
||||
except ValueError:
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
f"[API] Invalid GRAPHQL_PORT value, must be an integer: {graphql_port_value}"
|
||||
],
|
||||
)
|
||||
mylog("none", [f"[API] Invalid GRAPHQL_PORT value, must be an integer: {graphql_port_value}"],)
|
||||
else:
|
||||
mylog(
|
||||
"none", ["[API] GRAPHQL_PORT or API_TOKEN is not set, will try later."]
|
||||
)
|
||||
mylog("none", ["[API] GRAPHQL_PORT or API_TOKEN is not set, will try later."])
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
@@ -135,12 +128,7 @@ class api_endpoint_class:
|
||||
# Match SQL and API endpoint path
|
||||
if endpoint.query == self.query and endpoint.path == self.path:
|
||||
found = True
|
||||
mylog(
|
||||
"trace",
|
||||
[
|
||||
f"[API] api_endpoint_class: Hashes (file|old|new): ({self.fileName}|{endpoint.hash}|{self.hash})"
|
||||
],
|
||||
)
|
||||
mylog("trace", [f"[API] api_endpoint_class: Hashes (file|old|new): ({self.fileName}|{endpoint.hash}|{self.hash})"],)
|
||||
if endpoint.hash != self.hash:
|
||||
self.needsUpdate = True
|
||||
# Only update changeDetectedWhen if it hasn't been set recently
|
||||
@@ -190,10 +178,7 @@ class api_endpoint_class:
|
||||
)
|
||||
)
|
||||
):
|
||||
mylog(
|
||||
"debug",
|
||||
[f"[API] api_endpoint_class: Writing {self.fileName} after debounce."],
|
||||
)
|
||||
mylog("debug", [f"[API] api_endpoint_class: Writing {self.fileName} after debounce."],)
|
||||
|
||||
write_file(self.path, json.dumps(self.jsonData))
|
||||
|
||||
|
||||
@@ -173,13 +173,8 @@ class Query(ObjectType):
|
||||
network_dev_types = get_setting_value("NETWORK_DEVICE_TYPES")
|
||||
|
||||
mylog("trace", f"[graphql_schema] allowed_statuses: {allowed_statuses}")
|
||||
mylog(
|
||||
"trace",
|
||||
f"[graphql_schema] hidden_relationships: {hidden_relationships}",
|
||||
)
|
||||
mylog(
|
||||
"trace", f"[graphql_schema] network_dev_types: {network_dev_types}"
|
||||
)
|
||||
mylog("trace", f"[graphql_schema] hidden_relationships: {hidden_relationships}",)
|
||||
mylog("trace", f"[graphql_schema] network_dev_types: {network_dev_types}")
|
||||
|
||||
# Filtering based on the "status"
|
||||
if status == "my_devices":
|
||||
|
||||
@@ -71,9 +71,7 @@ class app_state_class:
|
||||
with open(stateFile, "r") as json_file:
|
||||
previousState = json.load(json_file)
|
||||
except json.decoder.JSONDecodeError as e:
|
||||
mylog(
|
||||
"none", [f"[app_state_class] Failed to handle app_state.json: {e}"]
|
||||
)
|
||||
mylog("none", [f"[app_state_class] Failed to handle app_state.json: {e}"])
|
||||
|
||||
# Check if the file exists and recover previous values
|
||||
if previousState != "":
|
||||
@@ -151,10 +149,7 @@ class app_state_class:
|
||||
with open(stateFile, "w") as json_file:
|
||||
json_file.write(json_data)
|
||||
except (TypeError, ValueError) as e:
|
||||
mylog(
|
||||
"none",
|
||||
[f"[app_state_class] Failed to serialize object to JSON: {e}"],
|
||||
)
|
||||
mylog("none", [f"[app_state_class] Failed to serialize object to JSON: {e}"],)
|
||||
|
||||
return
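Both hunks in app_state_class guard JSON reads and writes. A minimal sketch of the read-and-recover side, with an assumed file path:

import json

STATE_FILE = "/tmp/app_state.json"   # assumed path for illustration

def load_previous_state():
    # Return the previously persisted state, or an empty dict if the file is
    # missing or contains invalid JSON, mirroring the except branches above.
    try:
        with open(STATE_FILE) as fh:
            return json.load(fh)
    except (FileNotFoundError, json.decoder.JSONDecodeError) as e:
        print(f"[app_state_class] Failed to handle app_state.json: {e}")
        return {}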
|
||||
|
||||
|
||||
@@ -233,15 +233,7 @@ class DB:
|
||||
rows = self.sql.fetchall()
|
||||
return rows
|
||||
except AssertionError:
|
||||
mylog(
|
||||
"minimal",
|
||||
[
|
||||
"[Database] - ERROR: inconsistent query and/or arguments.",
|
||||
query,
|
||||
" params: ",
|
||||
args,
|
||||
],
|
||||
)
|
||||
mylog("minimal", ["[Database] - ERROR: inconsistent query and/or arguments.", query, " params: ", args,],)
|
||||
except sqlite3.Error as e:
|
||||
mylog("minimal", ["[Database] - SQL ERROR: ", e])
|
||||
return None
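The DB wrapper above logs failures and returns None rather than raising. A self-contained sketch of that pattern against an in-memory database (not the project's DB class):

import sqlite3

def get_sql_array(conn, query, args=()):
    # Run a parameterized query; log errors and return None instead of raising.
    try:
        cur = conn.execute(query, args)
        return cur.fetchall()
    except sqlite3.Error as e:
        print("[Database] - SQL ERROR: ", e)
        return None

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE Devices (devMac TEXT, devName TEXT)")
conn.execute("INSERT INTO Devices VALUES (?, ?)", ("aa:bb:cc:dd:ee:ff", "router"))
print(get_sql_array(conn, "SELECT * FROM Devices WHERE devName = ?", ("router",)))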
|
||||
@@ -258,15 +250,7 @@ class DB:
|
||||
if len(rows) == 1:
|
||||
return rows[0]
|
||||
if len(rows) > 1:
|
||||
mylog(
|
||||
"verbose",
|
||||
[
|
||||
"[Database] - Warning!: query returns multiple rows, only first row is passed on!",
|
||||
query,
|
||||
" params: ",
|
||||
args,
|
||||
],
|
||||
)
|
||||
mylog("verbose", ["[Database] - Warning!: query returns multiple rows, only first row is passed on!", query, " params: ", args,],)
|
||||
return rows[0]
|
||||
# empty result set
|
||||
return None
|
||||
|
||||
@@ -88,10 +88,7 @@ def ensure_column(sql, table: str, column_name: str, column_type: str) -> bool:
|
||||
mylog("none", [msg])
|
||||
|
||||
# Add missing column
|
||||
mylog(
|
||||
"verbose",
|
||||
[f"[db_upgrade] Adding '{column_name}' ({column_type}) to {table} table"],
|
||||
)
|
||||
mylog("verbose", [f"[db_upgrade] Adding '{column_name}' ({column_type}) to {table} table"],)
|
||||
sql.execute(f'ALTER TABLE "{table}" ADD "{column_name}" {column_type}')
|
||||
return True
|
||||
|
||||
|
||||
@@ -586,16 +586,11 @@ class SafeConditionBuilder:
|
||||
|
||||
# Validate each component
|
||||
if not self._validate_column_name(column):
|
||||
mylog(
|
||||
"verbose", [f"[SafeConditionBuilder] Invalid column: {column}"]
|
||||
)
|
||||
mylog("verbose", [f"[SafeConditionBuilder] Invalid column: {column}"])
|
||||
return "", {}
|
||||
|
||||
if not self._validate_operator(operator):
|
||||
mylog(
|
||||
"verbose",
|
||||
[f"[SafeConditionBuilder] Invalid operator: {operator}"],
|
||||
)
|
||||
mylog("verbose", [f"[SafeConditionBuilder] Invalid operator: {operator}"])
|
||||
return "", {}
|
||||
|
||||
# Create parameter binding
|
||||
@@ -607,10 +602,7 @@ class SafeConditionBuilder:
|
||||
condition_parts.append(condition_part)
|
||||
|
||||
except Exception as e:
|
||||
mylog(
|
||||
"verbose",
|
||||
[f"[SafeConditionBuilder] Error processing condition: {e}"],
|
||||
)
|
||||
mylog("verbose", [f"[SafeConditionBuilder] Error processing condition: {e}"],)
|
||||
return "", {}
|
||||
|
||||
if not condition_parts:
|
||||
@@ -644,10 +636,7 @@ class SafeConditionBuilder:
|
||||
if event_type in self.ALLOWED_EVENT_TYPES:
|
||||
valid_types.append(event_type)
|
||||
else:
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[SafeConditionBuilder] Invalid event type filtered out: {event_type}",
|
||||
)
|
||||
mylog("verbose", f"[SafeConditionBuilder] Invalid event type filtered out: {event_type}",)
|
||||
|
||||
if not valid_types:
|
||||
return "", {}
|
||||
@@ -682,10 +671,7 @@ class SafeConditionBuilder:
|
||||
return self.build_safe_condition(condition_setting)
|
||||
except ValueError as e:
|
||||
# Log the error and return empty condition for safety
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[SafeConditionBuilder] Unsafe condition rejected: {condition_setting}, Error: {e}",
|
||||
)
|
||||
mylog("verbose", f"[SafeConditionBuilder] Unsafe condition rejected: {condition_setting}, Error: {e}",)
|
||||
return "", {}
|
||||
|
||||
|
||||
|
||||
@@ -36,12 +36,7 @@ def checkPermissionsOK():
|
||||
dbW_access = os.access(fullDbPath, os.W_OK)
|
||||
|
||||
mylog("none", ["\n"])
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
"The backend restarted (started). If this is unexpected check https://bit.ly/NetAlertX_debug for troubleshooting tips."
|
||||
],
|
||||
)
|
||||
mylog("none", "The backend restarted (started). If this is unexpected check https://bit.ly/NetAlertX_debug for troubleshooting tips.")
|
||||
mylog("none", ["\n"])
|
||||
mylog("none", ["Permissions check (All should be True)"])
|
||||
mylog("none", ["------------------------------------------------"])
|
||||
@@ -59,12 +54,7 @@ def checkPermissionsOK():
|
||||
def initialiseFile(pathToCheck, defaultFile):
|
||||
# if file not readable (missing?) try to copy over the backed-up (default) one
|
||||
if str(os.access(pathToCheck, os.R_OK)) == "False":
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
"[Setup] (" + pathToCheck + ") file is not readable or missing. Trying to copy over the default one."
|
||||
],
|
||||
)
|
||||
mylog("none", ["[Setup] (" + pathToCheck + ") file is not readable or missing. Trying to copy over the default one."],)
|
||||
try:
|
||||
# try runnning a subprocess
|
||||
p = subprocess.Popen(
|
||||
@@ -75,31 +65,16 @@ def initialiseFile(pathToCheck, defaultFile):
|
||||
stdout, stderr = p.communicate()
|
||||
|
||||
if str(os.access(pathToCheck, os.R_OK)) == "False":
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
"[Setup] ⚠ ERROR copying (" + defaultFile + ") to (" + pathToCheck + "). Make sure the app has Read & Write access to the parent directory."
|
||||
],
|
||||
)
|
||||
mylog("none", "[Setup] ⚠ ERROR copying (" + defaultFile + ") to (" + pathToCheck + "). Ensure Read & Write access to the parent directory.")
|
||||
else:
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
"[Setup] (" + defaultFile + ") copied over successfully to (" + pathToCheck + ")."
|
||||
],
|
||||
)
|
||||
mylog("none", ["[Setup] (" + defaultFile + ") copied over successfully to (" + pathToCheck + ")."],)
|
||||
|
||||
# write stdout and stderr into .log files for debugging if needed
|
||||
logResult(stdout, stderr) # TO-DO should be changed to mylog
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
# An error occured, handle it
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
"[Setup] ⚠ ERROR copying (" + defaultFile + "). Make sure the app has Read & Write access to " + pathToCheck
|
||||
],
|
||||
)
|
||||
mylog("none", ["[Setup] ⚠ ERROR copying (" + defaultFile + "). Make sure the app has Read & Write access to " + pathToCheck],)
|
||||
mylog("none", [e.output])
|
||||
|
||||
|
||||
@@ -187,14 +162,7 @@ def get_setting(key):
|
||||
mylog("none", [f"[Settings] ⚠ File not found: {settingsFile}"])
|
||||
return None
|
||||
|
||||
mylog(
|
||||
"trace",
|
||||
[
|
||||
"[Import table_settings.json] checking table_settings.json file",
|
||||
f"SETTINGS_LASTCACHEDATE: {SETTINGS_LASTCACHEDATE}",
|
||||
f"fileModifiedTime: {fileModifiedTime}",
|
||||
],
|
||||
)
|
||||
mylog("trace", f"[Import table_settings.json] checking table_settings.json file SETTINGS_LASTCACHEDATE: {SETTINGS_LASTCACHEDATE} fileModifiedTime: {fileModifiedTime}")
|
||||
|
||||
# Use cache if file hasn't changed
|
||||
if fileModifiedTime == SETTINGS_LASTCACHEDATE and SETTINGS_CACHE:
|
||||
@@ -221,10 +189,7 @@ def get_setting(key):
|
||||
SETTINGS_LASTCACHEDATE = fileModifiedTime
|
||||
|
||||
if key not in SETTINGS_CACHE:
|
||||
mylog(
|
||||
"none",
|
||||
[f"[Settings] ⚠ ERROR - setting_missing - {key} not in {settingsFile}"],
|
||||
)
|
||||
mylog("none", [f"[Settings] ⚠ ERROR - setting_missing - {key} not in {settingsFile}"],)
|
||||
return None
|
||||
|
||||
return SETTINGS_CACHE[key]
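get_setting() only re-reads table_settings.json when its modification time changes. A stripped-down sketch of that mtime-keyed cache, with the file path and contents assumed:

import json
import os

SETTINGS_FILE = "/tmp/table_settings.json"   # assumed path for illustration
_cache, _cache_mtime = {}, 0.0

def get_setting(key):
    global _cache, _cache_mtime
    mtime = os.path.getmtime(SETTINGS_FILE)
    if mtime != _cache_mtime or not _cache:   # reload only when the file changed
        with open(SETTINGS_FILE) as fh:
            _cache = json.load(fh)
        _cache_mtime = mtime
    return _cache.get(key)

with open(SETTINGS_FILE, "w") as fh:
    json.dump({"SCAN_SUBNETS": "192.168.1.0/24"}, fh)
print(get_setting("SCAN_SUBNETS"))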
|
||||
@@ -357,10 +322,7 @@ def setting_value_to_python_type(set_type, set_value):
|
||||
value = json.loads(set_value.replace("'", "\""))
|
||||
|
||||
except json.JSONDecodeError as e:
|
||||
mylog(
|
||||
"none",
|
||||
[f"[setting_value_to_python_type] Error decoding JSON object: {e}"],
|
||||
)
|
||||
mylog("none", [f"[setting_value_to_python_type] Error decoding JSON object: {e}"],)
|
||||
mylog("none", [set_value])
|
||||
value = []
|
||||
|
||||
@@ -375,10 +337,7 @@ def setting_value_to_python_type(set_type, set_value):
|
||||
try:
|
||||
value = reverseTransformers(json.loads(set_value), transformers)
|
||||
except json.JSONDecodeError as e:
|
||||
mylog(
|
||||
"none",
|
||||
[f"[setting_value_to_python_type] Error decoding JSON object: {e}"],
|
||||
)
|
||||
mylog("none", [f"[setting_value_to_python_type] Error decoding JSON object: {e}"],)
|
||||
mylog("none", [{set_value}])
|
||||
value = {}
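Both branches above decode quasi-JSON setting values and fall back to an empty value on error. In sketch form:

import json

def to_python_list(set_value):
    # Settings are stored with single quotes; normalise to JSON and decode,
    # returning a safe default when decoding fails.
    try:
        return json.loads(set_value.replace("'", '"'))
    except json.JSONDecodeError as e:
        print(f"[setting_value_to_python_type] Error decoding JSON object: {e}")
        return []

print(to_python_list("['ARPSCAN', 'PIHOLE']"))  # -> ['ARPSCAN', 'PIHOLE']
print(to_python_list("not json"))               # -> []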
|
||||
|
||||
@@ -766,9 +725,7 @@ def checkNewVersion():
|
||||
try:
|
||||
data = json.loads(text)
|
||||
except json.JSONDecodeError:
|
||||
mylog(
|
||||
"minimal", ["[Version check] ⚠ ERROR: Invalid JSON response from GitHub."]
|
||||
)
|
||||
mylog("minimal", ["[Version check] ⚠ ERROR: Invalid JSON response from GitHub."])
|
||||
return False
|
||||
|
||||
# make sure we received a valid response and not an API rate limit exceeded message
|
||||
@@ -784,10 +741,7 @@ def checkNewVersion():
|
||||
else:
|
||||
mylog("none", ["[Version check] Running the latest version."])
|
||||
else:
|
||||
mylog(
|
||||
"minimal",
|
||||
["[Version check] ⚠ ERROR: Received unexpected response from GitHub."],
|
||||
)
|
||||
mylog("minimal", ["[Version check] ⚠ ERROR: Received unexpected response from GitHub."],)
|
||||
|
||||
return False
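checkNewVersion() guards both against invalid JSON and against unexpected (for example rate-limited) GitHub responses. A rough stdlib-only sketch of that flow; the actual comparison logic in NetAlertX is more involved:

import json
import urllib.request

def newest_release_tag(repo="jokob-sk/NetAlertX"):
    url = f"https://api.github.com/repos/{repo}/releases/latest"
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            text = resp.read().decode()
    except OSError as e:
        print(f"[Version check] ⚠ ERROR: request failed: {e}")
        return None
    try:
        data = json.loads(text)
    except json.JSONDecodeError:
        print("[Version check] ⚠ ERROR: Invalid JSON response from GitHub.")
        return None
    if isinstance(data, dict) and "tag_name" in data:
        return data["tag_name"]
    print("[Version check] ⚠ ERROR: Received unexpected response from GitHub.")
    return None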
|
||||
|
||||
|
||||
@@ -180,10 +180,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
fileModifiedTime = os.path.getmtime(config_file)
|
||||
|
||||
mylog("debug", ["[Import Config] checking config file "])
|
||||
mylog(
|
||||
"debug",
|
||||
["[Import Config] lastImportedConfFile :", conf.lastImportedConfFile],
|
||||
)
|
||||
mylog("debug", ["[Import Config] lastImportedConfFile :", conf.lastImportedConfFile],)
|
||||
mylog("debug", ["[Import Config] fileModifiedTime :", fileModifiedTime])
|
||||
|
||||
if (fileModifiedTime == conf.lastImportedConfFile) and all_plugins is not None:
|
||||
@@ -399,12 +396,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
conf.TIMEZONE = ccd(
|
||||
"TIMEZONE", conf.tz, c_d, "_KEEP_", "_KEEP_", "[]", "General"
|
||||
)
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
f"[Config] Invalid timezone '{conf.TIMEZONE}', defaulting to {default_tz}."
|
||||
],
|
||||
)
|
||||
mylog("none", [f"[Config] Invalid timezone '{conf.TIMEZONE}', defaulting to {default_tz}."],)
|
||||
|
||||
# TODO cleanup later ----------------------------------------------------------------------------------
|
||||
# init all time values as we have timezone - all this shoudl be moved into plugin/plugin settings
|
||||
@@ -450,13 +442,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
|
||||
all_plugins = get_plugins_configs(conf.DISCOVER_PLUGINS)
|
||||
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
"[Config] Plugins: Number of all plugins (including not loaded): ",
|
||||
len(all_plugins),
|
||||
],
|
||||
)
|
||||
mylog("none", ["[Config] Plugins: Number of all plugins (including not loaded): ", len(all_plugins),],)
|
||||
|
||||
plugin_indexes_to_remove = []
|
||||
all_plugins_prefixes = [] # to init the LOADED_PLUGINS setting with correct options
|
||||
@@ -580,9 +566,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
"General",
|
||||
)
|
||||
|
||||
mylog(
|
||||
"none", ["[Config] Number of Plugins to load: ", len(loaded_plugins_prefixes)]
|
||||
)
|
||||
mylog("none", ["[Config] Number of Plugins to load: ", len(loaded_plugins_prefixes)])
|
||||
mylog("none", ["[Config] Plugins to load: ", loaded_plugins_prefixes])
|
||||
|
||||
conf.plugins_once_run = False
|
||||
@@ -606,12 +590,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
|
||||
# Log the value being passed
|
||||
# ccd(key, default, config_dir, name, inputtype, options, group, events=None, desc="", setJsonMetadata=None, overrideTemplate=None, forceDefault=False)
|
||||
mylog(
|
||||
"verbose",
|
||||
[
|
||||
f"[Config] Setting override {setting_name} with value: {value}"
|
||||
],
|
||||
)
|
||||
mylog("verbose", [f"[Config] Setting override {setting_name} with value: {value}"],)
|
||||
ccd(
|
||||
setting_name,
|
||||
value,
|
||||
@@ -630,12 +609,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
)
|
||||
|
||||
except json.JSONDecodeError:
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
f"[Config] [ERROR] Setting override decoding JSON from {app_conf_override_path}"
|
||||
],
|
||||
)
|
||||
mylog("none", [f"[Config] [ERROR] Setting override decoding JSON from {app_conf_override_path}"],)
|
||||
else:
|
||||
mylog("debug", [f"[Config] File {app_conf_override_path} does not exist."])
|
||||
|
||||
@@ -777,10 +751,7 @@ def renameSettings(config_file):
|
||||
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
|
||||
backup_file = f"{config_file}_old_setting_names_{timestamp}.bak"
|
||||
|
||||
mylog(
|
||||
"debug",
|
||||
f"[Config] Old setting names will be replaced and a backup ({backup_file}) of the config created.",
|
||||
)
|
||||
mylog("debug", f"[Config] Old setting names will be replaced and a backup ({backup_file}) of the config created.",)
|
||||
|
||||
shutil.copy(str(config_file), backup_file) # Convert config_file to a string
|
||||
|
||||
@@ -807,6 +778,4 @@ def renameSettings(config_file):
|
||||
) # Convert config_file to a string
|
||||
|
||||
else:
|
||||
mylog(
|
||||
"debug", "[Config] No old setting names found in the file. No changes made."
|
||||
)
|
||||
mylog("debug", "[Config] No old setting names found in the file. No changes made.")
|
||||
|
||||
@@ -119,10 +119,7 @@ def remove_old(keepNumberOfEntries):
|
||||
try:
|
||||
with open(NOTIFICATION_API_FILE, "w") as file:
|
||||
json.dump(trimmed, file, indent=4)
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[Notification] Trimmed notifications to latest {keepNumberOfEntries}",
|
||||
)
|
||||
mylog("verbose", f"[Notification] Trimmed notifications to latest {keepNumberOfEntries}",)
|
||||
except Exception as e:
|
||||
mylog("none", f"Error writing trimmed notifications file: {e}")
|
||||
|
||||
|
||||
@@ -295,9 +295,7 @@ class NotificationInstance:
|
||||
(f"-{minutes} minutes", tz_offset),
|
||||
)
|
||||
|
||||
mylog(
|
||||
"minimal", ["[Notification] Notifications changes: ", self.db.sql.rowcount]
|
||||
)
|
||||
mylog("minimal", ["[Notification] Notifications changes: ", self.db.sql.rowcount])
|
||||
|
||||
# clear plugin events
|
||||
self.clearPluginEvents()
|
||||
|
||||
@@ -31,10 +31,7 @@ class UserEventsQueueInstance:
|
||||
Returns an empty list if the file doesn't exist.
|
||||
"""
|
||||
if not os.path.exists(self.log_file):
|
||||
mylog(
|
||||
"none",
|
||||
["[UserEventsQueueInstance] Log file not found: ", self.log_file],
|
||||
)
|
||||
mylog("none", ["[UserEventsQueueInstance] Log file not found: ", self.log_file],)
|
||||
return [] # No log file, return empty list
|
||||
with open(self.log_file, "r") as file:
|
||||
return file.readlines()
|
||||
|
||||
@@ -123,9 +123,7 @@ def update_devices_data_from_scan(db):
|
||||
)""")
|
||||
|
||||
# Update only devices with empty or NULL devParentMAC
|
||||
mylog(
|
||||
"debug", "[Update Devices] - (if not empty) cur_NetworkNodeMAC -> devParentMAC"
|
||||
)
|
||||
mylog("debug", "[Update Devices] - (if not empty) cur_NetworkNodeMAC -> devParentMAC")
|
||||
sql.execute("""UPDATE Devices
|
||||
SET devParentMAC = (
|
||||
SELECT cur_NetworkNodeMAC
|
||||
@@ -144,10 +142,7 @@ def update_devices_data_from_scan(db):
|
||||
""")
|
||||
|
||||
# Update only devices with empty or NULL devSite
|
||||
mylog(
|
||||
"debug",
|
||||
"[Update Devices] - (if not empty) cur_NetworkSite -> (if empty) devSite",
|
||||
)
|
||||
mylog("debug", "[Update Devices] - (if not empty) cur_NetworkSite -> (if empty) devSite",)
|
||||
sql.execute("""UPDATE Devices
|
||||
SET devSite = (
|
||||
SELECT cur_NetworkSite
|
||||
@@ -325,9 +320,7 @@ def save_scanned_devices(db):
|
||||
.strip()
|
||||
)
|
||||
|
||||
mylog(
|
||||
"debug", ["[Save Devices] Saving this IP into the CurrentScan table:", local_ip]
|
||||
)
|
||||
mylog("debug", ["[Save Devices] Saving this IP into the CurrentScan table:", local_ip])
|
||||
|
||||
if check_IP_format(local_ip) == "":
|
||||
local_ip = "0.0.0.0"
|
||||
@@ -361,23 +354,12 @@ def print_scan_stats(db):
|
||||
sql.execute(query)
|
||||
stats = sql.fetchall()
|
||||
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[Scan Stats] Devices Detected.......: {stats[0]['devices_detected']}",
|
||||
)
|
||||
mylog("verbose", f"[Scan Stats] Devices Detected.......: {stats[0]['devices_detected']}",)
|
||||
mylog("verbose", f"[Scan Stats] New Devices............: {stats[0]['new_devices']}")
|
||||
mylog("verbose", f"[Scan Stats] Down Alerts............: {stats[0]['down_alerts']}")
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[Scan Stats] New Down Alerts........: {stats[0]['new_down_alerts']}",
|
||||
)
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[Scan Stats] New Connections........: {stats[0]['new_connections']}",
|
||||
)
|
||||
mylog(
|
||||
"verbose", f"[Scan Stats] Disconnections.........: {stats[0]['disconnections']}"
|
||||
)
|
||||
mylog("verbose", f"[Scan Stats] New Down Alerts........: {stats[0]['new_down_alerts']}",)
|
||||
mylog("verbose", f"[Scan Stats] New Connections........: {stats[0]['new_connections']}",)
|
||||
mylog("verbose", f"[Scan Stats] Disconnections.........: {stats[0]['disconnections']}")
|
||||
mylog("verbose", f"[Scan Stats] IP Changes.............: {stats[0]['ip_changes']}")
|
||||
|
||||
# if str(stats[0]["new_devices"]) != '0':
|
||||
@@ -395,10 +377,7 @@ def print_scan_stats(db):
|
||||
row_dict = dict(row)
|
||||
mylog("trace", f" {row_dict}")
|
||||
|
||||
mylog(
|
||||
"trace",
|
||||
" ================ Events table content where eve_PendingAlertEmail = 1 ================",
|
||||
)
|
||||
mylog("trace", " ================ Events table content where eve_PendingAlertEmail = 1 ================",)
|
||||
sql.execute("select * from Events where eve_PendingAlertEmail = 1")
|
||||
rows = sql.fetchall()
|
||||
for row in rows:
|
||||
@@ -654,10 +633,7 @@ def check_plugin_data_changed(pm, plugins_to_check):
|
||||
|
||||
# Continue if changes detected
|
||||
for p in plugins_changed:
|
||||
mylog(
|
||||
'debug',
|
||||
f'[check_plugin_data_changed] {p} changed (last_data_change|last_data_check): ({pm.plugin_states.get(p, {}).get("lastDataChange")}|{pm.plugin_checks.get(p)})'
|
||||
)
|
||||
mylog('debug', f'[check_plugin_data_changed] {p} changed (last_change|last_check): ({pm.plugin_states.get(p, {}).get("lastDataChange")}|{pm.plugin_checks.get(p)})')
|
||||
|
||||
return True
|
||||
|
||||
@@ -741,10 +717,7 @@ def update_devices_names(pm):
|
||||
# --- Step 1: Update device names for unknown devices ---
|
||||
unknownDevices = device_handler.getUnknown()
|
||||
if unknownDevices:
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[Update Device Name] Trying to resolve devices without name. Unknown devices count: {len(unknownDevices)}",
|
||||
)
|
||||
mylog("verbose", f"[Update Device Name] Trying to resolve devices without name. Unknown devices count: {len(unknownDevices)}",)
|
||||
|
||||
# Try resolving both name and FQDN
|
||||
recordsToUpdate, recordsNotFound, fs, notFound = resolve_devices(
|
||||
@@ -752,10 +725,8 @@ def update_devices_names(pm):
|
||||
)
|
||||
|
||||
# Log summary
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[Update Device Name] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)} ({fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']})",
|
||||
)
|
||||
res_string = f"{fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']}"
|
||||
mylog("verbose", f"[Update Device Name] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)} ({res_string})",)
|
||||
mylog("verbose", f"[Update Device Name] Names Not Found : {notFound}")
|
||||
|
||||
# Apply updates to database
|
||||
@@ -771,10 +742,7 @@ def update_devices_names(pm):
|
||||
if get_setting_value("REFRESH_FQDN"):
|
||||
allDevices = device_handler.getAll()
|
||||
if allDevices:
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[Update FQDN] Trying to resolve FQDN. Devices count: {len(allDevices)}",
|
||||
)
|
||||
mylog("verbose", f"[Update FQDN] Trying to resolve FQDN. Devices count: {len(allDevices)}",)
|
||||
|
||||
# Try resolving only FQDN
|
||||
recordsToUpdate, _, fs, notFound = resolve_devices(
|
||||
@@ -782,10 +750,8 @@ def update_devices_names(pm):
|
||||
)
|
||||
|
||||
# Log summary
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[Update FQDN] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)}({fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']})",
|
||||
)
|
||||
res_string = f"{fs['DIGSCAN']}/{fs['AVAHISCAN']}/{fs['NSLOOKUP']}/{fs['NBTSCAN']}"
|
||||
mylog("verbose", f"[Update FQDN] Names Found (DIGSCAN/AVAHISCAN/NSLOOKUP/NBTSCAN): {len(recordsToUpdate)}({res_string})",)
|
||||
mylog("verbose", f"[Update FQDN] Names Not Found : {notFound}")
|
||||
|
||||
# Apply FQDN-only updates
|
||||
@@ -907,25 +873,13 @@ def query_MAC_vendor(pMAC):
|
||||
parts = line.split("\t", 1)
|
||||
if len(parts) > 1:
|
||||
vendor = parts[1].strip()
|
||||
mylog(
|
||||
"debug",
|
||||
[
|
||||
f"[Vendor Check] Found '{vendor}' for '{pMAC}' in {vendorsPath}"
|
||||
],
|
||||
)
|
||||
mylog("debug", [f"[Vendor Check] Found '{vendor}' for '{pMAC}' in {vendorsPath}"], )
|
||||
return vendor
|
||||
else:
|
||||
mylog(
|
||||
"debug",
|
||||
[
|
||||
f'[Vendor Check] ⚠ ERROR: Match found, but line could not be processed: "{line_lower}"'
|
||||
],
|
||||
)
|
||||
mylog("debug", [f'[Vendor Check] ⚠ ERROR: Match found, but line could not be processed: "{line_lower}"'],)
|
||||
return -1
|
||||
|
||||
return -1 # MAC address not found in the database
|
||||
except FileNotFoundError:
|
||||
mylog(
|
||||
"none", [f"[Vendor Check] ⚠ ERROR: Vendors file {vendorsPath} not found."]
|
||||
)
|
||||
mylog("none", [f"[Vendor Check] ⚠ ERROR: Vendors file {vendorsPath} not found."])
|
||||
return -1
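query_MAC_vendor() resolves a vendor by matching the MAC's OUI prefix against arp-scan's ieee-oui.txt (tab-separated prefix and vendor pairs). A hedged sketch of that lookup:

VENDORS_PATH = "/usr/share/arp-scan/ieee-oui.txt"   # same file referenced by the installer

def query_mac_vendor(mac):
    prefix = mac.replace(":", "").replace("-", "").upper()[:6]   # first three octets
    try:
        with open(VENDORS_PATH, encoding="utf-8", errors="ignore") as fh:
            for line in fh:
                parts = line.split("\t", 1)
                if len(parts) > 1 and parts[0].strip().upper() == prefix:
                    return parts[1].strip()
    except FileNotFoundError:
        print(f"[Vendor Check] ⚠ ERROR: Vendors file {VENDORS_PATH} not found.")
    return -1   # MAC address not found in the database

print(query_mac_vendor("B8:27:EB:12:34:56"))   # Raspberry Pi OUI, if the file is present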
|
||||
|
||||
@@ -25,10 +25,7 @@ try:
|
||||
rule["icon_base64"] = ""
|
||||
except Exception as e:
|
||||
MAC_TYPE_ICON_RULES = []
|
||||
mylog(
|
||||
"none",
|
||||
f"[guess_device_attributes] Failed to load device_heuristics_rules.json: {e}",
|
||||
)
|
||||
mylog("none", f"[guess_device_attributes] Failed to load device_heuristics_rules.json: {e}",)
|
||||
|
||||
|
||||
# -----------------------------------------
|
||||
@@ -169,10 +166,8 @@ def guess_device_attributes(
|
||||
default_icon: str,
|
||||
default_type: str,
|
||||
) -> Tuple[str, str]:
|
||||
mylog(
|
||||
"debug",
|
||||
f"[guess_device_attributes] Guessing attributes for (vendor|mac|ip|name): ('{vendor}'|'{mac}'|'{ip}'|'{name}')",
|
||||
)
|
||||
|
||||
mylog("debug", f"[guess_device_attributes] Guessing attributes for (vendor|mac|ip|name): ('{vendor}'|'{mac}'|'{ip}'|'{name}')",)
|
||||
|
||||
# --- Normalize inputs ---
|
||||
vendor = str(vendor).lower().strip() if vendor else "unknown"
|
||||
@@ -207,10 +202,7 @@ def guess_device_attributes(
|
||||
type_ = type_ or default_type
|
||||
icon = icon or default_icon
|
||||
|
||||
mylog(
|
||||
"debug",
|
||||
f"[guess_device_attributes] Guessed attributes (icon|type_): ('{icon}'|'{type_}')",
|
||||
)
|
||||
mylog("debug", f"[guess_device_attributes] Guessed attributes (icon|type_): ('{icon}'|'{type_}')",)
|
||||
return icon, type_
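guess_device_attributes() walks a rule list loaded from device_heuristics_rules.json and falls back to defaults when nothing matches. The rules below are invented purely to illustrate the matching shape:

RULES = [   # made-up rules; the real ones come from device_heuristics_rules.json
    {"match": "raspberry", "type": "Single Board Computer", "icon": "raspberry-pi"},
    {"match": "apple", "type": "Phone", "icon": "mobile"},
]

def guess_device_attributes(vendor, default_icon="globe", default_type="Unknown"):
    vendor = str(vendor).lower().strip() if vendor else "unknown"
    for rule in RULES:
        if rule["match"] in vendor:
            return rule["icon"], rule["type"]
    return default_icon, default_type

print(guess_device_attributes("Apple, Inc."))   # -> ('mobile', 'Phone')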
|
||||
|
||||
|
||||
|
||||
@@ -50,9 +50,7 @@ def process_scan(db):
|
||||
update_devices_data_from_scan(db)
|
||||
|
||||
# Pair session events (Connection / Disconnection)
|
||||
mylog(
|
||||
"verbose", "[Process Scan] Pairing session events (connection / disconnection) "
|
||||
)
|
||||
mylog("verbose", "[Process Scan] Pairing session events (connection / disconnection) ")
|
||||
pair_sessions_events(db)
|
||||
|
||||
# Sessions snapshot
|
||||
@@ -221,10 +219,7 @@ def insertOnlineHistory(db):
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
"""
|
||||
|
||||
mylog(
|
||||
"debug",
|
||||
f"[Presence graph] Sql query: {insert_query} with values: {scanTimestamp}, {onlineDevices}, {downDevices}, {allDevices}, {archivedDevices}, {offlineDevices}",
|
||||
)
|
||||
mylog("debug", f"[Presence graph] Sql query: {insert_query} with values: {scanTimestamp}, {onlineDevices}, {downDevices}, {allDevices}, {archivedDevices}, {offlineDevices}",)
|
||||
|
||||
# Debug output
|
||||
print_table_schema(db, "Online_History")
|
||||
|
||||
@@ -26,12 +26,7 @@ def logEventStatusCounts(objName, pluginEvents):
|
||||
status_counts[status] = 1
|
||||
|
||||
for status, count in status_counts.items():
|
||||
mylog(
|
||||
"debug",
|
||||
[
|
||||
f'[{module_name}] In {objName} there are {count} events with the status "{status}" '
|
||||
],
|
||||
)
|
||||
mylog("debug", [f'[{module_name}] In {objName} there are {count} events with the status "{status}" '],)
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
@@ -100,10 +95,7 @@ def list_to_csv(arr):
|
||||
|
||||
mylog("debug", f"[{module_name}] Flattening the below array")
|
||||
mylog("debug", arr)
|
||||
mylog(
|
||||
"debug",
|
||||
f"[{module_name}] isinstance(arr, list) : {isinstance(arr, list)} | isinstance(arr, str) : {isinstance(arr, str)}",
|
||||
)
|
||||
mylog("debug", f"[{module_name}] isinstance(arr, list) : {isinstance(arr, list)} | isinstance(arr, str) : {isinstance(arr, str)}",)
|
||||
|
||||
if isinstance(arr, str):
|
||||
tmpStr = (
|
||||
@@ -227,19 +219,9 @@ def get_plugins_configs(loadAll):
|
||||
|
||||
except (FileNotFoundError, json.JSONDecodeError):
|
||||
# Handle the case when the file is not found or JSON decoding fails
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {config_path}"
|
||||
],
|
||||
)
|
||||
mylog("none", f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {config_path}")
|
||||
except Exception as e:
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
f"[{module_name}] ⚠ ERROR - Exception for file {config_path}: {str(e)}"
|
||||
],
|
||||
)
|
||||
mylog("none", f"[{module_name}] ⚠ ERROR - Exception for file {config_path}: {str(e)}")
|
||||
|
||||
# Sort pluginsList based on "execution_order"
|
||||
pluginsListSorted = sorted(pluginsList, key=get_layer)
|
||||
@@ -285,23 +267,13 @@ def getPluginObject(keyValues):
|
||||
if all_match:
|
||||
return item
|
||||
|
||||
mylog(
|
||||
"verbose",
|
||||
[
|
||||
f"[{module_name}] 💬 INFO - Object not found {json.dumps(keyValues)} "
|
||||
],
|
||||
)
|
||||
mylog("verbose", f"[{module_name}] 💬 INFO - Object not found {json.dumps(keyValues)} ")
|
||||
|
||||
return {}
|
||||
|
||||
except (FileNotFoundError, json.JSONDecodeError, ValueError):
|
||||
# Handle the case when the file is not found, JSON decoding fails, or data is not in the expected format
|
||||
mylog(
|
||||
"verbose",
|
||||
[
|
||||
f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {plugins_objects}"
|
||||
],
|
||||
)
|
||||
mylog("verbose", f"[{module_name}] ⚠ ERROR - JSONDecodeError or FileNotFoundError for file {plugins_objects}")
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
@@ -29,10 +29,7 @@ class UpdateFieldAction(Action):
|
||||
self.db = db
|
||||
|
||||
def execute(self):
|
||||
mylog(
|
||||
"verbose",
|
||||
f"[WF] Updating field '{self.field}' to '{self.value}' for event object {self.trigger.object_type}",
|
||||
)
|
||||
mylog("verbose", f"[WF] Updating field '{self.field}' to '{self.value}' for event object {self.trigger.object_type}")
|
||||
|
||||
obj = self.trigger.object
|
||||
|
||||
@@ -109,12 +106,7 @@ class RunPluginAction(Action):
|
||||
def execute(self):
|
||||
obj = self.trigger.object
|
||||
|
||||
mylog(
|
||||
"verbose",
|
||||
[
|
||||
f"Executing plugin '{self.plugin_name}' with parameters {self.params} for object {obj}"
|
||||
],
|
||||
)
|
||||
mylog("verbose", f"Executing plugin '{self.plugin_name}' with parameters {self.params} for object {obj}")
|
||||
# PluginManager.run(self.plugin_name, self.parameters)
|
||||
return obj
|
||||
|
||||
@@ -129,12 +121,7 @@ class SendNotificationAction(Action):
|
||||
|
||||
def execute(self):
|
||||
obj = self.trigger.object
|
||||
mylog(
|
||||
"verbose",
|
||||
[
|
||||
f"Sending notification via '{self.method}': {self.message} for object {obj}"
|
||||
],
|
||||
)
|
||||
mylog("verbose", f"Sending notification via '{self.method}': {self.message} for object {obj}")
|
||||
# NotificationManager.send(self.method, self.message)
|
||||
return obj
|
||||
|
||||
|
||||
@@ -52,10 +52,7 @@ class ConditionGroup:
|
||||
"""Handles condition groups with AND, OR logic, supporting nested groups."""
|
||||
|
||||
def __init__(self, group_json):
|
||||
mylog(
|
||||
"verbose",
|
||||
[f"[WF] ConditionGroup json.dumps(group_json): {json.dumps(group_json)}"],
|
||||
)
|
||||
mylog("verbose", f"[WF] ConditionGroup json.dumps(group_json): {json.dumps(group_json)}")
|
||||
|
||||
self.logic = group_json.get("logic", "AND").upper()
|
||||
self.conditions = []
|
||||
|
||||
@@ -53,21 +53,13 @@ class WorkflowManager:
|
||||
# Ensure workflow is enabled before proceeding
|
||||
if workflow.get("enabled", "No").lower() == "yes":
|
||||
wfName = workflow["name"]
|
||||
mylog(
|
||||
"debug",
|
||||
[f"[WF] Checking if '{evGuid}' triggers the workflow '{wfName}'"],
|
||||
)
|
||||
mylog("debug", f"[WF] Checking if '{evGuid}' triggers the workflow '{wfName}'")
|
||||
|
||||
# construct trigger object which also evaluates if the current event triggers it
|
||||
trigger = Trigger(workflow["trigger"], event, self.db)
|
||||
|
||||
if trigger.triggered:
|
||||
mylog(
|
||||
"verbose",
|
||||
[
|
||||
f"[WF] Event with GUID '{evGuid}' triggered the workflow '{wfName}'"
|
||||
],
|
||||
)
|
||||
mylog("verbose", f"[WF] Event with GUID '{evGuid}' triggered the workflow '{wfName}'")
|
||||
|
||||
self.execute_workflow(workflow, trigger)
|
||||
|
||||
@@ -98,12 +90,7 @@ class WorkflowManager:
|
||||
evaluator = ConditionGroup(condition_group)
|
||||
|
||||
if evaluator.evaluate(trigger): # If any group evaluates to True
|
||||
mylog(
|
||||
"none",
|
||||
[
|
||||
f"[WF] Workflow {wfName} will be executed - conditions were evaluated as TRUE"
|
||||
],
|
||||
)
|
||||
mylog("none", f"[WF] Workflow {wfName} will be executed - conditions were evaluated as TRUE")
|
||||
mylog("debug", [f"[WF] Workflow condition_group: {condition_group}"])
|
||||
|
||||
self.execute_actions(workflow["actions"], trigger)
|
||||
|
||||
@@ -24,12 +24,7 @@ class Trigger:
|
||||
self.object_type == event["ObjectType"] and self.event_type == event["AppEventType"]
|
||||
)
|
||||
|
||||
mylog(
|
||||
"debug",
|
||||
[
|
||||
f"""[WF] self.triggered '{self.triggered}' for event '{get_array_from_sql_rows(event)} and trigger {json.dumps(triggerJson)}' """
|
||||
],
|
||||
)
|
||||
mylog("debug", f"""[WF] self.triggered '{self.triggered}' for event '{get_array_from_sql_rows(event)} and trigger {json.dumps(triggerJson)}' """)
|
||||
|
||||
if self.triggered:
|
||||
# object type corresponds with the DB table name
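A Trigger fires when the event's object type and app event type both match its definition. Boiled down, with the field names taken from the hunk above and everything else simplified:

import json

def is_triggered(trigger_json, event):
    triggered = (
        trigger_json["object_type"] == event["ObjectType"]
        and trigger_json["event_type"] == event["AppEventType"]
    )
    print(f"[WF] triggered '{triggered}' for event {json.dumps(event)}")
    return triggered

event = {"ObjectType": "Devices", "AppEventType": "update", "GUID": "1234"}
print(is_triggered({"object_type": "Devices", "event_type": "update"}, event))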
|
||||
|
||||
@@ -11,26 +11,29 @@ echo "==========================================" >> "$LOG_FILE"
|
||||
# Function to extract comments from docker-compose file
|
||||
extract_comments() {
|
||||
local file="$1"
|
||||
echo "File: $(basename "$file")" >> "$LOG_FILE"
|
||||
echo "----------------------------------------" >> "$LOG_FILE"
|
||||
{
|
||||
|
||||
# Extract lines starting with # until we hit a non-comment line
|
||||
awk '
|
||||
/^#/ {
|
||||
# Remove the # and any leading/trailing whitespace
|
||||
comment = substr($0, 2)
|
||||
sub(/^ */, "", comment)
|
||||
sub(/ *$/, "", comment)
|
||||
if (comment != "") {
|
||||
print comment
|
||||
}
|
||||
}
|
||||
/^[^#]/ && !/^$/ {
|
||||
exit
|
||||
}
|
||||
' "$file" >> "$LOG_FILE"
|
||||
echo "File: $(basename "$file")"
|
||||
echo "----------------------------------------"
|
||||
|
||||
echo "" >> "$LOG_FILE"
|
||||
# Extract lines starting with # until we hit a non-comment line
|
||||
awk '
|
||||
/^#/ {
|
||||
# Remove the # and any leading/trailing whitespace
|
||||
comment = substr($0, 2)
|
||||
sub(/^ */, "", comment)
|
||||
sub(/ *$/, "", comment)
|
||||
if (comment != "") {
|
||||
print comment
|
||||
}
|
||||
}
|
||||
/^[^#]/ && !/^$/ {
|
||||
exit
|
||||
}
|
||||
' "$file"
|
||||
|
||||
echo ""
|
||||
} >> "$LOG_FILE"
|
||||
}
|
||||
|
||||
# Function to run docker-compose test
|
||||
@@ -40,16 +43,17 @@ run_test() {
|
||||
dirname=$(dirname "$file")
|
||||
local basename
|
||||
basename=$(basename "$file")
|
||||
|
||||
echo "Testing: $basename" >> "$LOG_FILE"
|
||||
echo "Directory: $dirname" >> "$LOG_FILE"
|
||||
echo "" >> "$LOG_FILE"
|
||||
echo "Running docker-compose up..." >> "$LOG_FILE"
|
||||
timeout 10s docker-compose -f "$file" up 2>&1 >> "$LOG_FILE"
|
||||
|
||||
{
|
||||
echo "Testing: $basename"
|
||||
echo "Directory: $dirname"
|
||||
echo ""
|
||||
echo "Running docker-compose up..."
|
||||
timeout 10s docker-compose -f "$file" up 2>&1
|
||||
} >> "$LOG_FILE"
|
||||
# Clean up
|
||||
docker-compose -f "$file" down -v 2>/dev/null || true
|
||||
docker volume prune -f 2>/dev/null || true
|
||||
}
|
||||
|
||||
find "$SCRIPT_DIR" -name "docker-compose*.yml" -type f -print0 | sort -z | while IFS= read -r -d '' file; do
|
||||
extract_comments "$file"
|
||||
|
||||
File diff suppressed because it is too large
@@ -57,7 +57,7 @@ for i in $(seq 1 $WAIT_SECONDS); do
echo "--- Services are healthy! ---"
break
fi
if [ $i -eq $WAIT_SECONDS ]; then
if [ "$i" -eq "$WAIT_SECONDS" ]; then
echo "--- Timeout: Services did not become healthy after $WAIT_SECONDS seconds. ---"
docker logs netalertx-test-container
exit 1

@@ -271,7 +271,7 @@ def create_test_scenarios() -> List[TestScenario]:
compose_file = f"docker-compose.mount-test.{path_name}_{scenario_name}.yml"

# Determine expected exit code
expected_exit_code = 1 if scenario_name == "unwritable" else 0
expected_exit_code = 1 if expected_issues else 0

scenarios.append(
TestScenario(
