@@ -35,6 +35,7 @@ RUN apk add --no-cache \
|
||||
shadow \
|
||||
python3 \
|
||||
python3-dev \
|
||||
py3-psutil \
|
||||
gcc \
|
||||
musl-dev \
|
||||
libffi-dev \
|
||||
@@ -136,7 +137,7 @@ ENV LANG=C.UTF-8
|
||||
|
||||
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap fping \
|
||||
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
||||
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
|
||||
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 py3-psutil envsubst \
|
||||
nginx supercronic shadow su-exec jq && \
|
||||
rm -Rf /var/cache/apk/* && \
|
||||
rm -Rf /etc/nginx && \
|
||||
|
||||
19
.github/skills/code-standards/SKILL.md
vendored
@@ -5,6 +5,14 @@ description: NetAlertX coding standards and conventions. Use this when writing c
|
||||
|
||||
# Code Standards
|
||||
|
||||
- ask me to review before going to each next step (mention n step out of x)
|
||||
- before starting, prepare implementation plan
|
||||
- ask me to review it and ask any clarifying questions first
|
||||
- add test creation as last step - follow repo architecture patterns - do not place in the root of /test
|
||||
- code has to be maintainable, no duplicate code
|
||||
- follow DRY principle
|
||||
- code files should be less than 500 LOC for better maintainability
|
||||
|
||||
## File Length
|
||||
|
||||
Keep code files under 500 lines. Split larger files into modules.
|
||||
@@ -42,11 +50,18 @@ Nested subprocess calls need their own timeout—outer timeout won't save you.
|
||||
## Time Utilities
|
||||
|
||||
```python
|
||||
from utils.datetime_utils import timeNowDB
|
||||
from utils.datetime_utils import timeNowUTC
|
||||
|
||||
timestamp = timeNowDB()
|
||||
timestamp = timeNowUTC()
|
||||
```
|
||||
|
||||
This is the ONLY function that calls datetime.datetime.now() in the entire codebase.
|
||||
|
||||
⚠️ CRITICAL: ALL database timestamps MUST be stored in UTC
|
||||
This is the SINGLE SOURCE OF TRUTH for current time in NetAlertX
|
||||
Use timeNowUTC() for DB writes (returns UTC string by default)
|
||||
Use timeNowUTC(as_string=False) for datetime operations (scheduling, comparisons, logging)
|
||||
|
||||
## String Sanitization
|
||||
|
||||
Use sanitizers from `server/helper.py` before storing user input.
|
||||
|
||||
8
.github/skills/settings-management/SKILL.md
vendored
@@ -37,11 +37,3 @@ Define in plugin's `config.json` manifest under the settings section.
|
||||
## Environment Override
|
||||
|
||||
Use `APP_CONF_OVERRIDE` environment variable for settings that must be set before startup.
|
||||
|
||||
## Backend API URL
|
||||
|
||||
For Codespaces, set `BACKEND_API_URL` to your Codespace URL:
|
||||
|
||||
```
|
||||
BACKEND_API_URL=https://something-20212.app.github.dev/
|
||||
```
|
||||
|
||||
6
.github/workflows/run-all-tests.yml
vendored
@@ -12,7 +12,7 @@ on:
|
||||
type: boolean
|
||||
default: false
|
||||
run_backend:
|
||||
description: '📂 backend/ (SQL Builder & Security)'
|
||||
description: '📂 backend/ & db/ (SQL Builder, Security & Migration)'
|
||||
type: boolean
|
||||
default: false
|
||||
run_docker_env:
|
||||
@@ -43,9 +43,9 @@ jobs:
|
||||
run: |
|
||||
PATHS=""
|
||||
# Folder Mapping with 'test/' prefix
|
||||
if [ "${{ github.event.inputs.scan }}" == "true" ]; then PATHS="$PATHS test/scan/"; fi
|
||||
if [ "${{ github.event.inputs.run_scan }}" == "true" ]; then PATHS="$PATHS test/scan/"; fi
|
||||
if [ "${{ github.event.inputs.run_api }}" == "true" ]; then PATHS="$PATHS test/api_endpoints/ test/server/"; fi
|
||||
if [ "${{ github.event.inputs.run_backend }}" == "true" ]; then PATHS="$PATHS test/backend/"; fi
|
||||
if [ "${{ github.event.inputs.run_backend }}" == "true" ]; then PATHS="$PATHS test/backend/ test/db/"; fi
|
||||
if [ "${{ github.event.inputs.run_docker_env }}" == "true" ]; then PATHS="$PATHS test/docker_tests/"; fi
|
||||
if [ "${{ github.event.inputs.run_ui }}" == "true" ]; then PATHS="$PATHS test/ui/"; fi
|
||||
|
||||
|
||||
1
.gitignore
vendored
@@ -25,6 +25,7 @@ front/api/*
|
||||
**/plugins/**/*.log
|
||||
**/plugins/cloud_services/*
|
||||
**/plugins/cloud_connector/*
|
||||
**/plugins/heartbeat/*
|
||||
**/%40eaDir/
|
||||
**/@eaDir/
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@ RUN apk add --no-cache \
|
||||
shadow \
|
||||
python3 \
|
||||
python3-dev \
|
||||
py3-psutil \
|
||||
gcc \
|
||||
musl-dev \
|
||||
libffi-dev \
|
||||
@@ -133,7 +134,7 @@ ENV LANG=C.UTF-8
|
||||
|
||||
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap fping \
|
||||
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
||||
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
|
||||
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 py3-psutil envsubst \
|
||||
nginx supercronic shadow su-exec jq && \
|
||||
rm -Rf /var/cache/apk/* && \
|
||||
rm -Rf /etc/nginx && \
|
||||
|
||||
@@ -10,6 +10,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
python3 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
python3-psutil \
|
||||
python3-venv \
|
||||
gcc \
|
||||
git \
|
||||
@@ -193,7 +194,7 @@ RUN for vfile in .VERSION .VERSION_PREV; do \
|
||||
# setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
|
||||
/bin/bash /build/init-nginx.sh && \
|
||||
/bin/bash /build/init-php-fpm.sh && \
|
||||
# /bin/bash /build/init-cron.sh && \
|
||||
# /bin/bash /build/init-cron.sh && \
|
||||
# Debian cron init might differ, skipping for now or need to check init-cron.sh content
|
||||
# Checking init-backend.sh
|
||||
/bin/bash /build/init-backend.sh && \
|
||||
|
||||
427
back/app.sql
@@ -1,427 +0,0 @@
|
||||
CREATE TABLE sqlite_stat1(tbl,idx,stat);
|
||||
CREATE TABLE Events (eve_MAC STRING (50) NOT NULL COLLATE NOCASE, eve_IP STRING (50) NOT NULL COLLATE NOCASE, eve_DateTime DATETIME NOT NULL, eve_EventType STRING (30) NOT NULL COLLATE NOCASE, eve_AdditionalInfo STRING (250) DEFAULT (''), eve_PendingAlertEmail BOOLEAN NOT NULL CHECK (eve_PendingAlertEmail IN (0, 1)) DEFAULT (1), eve_PairEventRowid INTEGER);
|
||||
CREATE TABLE Sessions (ses_MAC STRING (50) COLLATE NOCASE, ses_IP STRING (50) COLLATE NOCASE, ses_EventTypeConnection STRING (30) COLLATE NOCASE, ses_DateTimeConnection DATETIME, ses_EventTypeDisconnection STRING (30) COLLATE NOCASE, ses_DateTimeDisconnection DATETIME, ses_StillConnected BOOLEAN, ses_AdditionalInfo STRING (250));
|
||||
CREATE TABLE IF NOT EXISTS "Online_History" (
|
||||
"Index" INTEGER,
|
||||
"Scan_Date" TEXT,
|
||||
"Online_Devices" INTEGER,
|
||||
"Down_Devices" INTEGER,
|
||||
"All_Devices" INTEGER,
|
||||
"Archived_Devices" INTEGER,
|
||||
"Offline_Devices" INTEGER,
|
||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
||||
);
|
||||
CREATE TABLE sqlite_sequence(name,seq);
|
||||
CREATE TABLE Devices (
|
||||
devMac STRING (50) PRIMARY KEY NOT NULL COLLATE NOCASE,
|
||||
devName STRING (50) NOT NULL DEFAULT "(unknown)",
|
||||
devOwner STRING (30) DEFAULT "(unknown)" NOT NULL,
|
||||
devType STRING (30),
|
||||
devVendor STRING (250),
|
||||
devFavorite BOOLEAN CHECK (devFavorite IN (0, 1)) DEFAULT (0) NOT NULL,
|
||||
devGroup STRING (10),
|
||||
devComments TEXT,
|
||||
devFirstConnection DATETIME NOT NULL,
|
||||
devLastConnection DATETIME NOT NULL,
|
||||
devLastIP STRING (50) NOT NULL COLLATE NOCASE,
|
||||
devPrimaryIPv4 TEXT,
|
||||
devPrimaryIPv6 TEXT,
|
||||
devVlan TEXT,
|
||||
devForceStatus TEXT,
|
||||
devStaticIP BOOLEAN DEFAULT (0) NOT NULL CHECK (devStaticIP IN (0, 1)),
|
||||
devScan INTEGER DEFAULT (1) NOT NULL,
|
||||
devLogEvents BOOLEAN NOT NULL DEFAULT (1) CHECK (devLogEvents IN (0, 1)),
|
||||
devAlertEvents BOOLEAN NOT NULL DEFAULT (1) CHECK (devAlertEvents IN (0, 1)),
|
||||
devAlertDown BOOLEAN NOT NULL DEFAULT (0) CHECK (devAlertDown IN (0, 1)),
|
||||
devSkipRepeated INTEGER DEFAULT 0 NOT NULL,
|
||||
devLastNotification DATETIME,
|
||||
devPresentLastScan BOOLEAN NOT NULL DEFAULT (0) CHECK (devPresentLastScan IN (0, 1)),
|
||||
devIsNew BOOLEAN NOT NULL DEFAULT (1) CHECK (devIsNew IN (0, 1)),
|
||||
devLocation STRING (250) COLLATE NOCASE,
|
||||
devIsArchived BOOLEAN NOT NULL DEFAULT (0) CHECK (devIsArchived IN (0, 1)),
|
||||
devParentMAC TEXT,
|
||||
devParentPort INTEGER,
|
||||
devParentRelType TEXT,
|
||||
devIcon TEXT,
|
||||
devGUID TEXT,
|
||||
devSite TEXT,
|
||||
devSSID TEXT,
|
||||
devSyncHubNode TEXT,
|
||||
devSourcePlugin TEXT,
|
||||
devMacSource TEXT,
|
||||
devNameSource TEXT,
|
||||
devFQDNSource TEXT,
|
||||
devLastIPSource TEXT,
|
||||
devVendorSource TEXT,
|
||||
devSSIDSource TEXT,
|
||||
devParentMACSource TEXT,
|
||||
devParentPortSource TEXT,
|
||||
devParentRelTypeSource TEXT,
|
||||
devVlanSource TEXT,
|
||||
"devCustomProps" TEXT);
|
||||
CREATE TABLE IF NOT EXISTS "Settings" (
|
||||
"setKey" TEXT,
|
||||
"setName" TEXT,
|
||||
"setDescription" TEXT,
|
||||
"setType" TEXT,
|
||||
"setOptions" TEXT,
|
||||
"setGroup" TEXT,
|
||||
"setValue" TEXT,
|
||||
"setEvents" TEXT,
|
||||
"setOverriddenByEnv" INTEGER
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "Parameters" (
|
||||
"par_ID" TEXT PRIMARY KEY,
|
||||
"par_Value" TEXT
|
||||
);
|
||||
CREATE TABLE Plugins_Objects(
|
||||
"Index" INTEGER,
|
||||
Plugin TEXT NOT NULL,
|
||||
Object_PrimaryID TEXT NOT NULL,
|
||||
Object_SecondaryID TEXT NOT NULL,
|
||||
DateTimeCreated TEXT NOT NULL,
|
||||
DateTimeChanged TEXT NOT NULL,
|
||||
Watched_Value1 TEXT NOT NULL,
|
||||
Watched_Value2 TEXT NOT NULL,
|
||||
Watched_Value3 TEXT NOT NULL,
|
||||
Watched_Value4 TEXT NOT NULL,
|
||||
Status TEXT NOT NULL,
|
||||
Extra TEXT NOT NULL,
|
||||
UserData TEXT NOT NULL,
|
||||
ForeignKey TEXT NOT NULL,
|
||||
SyncHubNodeName TEXT,
|
||||
"HelpVal1" TEXT,
|
||||
"HelpVal2" TEXT,
|
||||
"HelpVal3" TEXT,
|
||||
"HelpVal4" TEXT,
|
||||
ObjectGUID TEXT,
|
||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
||||
);
|
||||
CREATE TABLE Plugins_Events(
|
||||
"Index" INTEGER,
|
||||
Plugin TEXT NOT NULL,
|
||||
Object_PrimaryID TEXT NOT NULL,
|
||||
Object_SecondaryID TEXT NOT NULL,
|
||||
DateTimeCreated TEXT NOT NULL,
|
||||
DateTimeChanged TEXT NOT NULL,
|
||||
Watched_Value1 TEXT NOT NULL,
|
||||
Watched_Value2 TEXT NOT NULL,
|
||||
Watched_Value3 TEXT NOT NULL,
|
||||
Watched_Value4 TEXT NOT NULL,
|
||||
Status TEXT NOT NULL,
|
||||
Extra TEXT NOT NULL,
|
||||
UserData TEXT NOT NULL,
|
||||
ForeignKey TEXT NOT NULL,
|
||||
SyncHubNodeName TEXT,
|
||||
"HelpVal1" TEXT,
|
||||
"HelpVal2" TEXT,
|
||||
"HelpVal3" TEXT,
|
||||
"HelpVal4" TEXT, "ObjectGUID" TEXT,
|
||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
||||
);
|
||||
CREATE TABLE Plugins_History(
|
||||
"Index" INTEGER,
|
||||
Plugin TEXT NOT NULL,
|
||||
Object_PrimaryID TEXT NOT NULL,
|
||||
Object_SecondaryID TEXT NOT NULL,
|
||||
DateTimeCreated TEXT NOT NULL,
|
||||
DateTimeChanged TEXT NOT NULL,
|
||||
Watched_Value1 TEXT NOT NULL,
|
||||
Watched_Value2 TEXT NOT NULL,
|
||||
Watched_Value3 TEXT NOT NULL,
|
||||
Watched_Value4 TEXT NOT NULL,
|
||||
Status TEXT NOT NULL,
|
||||
Extra TEXT NOT NULL,
|
||||
UserData TEXT NOT NULL,
|
||||
ForeignKey TEXT NOT NULL,
|
||||
SyncHubNodeName TEXT,
|
||||
"HelpVal1" TEXT,
|
||||
"HelpVal2" TEXT,
|
||||
"HelpVal3" TEXT,
|
||||
"HelpVal4" TEXT, "ObjectGUID" TEXT,
|
||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
||||
);
|
||||
CREATE TABLE Plugins_Language_Strings(
|
||||
"Index" INTEGER,
|
||||
Language_Code TEXT NOT NULL,
|
||||
String_Key TEXT NOT NULL,
|
||||
String_Value TEXT NOT NULL,
|
||||
Extra TEXT NOT NULL,
|
||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
||||
);
|
||||
CREATE TABLE CurrentScan (
|
||||
scanMac STRING(50) NOT NULL COLLATE NOCASE,
|
||||
scanLastIP STRING(50) NOT NULL COLLATE NOCASE,
|
||||
scanVendor STRING(250),
|
||||
scanSourcePlugin STRING(10),
|
||||
scanName STRING(250),
|
||||
scanLastQuery STRING(250),
|
||||
scanLastConnection STRING(250),
|
||||
scanSyncHubNode STRING(50),
|
||||
scanSite STRING(250),
|
||||
scanSSID STRING(250),
|
||||
scanVlan STRING(250),
|
||||
scanParentMAC STRING(250),
|
||||
scanParentPort STRING(250),
|
||||
scanType STRING(250),
|
||||
UNIQUE(scanMac)
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "AppEvents" (
|
||||
"Index" INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
"GUID" TEXT UNIQUE,
|
||||
"AppEventProcessed" BOOLEAN,
|
||||
"DateTimeCreated" TEXT,
|
||||
"ObjectType" TEXT,
|
||||
"ObjectGUID" TEXT,
|
||||
"ObjectPlugin" TEXT,
|
||||
"ObjectPrimaryID" TEXT,
|
||||
"ObjectSecondaryID" TEXT,
|
||||
"ObjectForeignKey" TEXT,
|
||||
"ObjectIndex" TEXT,
|
||||
"ObjectIsNew" BOOLEAN,
|
||||
"ObjectIsArchived" BOOLEAN,
|
||||
"ObjectStatusColumn" TEXT,
|
||||
"ObjectStatus" TEXT,
|
||||
"AppEventType" TEXT,
|
||||
"Helper1" TEXT,
|
||||
"Helper2" TEXT,
|
||||
"Helper3" TEXT,
|
||||
"Extra" TEXT
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "Notifications" (
|
||||
"Index" INTEGER,
|
||||
"GUID" TEXT UNIQUE,
|
||||
"DateTimeCreated" TEXT,
|
||||
"DateTimePushed" TEXT,
|
||||
"Status" TEXT,
|
||||
"JSON" TEXT,
|
||||
"Text" TEXT,
|
||||
"HTML" TEXT,
|
||||
"PublishedVia" TEXT,
|
||||
"Extra" TEXT,
|
||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
||||
);
|
||||
CREATE INDEX IDX_eve_DateTime ON Events (eve_DateTime);
|
||||
CREATE INDEX IDX_eve_EventType ON Events (eve_EventType COLLATE NOCASE);
|
||||
CREATE INDEX IDX_eve_MAC ON Events (eve_MAC COLLATE NOCASE);
|
||||
CREATE INDEX IDX_eve_PairEventRowid ON Events (eve_PairEventRowid);
|
||||
CREATE INDEX IDX_ses_EventTypeDisconnection ON Sessions (ses_EventTypeDisconnection COLLATE NOCASE);
|
||||
CREATE INDEX IDX_ses_EventTypeConnection ON Sessions (ses_EventTypeConnection COLLATE NOCASE);
|
||||
CREATE INDEX IDX_ses_DateTimeDisconnection ON Sessions (ses_DateTimeDisconnection);
|
||||
CREATE INDEX IDX_ses_MAC ON Sessions (ses_MAC COLLATE NOCASE);
|
||||
CREATE INDEX IDX_ses_DateTimeConnection ON Sessions (ses_DateTimeConnection);
|
||||
CREATE INDEX IDX_dev_PresentLastScan ON Devices (devPresentLastScan);
|
||||
CREATE INDEX IDX_dev_FirstConnection ON Devices (devFirstConnection);
|
||||
CREATE INDEX IDX_dev_AlertDeviceDown ON Devices (devAlertDown);
|
||||
CREATE INDEX IDX_dev_StaticIP ON Devices (devStaticIP);
|
||||
CREATE INDEX IDX_dev_ScanCycle ON Devices (devScan);
|
||||
CREATE INDEX IDX_dev_Favorite ON Devices (devFavorite);
|
||||
CREATE INDEX IDX_dev_LastIP ON Devices (devLastIP);
|
||||
CREATE INDEX IDX_dev_NewDevice ON Devices (devIsNew);
|
||||
CREATE INDEX IDX_dev_Archived ON Devices (devIsArchived);
|
||||
CREATE VIEW Events_Devices AS
|
||||
SELECT *
|
||||
FROM Events
|
||||
LEFT JOIN Devices ON eve_MAC = devMac
|
||||
/* Events_Devices(eve_MAC,eve_IP,eve_DateTime,eve_EventType,eve_AdditionalInfo,eve_PendingAlertEmail,eve_PairEventRowid,devMac,devName,devOwner,devType,devVendor,devFavorite,devGroup,devComments,devFirstConnection,devLastConnection,devLastIP,devStaticIP,devScan,devLogEvents,devAlertEvents,devAlertDown,devSkipRepeated,devLastNotification,devPresentLastScan,devIsNew,devLocation,devIsArchived,devParentMAC,devParentPort,devIcon,devGUID,devSite,devSSID,devSyncHubNode,devSourcePlugin,devCustomProps) */;
|
||||
CREATE VIEW LatestEventsPerMAC AS
|
||||
WITH RankedEvents AS (
|
||||
SELECT
|
||||
e.*,
|
||||
ROW_NUMBER() OVER (PARTITION BY e.eve_MAC ORDER BY e.eve_DateTime DESC) AS row_num
|
||||
FROM Events AS e
|
||||
)
|
||||
SELECT
|
||||
e.*,
|
||||
d.*,
|
||||
c.*
|
||||
FROM RankedEvents AS e
|
||||
LEFT JOIN Devices AS d ON e.eve_MAC = d.devMac
|
||||
INNER JOIN CurrentScan AS c ON e.eve_MAC = c.scanMac
|
||||
WHERE e.row_num = 1
|
||||
/* LatestEventsPerMAC(eve_MAC,eve_IP,eve_DateTime,eve_EventType,eve_AdditionalInfo,eve_PendingAlertEmail,eve_PairEventRowid,row_num,devMac,devName,devOwner,devType,devVendor,devFavorite,devGroup,devComments,devFirstConnection,devLastConnection,devLastIP,devStaticIP,devScan,devLogEvents,devAlertEvents,devAlertDown,devSkipRepeated,devLastNotification,devPresentLastScan,devIsNew,devLocation,devIsArchived,devParentMAC,devParentPort,devIcon,devGUID,devSite,devSSID,devSyncHubNode,devSourcePlugin,devCustomProps,scanMac,scanLastIP,scanVendor,scanSourcePlugin,scanName,scanLastQuery,scanLastConnection,scanSyncHubNode,scanSite,scanSSID,scanParentMAC,scanParentPort,scanType) */;
|
||||
CREATE VIEW Sessions_Devices AS SELECT * FROM Sessions LEFT JOIN "Devices" ON ses_MAC = devMac
|
||||
/* Sessions_Devices(ses_MAC,ses_IP,ses_EventTypeConnection,ses_DateTimeConnection,ses_EventTypeDisconnection,ses_DateTimeDisconnection,ses_StillConnected,ses_AdditionalInfo,devMac,devName,devOwner,devType,devVendor,devFavorite,devGroup,devComments,devFirstConnection,devLastConnection,devLastIP,devStaticIP,devScan,devLogEvents,devAlertEvents,devAlertDown,devSkipRepeated,devLastNotification,devPresentLastScan,devIsNew,devLocation,devIsArchived,devParentMAC,devParentPort,devIcon,devGUID,devSite,devSSID,devSyncHubNode,devSourcePlugin,devCustomProps) */;
|
||||
CREATE VIEW Convert_Events_to_Sessions AS SELECT EVE1.eve_MAC,
|
||||
EVE1.eve_IP,
|
||||
EVE1.eve_EventType AS eve_EventTypeConnection,
|
||||
EVE1.eve_DateTime AS eve_DateTimeConnection,
|
||||
CASE WHEN EVE2.eve_EventType IN ('Disconnected', 'Device Down') OR
|
||||
EVE2.eve_EventType IS NULL THEN EVE2.eve_EventType ELSE '<missing event>' END AS eve_EventTypeDisconnection,
|
||||
CASE WHEN EVE2.eve_EventType IN ('Disconnected', 'Device Down') THEN EVE2.eve_DateTime ELSE NULL END AS eve_DateTimeDisconnection,
|
||||
CASE WHEN EVE2.eve_EventType IS NULL THEN 1 ELSE 0 END AS eve_StillConnected,
|
||||
EVE1.eve_AdditionalInfo
|
||||
FROM Events AS EVE1
|
||||
LEFT JOIN
|
||||
Events AS EVE2 ON EVE1.eve_PairEventRowID = EVE2.RowID
|
||||
WHERE EVE1.eve_EventType IN ('New Device', 'Connected','Down Reconnected')
|
||||
UNION
|
||||
SELECT eve_MAC,
|
||||
eve_IP,
|
||||
'<missing event>' AS eve_EventTypeConnection,
|
||||
NULL AS eve_DateTimeConnection,
|
||||
eve_EventType AS eve_EventTypeDisconnection,
|
||||
eve_DateTime AS eve_DateTimeDisconnection,
|
||||
0 AS eve_StillConnected,
|
||||
eve_AdditionalInfo
|
||||
FROM Events AS EVE1
|
||||
WHERE (eve_EventType = 'Device Down' OR
|
||||
eve_EventType = 'Disconnected') AND
|
||||
EVE1.eve_PairEventRowID IS NULL
|
||||
/* Convert_Events_to_Sessions(eve_MAC,eve_IP,eve_EventTypeConnection,eve_DateTimeConnection,eve_EventTypeDisconnection,eve_DateTimeDisconnection,eve_StillConnected,eve_AdditionalInfo) */;
|
||||
CREATE TRIGGER "trg_insert_devices"
|
||||
AFTER INSERT ON "Devices"
|
||||
WHEN NOT EXISTS (
|
||||
SELECT 1 FROM AppEvents
|
||||
WHERE AppEventProcessed = 0
|
||||
AND ObjectType = 'Devices'
|
||||
AND ObjectGUID = NEW.devGUID
|
||||
AND ObjectStatus = CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END
|
||||
AND AppEventType = 'insert'
|
||||
)
|
||||
BEGIN
|
||||
INSERT INTO "AppEvents" (
|
||||
"GUID",
|
||||
"DateTimeCreated",
|
||||
"AppEventProcessed",
|
||||
"ObjectType",
|
||||
"ObjectGUID",
|
||||
"ObjectPrimaryID",
|
||||
"ObjectSecondaryID",
|
||||
"ObjectStatus",
|
||||
"ObjectStatusColumn",
|
||||
"ObjectIsNew",
|
||||
"ObjectIsArchived",
|
||||
"ObjectForeignKey",
|
||||
"ObjectPlugin",
|
||||
"AppEventType"
|
||||
)
|
||||
VALUES (
|
||||
|
||||
lower(
|
||||
hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || '4' ||
|
||||
substr(hex( randomblob(2)), 2) || '-' ||
|
||||
substr('AB89', 1 + (abs(random()) % 4) , 1) ||
|
||||
substr(hex(randomblob(2)), 2) || '-' ||
|
||||
hex(randomblob(6))
|
||||
)
|
||||
,
|
||||
DATETIME('now'),
|
||||
FALSE,
|
||||
'Devices',
|
||||
NEW.devGUID, -- ObjectGUID
|
||||
NEW.devMac, -- ObjectPrimaryID
|
||||
NEW.devLastIP, -- ObjectSecondaryID
|
||||
CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END, -- ObjectStatus
|
||||
'devPresentLastScan', -- ObjectStatusColumn
|
||||
NEW.devIsNew, -- ObjectIsNew
|
||||
NEW.devIsArchived, -- ObjectIsArchived
|
||||
NEW.devGUID, -- ObjectForeignKey
|
||||
'DEVICES', -- ObjectForeignKey
|
||||
'insert'
|
||||
);
|
||||
END;
|
||||
CREATE TRIGGER "trg_update_devices"
|
||||
AFTER UPDATE ON "Devices"
|
||||
WHEN NOT EXISTS (
|
||||
SELECT 1 FROM AppEvents
|
||||
WHERE AppEventProcessed = 0
|
||||
AND ObjectType = 'Devices'
|
||||
AND ObjectGUID = NEW.devGUID
|
||||
AND ObjectStatus = CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END
|
||||
AND AppEventType = 'update'
|
||||
)
|
||||
BEGIN
|
||||
INSERT INTO "AppEvents" (
|
||||
"GUID",
|
||||
"DateTimeCreated",
|
||||
"AppEventProcessed",
|
||||
"ObjectType",
|
||||
"ObjectGUID",
|
||||
"ObjectPrimaryID",
|
||||
"ObjectSecondaryID",
|
||||
"ObjectStatus",
|
||||
"ObjectStatusColumn",
|
||||
"ObjectIsNew",
|
||||
"ObjectIsArchived",
|
||||
"ObjectForeignKey",
|
||||
"ObjectPlugin",
|
||||
"AppEventType"
|
||||
)
|
||||
VALUES (
|
||||
|
||||
lower(
|
||||
hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || '4' ||
|
||||
substr(hex( randomblob(2)), 2) || '-' ||
|
||||
substr('AB89', 1 + (abs(random()) % 4) , 1) ||
|
||||
substr(hex(randomblob(2)), 2) || '-' ||
|
||||
hex(randomblob(6))
|
||||
)
|
||||
,
|
||||
DATETIME('now'),
|
||||
FALSE,
|
||||
'Devices',
|
||||
NEW.devGUID, -- ObjectGUID
|
||||
NEW.devMac, -- ObjectPrimaryID
|
||||
NEW.devLastIP, -- ObjectSecondaryID
|
||||
CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END, -- ObjectStatus
|
||||
'devPresentLastScan', -- ObjectStatusColumn
|
||||
NEW.devIsNew, -- ObjectIsNew
|
||||
NEW.devIsArchived, -- ObjectIsArchived
|
||||
NEW.devGUID, -- ObjectForeignKey
|
||||
'DEVICES', -- ObjectForeignKey
|
||||
'update'
|
||||
);
|
||||
END;
|
||||
CREATE TRIGGER "trg_delete_devices"
|
||||
AFTER DELETE ON "Devices"
|
||||
WHEN NOT EXISTS (
|
||||
SELECT 1 FROM AppEvents
|
||||
WHERE AppEventProcessed = 0
|
||||
AND ObjectType = 'Devices'
|
||||
AND ObjectGUID = OLD.devGUID
|
||||
AND ObjectStatus = CASE WHEN OLD.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END
|
||||
AND AppEventType = 'delete'
|
||||
)
|
||||
BEGIN
|
||||
INSERT INTO "AppEvents" (
|
||||
"GUID",
|
||||
"DateTimeCreated",
|
||||
"AppEventProcessed",
|
||||
"ObjectType",
|
||||
"ObjectGUID",
|
||||
"ObjectPrimaryID",
|
||||
"ObjectSecondaryID",
|
||||
"ObjectStatus",
|
||||
"ObjectStatusColumn",
|
||||
"ObjectIsNew",
|
||||
"ObjectIsArchived",
|
||||
"ObjectForeignKey",
|
||||
"ObjectPlugin",
|
||||
"AppEventType"
|
||||
)
|
||||
VALUES (
|
||||
|
||||
lower(
|
||||
hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || '4' ||
|
||||
substr(hex( randomblob(2)), 2) || '-' ||
|
||||
substr('AB89', 1 + (abs(random()) % 4) , 1) ||
|
||||
substr(hex(randomblob(2)), 2) || '-' ||
|
||||
hex(randomblob(6))
|
||||
)
|
||||
,
|
||||
DATETIME('now'),
|
||||
FALSE,
|
||||
'Devices',
|
||||
OLD.devGUID, -- ObjectGUID
|
||||
OLD.devMac, -- ObjectPrimaryID
|
||||
OLD.devLastIP, -- ObjectSecondaryID
|
||||
CASE WHEN OLD.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END, -- ObjectStatus
|
||||
'devPresentLastScan', -- ObjectStatusColumn
|
||||
OLD.devIsNew, -- ObjectIsNew
|
||||
OLD.devIsArchived, -- ObjectIsArchived
|
||||
OLD.devGUID, -- ObjectForeignKey
|
||||
'DEVICES', -- ObjectForeignKey
|
||||
'delete'
|
||||
);
|
||||
END;
|
||||
56
docs/ADVISORY_EYES_ON_GLASS.md
Normal file
@@ -0,0 +1,56 @@
|
||||
### Build an MSP Wallboard for Network Monitoring
|
||||
|
||||
For Managed Service Providers (MSPs) and Network Operations Centers (NOC), "Eyes on Glass" monitoring requires a UI that is both self-healing (auto-refreshing) and focused only on critical data. By leveraging the **UI Settings Plugin**, you can transform NetAlertX from a management tool into a dedicated live monitor.
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### 1. Configure Auto-Refresh for Live Monitoring
|
||||
|
||||
Static dashboards are the enemy of real-time response. NetAlertX allows you to force the UI to pull fresh data without manual page reloads.
|
||||
|
||||
* **Setting:** Locate the `UI_REFRESH` (or similar "Auto-refresh UI") setting within the **UI Settings plugin**.
|
||||
* **Optimal Interval:** Set this between **60 to 120 seconds**.
|
||||
* *Note:* Refreshing too frequently (e.g., <30s) on large networks can lead to high browser and server CPU usage.
|
||||
|
||||

|
||||
|
||||
### 2. Streamlining the Dashboard (MSP Mode)
|
||||
|
||||
An MSP's focus is on what is *broken*, not what is working. Hide the noise to increase reaction speed.
|
||||
|
||||
* **Hide Unnecessary Blocks:** Under UI Settings, disable dashboard blocks that don't provide immediate utility, such as **Online presence** or **Tiles**.
|
||||
* **Hide virtual connections:** You can specify which relationships should be hidden from the main view to remove any virtual devices that are not essential from your views.
|
||||
* **Browser Full-Screen:** Use the built-in "Full Screen" toggle in the top bar to remove browser chrome (URL bars/tabs) for a cleaner "Wallboard" look.
|
||||
|
||||
### 3. Creating Custom NOC Views
|
||||
|
||||
Use the UI Filters in tandem with UI Settings to create custom views.
|
||||
|
||||

|
||||
|
||||
| Feature | NOC/MSP Application |
|
||||
| --- | --- |
|
||||
| **Site-Specific Nodes** | Filter the view by a specific "Sync Node" or "Location" filter to monitor a single client site. |
|
||||
| **Filter by Criticality** | Filter devices where `Group == "Infrastructure"` or `"Server"`. (depending on your predefined values) |
|
||||
| **Predefined "Down" View** | Bookmark the URL with the `/devices.php#down` path to ensure the dashboard always loads into an "Alert Only" mode. |
|
||||
|
||||
### 4. Browser & Cache Stability
|
||||
|
||||
Because the UI is a web application, long-running sessions can occasionally experience cache drift.
|
||||
|
||||
* **Cache Refresh:** If you notice the "Show # Entries" resetting or icons failing to load after days of uptime, use the **Reload** icon in the application header (not the browser refresh) to clear the internal app cache.
|
||||
* **Dedicated Hardware:** For 24/7 monitoring, use a dedicated thin client or Raspberry Pi running in "Kiosk Mode" to prevent OS-level popups from obscuring the dashboard.
|
||||
|
||||
> [!TIP]
|
||||
> [NetAlertX - Detailed Dashboard Guide](https://www.youtube.com/watch?v=umh1c_40HW8)
|
||||
> This video provides a visual walkthrough of the NetAlertX dashboard features, including how to map and visualize devices which is crucial for setting up a clear "Eyes on Glass" monitoring environment.
|
||||
|
||||
### Summary Checklist
|
||||
|
||||
* [ ] **Automate Refresh:** Set `UI_REFRESH` to **60-120s** in UI Settings to ensure the dashboard stays current without manual intervention.
|
||||
* [ ] **Filter for Criticality:** Bookmark the **`/devices.php#down`** view to instantly focus on offline assets rather than the entire inventory.
|
||||
* [ ] **Remove UI Noise:** Use UI Settings to hide non-essential dashboard blocks (e.g., **Tiles** or remove **Virtual Connections** devices) to maximize screen real estate for alerts.
|
||||
* [ ] **Segment by Site:** Use **Location** or **Sync Node** filters to create dedicated views for specific client networks or physical branches.
|
||||
* [ ] **Ensure Stability:** Run on a dedicated "Kiosk" browser and use the internal **Reload icon** occasionally to maintain a clean application cache.
|
||||
121
docs/ADVISORY_MULTI_NETWORK.md
Normal file
@@ -0,0 +1,121 @@
|
||||
## ADVISORY: Best Practices for Monitoring Multiple Networks with NetAlertX
|
||||
|
||||
### 1. Define Monitoring Scope & Architecture
|
||||
|
||||
Effective multi-network monitoring starts with understanding how NetAlertX "sees" your traffic.
|
||||
|
||||
* **A. Understand Network Accessibility:** Local ARP-based scanning (**ARPSCAN**) only discovers devices on directly accessible subnets due to Layer 2 limitations. It cannot traverse VPNs or routed borders without specific configuration.
|
||||
* **B. Plan Subnet & Scan Interfaces:** Explicitly configure each accessible segment in `SCAN_SUBNETS` with the corresponding interfaces.
|
||||
* **C. Remote & Inaccessible Networks:** For networks unreachable via ARP, use these strategies:
|
||||
* **Alternate Plugins:** Supplement discovery with [SNMPDSC](SNMPDSC) or [DHCP lease imports](https://docs.netalertx.com/PLUGINS/?h=DHCPLSS#available-plugins).
|
||||
* **Centralized Multi-Tenant Management using Sync Nodes:** Run secondary NetAlertX instances on isolated networks and aggregate data using the **SYNC plugin**.
|
||||
* **Manual Entry:** For static assets where only ICMP (ping) status is needed.
|
||||
|
||||
> [!TIP]
|
||||
> Explore the [remote networks](./REMOTE_NETWORKS.md) documentation for more details on how to set up the approaches mentioned above.
|
||||
|
||||
---
|
||||
|
||||
### 2. Automating IT Asset Inventory with Workflows
|
||||
|
||||
[Workflows](./WORKFLOWS.md) are the "engine" of NetAlertX, reducing manual overhead as your device list grows.
|
||||
|
||||
* **A. Logical Ownership & VLAN Tagging:** Create a workflow triggered on **Device Creation** to:
|
||||
1. Inspect the IP/Subnet.
|
||||
2. Set `devVlan` or `devOwner` custom fields automatically.
|
||||
|
||||
|
||||
* **B. Auto-Grouping:** Use conditional logic to categorize devices.
|
||||
* *Example:* If `devLastIP == 10.10.20.*`, then `Set devLocation = "BranchOffice"`.
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Assign Location - BranchOffice",
|
||||
"trigger": {
|
||||
"object_type": "Devices",
|
||||
"event_type": "update"
|
||||
},
|
||||
"conditions": [
|
||||
{
|
||||
"logic": "AND",
|
||||
"conditions": [
|
||||
{
|
||||
"field": "devLastIP",
|
||||
"operator": "contains",
|
||||
"value": "10.10.20."
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"type": "update_field",
|
||||
"field": "devLocation",
|
||||
"value": "BranchOffice"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
* **C. Sync Node Tracking:** When using multiple instances, ensure all sync hub nodes have a descriptive `SYNC_node_name` to distinguish between sites.
|
||||
|
||||
> [!TIP]
|
||||
> Always test new workflows in a "Staging" instance. A misconfigured workflow can trigger thousands of unintended updates across your database.
|
||||
|
||||
---
|
||||
|
||||
### 3. Notification Strategy: Low Noise, High Signal
|
||||
|
||||
A multi-network environment can generate significant "alert fatigue." Use a layered filtering approach.
|
||||
|
||||
| Level | Strategy | Recommended Action |
|
||||
| --- | --- | --- |
|
||||
| **Device** | Silence Flapping | Use "Skip repeated notifications" for unstable IoT devices. |
|
||||
| **Plugin** | Tune Watchers | Only enable `_WATCH` on reliable plugins (e.g., ICMP/SNMP). |
|
||||
| **Global** | Filter Sections | Limit `NTFPRCS_INCLUDED_SECTIONS` to `new_devices` and `down_devices`. |
|
||||
|
||||
|
||||
> [!TIP]
|
||||
> **Ignore Rules:** Maintain strict **Ignored MAC** (`NEWDEV_ignored_MACs`) and **Ignored IP** (`NEWDEV_ignored_IPs`) lists for guest networks or broadcast scanners to keep your logs clean.
|
||||
|
||||
---
|
||||
|
||||
### 4. UI Filters for Multi-Network Clarity
|
||||
|
||||
Don't let a massive device list overwhelm you. Use the [Multi-edit features](./DEVICES_BULK_EDITING.md) to categorize devices and create focused views:
|
||||
|
||||
* **By Zone:** Filter by "Location", "Site" or "Sync Node" you set up in Section 2.
|
||||
* **By Criticality:** Use the custom device Type field to separate "Core Infrastructure" from "Ephemeral Clients."
|
||||
* **By Status:** Use predefined views specifically for "Devices currently Down" to act as a Network Operations Center (NOC) dashboard.
|
||||
|
||||
> [!TIP]
|
||||
> If you are providing services as a Managed Service Provider (MSP), customize your default UI to be exactly how you need it by hiding parts of the UI that you are not interested in, or by configuring an auto-refreshed screen monitoring your most important clients. See the [Eyes on glass](./ADVISORY_EYES_ON_GLASS.md) advisory for more details.
|
||||
|
||||
---
|
||||
|
||||
### 5. Operational Stability & Sync Health
|
||||
|
||||
* **Health Checks:** Regularly monitor the [Logs](https://docs.netalertx.com/LOGGING/?h=logs) to ensure remote nodes are reporting in.
|
||||
* **Backups:** Use the **CSV Devices Backup** plugin. Standardize your workflow templates and [back up](./BACKUPS.md) your `/config` folders so that if a node fails, you can redeploy it with the same logic instantly.
|
||||
|
||||
|
||||
### 6. Optimize Performance
|
||||
|
||||
As your environment grows, tuning the underlying engine is vital to maintain a snappy UI and reliable discovery cycles.
|
||||
|
||||
* **Plugin Scheduling:** Avoid "Scan Storms" by staggering plugin execution. Running intensive tasks like `NMAP` or `MASS_DNS` simultaneously can spike CPU and cause database locks.
|
||||
* **Database Health:** Large-scale monitoring generates massive event logs. Use the **[DBCLNP (Database Cleanup)](https://www.google.com/search?q=https://docs.netalertx.com/PLUGINS/%23dbclnp)** plugin to prune old records and keep the SQLite database performant.
|
||||
* **Resource Management:** For high-device counts, consider increasing the memory limit for the container and utilizing `tmpfs` for temporary files to reduce SD card/disk I/O bottlenecks.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> For a deep dive into hardware requirements, database vacuuming, and specific environment variables for high-load instances, refer to the full **[Performance Optimization Guide](https://docs.netalertx.com/PERFORMANCE/)**.
|
||||
|
||||
---
|
||||
|
||||
### Summary Checklist
|
||||
|
||||
* [ ] **Discovery:** Are all subnets explicitly defined?
|
||||
* [ ] **Automation:** Do new devices get auto-assigned to a VLAN/Owner?
|
||||
* [ ] **Noise Control:** Are transient "Down" alerts delayed via `NTFPRCS_alert_down_time`?
|
||||
* [ ] **Remote Sites:** Is the SYNC plugin authenticated and heartbeat-active?
|
||||
@@ -39,9 +39,24 @@ The **MAC** field and the **Last IP** field will then become editable.
|
||||

|
||||
|
||||
|
||||
> [!NOTE]
|
||||
>
|
||||
> You can couple this with the `ICMP` plugin which can be used to monitor the status of these devices, if they are actual devices reachable with the `ping` command. If not, you can use a loopback IP address so they appear online, such as `0.0.0.0` or `127.0.0.1`.
|
||||
## Dummy or Manually Created Device Status
|
||||
|
||||
You can control a dummy device’s status either via `ICMP` (automatic) or the `Force Status` field (manual). Choose based on whether the device is real and how important **data hygiene** is.
|
||||
|
||||
### `ICMP` (Real Devices)
|
||||
|
||||
Use a real IP that responds to ping so status is updated automatically.
|
||||
|
||||
### `Force Status` (Best for Data Hygiene)
|
||||
|
||||
Manually set the status when the device is not reachable or is purely logical.
|
||||
This keeps your data clean and avoids fake IPs.
|
||||
|
||||
### Loopback IP (`127.0.0.1`, `0.0.0.0`)
|
||||
|
||||
Use when you want the device to always appear online via `ICMP`.
|
||||
Note this simulates reachability and introduces artificial data. This approach might be preferred, if you want to filter and distinguish dummy devices based on IP when filtering your asset lists.
|
||||
|
||||
|
||||
## Copying data from an existing device.
|
||||
|
||||
|
||||
@@ -215,7 +215,7 @@ services:
|
||||
|
||||
### 1.3 Migration from NetAlertX `v25.10.1`
|
||||
|
||||
Starting from v25.10.1, the container uses a [more secure, read-only runtime environment](./SECURITY_FEATURES.md), which requires all writable paths (e.g., logs, API cache, temporary data) to be mounted as `tmpfs` or permanent writable volumes, with sufficient access [permissions](./FILE_PERMISSIONS.md). The data location has also changed from `/app/db` and `/app/config` to `/data/db` and `/data/config`. See detailed steps below.
|
||||
Starting from `v25.10.1`, the container uses a [more secure, read-only runtime environment](./SECURITY_FEATURES.md), which requires all writable paths (e.g., logs, API cache, temporary data) to be mounted as `tmpfs` or permanent writable volumes, with sufficient access [permissions](./FILE_PERMISSIONS.md). The data location has also changed from `/app/db` and `/app/config` to `/data/db` and `/data/config`. See detailed steps below.
|
||||
|
||||
#### STEPS:
|
||||
|
||||
@@ -248,7 +248,7 @@ services:
|
||||
services:
|
||||
netalertx:
|
||||
container_name: netalertx
|
||||
image: "ghcr.io/jokob-sk/netalertx" # 🆕 This has changed
|
||||
image: "ghcr.io/jokob-sk/netalertx:25.11.29" # 🆕 This has changed
|
||||
network_mode: "host"
|
||||
cap_drop: # 🆕 New line
|
||||
- ALL # 🆕 New line
|
||||
|
||||
@@ -63,7 +63,7 @@ There is also an in-app Help / FAQ section that should be answering frequently a
|
||||
|
||||
#### ♻ Misc
|
||||
|
||||
- [Reverse proxy (Nginx, Apache, SWAG)](./REVERSE_PROXY.md)
|
||||
- [Reverse Proxy](./REVERSE_PROXY.md)
|
||||
- [Installing Updates](./UPDATES.md)
|
||||
- [Setting up Authelia](./AUTHELIA.md) (DRAFT)
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@ If you don't need to discover new devices and only need to report on their statu
|
||||
|
||||
For more information on how to add devices manually (or dummy devices), refer to the [Device Management](./DEVICE_MANAGEMENT.md) documentation.
|
||||
|
||||
To create truly dummy devices, you can use a loopback IP address (e.g., `0.0.0.0` or `127.0.0.1`) so they appear online.
|
||||
To create truly dummy devices, you can use a loopback IP address (e.g., `0.0.0.0` or `127.0.0.1`) or the `Force Status` field so they appear online.
|
||||
|
||||
## NMAP and Fake MAC Addresses
|
||||
|
||||
|
||||
577
docs/REVERSE_PROXY.md
Executable file → Normal file
@@ -1,526 +1,135 @@
|
||||
# Reverse Proxy Configuration
|
||||
|
||||
> [!NOTE]
|
||||
> This is community-contributed. Due to environment, setup, or networking differences, results may vary. Please open a PR to improve it instead of creating an issue, as the maintainer is not actively maintaining it.
|
||||
A reverse proxy is a server that sits between users and your NetAlertX instance. It allows you to:
|
||||
- Access NetAlertX via a domain name (e.g., `https://netalertx.example.com`).
|
||||
- Add HTTPS/SSL encryption.
|
||||
- Enforce authentication (like SSO).
|
||||
|
||||
> [!NOTE]
|
||||
> NetAlertX requires access to both the **web UI** (default `20211`) and the **GraphQL backend `GRAPHQL_PORT`** (default `20212`) ports.
|
||||
> Ensure your reverse proxy allows traffic to both for proper functionality.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> You will need to specify 2 entries in your reverse proxy, one for the front end, one for the backend URL. The custom backend URL, including the `GRAPHQL_PORT`, needs to be also specified in the `BACKEND_API_URL` setting. This is the URL that points to the backend API server.
|
||||
>
|
||||
> 
|
||||
>
|
||||
> 
|
||||
|
||||
See also:
|
||||
|
||||
- [CADDY + AUTHENTIK](./REVERSE_PROXY_CADDY.md)
|
||||
- [TRAEFIK](./REVERSE_PROXY_TRAEFIK.md)
|
||||
|
||||
|
||||
## NGINX HTTP Configuration (Direct Path)
|
||||
|
||||
> Submitted by amazing [cvc90](https://github.com/cvc90) 🙏
|
||||
|
||||
> [!NOTE]
|
||||
> There are various NGINX config files for NetAlertX, some for the bare-metal install, currently Debian 12 and Ubuntu 24 (`netalertx.conf`), and one for the docker container (`netalertx.template.conf`).
|
||||
>
|
||||
> The first one you can find in the respective bare metal installer folder `/app/install/<system>/netalertx.conf`.
|
||||
> The docker one can be found in the [install](https://github.com/jokob-sk/NetAlertX/tree/main/install) folder. Map, or use, the one appropriate for your setup.
|
||||
|
||||
<br/>
|
||||
|
||||
1. On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
|
||||
|
||||
2. In this file, paste the following code:
|
||||
|
||||
```
|
||||
server {
|
||||
listen 80;
|
||||
server_name netalertx;
|
||||
proxy_preserve_host on;
|
||||
proxy_pass http://localhost:20211/;
|
||||
proxy_pass_reverse http://localhost:20211/;
|
||||
}
|
||||
```mermaid
|
||||
flowchart LR
|
||||
Browser --HTTPS--> Proxy[Reverse Proxy] --HTTP--> Container[NetAlertX Container]
|
||||
```
|
||||
|
||||
3. Activate the new website by running the following command:
|
||||
## NetAlertX Ports
|
||||
|
||||
`nginx -s reload` or `systemctl restart nginx`
|
||||
NetAlertX exposes two ports that serve different purposes. Your reverse proxy can target one or both, depending on your needs.
|
||||
|
||||
4. Check your config with `nginx -t`. If there are any issues, it will tell you.
|
||||
| Port | Service | Purpose |
|
||||
|------|---------|---------|
|
||||
| **20211** | Nginx (Web UI) | The main interface. |
|
||||
| **20212** | Backend API | Direct access to the API and GraphQL. Includes API docs you can view with a browser. |
|
||||
|
||||
5. Once NGINX restarts, you should be able to access the proxy website at http://netalertx/
|
||||
> [!WARNING]
|
||||
> **Do not document or use `/server` as an external API endpoint.** It is an internal route used by the Nginx frontend to communicate with the backend.
|
||||
|
||||
<br/>
|
||||
## Connection Patterns
|
||||
|
||||
## NGINX HTTP Configuration (Sub Path)
|
||||
### 1. Default (No Proxy)
|
||||
For local testing or LAN access. The browser accesses the UI on port 20211. Code and API docs are accessible on 20212.
|
||||
|
||||
1. On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
|
||||
|
||||
2. In this file, paste the following code:
|
||||
|
||||
```
|
||||
server {
|
||||
listen 80;
|
||||
server_name netalertx;
|
||||
proxy_preserve_host on;
|
||||
location ^~ /netalertx/ {
|
||||
proxy_pass http://localhost:20211/;
|
||||
proxy_pass_reverse http://localhost:20211/;
|
||||
proxy_redirect ~^/(.*)$ /netalertx/$1;
|
||||
rewrite ^/netalertx/?(.*)$ /$1 break;
|
||||
}
|
||||
}
|
||||
```mermaid
|
||||
flowchart LR
|
||||
B[Browser]
|
||||
subgraph NAC[NetAlertX Container]
|
||||
N[Nginx listening on port 20211]
|
||||
A[Service on port 20212]
|
||||
N -->|Proxy /server to localhost:20212| A
|
||||
end
|
||||
B -->|port 20211| NAC
|
||||
B -->|port 20212| NAC
|
||||
```
|
||||
|
||||
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
|
||||
### 2. Direct API Consumer (Not Recommended)
|
||||
Connecting directly to the backend API port (20212).
|
||||
|
||||
4. Activate the new website by running the following command:
|
||||
> [!CAUTION]
|
||||
> This exposes the API directly to the network without additional protection. Avoid this on untrusted networks.
|
||||
|
||||
`nginx -s reload` or `systemctl restart nginx`
|
||||
|
||||
5. Once NGINX restarts, you should be able to access the proxy website at http://netalertx/netalertx/
|
||||
|
||||
<br/>
|
||||
|
||||
## NGINX HTTP Configuration (Sub Path) with module ngx_http_sub_module
|
||||
|
||||
1. On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
|
||||
|
||||
2. In this file, paste the following code:
|
||||
|
||||
```
|
||||
server {
|
||||
listen 80;
|
||||
server_name netalertx;
|
||||
proxy_preserve_host on;
|
||||
location ^~ /netalertx/ {
|
||||
proxy_pass http://localhost:20211/;
|
||||
proxy_pass_reverse http://localhost:20211/;
|
||||
proxy_redirect ~^/(.*)$ /netalertx/$1;
|
||||
rewrite ^/netalertx/?(.*)$ /$1 break;
|
||||
sub_filter_once off;
|
||||
sub_filter_types *;
|
||||
sub_filter 'href="/' 'href="/netalertx/';
|
||||
sub_filter '(?>$host)/css' '/netalertx/css';
|
||||
sub_filter '(?>$host)/js' '/netalertx/js';
|
||||
sub_filter '/img' '/netalertx/img';
|
||||
sub_filter '/lib' '/netalertx/lib';
|
||||
sub_filter '/php' '/netalertx/php';
|
||||
}
|
||||
}
|
||||
```mermaid
|
||||
flowchart LR
|
||||
B[Browser] -->|HTTPS| S[Any API Consumer app]
|
||||
subgraph NAC[NetAlertX Container]
|
||||
N[Nginx listening on port 20211]
|
||||
N -->|Proxy /server to localhost:20212| A[Service on port 20212]
|
||||
end
|
||||
S -->|Port 20212| NAC
|
||||
```
|
||||
|
||||
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
|
||||
### 3. Recommended: Reverse Proxy to Web UI
|
||||
Using a reverse proxy (Nginx, Traefik, Caddy, etc.) to handle HTTPS and Auth in front of the main UI.
|
||||
|
||||
4. Activate the new website by running the following command:
|
||||
|
||||
`nginx -s reload` or `systemctl restart nginx`
|
||||
|
||||
5. Once NGINX restarts, you should be able to access the proxy website at http://netalertx/netalertx/
|
||||
|
||||
<br/>
|
||||
|
||||
**NGINX HTTPS Configuration (Direct Path)**
|
||||
|
||||
1. On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
|
||||
|
||||
2. In this file, paste the following code:
|
||||
|
||||
```
|
||||
server {
|
||||
listen 443;
|
||||
server_name netalertx;
|
||||
SSLEngine On;
|
||||
SSLCertificateFile /etc/ssl/certs/netalertx.pem;
|
||||
SSLCertificateKeyFile /etc/ssl/private/netalertx.key;
|
||||
proxy_preserve_host on;
|
||||
proxy_pass http://localhost:20211/;
|
||||
proxy_pass_reverse http://localhost:20211/;
|
||||
}
|
||||
```mermaid
|
||||
flowchart LR
|
||||
B[Browser] -->|HTTPS| S[Any Auth/SSL proxy]
|
||||
subgraph NAC[NetAlertX Container]
|
||||
N[Nginx listening on port 20211]
|
||||
N -->|Proxy /server to localhost:20212| A[Service on port 20212]
|
||||
end
|
||||
S -->|port 20211| NAC
|
||||
```
|
||||
|
||||
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
|
||||
### 4. Recommended: Proxied API Consumer
|
||||
Using a proxy to secure API access with TLS or IP limiting.
|
||||
|
||||
4. Activate the new website by running the following command:
|
||||
**Why is this important?**
|
||||
The backend API (`:20212`) is powerful—more so than the Web UI, which is a safer, password-protectable interface. By using a reverse proxy to **limit sources** (e.g., allowing only your Home Assistant server's IP), you ensure that only trusted devices can talk to your backend.
|
||||
|
||||
`nginx -s reload` or `systemctl restart nginx`
|
||||
|
||||
5. Once NGINX restarts, you should be able to access the proxy website at https://netalertx/
|
||||
|
||||
<br/>
|
||||
|
||||
**NGINX HTTPS Configuration (Sub Path)**
|
||||
|
||||
1. On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
|
||||
|
||||
2. In this file, paste the following code:
|
||||
|
||||
```
|
||||
server {
|
||||
listen 443;
|
||||
server_name netalertx;
|
||||
SSLEngine On;
|
||||
SSLCertificateFile /etc/ssl/certs/netalertx.pem;
|
||||
SSLCertificateKeyFile /etc/ssl/private/netalertx.key;
|
||||
location ^~ /netalertx/ {
|
||||
proxy_pass http://localhost:20211/;
|
||||
proxy_pass_reverse http://localhost:20211/;
|
||||
proxy_redirect ~^/(.*)$ /netalertx/$1;
|
||||
rewrite ^/netalertx/?(.*)$ /$1 break;
|
||||
}
|
||||
}
|
||||
```mermaid
|
||||
flowchart LR
|
||||
B[Browser] -->|HTTPS| S[Any API Consumer app]
|
||||
C[HTTPS/source-limiting Proxy]
|
||||
subgraph NAC[NetAlertX Container]
|
||||
N[Nginx listening on port 20211]
|
||||
N -->|Proxy /server to localhost:20212| A[Service on port 20212]
|
||||
end
|
||||
S -->|HTTPS| C
|
||||
C -->|Port 20212| NAC
|
||||
```
|
||||
|
||||
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
|
||||
## Getting Started: Nginx Proxy Manager
|
||||
|
||||
4. Activate the new website by running the following command:
|
||||
For beginners, we recommend **[Nginx Proxy Manager](https://nginxproxymanager.com/)**. It provides a user-friendly interface to manage proxy hosts and free SSL certificates via Let's Encrypt.
|
||||
|
||||
`nginx -s reload` or `systemctl restart nginx`
|
||||
1. Install Nginx Proxy Manager alongside NetAlertX.
|
||||
2. Create a **Proxy Host** pointing to your NetAlertX IP and Port `20211` for the Web UI.
|
||||
3. (Optional) Create a second host for the API on Port `20212`.
|
||||
|
||||
5. Once NGINX restarts, you should be able to access the proxy website at https://netalertx/netalertx/
|
||||

|
||||
|
||||
<br/>
|
||||
### Configuration Settings
|
||||
|
||||
## NGINX HTTPS Configuration (Sub Path) with module ngx_http_sub_module
|
||||
When using a reverse proxy, you should verify two settings in **Settings > Core > General**:
|
||||
|
||||
1. On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
|
||||
1. **BACKEND_API_URL**: This should be set to `/server`.
|
||||
* *Reason:* The frontend should communicate with the backend via the internal Nginx proxy rather than routing out to the internet and back.
|
||||
|
||||
2. In this file, paste the following code:
|
||||
2. **REPORT_DASHBOARD_URL**: Set this to your external proxy URL (e.g., `https://netalertx.example.com`).
|
||||
* *Reason:* This URL is used to generate proper clickable links in emails and HTML reports.
|
||||
|
||||
```
|
||||
server {
|
||||
listen 443;
|
||||
server_name netalertx;
|
||||
SSLEngine On;
|
||||
SSLCertificateFile /etc/ssl/certs/netalertx.pem;
|
||||
SSLCertificateKeyFile /etc/ssl/private/netalertx.key;
|
||||
location ^~ /netalertx/ {
|
||||
proxy_pass http://localhost:20211/;
|
||||
proxy_pass_reverse http://localhost:20211/;
|
||||
proxy_redirect ~^/(.*)$ /netalertx/$1;
|
||||
rewrite ^/netalertx/?(.*)$ /$1 break;
|
||||
sub_filter_once off;
|
||||
sub_filter_types *;
|
||||
sub_filter 'href="/' 'href="/netalertx/';
|
||||
sub_filter '(?>$host)/css' '/netalertx/css';
|
||||
sub_filter '(?>$host)/js' '/netalertx/js';
|
||||
sub_filter '/img' '/netalertx/img';
|
||||
sub_filter '/lib' '/netalertx/lib';
|
||||
sub_filter '/php' '/netalertx/php';
|
||||
}
|
||||
}
|
||||
```
|
||||

|
||||
|
||||
3. Check your config with `nginx -t`. If there are any issues, it will tell you.
|
||||
## Other Reverse Proxies
|
||||
|
||||
4. Activate the new website by running the following command:
|
||||
NetAlertX uses standard HTTP. Any reverse proxy will work. Simply forward traffic to the appropriate port (`20211` or `20212`).
|
||||
|
||||
`nginx -s reload` or `systemctl restart nginx`
|
||||
For configuration details, consult the documentation for your preferred proxy:
|
||||
|
||||
5. Once NGINX restarts, you should be able to access the proxy website at https://netalertx/netalertx/
|
||||
* **[NGINX](https://nginx.org/en/docs/http/ngx_http_proxy_module.html)**
|
||||
* **[Apache (mod_proxy)](https://httpd.apache.org/docs/current/mod/mod_proxy.html)**
|
||||
* **[Caddy](https://caddyserver.com/docs/caddyfile/directives/reverse_proxy)**
|
||||
* **[Traefik](https://doc.traefik.io/traefik/routing/services/)**
|
||||
|
||||
<br/>
|
||||
## Authentication
|
||||
|
||||
## Apache HTTP Configuration (Direct Path)
|
||||
If you wish to add Single Sign-On (SSO) or other authentication in front of NetAlertX, refer to the documentation for your identity provider:
|
||||
|
||||
1. On your Apache server, create a new file called /etc/apache2/sites-available/netalertx.conf.
|
||||
* **[Authentik](https://docs.goauthentik.io/)**
|
||||
* **[Authelia](https://www.authelia.com/docs/)**
|
||||
|
||||
2. In this file, paste the following code:
|
||||
|
||||
```
|
||||
<VirtualHost *:80>
|
||||
ServerName netalertx
|
||||
ProxyPreserveHost On
|
||||
ProxyPass / http://localhost:20211/
|
||||
ProxyPassReverse / http://localhost:20211/
|
||||
</VirtualHost>
|
||||
```
|
||||
|
||||
3. Check your config with `httpd -t` (or `apache2ctl -t` on Debian/Ubuntu). If there are any issues, it will tell you.
|
||||
|
||||
4. Activate the new website by running the following command:
|
||||
|
||||
`a2ensite netalertx` or `service apache2 reload`
|
||||
|
||||
5. Once Apache restarts, you should be able to access the proxy website at http://netalertx/
|
||||
|
||||
<br/>
|
||||
|
||||
## Apache HTTP Configuration (Sub Path)
|
||||
|
||||
1. On your Apache server, create a new file called /etc/apache2/sites-available/netalertx.conf.
|
||||
|
||||
2. In this file, paste the following code:
|
||||
|
||||
```
|
||||
<VirtualHost *:80>
|
||||
ServerName netalertx
|
||||
location ^~ /netalertx/ {
|
||||
ProxyPreserveHost On
|
||||
ProxyPass / http://localhost:20211/
|
||||
ProxyPassReverse / http://localhost:20211/
|
||||
}
|
||||
</VirtualHost>
|
||||
```
|
||||
|
||||
3. Check your config with `httpd -t` (or `apache2ctl -t` on Debian/Ubuntu). If there are any issues, it will tell you.
|
||||
|
||||
4. Activate the new website by running the following command:
|
||||
|
||||
`a2ensite netalertx` or `service apache2 reload`
|
||||
|
||||
5. Once Apache restarts, you should be able to access the proxy website at http://netalertx/
|
||||
|
||||
<br/>
|
||||
|
||||
## Apache HTTPS Configuration (Direct Path)
|
||||
|
||||
1. On your Apache server, create a new file called /etc/apache2/sites-available/netalertx.conf.
|
||||
|
||||
2. In this file, paste the following code:
|
||||
|
||||
```
|
||||
<VirtualHost *:443>
|
||||
ServerName netalertx
|
||||
SSLEngine On
|
||||
SSLCertificateFile /etc/ssl/certs/netalertx.pem
|
||||
SSLCertificateKeyFile /etc/ssl/private/netalertx.key
|
||||
ProxyPreserveHost On
|
||||
ProxyPass / http://localhost:20211/
|
||||
ProxyPassReverse / http://localhost:20211/
|
||||
</VirtualHost>
|
||||
```
|
||||
|
||||
3. Check your config with `httpd -t` (or `apache2ctl -t` on Debian/Ubuntu). If there are any issues, it will tell you.
|
||||
|
||||
4. Activate the new website by running the following command:
|
||||
|
||||
`a2ensite netalertx` or `service apache2 reload`
|
||||
|
||||
5. Once Apache restarts, you should be able to access the proxy website at https://netalertx/
|
||||
|
||||
<br/>
|
||||
|
||||
## Apache HTTPS Configuration (Sub Path)
|
||||
|
||||
1. On your Apache server, create a new file called /etc/apache2/sites-available/netalertx.conf.
|
||||
|
||||
2. In this file, paste the following code:
|
||||
|
||||
```
|
||||
<VirtualHost *:443>
|
||||
ServerName netalertx
|
||||
SSLEngine On
|
||||
SSLCertificateFile /etc/ssl/certs/netalertx.pem
|
||||
SSLCertificateKeyFile /etc/ssl/private/netalertx.key
|
||||
location ^~ /netalertx/ {
|
||||
ProxyPreserveHost On
|
||||
ProxyPass / http://localhost:20211/
|
||||
ProxyPassReverse / http://localhost:20211/
|
||||
}
|
||||
</VirtualHost>
|
||||
```
|
||||
|
||||
3. Check your config with `httpd -t` (or `apache2ctl -t` on Debian/Ubuntu). If there are any issues, it will tell you.
|
||||
|
||||
4. Activate the new website by running the following command:
|
||||
|
||||
`a2ensite netalertx` or `service apache2 reload`
|
||||
|
||||
5. Once Apache restarts, you should be able to access the proxy website at https://netalertx/netalertx/
|
||||
|
||||
<br/>
|
||||
|
||||
## Reverse proxy example by using LinuxServer's SWAG container.
|
||||
|
||||
> Submitted by [s33d1ing](https://github.com/s33d1ing). 🙏
|
||||
|
||||
## [linuxserver/swag](https://github.com/linuxserver/docker-swag)
|
||||
|
||||
In the SWAG container create `/config/nginx/proxy-confs/netalertx.subfolder.conf` with the following contents:
|
||||
|
||||
``` nginx
|
||||
## Version 2023/02/05
|
||||
# make sure that your netalertx container is named netalertx
|
||||
# netalertx does not require a base url setting
|
||||
|
||||
# Since NetAlertX uses a Host network, you may need to use the IP address of the system running NetAlertX for $upstream_app.
|
||||
|
||||
location /netalertx {
|
||||
return 301 $scheme://$host/netalertx/;
|
||||
}
|
||||
|
||||
location ^~ /netalertx/ {
|
||||
# enable the next two lines for http auth
|
||||
#auth_basic "Restricted";
|
||||
#auth_basic_user_file /config/nginx/.htpasswd;
|
||||
|
||||
# enable for ldap auth (requires ldap-server.conf in the server block)
|
||||
#include /config/nginx/ldap-location.conf;
|
||||
|
||||
# enable for Authelia (requires authelia-server.conf in the server block)
|
||||
#include /config/nginx/authelia-location.conf;
|
||||
|
||||
# enable for Authentik (requires authentik-server.conf in the server block)
|
||||
#include /config/nginx/authentik-location.conf;
|
||||
|
||||
include /config/nginx/proxy.conf;
|
||||
include /config/nginx/resolver.conf;
|
||||
|
||||
set $upstream_app netalertx;
|
||||
set $upstream_port 20211;
|
||||
set $upstream_proto http;
|
||||
|
||||
proxy_pass $upstream_proto://$upstream_app:$upstream_port;
|
||||
proxy_set_header Accept-Encoding "";
|
||||
|
||||
proxy_redirect ~^/(.*)$ /netalertx/$1;
|
||||
rewrite ^/netalertx/?(.*)$ /$1 break;
|
||||
|
||||
sub_filter_once off;
|
||||
sub_filter_types *;
|
||||
|
||||
sub_filter 'href="/' 'href="/netalertx/';
|
||||
|
||||
sub_filter '(?>$host)/css' '/netalertx/css';
|
||||
sub_filter '(?>$host)/js' '/netalertx/js';
|
||||
|
||||
sub_filter '/img' '/netalertx/img';
|
||||
sub_filter '/lib' '/netalertx/lib';
|
||||
sub_filter '/php' '/netalertx/php';
|
||||
}
|
||||
```
|
||||
|
||||
<br/>
|
||||
|
||||
## Traefik
|
||||
|
||||
> Submitted by [Isegrimm](https://github.com/Isegrimm) 🙏 (based on this [discussion](https://github.com/jokob-sk/NetAlertX/discussions/449#discussioncomment-7281442))
|
||||
|
||||
Assuming the user already has a working Traefik setup, this is what's needed to make NetAlertX work at a URL like www.domain.com/netalertx/.
|
||||
|
||||
Note: Everything in these configs assumes '**www.domain.com**' as your domain name and '**section31**' as an arbitrary name for your certificate setup. You will have to substitute these with your own.
|
||||
|
||||
Also, I use the prefix '**netalertx**'. If you want to use another prefix, change it in these files: dynamic.toml and default.
|
||||
|
||||
Content of my yaml-file (this is the generic Traefik config, which defines which ports to listen on, redirect http to https and sets up the certificate process).
|
||||
It also contains Authelia, which I use for authentication.
|
||||
This part contains nothing specific to NetAlertX.
|
||||
|
||||
```yaml
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
traefik:
|
||||
image: traefik
|
||||
container_name: traefik
|
||||
command:
|
||||
- "--api=true"
|
||||
- "--api.insecure=true"
|
||||
- "--api.dashboard=true"
|
||||
- "--entrypoints.web.address=:80"
|
||||
- "--entrypoints.web.http.redirections.entryPoint.to=websecure"
|
||||
- "--entrypoints.web.http.redirections.entryPoint.scheme=https"
|
||||
- "--entrypoints.websecure.address=:443"
|
||||
- "--providers.file.filename=/traefik-config/dynamic.toml"
|
||||
- "--providers.file.watch=true"
|
||||
- "--log.level=ERROR"
|
||||
- "--certificatesresolvers.section31.acme.email=postmaster@domain.com"
|
||||
- "--certificatesresolvers.section31.acme.storage=/traefik-config/acme.json"
|
||||
- "--certificatesresolvers.section31.acme.httpchallenge=true"
|
||||
- "--certificatesresolvers.section31.acme.httpchallenge.entrypoint=web"
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||
- /appl/docker/traefik/config:/traefik-config
|
||||
depends_on:
|
||||
- authelia
|
||||
restart: unless-stopped
|
||||
authelia:
|
||||
container_name: authelia
|
||||
image: authelia/authelia:latest
|
||||
ports:
|
||||
- "9091:9091"
|
||||
volumes:
|
||||
- /appl/docker/authelia:/config
|
||||
restart: unless-stopped
|
||||
```
|
||||
Snippet of the dynamic.toml file (referenced in the yml-file above) that defines the config for NetAlertX:
|
||||
The following are self-defined keywords, everything else is traefik keywords:
|
||||
- netalertx-router
|
||||
- netalertx-service
|
||||
- auth
|
||||
- netalertx-stripprefix
|
||||
|
||||
|
||||
```toml
|
||||
[http.routers]
|
||||
[http.routers.netalertx-router]
|
||||
entryPoints = ["websecure"]
|
||||
rule = "Host(`www.domain.com`) && PathPrefix(`/netalertx`)"
|
||||
service = "netalertx-service"
|
||||
middlewares = "auth,netalertx-stripprefix"
|
||||
[http.routers.netalertx-router.tls]
|
||||
certResolver = "section31"
|
||||
[[http.routers.netalertx-router.tls.domains]]
|
||||
main = "www.domain.com"
|
||||
|
||||
[http.services]
|
||||
[http.services.netalertx-service]
|
||||
[[http.services.netalertx-service.loadBalancer.servers]]
|
||||
url = "http://internal-ip-address:20211/"
|
||||
|
||||
[http.middlewares]
|
||||
[http.middlewares.auth.forwardAuth]
|
||||
address = "http://authelia:9091/api/verify?rd=https://www.domain.com/authelia/"
|
||||
trustForwardHeader = true
|
||||
authResponseHeaders = ["Remote-User", "Remote-Groups", "Remote-Name", "Remote-Email"]
|
||||
[http.middlewares.netalertx-stripprefix.stripprefix]
|
||||
prefixes = "/netalertx"
|
||||
forceSlash = false
|
||||
|
||||
```
|
||||
To make NetAlertX work with this setup I modified the default file at `/etc/nginx/sites-available/default` in the docker container by copying it to my local filesystem, adding the changes as specified by [cvc90](https://github.com/cvc90) and mounting the new file into the docker container, overwriting the original one. By mapping the file instead of changing the file in-place, the changes persist if an updated Docker image is pulled. This is also a downside when the default file is updated, so I only use this as a temporary solution, until the Docker image is updated with this change.
|
||||
|
||||
Default-file:
|
||||
|
||||
```
|
||||
server {
|
||||
listen 80 default_server;
|
||||
root /var/www/html;
|
||||
index index.php;
|
||||
#rewrite /netalertx/(.*) / permanent;
|
||||
add_header X-Forwarded-Prefix "/netalertx" always;
|
||||
proxy_set_header X-Forwarded-Prefix "/netalertx";
|
||||
|
||||
location ~* \.php$ {
|
||||
fastcgi_pass unix:/run/php/php8.2-fpm.sock;
|
||||
include fastcgi_params;
|
||||
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
|
||||
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
|
||||
fastcgi_connect_timeout 75;
|
||||
fastcgi_send_timeout 600;
|
||||
fastcgi_read_timeout 600;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Mapping the updated file (on the local filesystem at `/appl/docker/netalertx/default`) into the docker container:
|
||||
|
||||
|
||||
```yaml
|
||||
...
|
||||
volumes:
|
||||
- /appl/docker/netalertx/default:/etc/nginx/sites-available/default
|
||||
...
|
||||
```
|
||||
## Further Reading
|
||||
|
||||
If you want to understand more about reverse proxies and networking concepts:
|
||||
|
||||
* [What is a Reverse Proxy? (Cloudflare)](https://www.cloudflare.com/learning/cdn/glossary/reverse-proxy/)
|
||||
* [Proxy vs Reverse Proxy (StrongDM)](https://www.strongdm.com/blog/difference-between-proxy-and-reverse-proxy)
|
||||
* [Nginx Reverse Proxy Glossary](https://www.nginx.com/resources/glossary/reverse-proxy-server/)
|
||||
|
||||
---
|
||||
## Caddy + Authentik Outpost Proxy SSO
|
||||
> Submitted by [luckylinux](https://github.com/luckylinux) 🙏.
|
||||
|
||||
> [!NOTE]
|
||||
> This is community-contributed. Due to environment, setup, or networking differences, results may vary. Please open a PR to improve it instead of creating an issue, as the maintainer is not actively maintaining it.
|
||||
|
||||
> [!NOTE]
|
||||
> NetAlertX requires access to both the **web UI** (default `20211`) and the **GraphQL backend `GRAPHQL_PORT`** (default `20212`) ports.
|
||||
> Ensure your reverse proxy allows traffic to both for proper functionality.
|
||||
|
||||
### Introduction
|
||||
|
||||
This Setup assumes:
|
||||
|
||||
1. Authentik Installation running on a separate Host at `https://authentik.MYDOMAIN.TLD`
|
||||
2. Container Management is done on Baremetal OR in a Virtual Machine (KVM/Xen/ESXi/..., no LXC Containers !):
|
||||
i. Docker and Docker Compose configured locally running as Root (needed for `network_mode: host`) OR
|
||||
ii. Podman (optionally `podman-compose`) configured locally running as Root (needed for `network_mode: host`)
|
||||
3. TLS Certificates are already pre-obtained and located at `/var/lib/containers/certificates/letsencrypt/MYDOMAIN.TLD`.
|
||||
I use the `certbot/dns-cloudflare` Podman Container on a separate Host to obtain the Certificates which I then distribute internally.
|
||||
This Container uses the Wildcard Top-Level Domain Certificate which is valid for `MYDOMAIN.TLD` and `*.MYDOMAIN.TLD`.
|
||||
4. Proxied Access
|
||||
i. NetAlertX Web Interface is accessible via Caddy Reverse Proxy at `https://netalertx.MYDOMAIN.TLD` (default HTTPS Port 443: `https://netalertx.MYDOMAIN.TLD:443`) with `REPORT_DASHBOARD_URL=https://netalertx.MYDOMAIN.TLD`
|
||||
ii. NetAlertX GraphQL Interface is accessible via Caddy Reverse Proxy at `https://netalertx.MYDOMAIN.TLD:20212` with `BACKEND_API_URL=https://netalertx.MYDOMAIN.TLD:20212`
|
||||
iii. Authentik Proxy Outpost is accessible via Caddy Reverse Proxy at `https://netalertx.MYDOMAIN.TLD:9443`
|
||||
5. Internal Ports
|
||||
i. NGINX Web Server is set to listen on internal Port 20211 set via `PORT=20211`
|
||||
ii. Python Web Server is set to listen on internal Port `GRAPHQL_PORT=20219`
|
||||
iii. Authentik Proxy Outpost is listening on internal Port `AUTHENTIK_LISTEN__HTTP=[::1]:6000` (unencrypted) and Port `AUTHENTIK_LISTEN__HTTPS=[::1]:6443` (encrypted)
|
||||
|
||||
6. Some further Configuration for Caddy is performed in Terms of Logging, SSL Certificates, etc
|
||||
|
||||
It's also possible to [let Caddy automatically request & keep TLS Certificates up-to-date](https://caddyserver.com/docs/automatic-https), although please keep in mind that:
|
||||
|
||||
1. You risk enumerating your LAN. Every Domain/Subdomain for which Caddy requests a TLS Certificate for you will result in that Host to be listed on [List of Letsencrypt Certificates issued](https://crt.sh/).
|
||||
2. You need to either:
|
||||
i. Open Port 80 for external Access ([HTTP challenge](https://caddyserver.com/docs/automatic-https#http-challenge)) in order for Letsencrypt to verify the Ownership of the Domain/Subdomain
|
||||
ii. Open Port 443 for external Access ([TLS-ALPN challenge](https://caddyserver.com/docs/automatic-https#tls-alpn-challenge)) in order for Letsencrypt to verify the Ownership of the Domain/Subdomain
|
||||
iii. Give Caddy the Credentials to update the DNS Records at your DNS Provider ([DNS challenge](https://caddyserver.com/docs/automatic-https#dns-challenge))
|
||||
|
||||
You can also decide to deploy your own Certificates & Certification Authority, either manually with OpenSSL, or by using something like [mkcert](https://github.com/FiloSottile/mkcert).
|
||||
|
||||
In Terms of IP Stack Used:
|
||||
- External: Caddy listens on both IPv4 and IPv6.
|
||||
- Internal:
|
||||
- Authentik Outpost Proxy listens on IPv6 `[::1]`
|
||||
- NetAlertX listens on IPv4 `0.0.0.0`
|
||||
|
||||
### Flow
|
||||
The Traffic Flow will therefore be as follows:
|
||||
|
||||
- Web GUI:
|
||||
   i. Client accesses `http://netalertx.MYDOMAIN.TLD:80`: default (built-in Caddy) Redirect to `https://netalertx.MYDOMAIN.TLD:443`
|
||||
   ii. Client accesses `https://netalertx.MYDOMAIN.TLD:443` -> reverse Proxy to internal Port 20211 (NetAlertX Web GUI / NGINX - unencrypted)
|
||||
- GraphQL: Client accesses `https://netalertx.MYDOMAIN.TLD:20212` -> reverse Proxy to internal Port 20219 (NetAlertX GraphQL - unencrypted)
|
||||
- Authentik Outpost: Client accesses `https://netalertx.MYDOMAIN.TLD:9443` -> reverse Proxy to internal Port 6000 (Authentik Outpost Proxy - unencrypted)
|
||||
|
||||
An Overview of the Flow is provided in the Picture below:
|
||||
|
||||

|
||||
|
||||
### Security Considerations
|
||||
|
||||
#### Caddy should be run rootless
|
||||
|
||||
> [!WARNING]
|
||||
> By default Caddy runs as `root` which is a Security Risk.
|
||||
> In order to solve this, it's recommended to create an unprivileged User `caddy` and Group `caddy` on the Host:
|
||||
> ```
|
||||
> groupadd --gid 980 caddy
|
||||
> useradd --shell /usr/sbin/nologin --gid 980 --uid 980 -c "Caddy web server" --base-dir /var/lib/caddy caddy
|
||||
> ```
|
||||
|
||||
At least using Quadlets with Usernames (NOT required with UID/GID), but possibly using Compose in certain Cases as well, a custom `/etc/passwd` and `/etc/group` might need to be bind-mounted inside the Container.
|
||||
`passwd`:
|
||||
```
|
||||
root:x:0:0:root:/root:/bin/sh
|
||||
bin:x:1:1:bin:/bin:/sbin/nologin
|
||||
daemon:x:2:2:daemon:/sbin:/sbin/nologin
|
||||
lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin
|
||||
sync:x:5:0:sync:/sbin:/bin/sync
|
||||
shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown
|
||||
halt:x:7:0:halt:/sbin:/sbin/halt
|
||||
mail:x:8:12:mail:/var/mail:/sbin/nologin
|
||||
news:x:9:13:news:/usr/lib/news:/sbin/nologin
|
||||
uucp:x:10:14:uucp:/var/spool/uucppublic:/sbin/nologin
|
||||
cron:x:16:16:cron:/var/spool/cron:/sbin/nologin
|
||||
ftp:x:21:21::/var/lib/ftp:/sbin/nologin
|
||||
sshd:x:22:22:sshd:/dev/null:/sbin/nologin
|
||||
games:x:35:35:games:/usr/games:/sbin/nologin
|
||||
ntp:x:123:123:NTP:/var/empty:/sbin/nologin
|
||||
guest:x:405:100:guest:/dev/null:/sbin/nologin
|
||||
nobody:x:65534:65534:nobody:/:/sbin/nologin
|
||||
caddy:x:980:980:caddy:/var/lib/caddy:/bin/sh
|
||||
```
|
||||
|
||||
`group`:
|
||||
```
|
||||
root:x:0:root
|
||||
bin:x:1:root,bin,daemon
|
||||
daemon:x:2:root,bin,daemon
|
||||
sys:x:3:root,bin
|
||||
adm:x:4:root,daemon
|
||||
tty:x:5:
|
||||
disk:x:6:root
|
||||
lp:x:7:lp
|
||||
kmem:x:9:
|
||||
wheel:x:10:root
|
||||
floppy:x:11:root
|
||||
mail:x:12:mail
|
||||
news:x:13:news
|
||||
uucp:x:14:uucp
|
||||
cron:x:16:cron
|
||||
audio:x:18:
|
||||
cdrom:x:19:
|
||||
dialout:x:20:root
|
||||
ftp:x:21:
|
||||
sshd:x:22:
|
||||
input:x:23:
|
||||
tape:x:26:root
|
||||
video:x:27:root
|
||||
netdev:x:28:
|
||||
kvm:x:34:kvm
|
||||
games:x:35:
|
||||
shadow:x:42:
|
||||
www-data:x:82:
|
||||
users:x:100:games
|
||||
ntp:x:123:
|
||||
abuild:x:300:
|
||||
utmp:x:406:
|
||||
ping:x:999:
|
||||
nogroup:x:65533:
|
||||
nobody:x:65534:
|
||||
caddy:x:980:
|
||||
```
|
||||
|
||||
#### Authentication of GraphQL Endpoint
|
||||
|
||||
> [!WARNING]
|
||||
> Currently the GraphQL Endpoint is NOT authenticated !
|
||||
|
||||
### Environment Files
|
||||
Depending on the Preference of the User (Environment Variables defined in Compose/Quadlet or in external `.env` File[s]), it might be preferable to place at least some Environment Variables in external `.env` and `.env.<application>` Files.
|
||||
|
||||
The following is proposed:
|
||||
|
||||
- `.env`: common Settings (empty by Default)
|
||||
- `.env.caddy`: Caddy Settings
|
||||
- `.env.server`: NetAlertX Server/Application Settings
|
||||
- `.env.outpost.proxy`: Authentik Proxy Outpost Settings
|
||||
|
||||
The following Content is assumed.
|
||||
|
||||
`.env.caddy`:
|
||||
```
|
||||
# Define Application Hostname
|
||||
APPLICATION_HOSTNAME=netalertx.MYDOMAIN.TLD
|
||||
|
||||
# Define Certificate Domain
|
||||
# In this case: use Wildcard Certificate
|
||||
APPLICATION_CERTIFICATE_DOMAIN=MYDOMAIN.TLD
|
||||
APPLICATION_CERTIFICATE_CERT_FILE=fullchain.pem
|
||||
APPLICATION_CERTIFICATE_KEY_FILE=privkey.pem
|
||||
|
||||
# Define Outpost Hostname
|
||||
OUTPOST_HOSTNAME=netalertx.MYDOMAIN.TLD
|
||||
|
||||
# Define Outpost External Port (TLS)
|
||||
OUTPOST_EXTERNAL_PORT=9443
|
||||
```
|
||||
|
||||
`.env.server`:
|
||||
```
|
||||
PORT=20211
|
||||
PORT_SSL=443
|
||||
NETALERTX_NETWORK_MODE=host
|
||||
LISTEN_ADDR=0.0.0.0
|
||||
GRAPHQL_PORT=20219
|
||||
NETALERTX_DEBUG=1
|
||||
BACKEND_API_URL=https://netalertx.MYDOMAIN.TLD:20212
|
||||
```
|
||||
|
||||
`.env.outpost.proxy`:
|
||||
```
|
||||
AUTHENTIK_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
AUTHENTIK_LISTEN__HTTP=[::1]:6000
|
||||
AUTHENTIK_LISTEN__HTTPS=[::1]:6443
|
||||
```
|
||||
|
||||
### Compose Setup
|
||||
```
|
||||
version: "3.8"
|
||||
services:
|
||||
netalertx-caddy:
|
||||
container_name: netalertx-caddy
|
||||
|
||||
network_mode: host
|
||||
image: docker.io/library/caddy:latest
|
||||
pull: missing
|
||||
|
||||
env_file:
|
||||
- .env
|
||||
- .env.caddy
|
||||
|
||||
environment:
|
||||
CADDY_DOCKER_CADDYFILE_PATH: "/etc/caddy/Caddyfile"
|
||||
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile:ro,z
|
||||
- /var/lib/containers/data/netalertx/caddy:/data/caddy:rw,z
|
||||
- /var/lib/containers/log/netalertx/caddy:/var/log:rw,z
|
||||
- /var/lib/containers/config/netalertx/caddy:/config/caddy:rw,z
|
||||
- /var/lib/containers/certificates/letsencrypt:/certificates:ro,z
|
||||
|
||||
# Set User
|
||||
user: "caddy:caddy"
|
||||
|
||||
# Automatically restart Container
|
||||
restart: unless-stopped
|
||||
|
||||
netalertx-server:
|
||||
    container_name: netalertx-server # The name shown when you run `docker container ls`
|
||||
|
||||
network_mode: host # Use host networking for ARP scanning and other services
|
||||
|
||||
depends_on:
|
||||
netalertx-caddy:
|
||||
condition: service_started
|
||||
restart: true
|
||||
netalertx-outpost-proxy:
|
||||
condition: service_started
|
||||
restart: true
|
||||
|
||||
# Local built Image including latest Changes
|
||||
image: localhost/netalertx-dev:dev-20260109-232454
|
||||
|
||||
read_only: true # Make the container filesystem read-only
|
||||
|
||||
# It is most secure to start with user 20211, but then we lose provisioning capabilities.
|
||||
# user: "${NETALERTX_UID:-20211}:${NETALERTX_GID:-20211}"
|
||||
cap_drop: # Drop all capabilities for enhanced security
|
||||
- ALL
|
||||
cap_add: # Add only the necessary capabilities
|
||||
- NET_ADMIN # Required for scanning with arp-scan, nmap, nbtscan, traceroute, and zero-conf
|
||||
- NET_RAW # Required for raw socket operations with arp-scan, nmap, nbtscan, traceroute and zero-conf
|
||||
- NET_BIND_SERVICE # Required to bind to privileged ports with nbtscan
|
||||
- CHOWN # Required for root-entrypoint to chown /data + /tmp before dropping privileges
|
||||
- SETUID # Required for root-entrypoint to switch to non-root user
|
||||
- SETGID # Required for root-entrypoint to switch to non-root group
|
||||
volumes:
|
||||
|
||||
# Override NGINX Configuration Template
|
||||
- type: bind
|
||||
source: /var/lib/containers/config/netalertx/server/nginx/netalertx.conf.template
|
||||
target: /services/config/nginx/netalertx.conf.template
|
||||
read_only: true
|
||||
bind:
|
||||
selinux: Z
|
||||
|
||||
# Letsencrypt Certificates
|
||||
- type: bind
|
||||
source: /var/lib/containers/certificates/letsencrypt/MYDOMAIN.TLD
|
||||
target: /certificates
|
||||
read_only: true
|
||||
bind:
|
||||
selinux: Z
|
||||
|
||||
# Data Storage for NetAlertX
|
||||
- type: bind # Persistent Docker-managed Named Volume for storage
|
||||
source: /var/lib/containers/data/netalertx/server
|
||||
target: /data # consolidated configuration and database storage
|
||||
read_only: false # writable volume
|
||||
bind:
|
||||
selinux: Z
|
||||
|
||||
# Set the Timezone
|
||||
- type: bind # Bind mount for timezone consistency
|
||||
source: /etc/localtime
|
||||
target: /etc/localtime
|
||||
read_only: true
|
||||
bind:
|
||||
selinux: Z
|
||||
|
||||
# tmpfs mounts for writable directories in a read-only container and improve system performance
|
||||
# All writes now live under /tmp/* subdirectories which are created dynamically by entrypoint.d scripts
|
||||
# mode=1700 gives rwx------ permissions; ownership is set by /root-entrypoint.sh
|
||||
- type: tmpfs
|
||||
target: /tmp
|
||||
tmpfs-mode: 1700
|
||||
uid: 0
|
||||
gid: 0
|
||||
rw: true
|
||||
noexec: true
|
||||
nosuid: true
|
||||
nodev: true
|
||||
async: true
|
||||
noatime: true
|
||||
nodiratime: true
|
||||
bind:
|
||||
selinux: Z
|
||||
|
||||
env_file:
|
||||
- .env
|
||||
- .env.server
|
||||
|
||||
environment:
|
||||
PUID: ${NETALERTX_UID:-20211} # Runtime UID after priming (Synology/no-copy-up safe)
|
||||
PGID: ${NETALERTX_GID:-20211} # Runtime GID after priming (Synology/no-copy-up safe)
|
||||
LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces
|
||||
PORT: ${PORT:-20211} # Application port
|
||||
PORT_SSL: ${PORT_SSL:-443}
|
||||
GRAPHQL_PORT: ${GRAPHQL_PORT:-20212} # GraphQL API port
|
||||
ALWAYS_FRESH_INSTALL: ${ALWAYS_FRESH_INSTALL:-false} # Set to true to reset your config and database on each container start
|
||||
NETALERTX_DEBUG: ${NETALERTX_DEBUG:-0} # 0=kill all services and restart if any dies. 1 keeps running dead services.
|
||||
      BACKEND_API_URL: ${BACKEND_API_URL:-https://netalertx.MYDOMAIN.TLD:20212}
|
||||
|
||||
# Resource limits to prevent resource exhaustion
|
||||
mem_limit: 4096m # Maximum memory usage
|
||||
mem_reservation: 2048m # Soft memory limit
|
||||
cpu_shares: 512 # Relative CPU weight for CPU contention scenarios
|
||||
pids_limit: 512 # Limit the number of processes/threads to prevent fork bombs
|
||||
logging:
|
||||
driver: "json-file" # Use JSON file logging driver
|
||||
options:
|
||||
max-size: "10m" # Rotate log files after they reach 10MB
|
||||
max-file: "3" # Keep a maximum of 3 log files
|
||||
|
||||
# Always restart the container unless explicitly stopped
|
||||
restart: unless-stopped
|
||||
|
||||
# To sign Out, you need to visit
|
||||
# {$OUTPOST_HOSTNAME}:{$OUTPOST_EXTERNAL_PORT}/outpost.goauthentik.io/sign_out
|
||||
netalertx-outpost-proxy:
|
||||
container_name: netalertx-outpost-proxy
|
||||
|
||||
network_mode: host
|
||||
|
||||
depends_on:
|
||||
netalertx-caddy:
|
||||
condition: service_started
|
||||
restart: true
|
||||
|
||||
restart: unless-stopped
|
||||
|
||||
image: ghcr.io/goauthentik/proxy:2025.10
|
||||
pull: missing
|
||||
|
||||
env_file:
|
||||
- .env
|
||||
- .env.outpost.proxy
|
||||
|
||||
environment:
|
||||
AUTHENTIK_HOST: "https://authentik.MYDOMAIN.TLD"
|
||||
AUTHENTIK_INSECURE: false
|
||||
AUTHENTIK_LISTEN__HTTP: "[::1]:6000"
|
||||
AUTHENTIK_LISTEN__HTTPS: "[::1]:6443"
|
||||
```
|
||||
|
||||
### Quadlet Setup
|
||||
`netalertx.pod`:
|
||||
```
|
||||
[Pod]
|
||||
# Name of the Pod
|
||||
PodName=netalertx
|
||||
|
||||
# Network Mode Host is required for ARP to work
|
||||
Network=host
|
||||
|
||||
# Automatically start Pod at Boot Time
|
||||
[Install]
|
||||
WantedBy=default.target
|
||||
```
|
||||
|
||||
`netalertx-caddy.container`:
|
||||
```
|
||||
[Unit]
|
||||
Description=NetAlertX Caddy Container
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
|
||||
[Container]
|
||||
ContainerName=netalertx-caddy
|
||||
|
||||
Pod=netalertx.pod
|
||||
StartWithPod=true
|
||||
|
||||
# Generic Environment Configuration
|
||||
EnvironmentFile=.env
|
||||
|
||||
# Caddy Specific Environment Configuration
|
||||
EnvironmentFile=.env.caddy
|
||||
|
||||
Environment=CADDY_DOCKER_CADDYFILE_PATH=/etc/caddy/Caddyfile
|
||||
|
||||
Image=docker.io/library/caddy:latest
|
||||
Pull=missing
|
||||
|
||||
# Run as rootless
|
||||
# Specifying User & Group by Name requires to mount a custom passwd & group File inside the Container
|
||||
# Otherwise an Error like the following will result: netalertx-caddy[593191]: Error: unable to find user caddy: no matching entries in passwd file
|
||||
# User=caddy
|
||||
# Group=caddy
|
||||
# Volume=/var/lib/containers/config/netalertx/caddy-rootless/passwd:/etc/passwd:ro,z
|
||||
# Volume=/var/lib/containers/config/netalertx/caddy-rootless/group:/etc/group:ro,z
|
||||
|
||||
# Run as rootless
|
||||
# Specifying User & Group by UID/GID will NOT require a custom passwd / group File to be bind-mounted inside the Container
|
||||
User=980
|
||||
Group=980
|
||||
|
||||
Volume=./Caddyfile:/etc/caddy/Caddyfile:ro,z
|
||||
Volume=/var/lib/containers/data/netalertx/caddy:/data/caddy:z
|
||||
Volume=/var/lib/containers/log/netalertx/caddy:/var/log:z
|
||||
Volume=/var/lib/containers/config/netalertx/caddy:/config/caddy:z
|
||||
Volume=/var/lib/containers/certificates/letsencrypt:/certificates:ro,z
|
||||
```
|
||||
|
||||
`netalertx-server.container`:
|
||||
```
|
||||
[Unit]
|
||||
Description=NetAlertX Server Container
|
||||
Requires=netalertx-caddy.service netalertx-outpost-proxy.service
|
||||
After=netalertx-caddy.service netalertx-outpost-proxy.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
|
||||
[Container]
|
||||
ContainerName=netalertx-server
|
||||
|
||||
Pod=netalertx.pod
|
||||
StartWithPod=true
|
||||
|
||||
# Local built Image including latest Changes
|
||||
Image=localhost/netalertx-dev:dev-20260109-232454
|
||||
Pull=missing
|
||||
|
||||
# Make the container filesystem read-only
|
||||
ReadOnly=true
|
||||
|
||||
# Drop all capabilities for enhanced security
|
||||
DropCapability=ALL
|
||||
|
||||
# It is most secure to start with user 20211, but then we lose provisioning capabilities.
|
||||
# User=20211:20211
|
||||
|
||||
# Required for scanning with arp-scan, nmap, nbtscan, traceroute, and zero-conf
|
||||
AddCapability=NET_ADMIN
|
||||
|
||||
# Required for raw socket operations with arp-scan, nmap, nbtscan, traceroute and zero-conf
|
||||
AddCapability=NET_RAW
|
||||
|
||||
# Required to bind to privileged ports with nbtscan
|
||||
AddCapability=NET_BIND_SERVICE
|
||||
|
||||
# Required for root-entrypoint to chown /data + /tmp before dropping privileges
|
||||
AddCapability=CHOWN
|
||||
|
||||
# Required for root-entrypoint to switch to non-root user
|
||||
AddCapability=SETUID
|
||||
|
||||
# Required for root-entrypoint to switch to non-root group
|
||||
AddCapability=SETGID
|
||||
|
||||
# Override the Configuration Template
|
||||
Volume=/var/lib/containers/config/netalertx/server/nginx/netalertx.conf.template:/services/config/nginx/netalertx.conf.template:ro,Z
|
||||
|
||||
# Letsencrypt Certificates
|
||||
Volume=/var/lib/containers/certificates/letsencrypt/MYDOMAIN.TLD:/certificates:ro,Z
|
||||
|
||||
# Data Storage for NetAlertX
|
||||
Volume=/var/lib/containers/data/netalertx/server:/data:rw,Z
|
||||
|
||||
# Set the Timezone
|
||||
Volume=/etc/localtime:/etc/localtime:ro,Z
|
||||
|
||||
# tmpfs mounts for writable directories in a read-only container and improve system performance
|
||||
# All writes now live under /tmp/* subdirectories which are created dynamically by entrypoint.d scripts
|
||||
# mode=1700 gives rwx------ permissions; ownership is set by /root-entrypoint.sh
|
||||
# Mount=type=tmpfs,destination=/tmp,tmpfs-mode=1700,uid=0,gid=0,rw=true,noexec=true,nosuid=true,nodev=true,async=true,noatime=true,nodiratime=true,relabel=private
|
||||
Mount=type=tmpfs,destination=/tmp,tmpfs-mode=1700,rw=true,noexec=true,nosuid=true,nodev=true
|
||||
|
||||
# Environment Configuration
|
||||
EnvironmentFile=.env
|
||||
EnvironmentFile=.env.server
|
||||
|
||||
# Runtime UID after priming (Synology/no-copy-up safe)
|
||||
Environment=PUID=20211
|
||||
|
||||
# Runtime GID after priming (Synology/no-copy-up safe)
|
||||
Environment=PGID=20211
|
||||
|
||||
# Listen for connections on all interfaces (IPv4)
|
||||
Environment=LISTEN_ADDR=0.0.0.0
|
||||
|
||||
# Application port
|
||||
Environment=PORT=20211
|
||||
|
||||
# SSL Port
|
||||
Environment=PORT_SSL=443
|
||||
|
||||
# GraphQL API port
|
||||
Environment=GRAPHQL_PORT=20212
|
||||
|
||||
# Set to true to reset your config and database on each container start
|
||||
Environment=ALWAYS_FRESH_INSTALL=false
|
||||
|
||||
# 0=kill all services and restart if any dies. 1 keeps running dead services.
|
||||
Environment=NETALERTX_DEBUG=0
|
||||
|
||||
# Set the GraphQL URL for external Access (via Caddy Reverse Proxy)
|
||||
Environment=BACKEND_API_URL=https://netalertx.MYDOMAIN.TLD:20212
|
||||
|
||||
# Resource limits to prevent resource exhaustion
|
||||
# Maximum memory usage
|
||||
Memory=4g
|
||||
|
||||
# Limit the number of processes/threads to prevent fork bombs
|
||||
PidsLimit=512
|
||||
|
||||
# Relative CPU weight for CPU contention scenarios
|
||||
PodmanArgs=--cpus=2
|
||||
PodmanArgs=--cpu-shares=512
|
||||
|
||||
# Soft memory limit
|
||||
PodmanArgs=--memory-reservation=2g
|
||||
|
||||
# !! The following Keys are unfortunately not [yet] supported !!
|
||||
|
||||
# Relative CPU weight for CPU contention scenarios
|
||||
#CpuShares=512
|
||||
|
||||
# Soft memory limit
|
||||
#MemoryReservation=2g
|
||||
```
|
||||
|
||||
`netalertx-outpost-proxy.container`:
|
||||
```
|
||||
[Unit]
|
||||
Description=NetAlertX Authentik Proxy Outpost Container
|
||||
Requires=netalertx-caddy.service
|
||||
After=netalertx-caddy.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
|
||||
[Container]
|
||||
ContainerName=netalertx-outpost-proxy
|
||||
|
||||
Pod=netalertx.pod
|
||||
StartWithPod=true
|
||||
|
||||
# General Configuration
|
||||
EnvironmentFile=.env
|
||||
|
||||
# Authentik Outpost Proxy Specific Configuration
|
||||
EnvironmentFile=.env.outpost.proxy
|
||||
|
||||
Environment=AUTHENTIK_HOST=https://authentik.MYDOMAIN.TLD
|
||||
Environment=AUTHENTIK_INSECURE=false
|
||||
|
||||
# Overrides Value from .env.outpost.rac
|
||||
# Environment=AUTHENTIK_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
|
||||
# Optional setting to be used when `authentik_host` for internal communication doesn't match the public URL
|
||||
# Environment=AUTHENTIK_HOST_BROWSER=https://authentik.MYDOMAIN.TLD
|
||||
|
||||
# Container Image
|
||||
Image=ghcr.io/goauthentik/proxy:2025.10
|
||||
Pull=missing
|
||||
|
||||
# Network Configuration
|
||||
Network=container:netalertx-caddy
|
||||
|
||||
# Security Configuration
|
||||
NoNewPrivileges=true
|
||||
```
|
||||
|
||||
### Firewall Setup
|
||||
|
||||
Depending on which GNU/Linux Distribution you are running, it might be required to open up some Firewall Ports in order to be able to access the Endpoints from outside the Host itself.
|
||||
|
||||
This is for instance the Case for Fedora Linux, where I had to open:
|
||||
|
||||
- Port 20212 for external GraphQL Access (both TCP & UDP are open, unsure if UDP is required)
|
||||
- Port 9443 for external Authentik Outpost Proxy Access (both TCP & UDP are open, unsure if UDP is required)
|
||||
|
||||

|
||||
|
||||
### Authentik Setup
|
||||
|
||||
In order to enable Single Sign On (SSO) with Authentik, you will need to create a Provider, an Application and an Outpost.
|
||||
|
||||

|
||||
|
||||
First of all, using the Left Sidebar, navigate to `Applications` → `Providers`, click on `Create` (Blue Button at the Top of the Screen), select `Proxy Provider`, then click `Next`:
|
||||

|
||||
|
||||
Fill in the required Fields:
|
||||
|
||||
- Name: choose a Name for the Provider (e.g. `netalertx`)
|
||||
- Authorization Flow: choose the Authorization Flow. I typically use `default-provider-authorization-implicit-consent (Authorize Application)`. If you select the `default-provider-authorization-explicit-consent (Authorize Application)` you will need to authorize Authentik every Time you want to log in NetAlertX, which can make the Experience less User-friendly
|
||||
- Type: Click on `Forward Auth (single application)`
|
||||
- External Host: set to `https://netalertx.MYDOMAIN.TLD`
|
||||
|
||||
Click `Finish`.
|
||||
|
||||

|
||||
|
||||
Now, using the Left Sidebar, navigate to `Applications` → `Applications`, click on `Create` (Blue Button at the Top of the Screen) and fill in the required Fields:
|
||||
|
||||
- Name: choose a Name for the Application (e.g. `netalertx`)
|
||||
- Slug: choose a Slug for the Application (e.g. `netalertx`)
|
||||
- Group: optionally you can assign this Application to a Group of Applications of your Choosing (for grouping Purposes within Authentik User Interface)
|
||||
- Provider: select the Provider you created in the `Providers` Section previously (e.g. `netalertx`)
|
||||
|
||||
Then click `Create`.
|
||||
|
||||

|
||||
|
||||
Now, using the Left Sidebar, navigate to `Applications` → `Outposts`, click on `Create` (Blue Button at the Top of the Screen) and fill in the required Fields:
|
||||
|
||||
- Name: choose a Name for the Outpost (e.g. `netalertx`)
|
||||
- Type: `Proxy`
|
||||
- Integration: open the Dropdown and click on `---------`. Make sure it is NOT set to `Local Docker connection` !
|
||||
|
||||
In the `Available Applications` Section, select the Application you created in the Previous Step, then click the right Arrow (approx. located in the Center of the Screen), so that it gets copied in the `Selected Applications` Section.
|
||||
|
||||
Then click `Create`.
|
||||
|
||||

|
||||
|
||||
Wait a few Seconds for the Outpost to be created. Once it appears in the List, click on `Deployment Info` on the Right Side of the relevant Line.
|
||||
|
||||

|
||||
|
||||
Take note of that Token. You will need it for the Authentik Outpost Proxy Container, which will read it as the `AUTHENTIK_TOKEN` Environment Variable.
|
||||
|
||||
### NGINX Configuration inside NetAlertX Container
|
||||
> [!NOTE]
|
||||
> This is something that was implemented based on the previous Content of this Reverse Proxy Document.
|
||||
> Due to some Buffer Warnings/Errors in the Logs as well as some other Issues I was experiencing, I significantly increased the client_body_buffer_size and large_client_header_buffers Parameters, although these might not be required anymore.
|
||||
> Further Testing might be required.
|
||||
|
||||
```
|
||||
# Set number of worker processes automatically based on number of CPU cores.
|
||||
worker_processes auto;
|
||||
|
||||
# Enables the use of JIT for regular expressions to speed-up their processing.
|
||||
pcre_jit on;
|
||||
|
||||
# Configures default error logger.
|
||||
error_log /tmp/log/nginx-error.log warn;
|
||||
|
||||
pid /tmp/run/nginx.pid;
|
||||
|
||||
events {
|
||||
# The maximum number of simultaneous connections that can be opened by
|
||||
# a worker process.
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
|
||||
# Mapping of temp paths for various nginx modules.
|
||||
client_body_temp_path /tmp/nginx/client_body;
|
||||
proxy_temp_path /tmp/nginx/proxy;
|
||||
fastcgi_temp_path /tmp/nginx/fastcgi;
|
||||
uwsgi_temp_path /tmp/nginx/uwsgi;
|
||||
scgi_temp_path /tmp/nginx/scgi;
|
||||
|
||||
# Includes mapping of file name extensions to MIME types of responses
|
||||
# and defines the default type.
|
||||
include /services/config/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
# Name servers used to resolve names of upstream servers into addresses.
|
||||
# It's also needed when using tcpsocket and udpsocket in Lua modules.
|
||||
#resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001];
|
||||
|
||||
# Don't tell nginx version to the clients. Default is 'on'.
|
||||
server_tokens off;
|
||||
|
||||
# Specifies the maximum accepted body size of a client request, as
|
||||
# indicated by the request header Content-Length. If the stated content
|
||||
# length is greater than this size, then the client receives the HTTP
|
||||
# error code 413. Set to 0 to disable. Default is '1m'.
|
||||
client_max_body_size 1m;
|
||||
|
||||
# Sendfile copies data between one FD and other from within the kernel,
|
||||
# which is more efficient than read() + write(). Default is off.
|
||||
sendfile on;
|
||||
|
||||
# Causes nginx to attempt to send its HTTP response head in one packet,
|
||||
# instead of using partial frames. Default is 'off'.
|
||||
tcp_nopush on;
|
||||
|
||||
|
||||
# Enables the specified protocols. Default is TLSv1 TLSv1.1 TLSv1.2.
|
||||
# TIP: If you're not obligated to support ancient clients, remove TLSv1.1.
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
|
||||
# Path of the file with Diffie-Hellman parameters for EDH ciphers.
|
||||
# TIP: Generate with: `openssl dhparam -out /etc/ssl/nginx/dh2048.pem 2048`
|
||||
#ssl_dhparam /etc/ssl/nginx/dh2048.pem;
|
||||
|
||||
# Specifies that our cipher suits should be preferred over client ciphers.
|
||||
# Default is 'off'.
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
# Enables a shared SSL cache with size that can hold around 8000 sessions.
|
||||
# Default is 'none'.
|
||||
ssl_session_cache shared:SSL:2m;
|
||||
|
||||
# Specifies a time during which a client may reuse the session parameters.
|
||||
# Default is '5m'.
|
||||
ssl_session_timeout 1h;
|
||||
|
||||
# Disable TLS session tickets (they are insecure). Default is 'on'.
|
||||
ssl_session_tickets off;
|
||||
|
||||
|
||||
# Enable gzipping of responses.
|
||||
gzip on;
|
||||
|
||||
# Set the Vary HTTP header as defined in the RFC 2616. Default is 'off'.
|
||||
gzip_vary on;
|
||||
|
||||
|
||||
# Specifies the main log format.
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
|
||||
# Sets the path, format, and configuration for a buffered log write.
|
||||
access_log /tmp/log/nginx-access.log main;
|
||||
|
||||
|
||||
# Virtual host config (unencrypted)
|
||||
server {
|
||||
listen ${LISTEN_ADDR}:${PORT} default_server;
|
||||
root /app/front;
|
||||
index index.php;
|
||||
add_header X-Forwarded-Prefix "/app" always;
|
||||
|
||||
server_name netalertx-server;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
client_body_buffer_size 512k;
|
||||
large_client_header_buffers 64 128k;
|
||||
|
||||
location ~* \.php$ {
|
||||
# Set Cache-Control header to prevent caching on the first load
|
||||
add_header Cache-Control "no-store";
|
||||
fastcgi_pass unix:/tmp/run/php.sock;
|
||||
include /services/config/nginx/fastcgi_params;
|
||||
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
|
||||
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
|
||||
fastcgi_connect_timeout 75;
|
||||
fastcgi_send_timeout 600;
|
||||
fastcgi_read_timeout 600;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Caddyfile
|
||||
```
|
||||
# Example and Guide
|
||||
# https://caddyserver.com/docs/caddyfile/options
|
||||
|
||||
# General Options
|
||||
{
|
||||
# (Optional) Debug Mode
|
||||
# debug
|
||||
|
||||
# (Optional ) Enable / Disable Admin API
|
||||
admin off
|
||||
|
||||
# TLS Options
|
||||
# (Optional) Disable Certificates Management (only if SSL/TLS Certificates are managed by certbot or other external Tools)
|
||||
auto_https disable_certs
|
||||
}
|
||||
|
||||
# (Optional Enable Admin API)
|
||||
# localhost {
|
||||
# reverse_proxy /api/* localhost:9001
|
||||
# }
|
||||
|
||||
# NetAlertX Web GUI (HTTPS Port 443)
|
||||
# (Optional) Only if SSL/TLS Certificates are managed by certbot or other external Tools and Custom Logging is required
|
||||
{$APPLICATION_HOSTNAME}:443 {
|
||||
tls /certificates/{$APPLICATION_CERTIFICATE_DOMAIN}/{$APPLICATION_CERTIFICATE_CERT_FILE:fullchain.pem} /certificates/{$APPLICATION_CERTIFICATE_DOMAIN}/{$APPLICATION_CERTIFICATE_KEY_FILE:privkey.pem}
|
||||
|
||||
log {
|
||||
output file /var/log/{$APPLICATION_HOSTNAME}/access_web.json {
|
||||
roll_size 100MiB
|
||||
roll_keep 5000
|
||||
roll_keep_for 720h
|
||||
roll_uncompressed
|
||||
}
|
||||
|
||||
format json
|
||||
}
|
||||
|
||||
route {
|
||||
# Always forward outpost path to actual outpost
|
||||
reverse_proxy /outpost.goauthentik.io/* https://{$OUTPOST_HOSTNAME}:{$OUTPOST_EXTERNAL_PORT} {
|
||||
header_up Host {http.reverse_proxy.upstream.hostport}
|
||||
}
|
||||
|
||||
# Forward authentication to outpost
|
||||
forward_auth https://{$OUTPOST_HOSTNAME}:{$OUTPOST_EXTERNAL_PORT} {
|
||||
uri /outpost.goauthentik.io/auth/caddy
|
||||
|
||||
# Capitalization of the headers is important, otherwise they will be empty
|
||||
copy_headers X-Authentik-Username X-Authentik-Groups X-Authentik-Email X-Authentik-Name X-Authentik-Uid X-Authentik-Jwt X-Authentik-Meta-Jwks X-Authentik-Meta-Outpost X-Authentik-Meta-Provider X-Authentik-Meta-App X-Authentik-Meta-Version
|
||||
|
||||
# (Optional)
|
||||
# If not set, trust all private ranges, but for Security Reasons, this should be set to the outposts IP
|
||||
trusted_proxies private_ranges
|
||||
}
|
||||
}
|
||||
|
||||
# IPv4 Reverse Proxy to NetAlertX Web GUI (internal unencrypted Host)
|
||||
reverse_proxy http://0.0.0.0:20211
|
||||
|
||||
# IPv6 Reverse Proxy to NetAlertX Web GUI (internal unencrypted Host)
|
||||
# reverse_proxy http://[::1]:20211
|
||||
}
|
||||
|
||||
# NetAlertX GraphQL Endpoint (HTTPS Port 20212)
|
||||
# (Optional) Only if SSL/TLS Certificates are managed by certbot or other external Tools and Custom Logging is required
|
||||
{$APPLICATION_HOSTNAME}:20212 {
|
||||
tls /certificates/{$APPLICATION_CERTIFICATE_DOMAIN}/{$APPLICATION_CERTIFICATE_CERT_FILE:fullchain.pem} /certificates/{$APPLICATION_CERTIFICATE_DOMAIN}/{$APPLICATION_CERTIFICATE_KEY_FILE:privkey.pem}
|
||||
|
||||
log {
|
||||
output file /var/log/{$APPLICATION_HOSTNAME}/access_graphql.json {
|
||||
roll_size 100MiB
|
||||
roll_keep 5000
|
||||
roll_keep_for 720h
|
||||
roll_uncompressed
|
||||
}
|
||||
|
||||
format json
|
||||
}
|
||||
|
||||
# IPv4 Reverse Proxy to NetAlertX GraphQL Endpoint (internal unencrypted Host)
|
||||
reverse_proxy http://0.0.0.0:20219
|
||||
|
||||
# IPv6 Reverse Proxy to NetAlertX GraphQL Endpoint (internal unencrypted Host)
|
||||
# reverse_proxy http://[::1]:6000
|
||||
}
|
||||
|
||||
# Authentik Outpost
|
||||
# (Optional) Only if SSL/TLS Certificates are managed by certbot or other external Tools and Custom Logging is required
|
||||
{$OUTPOST_HOSTNAME}:{$OUTPOST_EXTERNAL_PORT} {
|
||||
tls /certificates/{$APPLICATION_CERTIFICATE_DOMAIN}/{$APPLICATION_CERTIFICATE_CERT_FILE:fullchain.pem} /certificates/{$APPLICATION_CERTIFICATE_DOMAIN}/{$APPLICATION_CERTIFICATE_KEY_FILE:privkey.pem}
|
||||
|
||||
log {
|
||||
output file /var/log/outpost/{$OUTPOST_HOSTNAME}/access.json {
|
||||
roll_size 100MiB
|
||||
roll_keep 5000
|
||||
roll_keep_for 720h
|
||||
roll_uncompressed
|
||||
}
|
||||
|
||||
format json
|
||||
}
|
||||
|
||||
# IPv4 Reverse Proxy to internal unencrypted Host
|
||||
# reverse_proxy http://0.0.0.0:6000
|
||||
|
||||
# IPv6 Reverse Proxy to internal unencrypted Host
|
||||
reverse_proxy http://[::1]:6000
|
||||
}
|
||||
```
|
||||
|
||||
### Login
|
||||
Now try to login by visiting `https://netalertx.MYDOMAIN.TLD`.
|
||||
|
||||
You should be greeted with a Login Screen by Authentik.
|
||||
|
||||
If you are already logged in Authentik, log out first. You can do that by visiting `https://netalertx.MYDOMAIN.TLD/outpost.goauthentik.io/sign_out`, then click on `Log out of authentik` (2nd Button). Or you can just sign out from your Authentik Admin Panel at `https://authentik.MYDOMAIN.TLD`.
|
||||
|
||||
If everything works as expected, then you can now set `SETPWD_enable_password=false` to disable double Authentication.
|
||||
|
||||

|
||||
@@ -1,86 +0,0 @@
|
||||
# Guide: Routing NetAlertX API via Traefik v3
|
||||
|
||||
> [!NOTE]
|
||||
> NetAlertX requires access to both the **web UI** (default `20211`) and the **GraphQL backend `GRAPHQL_PORT`** (default `20212`) ports.
|
||||
> Ensure your reverse proxy allows traffic to both for proper functionality.
|
||||
|
||||
|
||||
> [!NOTE]
|
||||
> This is community-contributed. Due to environment, setup, or networking differences, results may vary. Please open a PR to improve it instead of creating an issue, as the maintainer is not actively maintaining it.
|
||||
|
||||
|
||||
Traefik v3 requires the following setup to route traffic properly. This guide shows a working configuration using a dedicated `PathPrefix`.
|
||||
|
||||
---
|
||||
|
||||
## 1. Configure NetAlertX Backend URL
|
||||
|
||||
1. Open the NetAlertX UI: **Settings → Core → General**.
|
||||
2. Set the `BACKEND_API_URL` to include a custom path prefix, for example:
|
||||
|
||||
```
|
||||
https://netalertx.yourdomain.com/netalertx-api
|
||||
```
|
||||
|
||||
This tells the frontend where to reach the backend API.
|
||||
|
||||
---
|
||||
|
||||
## 2. Create a Traefik Router for the API
|
||||
|
||||
Define a router specifically for the API with a higher priority and a `PathPrefix` rule:
|
||||
|
||||
```yaml
|
||||
netalertx-api:
|
||||
rule: "Host(`netalertx.yourdomain.com`) && PathPrefix(`/netalertx-api`)"
|
||||
service: netalertx-api-service
|
||||
middlewares:
|
||||
- netalertx-stripprefix
|
||||
priority: 100
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
|
||||
* `Host(...)` ensures requests are only routed for your domain.
|
||||
* `PathPrefix(...)` routes anything under `/netalertx-api` to the backend.
|
||||
* Priority `100` ensures this router takes precedence over other routes.
|
||||
|
||||
---
|
||||
|
||||
## 3. Add a Middleware to Strip the Prefix
|
||||
|
||||
NetAlertX expects requests at the root (`/`). Use Traefik’s `StripPrefix` middleware:
|
||||
|
||||
```yaml
|
||||
middlewares:
|
||||
netalertx-stripprefix:
|
||||
stripPrefix:
|
||||
prefixes:
|
||||
- "/netalertx-api"
|
||||
```
|
||||
|
||||
This removes `/netalertx-api` before forwarding the request to the backend container.
|
||||
|
||||
---
|
||||
|
||||
## 4. Map the API Service to the Backend Container
|
||||
|
||||
Point the service to the internal GraphQL/Backend port (20212):
|
||||
|
||||
```yaml
|
||||
netalertx-api-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://<INTERNAL_IP>:20212"
|
||||
```
|
||||
|
||||
Replace `<INTERNAL_IP>` with your NetAlertX container’s internal address.
|
||||
|
||||
---
|
||||
|
||||
✅ With this setup:
|
||||
|
||||
* `https://netalertx.yourdomain.com` → Web interface (port 20211)
|
||||
* `https://netalertx.yourdomain.com/netalertx-api` → API/GraphQL backend (port 20212)
|
||||
|
||||
This cleanly separates API requests from frontend requests while keeping everything under the same domain.
|
||||
BIN
docs/img/ADVISORIES/down_devices.png
Normal file
|
After Width: | Height: | Size: 63 KiB |
BIN
docs/img/ADVISORIES/filters.png
Normal file
|
After Width: | Height: | Size: 83 KiB |
BIN
docs/img/ADVISORIES/ui_customization_settings.png
Normal file
|
After Width: | Height: | Size: 137 KiB |
|
Before Width: | Height: | Size: 78 KiB |
|
Before Width: | Height: | Size: 1.5 MiB |
|
Before Width: | Height: | Size: 61 KiB |
|
Before Width: | Height: | Size: 52 KiB |
|
Before Width: | Height: | Size: 128 KiB |
|
Before Width: | Height: | Size: 89 KiB |
|
Before Width: | Height: | Size: 27 KiB |
|
Before Width: | Height: | Size: 67 KiB |
@@ -1,202 +0,0 @@
|
||||
<mxfile host="Electron" modified="2026-01-15T05:36:26.645Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/24.1.0 Chrome/120.0.6099.109 Electron/28.1.0 Safari/537.36" etag="OpSjRPjeNeyudFLZJ2fD" version="24.1.0" type="device">
|
||||
<diagram name="Page-1" id="mulIpG3YQAhf4Klf7Njm">
|
||||
<mxGraphModel dx="6733" dy="1168" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="4681" pageHeight="3300" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-1" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="850" y="160" width="920" height="810" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-2" value="NetAlertX Pod" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=32;" vertex="1" parent="1">
|
||||
<mxGeometry x="850" y="130" width="670" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-3" value="" style="image;html=1;image=img/lib/clip_art/computers/Laptop_128x128.png" vertex="1" parent="1">
|
||||
<mxGeometry x="-50" y="395" width="140" height="140" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-4" value="" style="image;html=1;image=img/lib/clip_art/networking/Firewall_02_128x128.png" vertex="1" parent="1">
|
||||
<mxGeometry x="488" y="344" width="80" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-5" value="" style="image;html=1;image=img/lib/clip_art/networking/Firewall_02_128x128.png" vertex="1" parent="1">
|
||||
<mxGeometry x="488" y="555" width="80" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-8" value="Web UI<br>(NGINX + PHP)" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="230" y="320" width="200" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-9" value="API GraphQL<br>(Python)" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="230" y="555" width="200" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-10" value="" style="endArrow=classic;html=1;rounded=0;dashed=1;dashPattern=8 8;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="240" y="390" as="sourcePoint" />
|
||||
<mxPoint x="240" y="600" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-12" value="<div>443</div>" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#FF0000;" vertex="1" parent="1">
|
||||
<mxGeometry x="581" y="335" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-13" value="20212" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#FF0000;" vertex="1" parent="1">
|
||||
<mxGeometry x="581" y="554" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-14" value="" style="image;html=1;image=img/lib/clip_art/networking/Firewall_02_128x128.png" vertex="1" parent="1">
|
||||
<mxGeometry x="488" y="813" width="80" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-16" value="Authentik SSO for Web UI" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="230" y="793" width="200" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-17" value="9443" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#FF0000;" vertex="1" parent="1">
|
||||
<mxGeometry x="580" y="803" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-18" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="1470" y="250" width="288" height="440" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-19" value="NetAlertX" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="1470" y="210" width="288" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-21" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="1260" y="751" width="500" height="199" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-22" value="Authentik Outpost Proxy" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="1280" y="711" width="480" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-23" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="860" y="250" width="380" height="700" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-24" value="Caddy" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="860" y="210" width="390" height="40" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-25" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="1498" y="319" width="220" height="130" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-26" value="" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="1498" y="530" width="220" height="150" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-27" value="Web UI<div>(NGINX + PHP)</div>" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="1498" y="264" width="220" height="50" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-28" value="API GraphQL<div>(Python)</div>" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="1498" y="475" width="220" height="50" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-6" value="" style="endArrow=classic;html=1;rounded=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="wwqsnaxs0Bt7SYwqQu8i-53" target="wwqsnaxs0Bt7SYwqQu8i-58">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="130" y="390" as="sourcePoint" />
|
||||
<mxPoint x="1129" y="389.9999999999998" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-30" value="" style="endArrow=classic;html=1;rounded=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="wwqsnaxs0Bt7SYwqQu8i-59" target="wwqsnaxs0Bt7SYwqQu8i-31">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="1214" y="483" as="sourcePoint" />
|
||||
<mxPoint x="1209" y="823" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-31" value="Authenticated &amp; Authorized ?" style="rhombus;whiteSpace=wrap;html=1;fontSize=18;" vertex="1" parent="1">
|
||||
<mxGeometry x="1294" y="773.5" width="170" height="160" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-35" value="20211" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#FF0000;" vertex="1" parent="1">
|
||||
<mxGeometry x="1488" y="335" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-36" value="" style="endArrow=classic;html=1;rounded=0;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="1688" y="369" as="sourcePoint" />
|
||||
<mxPoint x="1688" y="649" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-37" value="20219" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#FF0000;" vertex="1" parent="1">
|
||||
<mxGeometry x="1498" y="535" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-38" value="HTTPS" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#66CC00;" vertex="1" parent="1">
|
||||
<mxGeometry x="730" y="340" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-39" value="HTTPS" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#66CC00;" vertex="1" parent="1">
|
||||
<mxGeometry x="730" y="803" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-40" value="HTTPS" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#66CC00;" vertex="1" parent="1">
|
||||
<mxGeometry x="730" y="554" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-42" value="" style="endArrow=none;html=1;rounded=0;endFill=0;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="1381" y="1071" as="sourcePoint" />
|
||||
<mxPoint x="130" y="1071" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-43" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="130.5" y="1070" as="sourcePoint" />
|
||||
<mxPoint x="130" y="860" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-44" value="NO" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="1364" y="1000" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-45" value="YES" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;" vertex="1" parent="1">
|
||||
<mxGeometry x="1294" y="680" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-47" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="1156.5" y="450" as="sourcePoint" />
|
||||
<mxPoint x="1157" y="1070" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-48" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="wwqsnaxs0Bt7SYwqQu8i-56" target="wwqsnaxs0Bt7SYwqQu8i-26">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="1299" y="600" as="sourcePoint" />
|
||||
<mxPoint x="1499" y="600" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-49" value="HTTP" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#FF0000;" vertex="1" parent="1">
|
||||
<mxGeometry x="1379" y="340" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-50" value="HTTP" style="text;html=1;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=24;fontColor=#FF0000;" vertex="1" parent="1">
|
||||
<mxGeometry x="1379" y="554" width="100" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-54" value="" style="endArrow=classic;html=1;rounded=0;" edge="1" parent="1" target="wwqsnaxs0Bt7SYwqQu8i-53">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="130" y="390" as="sourcePoint" />
|
||||
<mxPoint x="1129" y="390" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-53" value="TLS Termination" style="whiteSpace=wrap;html=1;aspect=fixed;fontSize=18;" vertex="1" parent="1">
|
||||
<mxGeometry x="905" y="340" width="100" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-56" value="TLS Termination" style="whiteSpace=wrap;html=1;aspect=fixed;fontSize=18;" vertex="1" parent="1">
|
||||
<mxGeometry x="902" y="554" width="100" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-7" value="" style="endArrow=classic;html=1;rounded=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" target="wwqsnaxs0Bt7SYwqQu8i-56">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="130" y="601" as="sourcePoint" />
|
||||
<mxPoint x="850" y="601" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-58" value="Check Authentication" style="whiteSpace=wrap;html=1;aspect=fixed;fontSize=18;" vertex="1" parent="1">
|
||||
<mxGeometry x="1097" y="330" width="120" height="120" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-59" value="TLS Termination" style="whiteSpace=wrap;html=1;aspect=fixed;fontSize=18;" vertex="1" parent="1">
|
||||
<mxGeometry x="899" y="803" width="100" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-15" value="" style="endArrow=classic;html=1;rounded=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" target="wwqsnaxs0Bt7SYwqQu8i-59">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="30" y="853" as="sourcePoint" />
|
||||
<mxPoint x="850" y="853" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-60" value="" style="endArrow=classic;html=1;rounded=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="1379" y="390" as="sourcePoint" />
|
||||
<mxPoint x="1500" y="389.58" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-61" value="" style="endArrow=none;html=1;rounded=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;endFill=0;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="1379" y="773" as="sourcePoint" />
|
||||
<mxPoint x="1379" y="390" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="wwqsnaxs0Bt7SYwqQu8i-62" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="1380" y="933.5" as="sourcePoint" />
|
||||
<mxPoint x="1379" y="1069" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
|
Before Width: | Height: | Size: 176 KiB |
|
Before Width: | Height: | Size: 31 KiB |
@@ -479,7 +479,12 @@ function setDeviceData(direction = '', refreshCallback = '') {
|
||||
if (resp && resp.success) {
|
||||
showMessage(getString("Device_Saved_Success"));
|
||||
} else {
|
||||
showMessage(getString("Device_Saved_Unexpected"));
|
||||
|
||||
console.log(resp);
|
||||
|
||||
errorMessage = resp?.error;
|
||||
|
||||
showMessage(`${getString("Device_Saved_Unexpected")}: ${errorMessage}`, 5000, "modal_red");
|
||||
}
|
||||
|
||||
// Remove navigation prompt
|
||||
|
||||
@@ -116,7 +116,7 @@ function initializeEventsDatatable (eventsRows) {
|
||||
{
|
||||
targets: [0],
|
||||
'createdCell': function (td, cellData, rowData, row, col) {
|
||||
$(td).html(translateHTMLcodes(localizeTimestamp(cellData)));
|
||||
$(td).html(translateHTMLcodes((cellData)));
|
||||
}
|
||||
}
|
||||
],
|
||||
|
||||
@@ -12,7 +12,11 @@ var timerRefreshData = ''
|
||||
|
||||
var emptyArr = ['undefined', "", undefined, null, 'null'];
|
||||
var UI_LANG = "English (en_us)";
|
||||
const allLanguages = ["ar_ar","ca_ca","cs_cz","de_de","en_us","es_es","fa_fa","fr_fr","it_it","ja_jp","nb_no","pl_pl","pt_br","pt_pt","ru_ru","sv_sv","tr_tr","uk_ua","zh_cn"]; // needs to be same as in lang.php
|
||||
const allLanguages = ["ar_ar","ca_ca","cs_cz","de_de",
|
||||
"en_us","es_es","fa_fa","fr_fr",
|
||||
"it_it","ja_jp","nb_no","pl_pl",
|
||||
"pt_br","pt_pt","ru_ru","sv_sv",
|
||||
"tr_tr","uk_ua","vi_vn","zh_cn"]; // needs to be same as in lang.php
|
||||
var settingsJSON = {}
|
||||
|
||||
|
||||
@@ -364,6 +368,9 @@ function getLangCode() {
|
||||
case 'Ukrainian (uk_uk)':
|
||||
lang_code = 'uk_ua';
|
||||
break;
|
||||
case 'Vietnamese (vi_vn)':
|
||||
lang_code = 'vi_vn';
|
||||
break;
|
||||
}
|
||||
|
||||
return lang_code;
|
||||
@@ -447,21 +454,36 @@ function localizeTimestamp(input) {
|
||||
return formatSafe(input, tz);
|
||||
|
||||
function formatSafe(str, tz) {
|
||||
const date = new Date(str);
|
||||
|
||||
// CHECK: Does the input string have timezone information?
|
||||
// - Ends with Z: "2026-02-11T11:37:02Z"
|
||||
// - Has GMT±offset: "Wed Feb 11 2026 12:34:12 GMT+1100 (...)"
|
||||
// - Has offset at end: "2026-02-11 11:37:02+11:00"
|
||||
// - Has timezone name in parentheses: "(Australian Eastern Daylight Time)"
|
||||
const hasOffset = /Z$/i.test(str.trim()) ||
|
||||
/GMT[+-]\d{2,4}/.test(str) ||
|
||||
/[+-]\d{2}:?\d{2}$/.test(str.trim()) ||
|
||||
/\([^)]+\)$/.test(str.trim());
|
||||
|
||||
// ⚠️ CRITICAL: All DB timestamps are stored in UTC without timezone markers.
|
||||
// If no offset is present, we must explicitly mark it as UTC by appending 'Z'
|
||||
// so JavaScript doesn't interpret it as local browser time.
|
||||
let isoStr = str.trim();
|
||||
if (!hasOffset) {
|
||||
// Ensure proper ISO format before appending Z
|
||||
// Replace space with 'T' if needed: "2026-02-11 11:37:02" → "2026-02-11T11:37:02Z"
|
||||
isoStr = isoStr.trim().replace(/^(\d{4}-\d{2}-\d{2}) (\d{2}:\d{2}:\d{2})$/, '$1T$2') + 'Z';
|
||||
}
|
||||
|
||||
const date = new Date(isoStr);
|
||||
if (!isFinite(date)) {
|
||||
console.error(`ERROR: Couldn't parse date: '${str}' with TIMEZONE ${tz}`);
|
||||
return 'Failed conversion';
|
||||
}
|
||||
|
||||
// CHECK: Does the input string have an offset (e.g., +11:00 or Z)?
|
||||
// If it does, and we apply a 'tz' again, we double-shift.
|
||||
const hasOffset = /[Z|[+-]\d{2}:?\d{2}]$/.test(str.trim());
|
||||
|
||||
return new Intl.DateTimeFormat(LOCALE, {
|
||||
// If it has an offset, we display it as-is (UTC mode in Intl
|
||||
// effectively means "don't add more hours").
|
||||
// If no offset, apply your variable 'tz'.
|
||||
timeZone: hasOffset ? 'UTC' : tz,
|
||||
// Convert from UTC to user's configured timezone
|
||||
timeZone: tz,
|
||||
year: 'numeric', month: '2-digit', day: '2-digit',
|
||||
hour: '2-digit', minute: '2-digit', second: '2-digit',
|
||||
hour12: false
|
||||
|
||||
23
front/lib/treeviz/treeviz.iife.js
Normal file
0
front/lib/treeviz/bundle.js → front/lib/treeviz/treeviz.iife.old.js
Executable file → Normal file
@@ -69,7 +69,8 @@
|
||||
require 'php/templates/footer.php';
|
||||
?>
|
||||
|
||||
<script src="lib/treeviz/bundle.js"></script>
|
||||
<!-- <script src="lib/treeviz/bundle.js"></script> -->
|
||||
<script src="lib/treeviz/treeviz.iife.js"></script>
|
||||
|
||||
<script defer>
|
||||
|
||||
@@ -388,7 +389,7 @@
|
||||
const normalized_mac = node_mac.toLowerCase();
|
||||
|
||||
const sql = `
|
||||
SELECT devName, devMac, devLastIP, devVendor, devPresentLastScan, devAlertDown, devParentPort,
|
||||
SELECT devName, devMac, devLastIP, devVendor, devPresentLastScan, devAlertDown, devParentPort, devVlan,
|
||||
CASE
|
||||
WHEN devIsNew = 1 THEN 'New'
|
||||
WHEN devPresentLastScan = 1 THEN 'On-line'
|
||||
@@ -569,10 +570,10 @@ function getChildren(node, list, path, visited = [])
|
||||
// Loop through all items to find children of the current node
|
||||
for (var i in list) {
|
||||
const item = list[i];
|
||||
const parentMac = item.devParentMAC || ""; // null-safe
|
||||
const nodeMac = node.devMac || ""; // null-safe
|
||||
const parentMac = item.devParentMAC?.toLowerCase() || ""; // null-safe
|
||||
const nodeMac = node.devMac?.toLowerCase() || ""; // null-safe
|
||||
|
||||
if (parentMac != "" && parentMac.toLowerCase() == nodeMac.toLowerCase() && !hiddenMacs.includes(parentMac)) {
|
||||
if (parentMac != "" && parentMac == nodeMac && !hiddenMacs.includes(parentMac)) {
|
||||
|
||||
visibleNodesCount++;
|
||||
|
||||
@@ -588,6 +589,8 @@ function getChildren(node, list, path, visited = [])
|
||||
parentNodesCount++;
|
||||
}
|
||||
|
||||
// console.log(node);
|
||||
|
||||
return {
|
||||
name: node.devName,
|
||||
path: path,
|
||||
@@ -607,6 +610,8 @@ function getChildren(node, list, path, visited = [])
|
||||
alertDown: node.devAlertDown,
|
||||
hasChildren: children.length > 0 || hiddenMacs.includes(node.devMac),
|
||||
relType: node.devParentRelType,
|
||||
devVlan: node.devVlan,
|
||||
devSSID: node.devSSID,
|
||||
hiddenChildren: hiddenMacs.includes(node.devMac),
|
||||
qty: children.length,
|
||||
children: children
|
||||
@@ -649,6 +654,8 @@ function toggleSubTree(parentMac, treePath)
|
||||
{
|
||||
treePath = treePath.split('|')
|
||||
|
||||
parentMac = parentMac.toLowerCase()
|
||||
|
||||
if(!hiddenMacs.includes(parentMac))
|
||||
{
|
||||
hiddenMacs.push(parentMac)
|
||||
@@ -883,6 +890,22 @@ function initTree(myHierarchy)
|
||||
idKey: "mac",
|
||||
hasFlatData: false,
|
||||
relationnalField: "children",
|
||||
linkLabel: {
|
||||
render: (parent, child) => {
|
||||
// Return text or HTML to display on the connection line
|
||||
connectionLabel = (child?.data.devVlan ?? "") + "/" + (child?.data.devSSID ?? "");
|
||||
if(connectionLabel == "/")
|
||||
{
|
||||
connectionLabel = "";
|
||||
}
|
||||
|
||||
return connectionLabel;
|
||||
// or with HTML:
|
||||
// return "<tspan><strong>reports to</strong></tspan>";
|
||||
},
|
||||
color: "#336c87ff", // Label text color (optional)
|
||||
fontSize: nodeHeightPx - 5 // Label font size in px (optional)
|
||||
},
|
||||
linkWidth: (nodeData) => 2,
|
||||
linkColor: (nodeData) => {
|
||||
relConf = getRelationshipConf(nodeData.data.relType)
|
||||
|
||||
@@ -27,8 +27,8 @@ function initOnlineHistoryGraph() {
|
||||
var archivedCounts = [];
|
||||
|
||||
res.data.forEach(function(entry) {
|
||||
var dateObj = new Date(entry.Scan_Date);
|
||||
var formattedTime = dateObj.toLocaleTimeString([], {hour: '2-digit', minute: '2-digit', hour12: false});
|
||||
|
||||
var formattedTime = localizeTimestamp(entry.Scan_Date).slice(11, 17);
|
||||
|
||||
timeStamps.push(formattedTime);
|
||||
onlineCounts.push(entry.Online_Devices);
|
||||
|
||||
@@ -789,4 +789,4 @@
|
||||
"settings_system_label": "نظام",
|
||||
"settings_update_item_warning": "قم بتحديث القيمة أدناه. احرص على اتباع التنسيق السابق. <b>لم يتم إجراء التحقق.</b>",
|
||||
"test_event_tooltip": "احفظ التغييرات أولاً قبل اختبار الإعدادات."
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"AppEvents_ObjectType": "Object Type",
|
||||
"AppEvents_Plugin": "Plugin",
|
||||
"AppEvents_Type": "Type",
|
||||
"BACKEND_API_URL_description": "Used to generate backend API URLs. Specify if you use reverse proxy to map to your <code>GRAPHQL_PORT</code>. Enter full URL starting with <code>http://</code> including the port number (no trailing slash <code>/</code>).",
|
||||
"BACKEND_API_URL_description": "Used to allow the frontend to communicate with the backend. By default this is set to <code>/server</code> and generally should not be changed.",
|
||||
"BACKEND_API_URL_name": "Backend API URL",
|
||||
"BackDevDetail_Actions_Ask_Run": "Do you want to execute the action?",
|
||||
"BackDevDetail_Actions_Not_Registered": "Action not registered: ",
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"AppEvents_ObjectType": "Type d'objet",
|
||||
"AppEvents_Plugin": "Plugin",
|
||||
"AppEvents_Type": "Type",
|
||||
"BACKEND_API_URL_description": "Utilisé pour générer les URL de l'API back-end. Spécifiez si vous utiliser un reverse proxy pour mapper votre <code>GRAPHQL_PORT</code>. Renseigner l'URL complète, en commençant par <code>http://</code>, et en incluant le numéro de port (sans slash de fin <code>/</code>).",
|
||||
"BACKEND_API_URL_description": "Utilisé pour autoriser l'interface utilisateur à communiquer avec le serveur. Par défaut, cela est défini sur <code>/serveur</code> et ne doit généralement pas être changé.",
|
||||
"BACKEND_API_URL_name": "URL de l'API backend",
|
||||
"BackDevDetail_Actions_Ask_Run": "Voulez-vous exécuter cette action ?",
|
||||
"BackDevDetail_Actions_Not_Registered": "Action non enregistrée : ",
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"AppEvents_ObjectType": "Tipo oggetto",
|
||||
"AppEvents_Plugin": "Plugin",
|
||||
"AppEvents_Type": "Tipo",
|
||||
"BACKEND_API_URL_description": "Utilizzato per generare URL API backend. Specifica se utilizzi un proxy inverso per il mapping al tuo <code>GRAPHQL_PORT</code>. Inserisci l'URL completo che inizia con <code>http://</code> incluso il numero di porta (senza barra finale <code>/</code>).",
|
||||
"BACKEND_API_URL_description": "Utilizzato per consentire al frontend di comunicare con il backend. Per impostazione predefinita è impostato su <code>/server</code> e generalmente non dovrebbe essere modificato.",
|
||||
"BACKEND_API_URL_name": "URL API backend",
|
||||
"BackDevDetail_Actions_Ask_Run": "Vuoi eseguire questa azione?",
|
||||
"BackDevDetail_Actions_Not_Registered": "Azione non registrata: ",
|
||||
|
||||
@@ -789,4 +789,4 @@
|
||||
"settings_system_label": "システム",
|
||||
"settings_update_item_warning": "以下の値を更新してください。以前のフォーマットに従うよう注意してください。<b>検証は行われません。</b>",
|
||||
"test_event_tooltip": "設定をテストする前に、まず変更を保存してください。"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,15 +5,19 @@
|
||||
// ###################################
|
||||
|
||||
$defaultLang = "en_us";
|
||||
$allLanguages = [ "ar_ar", "ca_ca", "cs_cz", "de_de", "en_us", "es_es", "fa_fa", "fr_fr", "it_it", "ja_jp", "nb_no", "pl_pl", "pt_br", "pt_pt", "ru_ru", "sv_sv", "tr_tr", "uk_ua", "zh_cn"];
|
||||
$allLanguages = [ "ar_ar", "ca_ca", "cs_cz", "de_de",
|
||||
"en_us", "es_es", "fa_fa", "fr_fr",
|
||||
"it_it", "ja_jp", "nb_no", "pl_pl",
|
||||
"pt_br", "pt_pt", "ru_ru", "sv_sv",
|
||||
"tr_tr", "uk_ua", "vi_vn", "zh_cn"];
|
||||
|
||||
|
||||
global $db;
|
||||
|
||||
$result = $db->querySingle("SELECT setValue FROM Settings WHERE setKey = 'UI_LANG'");
|
||||
$result = $db->querySingle("SELECT setValue FROM Settings WHERE setKey = 'UI_LANG'");
|
||||
|
||||
// below has to match exactly the values in /front/php/templates/language/lang.php & /front/js/common.js
|
||||
switch($result){
|
||||
switch($result){
|
||||
case 'Arabic (ar_ar)': $pia_lang_selected = 'ar_ar'; break;
|
||||
case 'Catalan (ca_ca)': $pia_lang_selected = 'ca_ca'; break;
|
||||
case 'Czech (cs_cz)': $pia_lang_selected = 'cs_cz'; break;
|
||||
@@ -32,6 +36,7 @@ switch($result){
|
||||
case 'Swedish (sv_sv)': $pia_lang_selected = 'sv_sv'; break;
|
||||
case 'Turkish (tr_tr)': $pia_lang_selected = 'tr_tr'; break;
|
||||
case 'Ukrainian (uk_ua)': $pia_lang_selected = 'uk_ua'; break;
|
||||
case 'Vietnamese (vi_vn)': $pia_lang_selected = 'vi_vn'; break;
|
||||
case 'Chinese (zh_cn)': $pia_lang_selected = 'zh_cn'; break;
|
||||
default: $pia_lang_selected = 'en_us'; break;
|
||||
}
|
||||
|
||||
@@ -38,6 +38,6 @@ if __name__ == "__main__":
|
||||
json_files = ["en_us.json", "ar_ar.json", "ca_ca.json", "cs_cz.json", "de_de.json",
|
||||
"es_es.json", "fa_fa.json", "fr_fr.json", "it_it.json", "ja_jp.json",
|
||||
"nb_no.json", "pl_pl.json", "pt_br.json", "pt_pt.json", "ru_ru.json",
|
||||
"sv_sv.json", "tr_tr.json", "uk_ua.json", "zh_cn.json"]
|
||||
"sv_sv.json", "tr_tr.json", "vi_vn.json", "uk_ua.json", "zh_cn.json"]
|
||||
file_paths = [os.path.join(current_path, file) for file in json_files]
|
||||
merge_translations(file_paths[0], file_paths[1:])
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"AppEvents_ObjectType": "Тип объекта",
|
||||
"AppEvents_Plugin": "Плагин",
|
||||
"AppEvents_Type": "Тип",
|
||||
"BACKEND_API_URL_description": "Используется для создания URL-адресов серверного API. Укажите, используете ли вы обратный прокси-сервер для сопоставления с вашим <code>GRAPHQL_PORT</code>. Введите полный URL-адрес, начинающийся с <code>http://</code>, включая номер порта (без косой черты <code>/</code>).",
|
||||
"BACKEND_API_URL_description": "Используется для обеспечения связи между фронтендом и бэкендом. По умолчанию это значение установлено на <code>/server</code> и, как правило, не должно изменяться.",
|
||||
"BACKEND_API_URL_name": "URL-адрес серверного API",
|
||||
"BackDevDetail_Actions_Ask_Run": "Вы хотите выполнить действие?",
|
||||
"BackDevDetail_Actions_Not_Registered": "Действие не зарегистрировано:· ",
|
||||
|
||||
792
front/php/templates/language/vi_vn.json
Normal file
@@ -0,0 +1,792 @@
|
||||
{
|
||||
"API_CUSTOM_SQL_description": "",
|
||||
"API_CUSTOM_SQL_name": "",
|
||||
"API_TOKEN_description": "",
|
||||
"API_TOKEN_name": "",
|
||||
"API_display_name": "",
|
||||
"API_icon": "",
|
||||
"About_Design": "",
|
||||
"About_Exit": "",
|
||||
"About_Title": "",
|
||||
"AppEvents_AppEventProcessed": "",
|
||||
"AppEvents_DateTimeCreated": "",
|
||||
"AppEvents_Extra": "",
|
||||
"AppEvents_GUID": "",
|
||||
"AppEvents_Helper1": "",
|
||||
"AppEvents_Helper2": "",
|
||||
"AppEvents_Helper3": "",
|
||||
"AppEvents_ObjectForeignKey": "",
|
||||
"AppEvents_ObjectIndex": "",
|
||||
"AppEvents_ObjectIsArchived": "",
|
||||
"AppEvents_ObjectIsNew": "",
|
||||
"AppEvents_ObjectPlugin": "",
|
||||
"AppEvents_ObjectPrimaryID": "",
|
||||
"AppEvents_ObjectSecondaryID": "",
|
||||
"AppEvents_ObjectStatus": "",
|
||||
"AppEvents_ObjectStatusColumn": "",
|
||||
"AppEvents_ObjectType": "",
|
||||
"AppEvents_Plugin": "",
|
||||
"AppEvents_Type": "",
|
||||
"BACKEND_API_URL_description": "",
|
||||
"BACKEND_API_URL_name": "",
|
||||
"BackDevDetail_Actions_Ask_Run": "",
|
||||
"BackDevDetail_Actions_Not_Registered": "",
|
||||
"BackDevDetail_Actions_Title_Run": "",
|
||||
"BackDevDetail_Copy_Ask": "",
|
||||
"BackDevDetail_Copy_Title": "",
|
||||
"BackDevDetail_Tools_WOL_error": "",
|
||||
"BackDevDetail_Tools_WOL_okay": "",
|
||||
"BackDevices_Arpscan_disabled": "",
|
||||
"BackDevices_Arpscan_enabled": "",
|
||||
"BackDevices_Backup_CopError": "",
|
||||
"BackDevices_Backup_Failed": "",
|
||||
"BackDevices_Backup_okay": "",
|
||||
"BackDevices_DBTools_DelDevError_a": "",
|
||||
"BackDevices_DBTools_DelDevError_b": "",
|
||||
"BackDevices_DBTools_DelDev_a": "",
|
||||
"BackDevices_DBTools_DelDev_b": "",
|
||||
"BackDevices_DBTools_DelEvents": "",
|
||||
"BackDevices_DBTools_DelEventsError": "",
|
||||
"BackDevices_DBTools_ImportCSV": "",
|
||||
"BackDevices_DBTools_ImportCSVError": "",
|
||||
"BackDevices_DBTools_ImportCSVMissing": "",
|
||||
"BackDevices_DBTools_Purge": "",
|
||||
"BackDevices_DBTools_UpdDev": "",
|
||||
"BackDevices_DBTools_UpdDevError": "",
|
||||
"BackDevices_DBTools_Upgrade": "",
|
||||
"BackDevices_DBTools_UpgradeError": "",
|
||||
"BackDevices_Device_UpdDevError": "",
|
||||
"BackDevices_Restore_CopError": "",
|
||||
"BackDevices_Restore_Failed": "",
|
||||
"BackDevices_Restore_okay": "",
|
||||
"BackDevices_darkmode_disabled": "",
|
||||
"BackDevices_darkmode_enabled": "",
|
||||
"CLEAR_NEW_FLAG_description": "",
|
||||
"CLEAR_NEW_FLAG_name": "",
|
||||
"CustProps_cant_remove": "",
|
||||
"DAYS_TO_KEEP_EVENTS_description": "",
|
||||
"DAYS_TO_KEEP_EVENTS_name": "",
|
||||
"DISCOVER_PLUGINS_description": "",
|
||||
"DISCOVER_PLUGINS_name": "",
|
||||
"DevDetail_Children_Title": "",
|
||||
"DevDetail_Copy_Device_Title": "",
|
||||
"DevDetail_Copy_Device_Tooltip": "",
|
||||
"DevDetail_CustomProperties_Title": "",
|
||||
"DevDetail_CustomProps_reset_info": "",
|
||||
"DevDetail_DisplayFields_Title": "",
|
||||
"DevDetail_EveandAl_AlertAllEvents": "",
|
||||
"DevDetail_EveandAl_AlertDown": "",
|
||||
"DevDetail_EveandAl_Archived": "",
|
||||
"DevDetail_EveandAl_NewDevice": "",
|
||||
"DevDetail_EveandAl_NewDevice_Tooltip": "",
|
||||
"DevDetail_EveandAl_RandomMAC": "",
|
||||
"DevDetail_EveandAl_ScanCycle": "",
|
||||
"DevDetail_EveandAl_ScanCycle_a": "",
|
||||
"DevDetail_EveandAl_ScanCycle_z": "",
|
||||
"DevDetail_EveandAl_Skip": "",
|
||||
"DevDetail_EveandAl_Title": "",
|
||||
"DevDetail_Events_CheckBox": "",
|
||||
"DevDetail_GoToNetworkNode": "",
|
||||
"DevDetail_Icon": "",
|
||||
"DevDetail_Icon_Descr": "",
|
||||
"DevDetail_Loading": "",
|
||||
"DevDetail_MainInfo_Comments": "",
|
||||
"DevDetail_MainInfo_Favorite": "",
|
||||
"DevDetail_MainInfo_Group": "",
|
||||
"DevDetail_MainInfo_Location": "",
|
||||
"DevDetail_MainInfo_Name": "",
|
||||
"DevDetail_MainInfo_Network": "",
|
||||
"DevDetail_MainInfo_Network_Port": "",
|
||||
"DevDetail_MainInfo_Network_Site": "",
|
||||
"DevDetail_MainInfo_Network_Title": "",
|
||||
"DevDetail_MainInfo_Owner": "",
|
||||
"DevDetail_MainInfo_SSID": "",
|
||||
"DevDetail_MainInfo_Title": "",
|
||||
"DevDetail_MainInfo_Type": "",
|
||||
"DevDetail_MainInfo_Vendor": "",
|
||||
"DevDetail_MainInfo_mac": "",
|
||||
"DevDetail_NavToChildNode": "",
|
||||
"DevDetail_Network_Node_hover": "",
|
||||
"DevDetail_Network_Port_hover": "",
|
||||
"DevDetail_Nmap_Scans": "",
|
||||
"DevDetail_Nmap_Scans_desc": "",
|
||||
"DevDetail_Nmap_buttonDefault": "",
|
||||
"DevDetail_Nmap_buttonDefault_text": "",
|
||||
"DevDetail_Nmap_buttonDetail": "",
|
||||
"DevDetail_Nmap_buttonDetail_text": "",
|
||||
"DevDetail_Nmap_buttonFast": "",
|
||||
"DevDetail_Nmap_buttonFast_text": "",
|
||||
"DevDetail_Nmap_buttonSkipDiscovery": "",
|
||||
"DevDetail_Nmap_buttonSkipDiscovery_text": "",
|
||||
"DevDetail_Nmap_resultsLink": "",
|
||||
"DevDetail_Owner_hover": "",
|
||||
"DevDetail_Periodselect_All": "",
|
||||
"DevDetail_Periodselect_LastMonth": "",
|
||||
"DevDetail_Periodselect_LastWeek": "",
|
||||
"DevDetail_Periodselect_LastYear": "",
|
||||
"DevDetail_Periodselect_today": "",
|
||||
"DevDetail_Run_Actions_Title": "",
|
||||
"DevDetail_Run_Actions_Tooltip": "",
|
||||
"DevDetail_SessionInfo_FirstSession": "",
|
||||
"DevDetail_SessionInfo_LastIP": "",
|
||||
"DevDetail_SessionInfo_LastSession": "",
|
||||
"DevDetail_SessionInfo_StaticIP": "",
|
||||
"DevDetail_SessionInfo_Status": "",
|
||||
"DevDetail_SessionInfo_Title": "",
|
||||
"DevDetail_SessionTable_Additionalinfo": "",
|
||||
"DevDetail_SessionTable_Connection": "",
|
||||
"DevDetail_SessionTable_Disconnection": "",
|
||||
"DevDetail_SessionTable_Duration": "",
|
||||
"DevDetail_SessionTable_IP": "",
|
||||
"DevDetail_SessionTable_Order": "",
|
||||
"DevDetail_Shortcut_CurrentStatus": "",
|
||||
"DevDetail_Shortcut_DownAlerts": "",
|
||||
"DevDetail_Shortcut_Presence": "",
|
||||
"DevDetail_Shortcut_Sessions": "",
|
||||
"DevDetail_Tab_Details": "",
|
||||
"DevDetail_Tab_Events": "",
|
||||
"DevDetail_Tab_EventsTableDate": "",
|
||||
"DevDetail_Tab_EventsTableEvent": "",
|
||||
"DevDetail_Tab_EventsTableIP": "",
|
||||
"DevDetail_Tab_EventsTableInfo": "",
|
||||
"DevDetail_Tab_Nmap": "",
|
||||
"DevDetail_Tab_NmapEmpty": "",
|
||||
"DevDetail_Tab_NmapTableExtra": "",
|
||||
"DevDetail_Tab_NmapTableHeader": "",
|
||||
"DevDetail_Tab_NmapTableIndex": "",
|
||||
"DevDetail_Tab_NmapTablePort": "",
|
||||
"DevDetail_Tab_NmapTableService": "",
|
||||
"DevDetail_Tab_NmapTableState": "",
|
||||
"DevDetail_Tab_NmapTableText": "",
|
||||
"DevDetail_Tab_NmapTableTime": "",
|
||||
"DevDetail_Tab_Plugins": "",
|
||||
"DevDetail_Tab_Presence": "",
|
||||
"DevDetail_Tab_Sessions": "",
|
||||
"DevDetail_Tab_Tools": "",
|
||||
"DevDetail_Tab_Tools_Internet_Info_Description": "",
|
||||
"DevDetail_Tab_Tools_Internet_Info_Error": "",
|
||||
"DevDetail_Tab_Tools_Internet_Info_Start": "",
|
||||
"DevDetail_Tab_Tools_Internet_Info_Title": "",
|
||||
"DevDetail_Tab_Tools_Nslookup_Description": "",
|
||||
"DevDetail_Tab_Tools_Nslookup_Error": "",
|
||||
"DevDetail_Tab_Tools_Nslookup_Start": "",
|
||||
"DevDetail_Tab_Tools_Nslookup_Title": "",
|
||||
"DevDetail_Tab_Tools_Speedtest_Description": "",
|
||||
"DevDetail_Tab_Tools_Speedtest_Start": "",
|
||||
"DevDetail_Tab_Tools_Speedtest_Title": "",
|
||||
"DevDetail_Tab_Tools_Traceroute_Description": "",
|
||||
"DevDetail_Tab_Tools_Traceroute_Error": "",
|
||||
"DevDetail_Tab_Tools_Traceroute_Start": "",
|
||||
"DevDetail_Tab_Tools_Traceroute_Title": "",
|
||||
"DevDetail_Tools_WOL": "",
|
||||
"DevDetail_Tools_WOL_noti": "",
|
||||
"DevDetail_Tools_WOL_noti_text": "",
|
||||
"DevDetail_Type_hover": "",
|
||||
"DevDetail_Vendor_hover": "",
|
||||
"DevDetail_WOL_Title": "",
|
||||
"DevDetail_button_AddIcon": "",
|
||||
"DevDetail_button_AddIcon_Help": "",
|
||||
"DevDetail_button_AddIcon_Tooltip": "",
|
||||
"DevDetail_button_Delete": "",
|
||||
"DevDetail_button_DeleteEvents": "",
|
||||
"DevDetail_button_DeleteEvents_Warning": "",
|
||||
"DevDetail_button_Delete_ask": "",
|
||||
"DevDetail_button_OverwriteIcons": "",
|
||||
"DevDetail_button_OverwriteIcons_Tooltip": "",
|
||||
"DevDetail_button_OverwriteIcons_Warning": "",
|
||||
"DevDetail_button_Reset": "",
|
||||
"DevDetail_button_Save": "",
|
||||
"DeviceEdit_ValidMacIp": "",
|
||||
"Device_MultiEdit": "",
|
||||
"Device_MultiEdit_Backup": "",
|
||||
"Device_MultiEdit_Fields": "",
|
||||
"Device_MultiEdit_MassActions": "",
|
||||
"Device_MultiEdit_No_Devices": "",
|
||||
"Device_MultiEdit_Tooltip": "",
|
||||
"Device_Save_Failed": "",
|
||||
"Device_Save_Unauthorized": "",
|
||||
"Device_Saved_Success": "",
|
||||
"Device_Saved_Unexpected": "",
|
||||
"Device_Searchbox": "",
|
||||
"Device_Shortcut_AllDevices": "",
|
||||
"Device_Shortcut_AllNodes": "",
|
||||
"Device_Shortcut_Archived": "",
|
||||
"Device_Shortcut_Connected": "",
|
||||
"Device_Shortcut_Devices": "",
|
||||
"Device_Shortcut_DownAlerts": "",
|
||||
"Device_Shortcut_DownOnly": "",
|
||||
"Device_Shortcut_Favorites": "",
|
||||
"Device_Shortcut_NewDevices": "",
|
||||
"Device_Shortcut_OnlineChart": "",
|
||||
"Device_TableHead_AlertDown": "",
|
||||
"Device_TableHead_Connected_Devices": "",
|
||||
"Device_TableHead_CustomProps": "",
|
||||
"Device_TableHead_FQDN": "",
|
||||
"Device_TableHead_Favorite": "",
|
||||
"Device_TableHead_FirstSession": "",
|
||||
"Device_TableHead_GUID": "",
|
||||
"Device_TableHead_Group": "",
|
||||
"Device_TableHead_IPv4": "",
|
||||
"Device_TableHead_IPv6": "",
|
||||
"Device_TableHead_Icon": "",
|
||||
"Device_TableHead_LastIP": "",
|
||||
"Device_TableHead_LastIPOrder": "",
|
||||
"Device_TableHead_LastSession": "",
|
||||
"Device_TableHead_Location": "",
|
||||
"Device_TableHead_MAC": "",
|
||||
"Device_TableHead_MAC_full": "",
|
||||
"Device_TableHead_Name": "",
|
||||
"Device_TableHead_NetworkSite": "",
|
||||
"Device_TableHead_Owner": "",
|
||||
"Device_TableHead_ParentRelType": "",
|
||||
"Device_TableHead_Parent_MAC": "",
|
||||
"Device_TableHead_Port": "",
|
||||
"Device_TableHead_PresentLastScan": "",
|
||||
"Device_TableHead_ReqNicsOnline": "",
|
||||
"Device_TableHead_RowID": "",
|
||||
"Device_TableHead_Rowid": "",
|
||||
"Device_TableHead_SSID": "",
|
||||
"Device_TableHead_SourcePlugin": "",
|
||||
"Device_TableHead_Status": "",
|
||||
"Device_TableHead_SyncHubNodeName": "",
|
||||
"Device_TableHead_Type": "",
|
||||
"Device_TableHead_Vendor": "",
|
||||
"Device_TableHead_Vlan": "",
|
||||
"Device_Table_Not_Network_Device": "",
|
||||
"Device_Table_info": "",
|
||||
"Device_Table_nav_next": "",
|
||||
"Device_Table_nav_prev": "",
|
||||
"Device_Tablelenght": "",
|
||||
"Device_Tablelenght_all": "",
|
||||
"Device_Title": "",
|
||||
"Devices_Filters": "",
|
||||
"ENABLE_PLUGINS_description": "",
|
||||
"ENABLE_PLUGINS_name": "",
|
||||
"ENCRYPTION_KEY_description": "",
|
||||
"ENCRYPTION_KEY_name": "",
|
||||
"Email_display_name": "",
|
||||
"Email_icon": "",
|
||||
"Events_Loading": "",
|
||||
"Events_Periodselect_All": "",
|
||||
"Events_Periodselect_LastMonth": "",
|
||||
"Events_Periodselect_LastWeek": "",
|
||||
"Events_Periodselect_LastYear": "",
|
||||
"Events_Periodselect_today": "",
|
||||
"Events_Searchbox": "",
|
||||
"Events_Shortcut_AllEvents": "",
|
||||
"Events_Shortcut_DownAlerts": "",
|
||||
"Events_Shortcut_Events": "",
|
||||
"Events_Shortcut_MissSessions": "",
|
||||
"Events_Shortcut_NewDevices": "",
|
||||
"Events_Shortcut_Sessions": "",
|
||||
"Events_Shortcut_VoidSessions": "",
|
||||
"Events_TableHead_AdditionalInfo": "",
|
||||
"Events_TableHead_Connection": "",
|
||||
"Events_TableHead_Date": "",
|
||||
"Events_TableHead_Device": "",
|
||||
"Events_TableHead_Disconnection": "",
|
||||
"Events_TableHead_Duration": "",
|
||||
"Events_TableHead_DurationOrder": "",
|
||||
"Events_TableHead_EventType": "",
|
||||
"Events_TableHead_IP": "",
|
||||
"Events_TableHead_IPOrder": "",
|
||||
"Events_TableHead_Order": "",
|
||||
"Events_TableHead_Owner": "",
|
||||
"Events_TableHead_PendingAlert": "",
|
||||
"Events_Table_info": "",
|
||||
"Events_Table_nav_next": "",
|
||||
"Events_Table_nav_prev": "",
|
||||
"Events_Tablelenght": "",
|
||||
"Events_Tablelenght_all": "",
|
||||
"Events_Title": "",
|
||||
"FakeMAC_hover": "",
|
||||
"FieldLock_Error": "",
|
||||
"FieldLock_Lock_Tooltip": "",
|
||||
"FieldLock_Locked": "",
|
||||
"FieldLock_SaveBeforeLocking": "",
|
||||
"FieldLock_Source_Label": "",
|
||||
"FieldLock_Unlock_Tooltip": "",
|
||||
"FieldLock_Unlocked": "",
|
||||
"GRAPHQL_PORT_description": "",
|
||||
"GRAPHQL_PORT_name": "",
|
||||
"Gen_Action": "",
|
||||
"Gen_Add": "",
|
||||
"Gen_AddDevice": "",
|
||||
"Gen_Add_All": "",
|
||||
"Gen_All_Devices": "",
|
||||
"Gen_AreYouSure": "",
|
||||
"Gen_Backup": "",
|
||||
"Gen_Cancel": "",
|
||||
"Gen_Change": "",
|
||||
"Gen_Copy": "",
|
||||
"Gen_CopyToClipboard": "",
|
||||
"Gen_DataUpdatedUITakesTime": "",
|
||||
"Gen_Delete": "",
|
||||
"Gen_DeleteAll": "",
|
||||
"Gen_Description": "",
|
||||
"Gen_Error": "",
|
||||
"Gen_Filter": "",
|
||||
"Gen_Generate": "",
|
||||
"Gen_InvalidMac": "",
|
||||
"Gen_Invalid_Value": "",
|
||||
"Gen_LockedDB": "",
|
||||
"Gen_NetworkMask": "",
|
||||
"Gen_Offline": "",
|
||||
"Gen_Okay": "",
|
||||
"Gen_Online": "",
|
||||
"Gen_Purge": "",
|
||||
"Gen_ReadDocs": "",
|
||||
"Gen_Remove_All": "",
|
||||
"Gen_Remove_Last": "",
|
||||
"Gen_Reset": "",
|
||||
"Gen_Restore": "",
|
||||
"Gen_Run": "",
|
||||
"Gen_Save": "",
|
||||
"Gen_Saved": "",
|
||||
"Gen_Search": "",
|
||||
"Gen_Select": "",
|
||||
"Gen_SelectIcon": "",
|
||||
"Gen_SelectToPreview": "",
|
||||
"Gen_Selected_Devices": "",
|
||||
"Gen_Subnet": "",
|
||||
"Gen_Switch": "",
|
||||
"Gen_Upd": "",
|
||||
"Gen_Upd_Fail": "",
|
||||
"Gen_Update": "",
|
||||
"Gen_Update_Value": "",
|
||||
"Gen_ValidIcon": "",
|
||||
"Gen_Warning": "",
|
||||
"Gen_Work_In_Progress": "",
|
||||
"Gen_create_new_device": "",
|
||||
"Gen_create_new_device_info": "",
|
||||
"General_display_name": "",
|
||||
"General_icon": "",
|
||||
"HRS_TO_KEEP_NEWDEV_description": "",
|
||||
"HRS_TO_KEEP_NEWDEV_name": "",
|
||||
"HRS_TO_KEEP_OFFDEV_description": "",
|
||||
"HRS_TO_KEEP_OFFDEV_name": "",
|
||||
"LOADED_PLUGINS_description": "",
|
||||
"LOADED_PLUGINS_name": "",
|
||||
"LOG_LEVEL_description": "",
|
||||
"LOG_LEVEL_name": "",
|
||||
"Loading": "",
|
||||
"Login_Box": "",
|
||||
"Login_Default_PWD": "",
|
||||
"Login_Info": "",
|
||||
"Login_Psw-box": "",
|
||||
"Login_Psw_alert": "",
|
||||
"Login_Psw_folder": "",
|
||||
"Login_Psw_new": "",
|
||||
"Login_Psw_run": "",
|
||||
"Login_Remember": "",
|
||||
"Login_Remember_small": "",
|
||||
"Login_Submit": "",
|
||||
"Login_Toggle_Alert_headline": "",
|
||||
"Login_Toggle_Info": "",
|
||||
"Login_Toggle_Info_headline": "",
|
||||
"Maint_PurgeLog": "",
|
||||
"Maint_RestartServer": "",
|
||||
"Maint_Restart_Server_noti_text": "",
|
||||
"Maintenance_InitCheck": "",
|
||||
"Maintenance_InitCheck_Checking": "",
|
||||
"Maintenance_InitCheck_QuickSetupGuide": "",
|
||||
"Maintenance_InitCheck_Success": "",
|
||||
"Maintenance_ReCheck": "",
|
||||
"Maintenance_Running_Version": "",
|
||||
"Maintenance_Status": "",
|
||||
"Maintenance_Title": "",
|
||||
"Maintenance_Tool_DownloadConfig": "",
|
||||
"Maintenance_Tool_DownloadConfig_text": "",
|
||||
"Maintenance_Tool_DownloadWorkflows": "",
|
||||
"Maintenance_Tool_DownloadWorkflows_text": "",
|
||||
"Maintenance_Tool_ExportCSV": "",
|
||||
"Maintenance_Tool_ExportCSV_noti": "",
|
||||
"Maintenance_Tool_ExportCSV_noti_text": "",
|
||||
"Maintenance_Tool_ExportCSV_text": "",
|
||||
"Maintenance_Tool_ImportCSV": "",
|
||||
"Maintenance_Tool_ImportCSV_noti": "",
|
||||
"Maintenance_Tool_ImportCSV_noti_text": "",
|
||||
"Maintenance_Tool_ImportCSV_text": "",
|
||||
"Maintenance_Tool_ImportConfig_noti": "",
|
||||
"Maintenance_Tool_ImportPastedCSV": "",
|
||||
"Maintenance_Tool_ImportPastedCSV_noti_text": "",
|
||||
"Maintenance_Tool_ImportPastedCSV_text": "",
|
||||
"Maintenance_Tool_ImportPastedConfig": "",
|
||||
"Maintenance_Tool_ImportPastedConfig_noti_text": "",
|
||||
"Maintenance_Tool_ImportPastedConfig_text": "",
|
||||
"Maintenance_Tool_UnlockFields": "",
|
||||
"Maintenance_Tool_UnlockFields_noti": "",
|
||||
"Maintenance_Tool_UnlockFields_noti_text": "",
|
||||
"Maintenance_Tool_UnlockFields_text": "",
|
||||
"Maintenance_Tool_arpscansw": "",
|
||||
"Maintenance_Tool_arpscansw_noti": "",
|
||||
"Maintenance_Tool_arpscansw_noti_text": "",
|
||||
"Maintenance_Tool_arpscansw_text": "",
|
||||
"Maintenance_Tool_backup": "",
|
||||
"Maintenance_Tool_backup_noti": "",
|
||||
"Maintenance_Tool_backup_noti_text": "",
|
||||
"Maintenance_Tool_backup_text": "",
|
||||
"Maintenance_Tool_check_visible": "",
|
||||
"Maintenance_Tool_clearSourceFields_selected": "",
|
||||
"Maintenance_Tool_clearSourceFields_selected_noti": "",
|
||||
"Maintenance_Tool_clearSourceFields_selected_text": "",
|
||||
"Maintenance_Tool_darkmode": "",
|
||||
"Maintenance_Tool_darkmode_noti": "",
|
||||
"Maintenance_Tool_darkmode_noti_text": "",
|
||||
"Maintenance_Tool_darkmode_text": "",
|
||||
"Maintenance_Tool_del_ActHistory": "",
|
||||
"Maintenance_Tool_del_ActHistory_noti": "",
|
||||
"Maintenance_Tool_del_ActHistory_noti_text": "",
|
||||
"Maintenance_Tool_del_ActHistory_text": "",
|
||||
"Maintenance_Tool_del_alldev": "",
|
||||
"Maintenance_Tool_del_alldev_noti": "",
|
||||
"Maintenance_Tool_del_alldev_noti_text": "",
|
||||
"Maintenance_Tool_del_alldev_text": "",
|
||||
"Maintenance_Tool_del_allevents": "",
|
||||
"Maintenance_Tool_del_allevents30": "",
|
||||
"Maintenance_Tool_del_allevents30_noti": "",
|
||||
"Maintenance_Tool_del_allevents30_noti_text": "",
|
||||
"Maintenance_Tool_del_allevents30_text": "",
|
||||
"Maintenance_Tool_del_allevents_noti": "",
|
||||
"Maintenance_Tool_del_allevents_noti_text": "",
|
||||
"Maintenance_Tool_del_allevents_text": "",
|
||||
"Maintenance_Tool_del_empty_macs": "",
|
||||
"Maintenance_Tool_del_empty_macs_noti": "",
|
||||
"Maintenance_Tool_del_empty_macs_noti_text": "",
|
||||
"Maintenance_Tool_del_empty_macs_text": "",
|
||||
"Maintenance_Tool_del_selecteddev": "",
|
||||
"Maintenance_Tool_del_selecteddev_text": "",
|
||||
"Maintenance_Tool_del_unknowndev": "",
|
||||
"Maintenance_Tool_del_unknowndev_noti": "",
|
||||
"Maintenance_Tool_del_unknowndev_noti_text": "",
|
||||
"Maintenance_Tool_del_unknowndev_text": "",
|
||||
"Maintenance_Tool_del_unlockFields_selecteddev_text": "",
|
||||
"Maintenance_Tool_displayed_columns_text": "",
|
||||
"Maintenance_Tool_drag_me": "",
|
||||
"Maintenance_Tool_order_columns_text": "",
|
||||
"Maintenance_Tool_purgebackup": "",
|
||||
"Maintenance_Tool_purgebackup_noti": "",
|
||||
"Maintenance_Tool_purgebackup_noti_text": "",
|
||||
"Maintenance_Tool_purgebackup_text": "",
|
||||
"Maintenance_Tool_restore": "",
|
||||
"Maintenance_Tool_restore_noti": "",
|
||||
"Maintenance_Tool_restore_noti_text": "",
|
||||
"Maintenance_Tool_restore_text": "",
|
||||
"Maintenance_Tool_unlockFields_selecteddev": "",
|
||||
"Maintenance_Tool_unlockFields_selecteddev_noti": "",
|
||||
"Maintenance_Tool_upgrade_database_noti": "",
|
||||
"Maintenance_Tool_upgrade_database_noti_text": "",
|
||||
"Maintenance_Tool_upgrade_database_text": "",
|
||||
"Maintenance_Tools_Tab_BackupRestore": "",
|
||||
"Maintenance_Tools_Tab_Logging": "",
|
||||
"Maintenance_Tools_Tab_Settings": "",
|
||||
"Maintenance_Tools_Tab_Tools": "",
|
||||
"Maintenance_Tools_Tab_UISettings": "",
|
||||
"Maintenance_arp_status": "",
|
||||
"Maintenance_arp_status_off": "",
|
||||
"Maintenance_arp_status_on": "",
|
||||
"Maintenance_built_on": "",
|
||||
"Maintenance_current_version": "",
|
||||
"Maintenance_database_backup": "",
|
||||
"Maintenance_database_backup_found": "",
|
||||
"Maintenance_database_backup_total": "",
|
||||
"Maintenance_database_lastmod": "",
|
||||
"Maintenance_database_path": "",
|
||||
"Maintenance_database_rows": "",
|
||||
"Maintenance_database_size": "",
|
||||
"Maintenance_lang_selector_apply": "",
|
||||
"Maintenance_lang_selector_empty": "",
|
||||
"Maintenance_lang_selector_lable": "",
|
||||
"Maintenance_lang_selector_text": "",
|
||||
"Maintenance_new_version": "",
|
||||
"Maintenance_themeselector_apply": "",
|
||||
"Maintenance_themeselector_empty": "",
|
||||
"Maintenance_themeselector_lable": "",
|
||||
"Maintenance_themeselector_text": "",
|
||||
"Maintenance_version": "",
|
||||
"NETWORK_DEVICE_TYPES_description": "",
|
||||
"NETWORK_DEVICE_TYPES_name": "",
|
||||
"Navigation_About": "",
|
||||
"Navigation_AppEvents": "",
|
||||
"Navigation_Devices": "",
|
||||
"Navigation_Donations": "",
|
||||
"Navigation_Events": "",
|
||||
"Navigation_Integrations": "",
|
||||
"Navigation_Maintenance": "",
|
||||
"Navigation_Monitoring": "",
|
||||
"Navigation_Network": "",
|
||||
"Navigation_Notifications": "",
|
||||
"Navigation_Plugins": "",
|
||||
"Navigation_Presence": "",
|
||||
"Navigation_Report": "",
|
||||
"Navigation_Settings": "",
|
||||
"Navigation_SystemInfo": "",
|
||||
"Navigation_Workflows": "",
|
||||
"Network_Assign": "",
|
||||
"Network_Cant_Assign": "",
|
||||
"Network_Cant_Assign_No_Node_Selected": "",
|
||||
"Network_Configuration_Error": "",
|
||||
"Network_Connected": "",
|
||||
"Network_Devices": "",
|
||||
"Network_ManageAdd": "",
|
||||
"Network_ManageAdd_Name": "",
|
||||
"Network_ManageAdd_Name_text": "",
|
||||
"Network_ManageAdd_Port": "",
|
||||
"Network_ManageAdd_Port_text": "",
|
||||
"Network_ManageAdd_Submit": "",
|
||||
"Network_ManageAdd_Type": "",
|
||||
"Network_ManageAdd_Type_text": "",
|
||||
"Network_ManageAssign": "",
|
||||
"Network_ManageDel": "",
|
||||
"Network_ManageDel_Name": "",
|
||||
"Network_ManageDel_Name_text": "",
|
||||
"Network_ManageDel_Submit": "",
|
||||
"Network_ManageDevices": "",
|
||||
"Network_ManageEdit": "",
|
||||
"Network_ManageEdit_ID": "",
|
||||
"Network_ManageEdit_ID_text": "",
|
||||
"Network_ManageEdit_Name": "",
|
||||
"Network_ManageEdit_Name_text": "",
|
||||
"Network_ManageEdit_Port": "",
|
||||
"Network_ManageEdit_Port_text": "",
|
||||
"Network_ManageEdit_Submit": "",
|
||||
"Network_ManageEdit_Type": "",
|
||||
"Network_ManageEdit_Type_text": "",
|
||||
"Network_ManageLeaf": "",
|
||||
"Network_ManageUnassign": "",
|
||||
"Network_NoAssignedDevices": "",
|
||||
"Network_NoDevices": "",
|
||||
"Network_Node": "",
|
||||
"Network_Node_Name": "",
|
||||
"Network_Parent": "",
|
||||
"Network_Root": "",
|
||||
"Network_Root_Not_Configured": "",
|
||||
"Network_Root_Unconfigurable": "",
|
||||
"Network_ShowArchived": "",
|
||||
"Network_ShowOffline": "",
|
||||
"Network_Table_Hostname": "",
|
||||
"Network_Table_IP": "",
|
||||
"Network_Table_State": "",
|
||||
"Network_Title": "",
|
||||
"Network_UnassignedDevices": "",
|
||||
"Notifications_All": "",
|
||||
"Notifications_Mark_All_Read": "",
|
||||
"PIALERT_WEB_PASSWORD_description": "",
|
||||
"PIALERT_WEB_PASSWORD_name": "",
|
||||
"PIALERT_WEB_PROTECTION_description": "",
|
||||
"PIALERT_WEB_PROTECTION_name": "",
|
||||
"PLUGINS_KEEP_HIST_description": "",
|
||||
"PLUGINS_KEEP_HIST_name": "",
|
||||
"Plugins_DeleteAll": "",
|
||||
"Plugins_Filters_Mac": "",
|
||||
"Plugins_History": "",
|
||||
"Plugins_Obj_DeleteListed": "",
|
||||
"Plugins_Objects": "",
|
||||
"Plugins_Out_of": "",
|
||||
"Plugins_Unprocessed_Events": "",
|
||||
"Plugins_no_control": "",
|
||||
"Presence_CalHead_day": "",
|
||||
"Presence_CalHead_lang": "",
|
||||
"Presence_CalHead_month": "",
|
||||
"Presence_CalHead_quarter": "",
|
||||
"Presence_CalHead_week": "",
|
||||
"Presence_CalHead_year": "",
|
||||
"Presence_CallHead_Devices": "",
|
||||
"Presence_Key_OnlineNow": "",
|
||||
"Presence_Key_OnlineNow_desc": "",
|
||||
"Presence_Key_OnlinePast": "",
|
||||
"Presence_Key_OnlinePastMiss": "",
|
||||
"Presence_Key_OnlinePastMiss_desc": "",
|
||||
"Presence_Key_OnlinePast_desc": "",
|
||||
"Presence_Loading": "",
|
||||
"Presence_Shortcut_AllDevices": "",
|
||||
"Presence_Shortcut_Archived": "",
|
||||
"Presence_Shortcut_Connected": "",
|
||||
"Presence_Shortcut_Devices": "",
|
||||
"Presence_Shortcut_DownAlerts": "",
|
||||
"Presence_Shortcut_Favorites": "",
|
||||
"Presence_Shortcut_NewDevices": "",
|
||||
"Presence_Title": "",
|
||||
"REFRESH_FQDN_description": "",
|
||||
"REFRESH_FQDN_name": "",
|
||||
"REPORT_DASHBOARD_URL_description": "",
|
||||
"REPORT_DASHBOARD_URL_name": "",
|
||||
"REPORT_ERROR": "",
|
||||
"REPORT_MAIL_description": "",
|
||||
"REPORT_MAIL_name": "",
|
||||
"REPORT_TITLE": "",
|
||||
"RandomMAC_hover": "",
|
||||
"Reports_Sent_Log": "",
|
||||
"SCAN_SUBNETS_description": "",
|
||||
"SCAN_SUBNETS_name": "",
|
||||
"SYSTEM_TITLE": "",
|
||||
"Setting_Override": "",
|
||||
"Setting_Override_Description": "",
|
||||
"Settings_Metadata_Toggle": "",
|
||||
"Settings_Show_Description": "",
|
||||
"Settings_device_Scanners_desync": "",
|
||||
"Settings_device_Scanners_desync_popup": "",
|
||||
"Speedtest_Results": "",
|
||||
"Systeminfo_AvailableIps": "",
|
||||
"Systeminfo_CPU": "",
|
||||
"Systeminfo_CPU_Cores": "",
|
||||
"Systeminfo_CPU_Name": "",
|
||||
"Systeminfo_CPU_Speed": "",
|
||||
"Systeminfo_CPU_Temp": "",
|
||||
"Systeminfo_CPU_Vendor": "",
|
||||
"Systeminfo_Client_Resolution": "",
|
||||
"Systeminfo_Client_User_Agent": "",
|
||||
"Systeminfo_General": "",
|
||||
"Systeminfo_General_Date": "",
|
||||
"Systeminfo_General_Date2": "",
|
||||
"Systeminfo_General_Full_Date": "",
|
||||
"Systeminfo_General_TimeZone": "",
|
||||
"Systeminfo_Memory": "",
|
||||
"Systeminfo_Memory_Total_Memory": "",
|
||||
"Systeminfo_Memory_Usage": "",
|
||||
"Systeminfo_Memory_Usage_Percent": "",
|
||||
"Systeminfo_Motherboard": "",
|
||||
"Systeminfo_Motherboard_BIOS": "",
|
||||
"Systeminfo_Motherboard_BIOS_Date": "",
|
||||
"Systeminfo_Motherboard_BIOS_Vendor": "",
|
||||
"Systeminfo_Motherboard_Manufactured": "",
|
||||
"Systeminfo_Motherboard_Name": "",
|
||||
"Systeminfo_Motherboard_Revision": "",
|
||||
"Systeminfo_Network": "",
|
||||
"Systeminfo_Network_Accept_Encoding": "",
|
||||
"Systeminfo_Network_Accept_Language": "",
|
||||
"Systeminfo_Network_Connection_Port": "",
|
||||
"Systeminfo_Network_HTTP_Host": "",
|
||||
"Systeminfo_Network_HTTP_Referer": "",
|
||||
"Systeminfo_Network_HTTP_Referer_String": "",
|
||||
"Systeminfo_Network_Hardware": "",
|
||||
"Systeminfo_Network_Hardware_Interface_Mask": "",
|
||||
"Systeminfo_Network_Hardware_Interface_Name": "",
|
||||
"Systeminfo_Network_Hardware_Interface_RX": "",
|
||||
"Systeminfo_Network_Hardware_Interface_TX": "",
|
||||
"Systeminfo_Network_IP": "",
|
||||
"Systeminfo_Network_IP_Connection": "",
|
||||
"Systeminfo_Network_IP_Server": "",
|
||||
"Systeminfo_Network_MIME": "",
|
||||
"Systeminfo_Network_Request_Method": "",
|
||||
"Systeminfo_Network_Request_Time": "",
|
||||
"Systeminfo_Network_Request_URI": "",
|
||||
"Systeminfo_Network_Secure_Connection": "",
|
||||
"Systeminfo_Network_Secure_Connection_String": "",
|
||||
"Systeminfo_Network_Server_Name": "",
|
||||
"Systeminfo_Network_Server_Name_String": "",
|
||||
"Systeminfo_Network_Server_Query": "",
|
||||
"Systeminfo_Network_Server_Query_String": "",
|
||||
"Systeminfo_Network_Server_Version": "",
|
||||
"Systeminfo_Services": "",
|
||||
"Systeminfo_Services_Description": "",
|
||||
"Systeminfo_Services_Name": "",
|
||||
"Systeminfo_Storage": "",
|
||||
"Systeminfo_Storage_Device": "",
|
||||
"Systeminfo_Storage_Mount": "",
|
||||
"Systeminfo_Storage_Size": "",
|
||||
"Systeminfo_Storage_Type": "",
|
||||
"Systeminfo_Storage_Usage": "",
|
||||
"Systeminfo_Storage_Usage_Free": "",
|
||||
"Systeminfo_Storage_Usage_Mount": "",
|
||||
"Systeminfo_Storage_Usage_Total": "",
|
||||
"Systeminfo_Storage_Usage_Used": "",
|
||||
"Systeminfo_System": "",
|
||||
"Systeminfo_System_AVG": "",
|
||||
"Systeminfo_System_Architecture": "",
|
||||
"Systeminfo_System_Kernel": "",
|
||||
"Systeminfo_System_OSVersion": "",
|
||||
"Systeminfo_System_Running_Processes": "",
|
||||
"Systeminfo_System_System": "",
|
||||
"Systeminfo_System_Uname": "",
|
||||
"Systeminfo_System_Uptime": "",
|
||||
"Systeminfo_This_Client": "",
|
||||
"Systeminfo_USB_Devices": "",
|
||||
"TICKER_MIGRATE_TO_NETALERTX": "",
|
||||
"TIMEZONE_description": "",
|
||||
"TIMEZONE_name": "",
|
||||
"UI_DEV_SECTIONS_description": "",
|
||||
"UI_DEV_SECTIONS_name": "",
|
||||
"UI_ICONS_description": "",
|
||||
"UI_ICONS_name": "",
|
||||
"UI_LANG_description": "",
|
||||
"UI_LANG_name": "",
|
||||
"UI_MY_DEVICES_description": "",
|
||||
"UI_MY_DEVICES_name": "",
|
||||
"UI_NOT_RANDOM_MAC_description": "",
|
||||
"UI_NOT_RANDOM_MAC_name": "",
|
||||
"UI_PRESENCE_description": "",
|
||||
"UI_PRESENCE_name": "",
|
||||
"UI_REFRESH_description": "",
|
||||
"UI_REFRESH_name": "",
|
||||
"VERSION_description": "",
|
||||
"VERSION_name": "",
|
||||
"WF_Action_Add": "",
|
||||
"WF_Action_field": "",
|
||||
"WF_Action_type": "",
|
||||
"WF_Action_value": "",
|
||||
"WF_Actions": "",
|
||||
"WF_Add": "",
|
||||
"WF_Add_Condition": "",
|
||||
"WF_Add_Group": "",
|
||||
"WF_Condition_field": "",
|
||||
"WF_Condition_operator": "",
|
||||
"WF_Condition_value": "",
|
||||
"WF_Conditions": "",
|
||||
"WF_Conditions_logic_rules": "",
|
||||
"WF_Duplicate": "",
|
||||
"WF_Enabled": "",
|
||||
"WF_Export": "",
|
||||
"WF_Export_Copy": "",
|
||||
"WF_Import": "",
|
||||
"WF_Import_Copy": "",
|
||||
"WF_Name": "",
|
||||
"WF_Remove": "",
|
||||
"WF_Remove_Copy": "",
|
||||
"WF_Save": "",
|
||||
"WF_Trigger": "",
|
||||
"WF_Trigger_event_type": "",
|
||||
"WF_Trigger_type": "",
|
||||
"add_icon_event_tooltip": "",
|
||||
"add_option_event_tooltip": "",
|
||||
"copy_icons_event_tooltip": "",
|
||||
"devices_old": "",
|
||||
"general_event_description": "",
|
||||
"general_event_title": "",
|
||||
"go_to_device_event_tooltip": "",
|
||||
"go_to_node_event_tooltip": "",
|
||||
"new_version_available": "",
|
||||
"report_guid": "",
|
||||
"report_guid_missing": "",
|
||||
"report_select_format": "",
|
||||
"report_time": "",
|
||||
"run_event_tooltip": "",
|
||||
"select_icon_event_tooltip": "",
|
||||
"settings_core_icon": "",
|
||||
"settings_core_label": "",
|
||||
"settings_device_scanners": "",
|
||||
"settings_device_scanners_icon": "",
|
||||
"settings_device_scanners_info": "",
|
||||
"settings_device_scanners_label": "",
|
||||
"settings_enabled": "",
|
||||
"settings_enabled_icon": "",
|
||||
"settings_expand_all": "",
|
||||
"settings_imported": "",
|
||||
"settings_imported_label": "",
|
||||
"settings_missing": "",
|
||||
"settings_missing_block": "",
|
||||
"settings_old": "",
|
||||
"settings_other_scanners": "",
|
||||
"settings_other_scanners_icon": "",
|
||||
"settings_other_scanners_label": "",
|
||||
"settings_publishers": "",
|
||||
"settings_publishers_icon": "",
|
||||
"settings_publishers_info": "",
|
||||
"settings_publishers_label": "",
|
||||
"settings_readonly": "",
|
||||
"settings_saved": "",
|
||||
"settings_system_icon": "",
|
||||
"settings_system_label": "",
|
||||
"settings_update_item_warning": "",
|
||||
"test_event_tooltip": ""
|
||||
}
|
||||
@@ -11,7 +11,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
from const import confFileName, logPath # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from plugin_helper import Plugin_Objects # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
@@ -60,7 +60,7 @@ def main():
|
||||
# Log result
|
||||
plugin_objects.add_object(
|
||||
primaryId = pluginName,
|
||||
secondaryId = timeNowDB(),
|
||||
secondaryId = timeNowUTC(),
|
||||
watched1 = notification["GUID"],
|
||||
watched2 = result,
|
||||
watched3 = 'null',
|
||||
|
||||
@@ -19,7 +19,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
from const import confFileName, logPath # noqa: E402 [flake8 lint suppression]
|
||||
from plugin_helper import Plugin_Objects # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value, hide_email # noqa: E402 [flake8 lint suppression]
|
||||
from models.notification_instance import NotificationInstance # noqa: E402 [flake8 lint suppression]
|
||||
@@ -80,7 +80,7 @@ def main():
|
||||
# Log result
|
||||
plugin_objects.add_object(
|
||||
primaryId = pluginName,
|
||||
secondaryId = timeNowDB(),
|
||||
secondaryId = timeNowUTC(),
|
||||
watched1 = notification["GUID"],
|
||||
watched2 = result,
|
||||
watched3 = 'null',
|
||||
|
||||
@@ -26,7 +26,7 @@ from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value, bytes_to_string, \
|
||||
sanitize_string, normalize_string # noqa: E402 [flake8 lint suppression]
|
||||
from database import DB, get_device_stats # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from models.notification_instance import NotificationInstance # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
# Make sure the TIMEZONE for logging is correct
|
||||
@@ -583,7 +583,7 @@ def publish_notifications(db, mqtt_client):
|
||||
|
||||
# Optional: attach meta info
|
||||
payload["_meta"] = {
|
||||
"published_at": timeNowDB(),
|
||||
"published_at": timeNowUTC(),
|
||||
"source": "NetAlertX",
|
||||
"notification_GUID": notification["GUID"]
|
||||
}
|
||||
@@ -631,7 +631,7 @@ def prepTimeStamp(datetime_str):
|
||||
except ValueError:
|
||||
mylog('verbose', [f"[{pluginName}] Timestamp conversion failed of string '{datetime_str}'"])
|
||||
# Use the current time if the input format is invalid
|
||||
parsed_datetime = datetime.now(conf.tz)
|
||||
parsed_datetime = timeNowUTC(as_string=False)
|
||||
|
||||
# Convert to the required format with 'T' between date and time and ensure the timezone is included
|
||||
return parsed_datetime.isoformat() # This will include the timezone offset
|
||||
|
||||
@@ -13,7 +13,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
from const import confFileName, logPath # noqa: E402 [flake8 lint suppression]
|
||||
from plugin_helper import Plugin_Objects, handleEmpty # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from models.notification_instance import NotificationInstance # noqa: E402 [flake8 lint suppression]
|
||||
@@ -63,7 +63,7 @@ def main():
|
||||
# Log result
|
||||
plugin_objects.add_object(
|
||||
primaryId = pluginName,
|
||||
secondaryId = timeNowDB(),
|
||||
secondaryId = timeNowUTC(),
|
||||
watched1 = notification["GUID"],
|
||||
watched2 = handleEmpty(response_text),
|
||||
watched3 = response_status_code,
|
||||
|
||||
@@ -15,7 +15,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
from plugin_helper import Plugin_Objects, handleEmpty # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value, hide_string # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from models.notification_instance import NotificationInstance # noqa: E402 [flake8 lint suppression]
|
||||
from database import DB # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
@@ -60,7 +60,7 @@ def main():
|
||||
# Log result
|
||||
plugin_objects.add_object(
|
||||
primaryId=pluginName,
|
||||
secondaryId=timeNowDB(),
|
||||
secondaryId=timeNowUTC(),
|
||||
watched1=notification["GUID"],
|
||||
watched2=handleEmpty(response_text),
|
||||
watched3=response_status_code,
|
||||
|
||||
@@ -13,7 +13,7 @@ from const import confFileName, logPath # noqa: E402 [flake8 lint suppression]
|
||||
from plugin_helper import Plugin_Objects, handleEmpty # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value, hide_string # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from models.notification_instance import NotificationInstance # noqa: E402 [flake8 lint suppression]
|
||||
from database import DB # noqa: E402 [flake8 lint suppression]
|
||||
from pytz import timezone # noqa: E402 [flake8 lint suppression]
|
||||
@@ -61,7 +61,7 @@ def main():
|
||||
# Log result
|
||||
plugin_objects.add_object(
|
||||
primaryId = pluginName,
|
||||
secondaryId = timeNowDB(),
|
||||
secondaryId = timeNowUTC(),
|
||||
watched1 = notification["GUID"],
|
||||
watched2 = handleEmpty(response_text),
|
||||
watched3 = response_status_code,
|
||||
|
||||
@@ -11,7 +11,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
from const import confFileName, logPath # noqa: E402 [flake8 lint suppression]
|
||||
from plugin_helper import Plugin_Objects # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from models.notification_instance import NotificationInstance # noqa: E402 [flake8 lint suppression]
|
||||
@@ -60,7 +60,7 @@ def main():
|
||||
# Log result
|
||||
plugin_objects.add_object(
|
||||
primaryId=pluginName,
|
||||
secondaryId=timeNowDB(),
|
||||
secondaryId=timeNowUTC(),
|
||||
watched1=notification["GUID"],
|
||||
watched2=result,
|
||||
watched3='null',
|
||||
|
||||
@@ -15,7 +15,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath, confFileName # noqa: E402 [flake8 lint suppression]
|
||||
from plugin_helper import Plugin_Objects, handleEmpty # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value, write_file # noqa: E402 [flake8 lint suppression]
|
||||
from models.notification_instance import NotificationInstance # noqa: E402 [flake8 lint suppression]
|
||||
@@ -69,7 +69,7 @@ def main():
|
||||
# Log result
|
||||
plugin_objects.add_object(
|
||||
primaryId = pluginName,
|
||||
secondaryId = timeNowDB(),
|
||||
secondaryId = timeNowUTC(),
|
||||
watched1 = notification["GUID"],
|
||||
watched2 = handleEmpty(response_stdout),
|
||||
watched3 = handleEmpty(response_stderr),
|
||||
|
||||
@@ -4,7 +4,6 @@ import os
|
||||
import argparse
|
||||
import sys
|
||||
import csv
|
||||
from datetime import datetime
|
||||
|
||||
# Register NetAlertX directories
|
||||
INSTALL_PATH = os.getenv('NETALERTX_APP', '/app')
|
||||
@@ -13,6 +12,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
from pytz import timezone # noqa: E402 [flake8 lint suppression]
|
||||
from database import get_temp_db_connection # noqa: E402 [flake8 lint suppression]
|
||||
@@ -60,7 +60,7 @@ def main():
|
||||
if overwrite:
|
||||
filename = 'devices.csv'
|
||||
else:
|
||||
timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
|
||||
timestamp = timeNowUTC(as_string=False).strftime('%Y%m%d%H%M%S')
|
||||
filename = f'devices_{timestamp}.csv'
|
||||
|
||||
fullPath = os.path.join(values.location.split('=')[1], filename)
|
||||
|
||||
@@ -69,11 +69,9 @@ def cleanup_database(
|
||||
|
||||
mylog("verbose", [f"[{pluginName}] Upkeep Database: {dbPath}"])
|
||||
|
||||
# Connect to the App database
|
||||
conn = get_temp_db_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Reindwex to prevent fails due to corruption
|
||||
try:
|
||||
cursor.execute("REINDEX;")
|
||||
mylog("verbose", [f"[{pluginName}] REINDEX completed"])
|
||||
@@ -82,25 +80,25 @@ def cleanup_database(
|
||||
|
||||
# -----------------------------------------------------
|
||||
# Cleanup Online History
|
||||
mylog("verbose", [f"[{pluginName}] Online_History: Delete all but keep latest 150 entries"],)
|
||||
mylog("verbose", [f"[{pluginName}] Online_History: Delete all but keep latest 150 entries"])
|
||||
cursor.execute(
|
||||
"""DELETE from Online_History where "Index" not in (
|
||||
SELECT "Index" from Online_History
|
||||
order by Scan_Date desc limit 150)"""
|
||||
)
|
||||
mylog("verbose", [f"[{pluginName}] Online_History deleted rows: {cursor.rowcount}"])
|
||||
|
||||
# -----------------------------------------------------
|
||||
# Cleanup Events
|
||||
mylog("verbose", f"[{pluginName}] Events: Delete all older than {str(DAYS_TO_KEEP_EVENTS)} days (DAYS_TO_KEEP_EVENTS setting)")
|
||||
sql = f"""DELETE FROM Events WHERE eve_DateTime <= date('now', '-{str(DAYS_TO_KEEP_EVENTS)} day')"""
|
||||
|
||||
mylog("verbose", [f"[{pluginName}] SQL : {sql}"])
|
||||
cursor.execute(sql)
|
||||
# -----------------------------------------------------
|
||||
# Trim Plugins_History entries to less than PLUGINS_KEEP_HIST setting per unique "Plugin" column entry
|
||||
mylog("verbose", f"[{pluginName}] Plugins_History: Trim Plugins_History entries to less than {str(PLUGINS_KEEP_HIST)} per Plugin (PLUGINS_KEEP_HIST setting)")
|
||||
mylog("verbose", [f"[{pluginName}] Events deleted rows: {cursor.rowcount}"])
|
||||
|
||||
# Build the SQL query to delete entries that exceed the limit per unique "Plugin" column entry
|
||||
# -----------------------------------------------------
|
||||
# Plugins_History
|
||||
mylog("verbose", f"[{pluginName}] Plugins_History: Trim to {str(PLUGINS_KEEP_HIST)} per Plugin")
|
||||
delete_query = f"""DELETE FROM Plugins_History
|
||||
WHERE "Index" NOT IN (
|
||||
SELECT "Index"
|
||||
@@ -111,17 +109,13 @@ def cleanup_database(
|
||||
) AS ranked_objects
|
||||
WHERE row_num <= {str(PLUGINS_KEEP_HIST)}
|
||||
);"""
|
||||
|
||||
cursor.execute(delete_query)
|
||||
mylog("verbose", [f"[{pluginName}] Plugins_History deleted rows: {cursor.rowcount}"])
|
||||
|
||||
# -----------------------------------------------------
|
||||
# Trim Notifications entries to less than DBCLNP_NOTIFI_HIST setting
|
||||
|
||||
# Notifications
|
||||
histCount = get_setting_value("DBCLNP_NOTIFI_HIST")
|
||||
|
||||
mylog("verbose", f"[{pluginName}] Plugins_History: Trim Notifications entries to less than {histCount}")
|
||||
|
||||
# Build the SQL query to delete entries
|
||||
mylog("verbose", f"[{pluginName}] Notifications: Trim to {histCount}")
|
||||
delete_query = f"""DELETE FROM Notifications
|
||||
WHERE "Index" NOT IN (
|
||||
SELECT "Index"
|
||||
@@ -132,16 +126,13 @@ def cleanup_database(
|
||||
) AS ranked_objects
|
||||
WHERE row_num <= {histCount}
|
||||
);"""
|
||||
|
||||
cursor.execute(delete_query)
|
||||
mylog("verbose", [f"[{pluginName}] Notifications deleted rows: {cursor.rowcount}"])
|
||||
|
||||
# -----------------------------------------------------
|
||||
# Trim Workflow entries to less than WORKFLOWS_AppEvents_hist setting
|
||||
# AppEvents
|
||||
histCount = get_setting_value("WORKFLOWS_AppEvents_hist")
|
||||
|
||||
mylog("verbose", [f"[{pluginName}] Trim AppEvents to less than {histCount}"])
|
||||
|
||||
# Build the SQL query to delete entries
|
||||
delete_query = f"""DELETE FROM AppEvents
|
||||
WHERE "Index" NOT IN (
|
||||
SELECT "Index"
|
||||
@@ -152,38 +143,40 @@ def cleanup_database(
|
||||
) AS ranked_objects
|
||||
WHERE row_num <= {histCount}
|
||||
);"""
|
||||
|
||||
cursor.execute(delete_query)
|
||||
mylog("verbose", [f"[{pluginName}] AppEvents deleted rows: {cursor.rowcount}"])
|
||||
|
||||
conn.commit()
|
||||
|
||||
# -----------------------------------------------------
|
||||
# Cleanup New Devices
|
||||
if HRS_TO_KEEP_NEWDEV != 0:
|
||||
mylog("verbose", f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_NEWDEV)} hours (HRS_TO_KEEP_NEWDEV setting)")
|
||||
mylog("verbose", f"[{pluginName}] Devices: Delete New Devices older than {str(HRS_TO_KEEP_NEWDEV)} hours")
|
||||
query = f"""DELETE FROM Devices WHERE devIsNew = 1 AND devFirstConnection < date('now', '-{str(HRS_TO_KEEP_NEWDEV)} hour')"""
|
||||
mylog("verbose", [f"[{pluginName}] Query: {query} "])
|
||||
mylog("verbose", [f"[{pluginName}] Query: {query}"])
|
||||
cursor.execute(query)
|
||||
mylog("verbose", [f"[{pluginName}] Devices (new) deleted rows: {cursor.rowcount}"])
|
||||
|
||||
# -----------------------------------------------------
|
||||
# Cleanup Offline Devices
|
||||
if HRS_TO_KEEP_OFFDEV != 0:
|
||||
mylog("verbose", f"[{pluginName}] Devices: Delete all New Devices older than {str(HRS_TO_KEEP_OFFDEV)} hours (HRS_TO_KEEP_OFFDEV setting)")
|
||||
mylog("verbose", f"[{pluginName}] Devices: Delete Offline Devices older than {str(HRS_TO_KEEP_OFFDEV)} hours")
|
||||
query = f"""DELETE FROM Devices WHERE devPresentLastScan = 0 AND devLastConnection < date('now', '-{str(HRS_TO_KEEP_OFFDEV)} hour')"""
|
||||
mylog("verbose", [f"[{pluginName}] Query: {query} "])
|
||||
mylog("verbose", [f"[{pluginName}] Query: {query}"])
|
||||
cursor.execute(query)
|
||||
mylog("verbose", [f"[{pluginName}] Devices (offline) deleted rows: {cursor.rowcount}"])
|
||||
|
||||
# -----------------------------------------------------
|
||||
# Clear New Flag
|
||||
if CLEAR_NEW_FLAG != 0:
|
||||
mylog("verbose", f'[{pluginName}] Devices: Clear "New Device" flag for all devices older than {str(CLEAR_NEW_FLAG)} hours (CLEAR_NEW_FLAG setting)')
|
||||
mylog("verbose", f'[{pluginName}] Devices: Clear "New Device" flag older than {str(CLEAR_NEW_FLAG)} hours')
|
||||
query = f"""UPDATE Devices SET devIsNew = 0 WHERE devIsNew = 1 AND date(devFirstConnection, '+{str(CLEAR_NEW_FLAG)} hour') < date('now')"""
|
||||
# select * from Devices where devIsNew = 1 AND date(devFirstConnection, '+3 hour' ) < date('now')
|
||||
mylog("verbose", [f"[{pluginName}] Query: {query} "])
|
||||
mylog("verbose", [f"[{pluginName}] Query: {query}"])
|
||||
cursor.execute(query)
|
||||
mylog("verbose", [f"[{pluginName}] Devices updated rows (clear new): {cursor.rowcount}"])
|
||||
|
||||
# -----------------------------------------------------
|
||||
# De-dupe (de-duplicate) from the Plugins_Objects table
|
||||
# TODO This shouldn't be necessary - probably a concurrency bug somewhere in the code :(
|
||||
# De-dupe Plugins_Objects
|
||||
mylog("verbose", [f"[{pluginName}] Plugins_Objects: Delete all duplicates"])
|
||||
cursor.execute(
|
||||
"""
|
||||
@@ -197,25 +190,20 @@ def cleanup_database(
|
||||
)
|
||||
"""
|
||||
)
|
||||
mylog("verbose", [f"[{pluginName}] Plugins_Objects deleted rows: {cursor.rowcount}"])
|
||||
|
||||
conn.commit()
|
||||
|
||||
# Check WAL file size
|
||||
# WAL + Vacuum
|
||||
cursor.execute("PRAGMA wal_checkpoint(TRUNCATE);")
|
||||
cursor.execute("PRAGMA wal_checkpoint(FULL);")
|
||||
|
||||
mylog("verbose", [f"[{pluginName}] WAL checkpoint executed to truncate file."])
|
||||
|
||||
# Shrink DB
|
||||
mylog("verbose", [f"[{pluginName}] Shrink Database"])
|
||||
cursor.execute("VACUUM;")
|
||||
|
||||
# Close the database connection
|
||||
conn.close()
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
# BEGIN
|
||||
# ===============================================================================
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -22,7 +22,7 @@ from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB, DATETIME_PATTERN # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC, DATETIME_PATTERN # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
# Make sure the TIMEZONE for logging is correct
|
||||
conf.tz = timezone(get_setting_value("TIMEZONE"))
|
||||
@@ -151,7 +151,7 @@ def main():
|
||||
watched1=freebox["name"],
|
||||
watched2=freebox["operator"],
|
||||
watched3="Gateway",
|
||||
watched4=timeNowDB(),
|
||||
watched4=timeNowUTC(),
|
||||
extra="",
|
||||
foreignKey=freebox["mac"],
|
||||
)
|
||||
|
||||
@@ -99,7 +99,7 @@
|
||||
"description": [
|
||||
{
|
||||
"language_code": "en_us",
|
||||
"string": "Selects the ICMP engine to use. <code>ping</code> checks devices individually and works even when the ARP / neighbor cache is empty, but is slower on larger networks. <code>fping</code> scans IP ranges in parallel and is significantly faster, but relies on the system neighbor cache to resolve IP addresses to MAC addresses. For most networks, <code>fping</code> is recommended. The default command arguments <code>ICMP_ARGS</code> are compatible with both modes."
|
||||
"string": "Selects the ICMP engine to use. <code>ping</code> checks devices individually, works even with an empty ARP/neighbor cache, but is slower on large networks. <code>fping</code> scans IP ranges in parallel and is much faster, but depends on the system neighbor cache, which can delay MAC resolution. For most networks, <code>fping</code> is recommended, unless precise and timely offline/online detection is needed. Default <code>ICMP_ARGS</code> work with both engines."
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
@@ -12,7 +12,7 @@ INSTALL_PATH = os.getenv('NETALERTX_APP', '/app')
|
||||
sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
|
||||
from plugin_helper import Plugin_Objects # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger, append_line_to_file # noqa: E402 [flake8 lint suppression]
|
||||
from helper import check_IP_format, get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath # noqa: E402 [flake8 lint suppression]
|
||||
@@ -74,7 +74,7 @@ def main():
|
||||
mylog('verbose', [f'[{pluginName}] Curl Fallback (new_internet_IP|cmd_output): {new_internet_IP} | {cmd_output}'])
|
||||
|
||||
# logging
|
||||
append_line_to_file(logPath + '/IP_changes.log', '[' + str(timeNowDB()) + ']\t' + new_internet_IP + '\n')
|
||||
append_line_to_file(logPath + '/IP_changes.log', '[' + str(timeNowUTC()) + ']\t' + new_internet_IP + '\n')
|
||||
|
||||
plugin_objects = Plugin_Objects(RESULT_FILE)
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ INSTALL_PATH = os.getenv('NETALERTX_APP', '/app')
|
||||
sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
|
||||
from plugin_helper import Plugin_Objects # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
@@ -37,7 +37,7 @@ def main():
|
||||
speedtest_result = run_speedtest()
|
||||
plugin_objects.add_object(
|
||||
primaryId = 'Speedtest',
|
||||
secondaryId = timeNowDB(),
|
||||
secondaryId = timeNowUTC(),
|
||||
watched1 = speedtest_result['download_speed'],
|
||||
watched2 = speedtest_result['upload_speed'],
|
||||
watched3 = speedtest_result['full_json'],
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
from pytz import timezone
|
||||
from functools import reduce
|
||||
|
||||
@@ -13,6 +12,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
|
||||
from plugin_helper import Plugin_Objects # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
@@ -95,7 +95,7 @@ def parse_neighbors(raw_neighbors: list[str]):
|
||||
neighbor = {}
|
||||
neighbor['ip'] = fields[0]
|
||||
neighbor['mac'] = fields[2]
|
||||
neighbor['last_seen'] = datetime.now()
|
||||
neighbor['last_seen'] = timeNowUTC()
|
||||
|
||||
# Unknown data
|
||||
neighbor['hostname'] = '(unknown)'
|
||||
|
||||
@@ -529,7 +529,7 @@
|
||||
},
|
||||
{
|
||||
"column": "Watched_Value2",
|
||||
"mapped_to_column": "cur_NAME",
|
||||
"mapped_to_column": "scanName",
|
||||
"css_classes": "col-sm-2",
|
||||
"show": true,
|
||||
"type": "label",
|
||||
|
||||
@@ -1516,18 +1516,30 @@
|
||||
},
|
||||
{
|
||||
"function": "devSSID",
|
||||
"events": [
|
||||
"add_option"
|
||||
],
|
||||
"type": {
|
||||
"dataType": "string",
|
||||
"elements": [
|
||||
{
|
||||
"elementType": "input",
|
||||
"elementType": "select",
|
||||
"elementOptions": [],
|
||||
"transformers": []
|
||||
}
|
||||
]
|
||||
},
|
||||
"default_value": "",
|
||||
"options": [],
|
||||
"options": [
|
||||
"{value}"
|
||||
],
|
||||
"options_params": [
|
||||
{
|
||||
"name": "value",
|
||||
"type": "sql",
|
||||
"value": "SELECT DISTINCT '' as id, '❌None' as name UNION SELECT devSSID as id, devSSID as name FROM (SELECT devSSID FROM Devices) AS all_devices ORDER BY id;"
|
||||
}
|
||||
],
|
||||
"localized": [
|
||||
"name",
|
||||
"description"
|
||||
@@ -1590,11 +1602,14 @@
|
||||
},
|
||||
{
|
||||
"function": "devVlan",
|
||||
"events": [
|
||||
"add_option"
|
||||
],
|
||||
"type": {
|
||||
"dataType": "string",
|
||||
"elements": [
|
||||
{
|
||||
"elementType": "input",
|
||||
"elementType": "select",
|
||||
"elementOptions": [],
|
||||
"transformers": []
|
||||
}
|
||||
@@ -1602,7 +1617,16 @@
|
||||
},
|
||||
"maxLength": 50,
|
||||
"default_value": "",
|
||||
"options": [],
|
||||
"options": [
|
||||
"{value}"
|
||||
],
|
||||
"options_params": [
|
||||
{
|
||||
"name": "value",
|
||||
"type": "sql",
|
||||
"value": "SELECT DISTINCT '' as id, '❌None' as name UNION SELECT devVlan as id, devVlan as name FROM (SELECT devVlan FROM Devices) AS all_devices ORDER BY id;"
|
||||
}
|
||||
],
|
||||
"localized": [
|
||||
"name",
|
||||
"description"
|
||||
|
||||
@@ -11,7 +11,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
|
||||
from plugin_helper import Plugin_Objects # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger, append_line_to_file # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath # noqa: E402 [flake8 lint suppression]
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
@@ -213,7 +213,7 @@ def performNmapScan(deviceIPs, deviceMACs, timeoutSec, args):
|
||||
elif 'PORT' in line and 'STATE' in line and 'SERVICE' in line:
|
||||
startCollecting = False # end reached
|
||||
elif startCollecting and len(line.split()) == 3:
|
||||
newEntriesTmp.append(nmap_entry(ip, deviceMACs[devIndex], timeNowDB(), line.split()[0], line.split()[1], line.split()[2]))
|
||||
newEntriesTmp.append(nmap_entry(ip, deviceMACs[devIndex], timeNowUTC(), line.split()[0], line.split()[1], line.split()[2]))
|
||||
newPortsPerDevice += 1
|
||||
elif 'Nmap done' in line:
|
||||
duration = line.split('scanned in ')[1]
|
||||
|
||||
@@ -6,7 +6,6 @@ Imports devices from Pi-hole v6 API (Network endpoints) into NetAlertX plugin re
|
||||
|
||||
import os
|
||||
import sys
|
||||
import datetime
|
||||
import requests
|
||||
import json
|
||||
from requests.packages.urllib3.exceptions import InsecureRequestWarning
|
||||
@@ -18,6 +17,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
pluginName = 'PIHOLEAPI'
|
||||
|
||||
from plugin_helper import Plugin_Objects, is_mac # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath # noqa: E402 [flake8 lint suppression]
|
||||
@@ -201,7 +201,7 @@ def gather_device_entries():
|
||||
"""
|
||||
entries = []
|
||||
devices = get_pihole_network_devices()
|
||||
now_ts = int(datetime.datetime.now().timestamp())
|
||||
now_ts = int(timeNowUTC(as_string=False).timestamp())
|
||||
|
||||
for device in devices:
|
||||
hwaddr = device.get('hwaddr')
|
||||
|
||||
@@ -12,7 +12,7 @@ sys.path.append(f"{INSTALL_PATH}/front/plugins")
|
||||
sys.path.append(f'{INSTALL_PATH}/server')
|
||||
|
||||
from logger import mylog # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from const import default_tz, fullConfPath # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
|
||||
@@ -237,7 +237,7 @@ class Plugin_Object:
|
||||
self.pluginPref = ""
|
||||
self.primaryId = primaryId
|
||||
self.secondaryId = secondaryId
|
||||
self.created = timeNowDB()
|
||||
self.created = timeNowUTC()
|
||||
self.changed = ""
|
||||
self.watched1 = watched1
|
||||
self.watched2 = watched2
|
||||
|
||||
@@ -1,103 +0,0 @@
|
||||
<?php
|
||||
|
||||
// External files
|
||||
require '/app/front/php/server/init.php';
|
||||
|
||||
$method = $_SERVER['REQUEST_METHOD'];
|
||||
|
||||
// ----------------------------------------------
|
||||
// Method to check authorization
|
||||
function checkAuthorization($method) {
|
||||
// Retrieve the authorization header
|
||||
$headers = apache_request_headers();
|
||||
$auth_header = $headers['Authorization'] ?? '';
|
||||
$expected_token = 'Bearer ' . getSettingValue('API_TOKEN');
|
||||
|
||||
// Verify the authorization token
|
||||
if ($auth_header !== $expected_token) {
|
||||
http_response_code(403);
|
||||
echo 'Forbidden';
|
||||
displayInAppNoti("[Plugin: SYNC] Incoming data: Incorrect API Token (".$method.")", "error");
|
||||
exit;
|
||||
}
|
||||
}
|
||||
|
||||
// ----------------------------------------------
|
||||
// Function to return JSON response
|
||||
function jsonResponse($status, $data = '', $message = '') {
|
||||
http_response_code($status);
|
||||
header('Content-Type: application/json');
|
||||
echo json_encode([
|
||||
'node_name' => getSettingValue('SYNC_node_name'),
|
||||
'status' => $status,
|
||||
'message' => $message,
|
||||
'data_base64' => $data,
|
||||
'timestamp' => date('Y-m-d H:i:s')
|
||||
]);
|
||||
}
|
||||
|
||||
// ----------------------------------------------
|
||||
// MAIN
|
||||
// ----------------------------------------------
|
||||
|
||||
|
||||
// requesting data (this is a NODE)
|
||||
if ($method === 'GET') {
|
||||
checkAuthorization($method);
|
||||
|
||||
$apiRoot = getenv('NETALERTX_API') ?: '/tmp/api';
|
||||
$file_path = rtrim($apiRoot, '/') . '/table_devices.json';
|
||||
|
||||
$data = file_get_contents($file_path);
|
||||
|
||||
// Prepare the data to return as a JSON response
|
||||
$response_data = base64_encode($data);
|
||||
|
||||
// Return JSON response
|
||||
jsonResponse(200, $response_data, 'OK');
|
||||
|
||||
displayInAppNoti("[Plugin: SYNC] Data sent", "info");
|
||||
|
||||
}
|
||||
// receiving data (this is a HUB)
|
||||
else if ($method === 'POST') {
|
||||
checkAuthorization($method);
|
||||
|
||||
// Retrieve and decode the data from the POST request
|
||||
$data = $_POST['data'] ?? '';
|
||||
$file_path = $_POST['file_path'] ?? '';
|
||||
$node_name = $_POST['node_name'] ?? '';
|
||||
$plugin = $_POST['plugin'] ?? '';
|
||||
|
||||
$logRoot = getenv('NETALERTX_PLUGINS_LOG') ?: (rtrim(getenv('NETALERTX_LOG') ?: '/tmp/log', '/') . '/plugins');
|
||||
$storage_path = rtrim($logRoot, '/');
|
||||
|
||||
// // check location
|
||||
// if (!is_dir($storage_path)) {
|
||||
// echo "Could not open folder: {$storage_path}";
|
||||
// write_notification("[Plugin: SYNC] Could not open folder: {$storage_path}", "alert");
|
||||
// http_response_code(500);
|
||||
// exit;
|
||||
// }
|
||||
|
||||
// Generate a unique file path to avoid overwriting existing files
|
||||
$encoded_files = glob("{$storage_path}/last_result.{$plugin}.encoded.{$node_name}.*.log");
|
||||
$decoded_files = glob("{$storage_path}/last_result.{$plugin}.decoded.{$node_name}.*.log");
|
||||
|
||||
$files = array_merge($encoded_files, $decoded_files);
|
||||
$file_count = count($files) + 1;
|
||||
|
||||
$file_path_new = "{$storage_path}/last_result.{$plugin}.encoded.{$node_name}.{$file_count}.log";
|
||||
|
||||
// Save the decoded data to the file
|
||||
file_put_contents($file_path_new, $data);
|
||||
http_response_code(200);
|
||||
echo 'Data received and stored successfully';
|
||||
displayInAppNoti("[Plugin: SYNC] Data received ({$file_path_new})", "info");
|
||||
|
||||
} else {
|
||||
http_response_code(405);
|
||||
echo 'Method Not Allowed';
|
||||
displayInAppNoti("[Plugin: SYNC] Method Not Allowed", "error");
|
||||
}
|
||||
?>
|
||||
@@ -16,7 +16,7 @@ from utils.plugin_utils import get_plugins_configs, decode_and_rename_files # n
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from utils.crypto_utils import encrypt_data # noqa: E402 [flake8 lint suppression]
|
||||
from messaging.in_app import write_notification # noqa: E402 [flake8 lint suppression]
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
@@ -147,7 +147,7 @@ def main():
|
||||
message = f'[{pluginName}] Device data from node "{node_name}" written to {log_file_name}'
|
||||
mylog('verbose', [message])
|
||||
if lggr.isAbove('verbose'):
|
||||
write_notification(message, 'info', timeNowDB())
|
||||
write_notification(message, 'info', timeNowUTC())
|
||||
|
||||
# Process any received data for the Device DB table (ONLY JSON)
|
||||
# Create the file path
|
||||
@@ -253,7 +253,7 @@ def main():
|
||||
message = f'[{pluginName}] Inserted "{len(new_devices)}" new devices'
|
||||
|
||||
mylog('verbose', [message])
|
||||
write_notification(message, 'info', timeNowDB())
|
||||
write_notification(message, 'info', timeNowUTC())
|
||||
|
||||
# Commit and close the connection
|
||||
conn.commit()
|
||||
@@ -269,7 +269,6 @@ def main():
|
||||
# Data retrieval methods
|
||||
api_endpoints = [
|
||||
"/sync", # New Python-based endpoint
|
||||
"/plugins/sync/hub.php" # Legacy PHP endpoint
|
||||
]
|
||||
|
||||
|
||||
@@ -298,7 +297,7 @@ def send_data(api_token, file_content, encryption_key, file_path, node_name, pre
|
||||
if response.status_code == 200:
|
||||
message = f'[{pluginName}] Data for "{file_path}" sent successfully via {final_endpoint}'
|
||||
mylog('verbose', [message])
|
||||
write_notification(message, 'info', timeNowDB())
|
||||
write_notification(message, 'info', timeNowUTC())
|
||||
return True
|
||||
|
||||
except requests.RequestException as e:
|
||||
@@ -307,7 +306,7 @@ def send_data(api_token, file_content, encryption_key, file_path, node_name, pre
|
||||
# If all endpoints fail
|
||||
message = f'[{pluginName}] Failed to send data for "{file_path}" via all endpoints'
|
||||
mylog('verbose', [message])
|
||||
write_notification(message, 'alert', timeNowDB())
|
||||
write_notification(message, 'alert', timeNowUTC())
|
||||
return False
|
||||
|
||||
|
||||
@@ -331,7 +330,7 @@ def get_data(api_token, node_url):
|
||||
except json.JSONDecodeError:
|
||||
message = f'[{pluginName}] Failed to parse JSON from {final_endpoint}'
|
||||
mylog('verbose', [message])
|
||||
write_notification(message, 'alert', timeNowDB())
|
||||
write_notification(message, 'alert', timeNowUTC())
|
||||
return ""
|
||||
except requests.RequestException as e:
|
||||
mylog('verbose', [f'[{pluginName}] Error calling {final_endpoint}: {e}'])
|
||||
@@ -339,7 +338,7 @@ def get_data(api_token, node_url):
|
||||
# If all endpoints fail
|
||||
message = f'[{pluginName}] Failed to get data from "{node_url}" via all endpoints'
|
||||
mylog('verbose', [message])
|
||||
write_notification(message, 'alert', timeNowDB())
|
||||
write_notification(message, 'alert', timeNowUTC())
|
||||
return ""
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
## Overview
|
||||
|
||||
A plugin allowing for importing devices from a UniFi controller. The plugin also tries to import the network map.
|
||||
A plugin allowing for importing devices from a UniFi controller. The plugin also tries to import the network map.
|
||||
|
||||
### Usage
|
||||
|
||||
@@ -9,19 +9,19 @@ Specify the following settings in the Settings section of NetAlertX:
|
||||
- `UNFIMP_username` - Username used to log in the UNIFI controller.
|
||||
- `UNFIMP_password` - Password used to log in the UNIFI controller.
|
||||
- `UNFIMP_host` - Host URL or IP address where the UNIFI controller is hosted (excluding `http://`)
|
||||
- `UNFIMP_sites` - Name of the sites (usually 'default', check the URL in your UniFi controller UI if unsure. The site id is in the following part of the URL: `https://192.168.1.1:8443/manage/site/this-is-the-site-id/settings/`).
|
||||
- `UNFIMP_sites` - Name of the sites (usually 'default', check the URL in your UniFi controller UI if unsure. The site id is in the following part of the URL: `https://192.168.1.1:8443/manage/site/this-is-the-site-id/settings/`).
|
||||
- `UNFIMP_protocol` - https:// or http://
|
||||
- `UNFIMP_port` - Usually `8443`, `8843`, or `443`
|
||||
- `UNFIMP_port` - Usually `8443`, `8843`, or `443` (UPSTREAM BUG: Setting `UNFIMP_version='UDMP-unifiOS'` will force `443` see [#1524](https://github.com/netalertx/NetAlertX/issues/1524) or switch to the `UNIFIAPI` plugin)
|
||||
- `UNFIMP_version` - see below table for details
|
||||
|
||||
|
||||
#### Config overview
|
||||
|
||||
| Controller | `UNFIMP_version` | `UNFIMP_port` |
|
||||
| ------------------------------------------------------ | ------------------------- | ---------------- |
|
||||
| Cloud Gateway Ultra / UCK cloudkey V2 plus (v4.0.18) | `UDMP-unifiOS` | `443` |
|
||||
| Docker hosted | `v5` | `8443` (usually) |
|
||||
| Controller | `UNFIMP_version` | `UNFIMP_port` |
|
||||
| ------------------------------------------------------ | ------------------------- | ------------------------------------ |
|
||||
| Cloud Gateway Ultra / UCK cloudkey V2 plus (v4.0.18) | `UDMP-unifiOS` | `443` (BUG: always forced) |
|
||||
| Docker hosted | `v5` | `8443` (usually) |
|
||||
|
||||
### Notes
|
||||
|
||||
- It is recommended to create a read-only user in your UniFi controller
|
||||
- It is recommended to create a read-only user in your UniFi controller
|
||||
@@ -10,8 +10,8 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
|
||||
from plugin_helper import Plugin_Objects, handleEmpty # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog, Logger # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath, applicationPath # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from const import logPath, applicationPath, NULL_EQUIVALENTS_SQL # noqa: E402 [flake8 lint suppression]
|
||||
from scan.device_handling import query_MAC_vendor # noqa: E402 [flake8 lint suppression]
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
from pytz import timezone # noqa: E402 [flake8 lint suppression]
|
||||
@@ -83,17 +83,16 @@ def update_vendors(plugin_objects):
|
||||
mylog('verbose', [' Searching devices vendor'])
|
||||
|
||||
# Get devices without a vendor
|
||||
cursor.execute("""SELECT
|
||||
devMac,
|
||||
devLastIP,
|
||||
devName,
|
||||
devVendor
|
||||
FROM Devices
|
||||
WHERE devVendor = '(unknown)'
|
||||
OR devVendor = '(Unknown)'
|
||||
OR devVendor = ''
|
||||
OR devVendor IS NULL
|
||||
""")
|
||||
query = f"""
|
||||
SELECT
|
||||
devMac,
|
||||
devLastIP,
|
||||
devName,
|
||||
devVendor
|
||||
FROM Devices
|
||||
WHERE devVendor IN ({NULL_EQUIVALENTS_SQL}) OR devVendor IS NULL
|
||||
"""
|
||||
cursor.execute(query)
|
||||
devices = cursor.fetchall()
|
||||
conn.commit()
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
require 'php/templates/header.php';
|
||||
require 'php/templates/modals.php';
|
||||
|
||||
|
||||
?>
|
||||
|
||||
<script>
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
<!-- Content header--------------------------------------------------------- -->
|
||||
<!-- Main content ---------------------------------------------------------- -->
|
||||
<section class="content tab-content">
|
||||
<section class="content tab-content">
|
||||
|
||||
<div class="box box-gray col-xs-12" >
|
||||
<div class="box-header">
|
||||
@@ -45,7 +45,7 @@
|
||||
<select id="formatSelect" class="pointer">
|
||||
<option value="HTML">HTML</option>
|
||||
<option value="JSON">JSON</option>
|
||||
<option value="Text">Text</option>
|
||||
<option value="Text">Text</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
@@ -80,7 +80,7 @@
|
||||
const prevButton = document.getElementById('prevButton');
|
||||
const nextButton = document.getElementById('nextButton');
|
||||
const formatSelect = document.getElementById('formatSelect');
|
||||
|
||||
|
||||
let currentIndex = -1; // Current report index
|
||||
|
||||
// Function to update the displayed data and timestamp based on the selected format and index
|
||||
@@ -115,7 +115,7 @@
|
||||
|
||||
// console.log(notification)
|
||||
|
||||
timestamp.textContent = notification.DateTimeCreated;
|
||||
timestamp.textContent = localizeTimestamp(notification.DateTimeCreated);
|
||||
notiGuid.textContent = notification.GUID;
|
||||
currentIndex = index;
|
||||
|
||||
@@ -161,17 +161,17 @@
|
||||
console.log(index)
|
||||
|
||||
if (index == -1) {
|
||||
showModalOk('WARNING', `${getString("report_guid_missing")} <br/> <br/> <code>${guid}</code>`)
|
||||
showModalOk('WARNING', `${getString("report_guid_missing")} <br/> <br/> <code>${guid}</code>`)
|
||||
}
|
||||
|
||||
// Load the notification with the specified GUID
|
||||
updateData(formatSelect.value, index);
|
||||
|
||||
|
||||
})
|
||||
.catch(error => {
|
||||
console.error('Error:', error);
|
||||
});
|
||||
} else {
|
||||
} else {
|
||||
|
||||
// Initial data load
|
||||
updateData('HTML', -1); // Default format to HTML and load the latest report
|
||||
|
||||
@@ -25,7 +25,7 @@ fi
|
||||
apt-get install -y \
|
||||
tini snmp ca-certificates curl libwww-perl arp-scan perl apt-utils cron sudo gettext-base \
|
||||
nginx-light php php-cgi php-fpm php-sqlite3 php-curl sqlite3 dnsutils net-tools \
|
||||
python3 python3-dev iproute2 nmap fping python3-pip zip usbutils traceroute nbtscan avahi-daemon avahi-utils openrc build-essential git
|
||||
python3 python3-dev python3-psutil iproute2 nmap fping python3-pip zip usbutils traceroute nbtscan avahi-daemon avahi-utils openrc build-essential git
|
||||
|
||||
# alternate dependencies
|
||||
sudo apt-get install nginx nginx-core mtr php-fpm php8.2-fpm php-cli php8.2 php8.2-sqlite3 -y
|
||||
|
||||
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
# 36-override-loaded-plugins.sh - Applies environment variable overrides to app.conf
|
||||
|
||||
set -eu
|
||||
|
||||
# Ensure config exists
|
||||
if [ ! -f "${NETALERTX_CONFIG}/app.conf" ]; then
|
||||
echo "[ENV] No config file found at ${NETALERTX_CONFIG}/app.conf — skipping overrides"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Helper: set or append config key safely
|
||||
set_config_value() {
|
||||
_key="$1"
|
||||
_value="$2"
|
||||
|
||||
# Remove newlines just in case
|
||||
_value=$(printf '%s' "$_value" | tr -d '\n\r')
|
||||
|
||||
# Escape sed-sensitive chars
|
||||
_escaped=$(printf '%s\n' "$_value" | sed 's/[\/&]/\\&/g')
|
||||
|
||||
if grep -q "^${_key}=" "${NETALERTX_CONFIG}/app.conf"; then
|
||||
sed -i "s|^${_key}=.*|${_key}=${_escaped}|" "${NETALERTX_CONFIG}/app.conf"
|
||||
else
|
||||
echo "${_key}=${_value}" >> "${NETALERTX_CONFIG}/app.conf"
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# LOADED_PLUGINS override
|
||||
# ------------------------------------------------------------
|
||||
if [ -n "${LOADED_PLUGINS:-}" ]; then
|
||||
echo "[ENV] Applying LOADED_PLUGINS override"
|
||||
set_config_value "LOADED_PLUGINS" "$LOADED_PLUGINS"
|
||||
fi
|
||||
@@ -156,7 +156,7 @@ fi
|
||||
apt-get install -y --no-install-recommends \
|
||||
tini snmp ca-certificates curl libwww-perl arp-scan perl apt-utils cron sudo \
|
||||
php8.4 php8.4-cgi php8.4-fpm php8.4-sqlite3 php8.4-curl sqlite3 dnsutils net-tools mtr \
|
||||
python3 python3-dev iproute2 nmap fping python3-pip zip usbutils traceroute nbtscan \
|
||||
python3 python3-dev python3-psutil iproute2 nmap fping python3-pip zip usbutils traceroute nbtscan \
|
||||
avahi-daemon avahi-utils build-essential git gnupg2 lsb-release \
|
||||
debian-archive-keyring python3-venv
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ aiohttp
|
||||
graphene
|
||||
flask
|
||||
flask-cors
|
||||
unifi-sm-api
|
||||
unifi-sm-api>=0.2.3
|
||||
tplink-omada-client
|
||||
wakeonlan
|
||||
pycryptodome
|
||||
@@ -22,5 +22,6 @@ python-nmap
|
||||
dnspython
|
||||
librouteros
|
||||
yattag
|
||||
zeroconf
|
||||
zeroconf
|
||||
psutil
|
||||
git+https://github.com/foreign-sub/aiofreepybox.git
|
||||
|
||||
@@ -58,7 +58,7 @@ apt-get install -y --no-install-recommends \
|
||||
git \
|
||||
tini ca-certificates curl libwww-perl perl apt-utils cron build-essential \
|
||||
sqlite3 net-tools \
|
||||
python3 python3-venv python3-dev python3-pip
|
||||
python3 python3-venv python3-dev python3-psutil python3-pip
|
||||
|
||||
# Install plugin dependencies
|
||||
apt-get install -y --no-install-recommends \
|
||||
|
||||
@@ -4,7 +4,7 @@ aiohttp
|
||||
graphene
|
||||
flask
|
||||
flask-cors
|
||||
unifi-sm-api
|
||||
unifi-sm-api>=0.2.3
|
||||
tplink-omada-client
|
||||
wakeonlan
|
||||
pycryptodome
|
||||
@@ -22,5 +22,6 @@ python-nmap
|
||||
dnspython
|
||||
librouteros
|
||||
yattag
|
||||
zeroconf
|
||||
zeroconf
|
||||
psutil
|
||||
git+https://github.com/foreign-sub/aiofreepybox.git
|
||||
|
||||
@@ -57,14 +57,14 @@ nav:
|
||||
- Authelia: AUTHELIA.md
|
||||
- Performance: PERFORMANCE.md
|
||||
- Reverse DNS: REVERSE_DNS.md
|
||||
- Reverse Proxy:
|
||||
- Reverse Proxy Overview: REVERSE_PROXY.md
|
||||
- Caddy and Authentik: REVERSE_PROXY_CADDY.md
|
||||
- Traefik: REVERSE_PROXY_TRAEFIK.md
|
||||
- Reverse Proxy: REVERSE_PROXY.md
|
||||
- Webhooks (n8n): WEBHOOK_N8N.md
|
||||
- Workflows: WORKFLOWS.md
|
||||
- Workflow Examples: WORKFLOW_EXAMPLES.md
|
||||
- Docker Swarm: DOCKER_SWARM.md
|
||||
- Best practice advisories:
|
||||
- Eyes on glass: ADVISORY_EYES_ON_GLASS.md
|
||||
- Multi-network monitoring: ADVISORY_MULTI_NETWORK.md
|
||||
- Help:
|
||||
- Common issues: COMMON_ISSUES.md
|
||||
- Random MAC: RANDOM_MAC.md
|
||||
|
||||
@@ -5,7 +5,7 @@ aiohttp
|
||||
graphene
|
||||
flask
|
||||
flask-cors
|
||||
unifi-sm-api
|
||||
unifi-sm-api>=0.2.3
|
||||
tplink-omada-client
|
||||
wakeonlan
|
||||
pycryptodome
|
||||
@@ -32,4 +32,5 @@ httplib2
|
||||
gunicorn
|
||||
git+https://github.com/foreign-sub/aiofreepybox.git
|
||||
mcp
|
||||
psutil
|
||||
pydantic>=2.0,<3.0
|
||||
|
||||
@@ -25,7 +25,7 @@ import conf
|
||||
from const import fullConfPath, sql_new_devices
|
||||
from logger import mylog
|
||||
from helper import filePermissions
|
||||
from utils.datetime_utils import timeNowTZ
|
||||
from utils.datetime_utils import timeNowUTC
|
||||
from app_state import updateState
|
||||
from api import update_api
|
||||
from scan.session_events import process_scan
|
||||
@@ -104,7 +104,7 @@ def main():
|
||||
pm, all_plugins, imported = importConfigs(pm, db, all_plugins)
|
||||
|
||||
# update time started
|
||||
conf.loop_start_time = timeNowTZ()
|
||||
conf.loop_start_time = timeNowUTC(as_string=False)
|
||||
|
||||
loop_start_time = conf.loop_start_time # TODO fix
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ from const import (
|
||||
)
|
||||
from logger import mylog
|
||||
from helper import write_file, get_setting_value
|
||||
from utils.datetime_utils import timeNowTZ
|
||||
from utils.datetime_utils import timeNowUTC
|
||||
from app_state import updateState
|
||||
from models.user_events_queue_instance import UserEventsQueueInstance
|
||||
|
||||
@@ -105,7 +105,7 @@ def update_api(
|
||||
class api_endpoint_class:
|
||||
def __init__(self, db, forceUpdate, query, path, is_ad_hoc_user_event=False):
|
||||
|
||||
current_time = timeNowTZ()
|
||||
current_time = timeNowUTC(as_string=False)
|
||||
|
||||
self.db = db
|
||||
self.query = query
|
||||
@@ -163,7 +163,7 @@ class api_endpoint_class:
|
||||
|
||||
# ----------------------------------------
|
||||
def try_write(self, forceUpdate):
|
||||
current_time = timeNowTZ()
|
||||
current_time = timeNowUTC(as_string=False)
|
||||
|
||||
# Debugging info to understand the issue
|
||||
# mylog('debug', [f'[API] api_endpoint_class: {self.fileName} is_ad_hoc_user_event
|
||||
@@ -183,7 +183,7 @@ class api_endpoint_class:
|
||||
write_file(self.path, json.dumps(self.jsonData))
|
||||
|
||||
self.needsUpdate = False
|
||||
self.last_update_time = timeNowTZ() # Reset last_update_time after writing
|
||||
self.last_update_time = timeNowUTC(as_string=False) # Reset last_update_time after writing
|
||||
|
||||
# Update user event execution log
|
||||
# mylog('verbose', [f'[API] api_endpoint_class: is_ad_hoc_user_event {self.is_ad_hoc_user_event}'])
|
||||
|
||||
@@ -41,6 +41,7 @@ from .nettools_endpoint import ( # noqa: E402 [flake8 lint suppression]
|
||||
from .dbquery_endpoint import read_query, write_query, update_query, delete_query # noqa: E402 [flake8 lint suppression]
|
||||
from .sync_endpoint import handle_sync_post, handle_sync_get # noqa: E402 [flake8 lint suppression]
|
||||
from .logs_endpoint import clean_log # noqa: E402 [flake8 lint suppression]
|
||||
from .health_endpoint import get_health_status # noqa: E402 [flake8 lint suppression]
|
||||
from models.user_events_queue_instance import UserEventsQueueInstance # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
from models.event_instance import EventInstance # noqa: E402 [flake8 lint suppression]
|
||||
@@ -86,6 +87,7 @@ from .openapi.schemas import ( # noqa: E402 [flake8 lint suppression]
|
||||
RecentEventsResponse, LastEventsResponse,
|
||||
NetworkTopologyResponse,
|
||||
InternetInfoResponse, NetworkInterfacesResponse,
|
||||
HealthCheckResponse,
|
||||
CreateEventRequest, CreateSessionRequest,
|
||||
DeleteSessionRequest, CreateNotificationRequest,
|
||||
SyncPushRequest, SyncPullResponse,
|
||||
@@ -726,7 +728,7 @@ def api_export_devices(format=None, payload=None):
|
||||
operation_id="import_devices",
|
||||
summary="Import Devices",
|
||||
description="Import devices from CSV or JSON content.",
|
||||
request_model=DeviceImportRequest,
|
||||
request_model=None,
|
||||
response_model=DeviceImportResponse,
|
||||
tags=["devices"],
|
||||
auth_callable=is_authorized,
|
||||
@@ -1930,6 +1932,33 @@ def check_auth(payload=None):
|
||||
if request.method == "GET":
|
||||
return jsonify({"success": True, "message": "Authentication check successful"}), 200
|
||||
|
||||
|
||||
# --------------------------
|
||||
# Health endpoint
|
||||
# --------------------------
|
||||
@app.route("/health", methods=["GET"])
|
||||
@validate_request(
|
||||
operation_id="check_health",
|
||||
summary="System Health Check",
|
||||
description="Retrieve system vitality metrics including database size, memory pressure, system load, disk usage, and CPU temperature.",
|
||||
response_model=HealthCheckResponse,
|
||||
tags=["system", "health"],
|
||||
auth_callable=is_authorized
|
||||
)
|
||||
def check_health(payload=None):
|
||||
"""Get system health metrics for monitoring and diagnostics."""
|
||||
try:
|
||||
health_data = get_health_status()
|
||||
return jsonify({"success": True, **health_data}), 200
|
||||
except Exception as e:
|
||||
mylog("none", [f"[health] Error retrieving health status: {e}"])
|
||||
return jsonify({
|
||||
"success": False,
|
||||
"error": "Failed to retrieve health status",
|
||||
"message": "Internal server error"
|
||||
}), 500
|
||||
|
||||
|
||||
# --------------------------
|
||||
# Background Server Start
|
||||
# --------------------------
|
||||
|
||||
@@ -11,7 +11,7 @@ INSTALL_PATH = os.getenv("NETALERTX_APP", "/app")
|
||||
sys.path.extend([f"{INSTALL_PATH}/server"])
|
||||
|
||||
from logger import mylog # noqa: E402 [flake8 lint suppression]
|
||||
from const import apiPath # noqa: E402 [flake8 lint suppression]
|
||||
from const import apiPath, NULL_EQUIVALENTS # noqa: E402 [flake8 lint suppression]
|
||||
from helper import ( # noqa: E402 [flake8 lint suppression]
|
||||
is_random_mac,
|
||||
get_number_of_children,
|
||||
@@ -266,7 +266,7 @@ class Query(ObjectType):
|
||||
filtered.append(device)
|
||||
|
||||
devices_data = filtered
|
||||
|
||||
# 🔻 START If you change anything here, also update get_device_condition_by_status
|
||||
elif status == "connected":
|
||||
devices_data = [
|
||||
device
|
||||
@@ -275,17 +275,17 @@ class Query(ObjectType):
|
||||
]
|
||||
elif status == "favorites":
|
||||
devices_data = [
|
||||
device for device in devices_data if device["devFavorite"] == 1
|
||||
device for device in devices_data if device["devFavorite"] == 1 and device["devIsArchived"] == 0
|
||||
]
|
||||
elif status == "new":
|
||||
devices_data = [
|
||||
device for device in devices_data if device["devIsNew"] == 1
|
||||
device for device in devices_data if device["devIsNew"] == 1 and device["devIsArchived"] == 0
|
||||
]
|
||||
elif status == "down":
|
||||
devices_data = [
|
||||
device
|
||||
for device in devices_data
|
||||
if device["devPresentLastScan"] == 0 and device["devAlertDown"]
|
||||
if device["devPresentLastScan"] == 0 and device["devAlertDown"] and device["devIsArchived"] == 0
|
||||
]
|
||||
elif status == "archived":
|
||||
devices_data = [
|
||||
@@ -297,14 +297,33 @@ class Query(ObjectType):
|
||||
devices_data = [
|
||||
device
|
||||
for device in devices_data
|
||||
if device["devPresentLastScan"] == 0
|
||||
if device["devPresentLastScan"] == 0 and device["devIsArchived"] == 0
|
||||
]
|
||||
elif status == "unknown":
|
||||
devices_data = [
|
||||
device
|
||||
for device in devices_data
|
||||
if device["devName"] in NULL_EQUIVALENTS and device["devIsArchived"] == 0
|
||||
]
|
||||
elif status == "known":
|
||||
devices_data = [
|
||||
device
|
||||
for device in devices_data
|
||||
if device["devName"] not in NULL_EQUIVALENTS and device["devIsArchived"] == 0
|
||||
]
|
||||
elif status == "network_devices":
|
||||
devices_data = [
|
||||
device
|
||||
for device in devices_data
|
||||
if device["devType"] in network_dev_types
|
||||
if device["devType"] in network_dev_types and device["devIsArchived"] == 0
|
||||
]
|
||||
elif status == "network_devices_down":
|
||||
devices_data = [
|
||||
device
|
||||
for device in devices_data
|
||||
if device["devType"] in network_dev_types and device["devPresentLastScan"] == 0 and device["devIsArchived"] == 0
|
||||
]
|
||||
# 🔺 END If you change anything here, also update get_device_condition_by_status
|
||||
elif status == "all_devices":
|
||||
devices_data = devices_data # keep all
|
||||
|
||||
|
||||
177
server/api_server/health_endpoint.py
Normal file
@@ -0,0 +1,177 @@
|
||||
"""Health check endpoint for NetAlertX system vitality monitoring."""
|
||||
|
||||
import os
|
||||
import psutil
|
||||
from pathlib import Path
|
||||
|
||||
from const import dbPath, dataPath
|
||||
from logger import mylog
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
# Database Vitality
|
||||
# ===============================================================================
|
||||
|
||||
def get_db_size_mb():
|
||||
"""
|
||||
Calculate total database size in MB (app.db + app.db-wal).
|
||||
|
||||
Returns:
|
||||
float: Size in MB, or 0 if database files don't exist.
|
||||
"""
|
||||
try:
|
||||
db_file = Path(dbPath)
|
||||
wal_file = Path(f"{dbPath}-wal")
|
||||
|
||||
size_bytes = 0
|
||||
if db_file.exists():
|
||||
size_bytes += db_file.stat().st_size
|
||||
if wal_file.exists():
|
||||
size_bytes += wal_file.stat().st_size
|
||||
|
||||
return round(size_bytes / (1024 * 1024), 2)
|
||||
except Exception as e:
|
||||
mylog("verbose", [f"[health] Error calculating DB size: {e}"])
|
||||
return 0.0
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
# Memory Pressure
|
||||
# ===============================================================================
|
||||
|
||||
def get_mem_usage_pct():
    """
    Report system memory utilisation as a whole-number percentage.

    Computed as used/total from psutil.virtual_memory() and clamped to
    the 0-100 range to guard against rounding artefacts.

    Returns:
        int: Memory usage percentage (0-100), or None on error.
    """
    try:
        mem = psutil.virtual_memory()
        raw_pct = int((mem.used / mem.total) * 100)
        return min(100, max(0, raw_pct))  # Clamp to 0-100
    except Exception as e:
        mylog("verbose", [f"[health] Error calculating memory usage: {e}"])
        return None
|
||||
|
||||
|
||||
def get_load_avg_1m():
    """
    Return the system's 1-minute load average.

    Returns:
        float: 1-minute load average rounded to 2 decimals, or -1.0 on
               error (e.g. platforms without os.getloadavg support).
    """
    try:
        one_minute = os.getloadavg()[0]
        return round(one_minute, 2)
    except Exception as e:
        mylog("verbose", [f"[health] Error getting load average: {e}"])
        return -1.0
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
# Disk Headroom
|
||||
# ===============================================================================
|
||||
|
||||
def get_storage_pct():
    """
    Report disk utilisation of the /data mount as a whole-number percentage.

    Uses os.statvfs; "used" is total blocks minus free blocks. The result
    is clamped to the 0-100 range to guard against rounding artefacts.

    Returns:
        int: Disk usage percentage (0-100), or None on error.
    """
    try:
        fs = os.statvfs(dataPath)
        total_bytes = fs.f_blocks * fs.f_frsize
        used_bytes = (fs.f_blocks - fs.f_bfree) * fs.f_frsize
        raw_pct = int((used_bytes / total_bytes) * 100) if total_bytes > 0 else 0
        return min(100, max(0, raw_pct))  # Clamp to 0-100
    except Exception as e:
        mylog("verbose", [f"[health] Error calculating storage usage: {e}"])
        return None
|
||||
|
||||
|
||||
def get_cpu_temp():
    """
    Read the CPU temperature from hardware sensors when available.

    Prefers the Intel 'coretemp' sensor; otherwise falls back to the
    first sensor type that reports any readings. Many platforms
    (VMs, containers) expose no temperature sensors at all, in which
    case None is returned.

    Returns:
        int: CPU temperature in Celsius, or None if unavailable.
    """
    try:
        sensors = psutil.sensors_temperatures()
        if not sensors:
            return None

        # Intel-style 'coretemp' is the most reliable source when present
        core = sensors.get("coretemp")
        if core:
            return int(core[0].current)

        # Otherwise take the first sensor type with any readings
        for readings in sensors.values():
            if readings:
                return int(readings[0].current)

        return None
    except Exception as e:
        mylog("verbose", [f"[health] Error reading CPU temperature: {e}"])
        return None
|
||||
|
||||
|
||||
def get_mem_mb():
    """
    Return the total amount of installed system memory in MB.

    Returns:
        int: Total memory in MB, or None on error.
    """
    try:
        total_bytes = psutil.virtual_memory().total
        return int(total_bytes / (1024 * 1024))
    except Exception as e:
        mylog("verbose", [f"[health] Error getting memory size: {e}"])
        return None
|
||||
|
||||
|
||||
def get_storage_gb():
    """
    Return the total capacity of the /data mount in GB.

    Returns:
        float: Total storage in GB rounded to 2 decimals, or None on error.
    """
    try:
        fs = os.statvfs(dataPath)
        total_bytes = fs.f_blocks * fs.f_frsize
        return round(total_bytes / (1024 ** 3), 2)
    except Exception as e:
        mylog("verbose", [f"[health] Error getting storage size: {e}"])
        return None
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
# Aggregator
|
||||
# ===============================================================================
|
||||
|
||||
def get_health_status():
    """
    Aggregate all individual health metrics into one dictionary.

    Each value is produced by its dedicated collector; the collectors
    handle their own errors and return a sentinel (None / 0.0 / -1.0)
    instead of raising, so this function never fails.

    Returns:
        dict: All health metrics keyed by metric name.
    """
    metrics = {}
    metrics["db_size_mb"] = get_db_size_mb()
    metrics["mem_usage_pct"] = get_mem_usage_pct()
    metrics["load_1m"] = get_load_avg_1m()
    metrics["storage_pct"] = get_storage_pct()
    metrics["cpu_temp"] = get_cpu_temp()
    metrics["storage_gb"] = get_storage_gb()
    metrics["mem_mb"] = get_mem_mb()
    return metrics
|
||||
@@ -651,6 +651,38 @@ class NetworkInterfacesResponse(BaseResponse):
|
||||
interfaces: Dict[str, Any] = Field(..., description="Details about network interfaces.")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# HEALTH CHECK SCHEMAS
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class HealthCheckResponse(BaseResponse):
    """System health check with vitality metrics.

    Mirrors the dict produced by get_health_status(): every collector
    may fail independently, so all metrics that can be unavailable are
    nullable with a None default.

    Fixes:
    - storage_gb is produced as a float rounded to 2 decimals
      (get_storage_gb), so it must be typed float, not int — pydantic
      would reject fractional values like 8.45 for an int field.
    - storage_gb / mem_mb can be None (collectors return None on error),
      so they default to None like the other nullable fields instead of
      being required (`...`).
    """
    model_config = ConfigDict(
        extra="allow",
        json_schema_extra={
            "examples": [{
                "success": True,
                "db_size_mb": 125.45,
                "mem_usage_pct": 65,
                "load_1m": 2.15,
                "storage_pct": 42,
                "cpu_temp": 58,
                "storage_gb": 8,
                "mem_mb": 8192
            }]
        }
    )

    db_size_mb: float = Field(..., description="Database size in MB (app.db + app.db-wal)")
    mem_usage_pct: Optional[int] = Field(None, ge=0, le=100, description="Memory usage percentage (0-100, nullable if unavailable)")
    load_1m: float = Field(..., description="1-minute load average (-1.0 if unavailable)")
    storage_pct: Optional[int] = Field(None, ge=0, le=100, description="Disk usage percentage of /data mount (0-100, nullable if unavailable)")
    cpu_temp: Optional[int] = Field(None, description="CPU temperature in Celsius (nullable if unavailable)")
    storage_gb: Optional[float] = Field(None, description="Total storage size of /data in GB (nullable if unavailable)")
    mem_mb: Optional[int] = Field(None, description="Installed memory size in MB (nullable if unavailable)")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# EVENTS SCHEMAS
|
||||
# =============================================================================
|
||||
|
||||
@@ -12,7 +12,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
|
||||
from database import get_temp_db_connection # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value, format_ip_long # noqa: E402 [flake8 lint suppression]
|
||||
from db.db_helper import get_date_from_period # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB, format_date_iso, format_event_date, format_date_diff, format_date # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC, format_date_iso, format_event_date, format_date_diff, format_date # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
|
||||
# --------------------------
|
||||
@@ -165,7 +165,7 @@ def get_sessions_calendar(start_date, end_date, mac):
|
||||
rows = cur.fetchall()
|
||||
conn.close()
|
||||
|
||||
now_iso = timeNowDB()
|
||||
now_iso = timeNowUTC()
|
||||
|
||||
events = []
|
||||
for row in rows:
|
||||
|
||||
@@ -3,7 +3,7 @@ import base64
|
||||
from flask import jsonify, request
|
||||
from logger import mylog
|
||||
from helper import get_setting_value
|
||||
from utils.datetime_utils import timeNowDB
|
||||
from utils.datetime_utils import timeNowUTC
|
||||
from messaging.in_app import write_notification
|
||||
|
||||
INSTALL_PATH = os.getenv("NETALERTX_APP", "/app")
|
||||
@@ -22,19 +22,19 @@ def handle_sync_get():
|
||||
raw_data = f.read()
|
||||
except FileNotFoundError:
|
||||
msg = f"[Plugin: SYNC] Data file not found: {file_path}"
|
||||
write_notification(msg, "alert", timeNowDB())
|
||||
write_notification(msg, "alert", timeNowUTC())
|
||||
mylog("verbose", [msg])
|
||||
return jsonify({"error": msg}), 500
|
||||
|
||||
response_data = base64.b64encode(raw_data).decode("utf-8")
|
||||
|
||||
write_notification("[Plugin: SYNC] Data sent", "info", timeNowDB())
|
||||
write_notification("[Plugin: SYNC] Data sent", "info", timeNowUTC())
|
||||
return jsonify({
|
||||
"node_name": get_setting_value("SYNC_node_name"),
|
||||
"status": 200,
|
||||
"message": "OK",
|
||||
"data_base64": response_data,
|
||||
"timestamp": timeNowDB()
|
||||
"timestamp": timeNowUTC()
|
||||
}), 200
|
||||
|
||||
|
||||
@@ -68,11 +68,11 @@ def handle_sync_post():
|
||||
f.write(data)
|
||||
except Exception as e:
|
||||
msg = f"[Plugin: SYNC] Failed to store data: {e}"
|
||||
write_notification(msg, "alert", timeNowDB())
|
||||
write_notification(msg, "alert", timeNowUTC())
|
||||
mylog("verbose", [msg])
|
||||
return jsonify({"error": msg}), 500
|
||||
|
||||
msg = f"[Plugin: SYNC] Data received ({file_path_new})"
|
||||
write_notification(msg, "info", timeNowDB())
|
||||
write_notification(msg, "info", timeNowUTC())
|
||||
mylog("verbose", [msg])
|
||||
return jsonify({"message": "Data received and stored successfully"}), 200
|
||||
|
||||
@@ -4,7 +4,7 @@ import json
|
||||
from const import applicationPath, apiPath
|
||||
from logger import mylog
|
||||
from helper import checkNewVersion
|
||||
from utils.datetime_utils import timeNowDB, timeNow
|
||||
from utils.datetime_utils import timeNowUTC
|
||||
from api_server.sse_broadcast import broadcast_state_update
|
||||
|
||||
# Register NetAlertX directories using runtime configuration
|
||||
@@ -67,7 +67,7 @@ class app_state_class:
|
||||
previousState = ""
|
||||
|
||||
# Update self
|
||||
self.lastUpdated = str(timeNowDB())
|
||||
self.lastUpdated = str(timeNowUTC())
|
||||
|
||||
if os.path.exists(stateFile):
|
||||
try:
|
||||
@@ -95,7 +95,7 @@ class app_state_class:
|
||||
self.showSpinner = False
|
||||
self.processScan = False
|
||||
self.isNewVersion = checkNewVersion()
|
||||
self.isNewVersionChecked = int(timeNow().timestamp())
|
||||
self.isNewVersionChecked = int(timeNowUTC(as_string=False).timestamp())
|
||||
self.graphQLServerStarted = 0
|
||||
self.currentState = "Init"
|
||||
self.pluginsStates = {}
|
||||
@@ -135,10 +135,10 @@ class app_state_class:
|
||||
self.buildTimestamp = buildTimestamp
|
||||
# check for new version every hour and if currently not running new version
|
||||
if self.isNewVersion is False and self.isNewVersionChecked + 3600 < int(
|
||||
timeNow().timestamp()
|
||||
timeNowUTC(as_string=False).timestamp()
|
||||
):
|
||||
self.isNewVersion = checkNewVersion()
|
||||
self.isNewVersionChecked = int(timeNow().timestamp())
|
||||
self.isNewVersionChecked = int(timeNowUTC(as_string=False).timestamp())
|
||||
|
||||
# Update .json file
|
||||
# with open(stateFile, 'w') as json_file:
|
||||
|
||||
@@ -49,6 +49,15 @@ NATIVE_SPEEDTEST_PATH = os.getenv("NATIVE_SPEEDTEST_PATH", "/usr/bin/speedtest")
|
||||
|
||||
default_tz = "Europe/Berlin"
|
||||
|
||||
# ===============================================================================
|
||||
# Magic strings
|
||||
# ===============================================================================
|
||||
|
||||
NULL_EQUIVALENTS = ["", "null", "(unknown)", "(Unknown)", "(name not found)"]
|
||||
|
||||
# Convert list to SQL string: wrap each value in single quotes and escape single quotes if needed
|
||||
NULL_EQUIVALENTS_SQL = ",".join("'" + v.replace("'", "''") + "'" for v in NULL_EQUIVALENTS)
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
# SQL queries
|
||||
@@ -186,10 +195,19 @@ sql_devices_filters = """
|
||||
FROM Devices WHERE devSSID NOT IN ('', 'null') AND devSSID IS NOT NULL
|
||||
ORDER BY columnName;
|
||||
"""
|
||||
sql_devices_stats = """SELECT Online_Devices as online, Down_Devices as down, All_Devices as 'all', Archived_Devices as archived,
|
||||
(select count(*) from Devices a where devIsNew = 1 ) as new,
|
||||
(select count(*) from Devices a where devName = '(unknown)' or devName = '(name not found)' ) as unknown
|
||||
from Online_History order by Scan_Date desc limit 1"""
|
||||
|
||||
sql_devices_stats = f"""
|
||||
SELECT
|
||||
Online_Devices as online,
|
||||
Down_Devices as down,
|
||||
All_Devices as 'all',
|
||||
Archived_Devices as archived,
|
||||
(SELECT COUNT(*) FROM Devices a WHERE devIsNew = 1) as new,
|
||||
(SELECT COUNT(*) FROM Devices a WHERE devName IN ({NULL_EQUIVALENTS_SQL}) OR devName IS NULL) as unknown
|
||||
FROM Online_History
|
||||
ORDER BY Scan_Date DESC
|
||||
LIMIT 1
|
||||
"""
|
||||
sql_events_pending_alert = "SELECT * FROM Events where eve_PendingAlertEmail is not 0"
|
||||
sql_settings = "SELECT * FROM Settings"
|
||||
sql_plugins_objects = "SELECT * FROM Plugins_Objects"
|
||||
|
||||
@@ -17,6 +17,7 @@ from db.db_upgrade import (
|
||||
ensure_Settings,
|
||||
ensure_Indexes,
|
||||
ensure_mac_lowercase_triggers,
|
||||
migrate_timestamps_to_utc,
|
||||
)
|
||||
|
||||
|
||||
@@ -187,6 +188,9 @@ class DB:
|
||||
# Parameters tables setup
|
||||
ensure_Parameters(self.sql)
|
||||
|
||||
# One-time UTC timestamp migration (must run after Parameters table exists)
|
||||
migrate_timestamps_to_utc(self.sql)
|
||||
|
||||
# Plugins tables setup
|
||||
ensure_plugins_tables(self.sql)
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ from logger import mylog # noqa: E402 [flake8 lint suppression]
|
||||
from helper import get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from db.db_helper import row_to_json # noqa: E402 [flake8 lint suppression]
|
||||
from plugin_helper import normalize_mac # noqa: E402 [flake8 lint suppression]
|
||||
from const import NULL_EQUIVALENTS # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
|
||||
# Map of field to its source tracking field
|
||||
@@ -96,7 +97,7 @@ def can_overwrite_field(field_name, current_value, current_source, plugin_prefix
|
||||
bool: True if overwrite allowed.
|
||||
"""
|
||||
|
||||
empty_values = ("0.0.0.0", "", "null", "(unknown)", "(name not found)", None)
|
||||
empty_values = ("0.0.0.0", *NULL_EQUIVALENTS, None)
|
||||
|
||||
# Rule 1: USER/LOCKED protected
|
||||
if current_source in ("USER", "LOCKED"):
|
||||
@@ -188,9 +189,7 @@ def get_source_for_field_update_with_value(
|
||||
|
||||
if isinstance(field_value, str):
|
||||
stripped = field_value.strip()
|
||||
if stripped in ("", "null"):
|
||||
return "NEWDEV"
|
||||
if stripped.lower() in ("(unknown)", "(name not found)"):
|
||||
if stripped.lower() in NULL_EQUIVALENTS:
|
||||
return "NEWDEV"
|
||||
|
||||
return plugin_prefix
|
||||
|
||||
@@ -6,14 +6,37 @@ import os
|
||||
INSTALL_PATH = os.getenv("NETALERTX_APP", "/app")
|
||||
sys.path.extend([f"{INSTALL_PATH}/server"])
|
||||
|
||||
from helper import if_byte_then_to_str # noqa: E402 [flake8 lint suppression]
|
||||
from helper import if_byte_then_to_str, get_setting_value # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog # noqa: E402 [flake8 lint suppression]
|
||||
from const import NULL_EQUIVALENTS_SQL # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
|
||||
def get_device_conditions():
    """
    Build the map of device-list filter names to SQL WHERE clauses.

    NETWORK_DEVICE_TYPES setting values are single-quoted with embedded
    quotes doubled before being interpolated into the SQL IN (...) list,
    and NULL_EQUIVALENTS_SQL is a pre-escaped constant, so the clauses
    are safe against quote breakage from configured values.

    Returns:
        dict: filter name -> SQL WHERE clause. Callers rely on the
              insertion order of this dict (see DO NOT CHANGE ORDER).
    """
    # Quote + escape each configured network device type for SQL IN (...) use
    network_dev_types = ",".join("'" + v.replace("'", "''") + "'" for v in get_setting_value("NETWORK_DEVICE_TYPES"))

    # DO NOT CHANGE ORDER
    conditions = {
        "all": "WHERE devIsArchived=0",
        "my": "WHERE devIsArchived=0",
        # NOTE(review): "connected" is the only status without a devIsArchived=0
        # filter — confirm archived-but-present devices are meant to be included
        "connected": "WHERE devPresentLastScan=1",
        "favorites": "WHERE devIsArchived=0 AND devFavorite=1",
        "new": "WHERE devIsArchived=0 AND devIsNew=1",
        "down": "WHERE devIsArchived=0 AND devAlertDown != 0 AND devPresentLastScan=0",
        "offline": "WHERE devIsArchived=0 AND devPresentLastScan=0",
        "archived": "WHERE devIsArchived=1",
        "network_devices": f"WHERE devIsArchived=0 AND devType in ({network_dev_types})",
        "network_devices_down": f"WHERE devIsArchived=0 AND devType in ({network_dev_types}) AND devPresentLastScan=0",
        "unknown": f"WHERE devIsArchived=0 AND devName in ({NULL_EQUIVALENTS_SQL})",
        "known": f"WHERE devIsArchived=0 AND devName not in ({NULL_EQUIVALENTS_SQL})",
        "favorites_offline": "WHERE devIsArchived=0 AND devFavorite=1 AND devPresentLastScan=0",
        # NOTE(review): name says "online" but the clause checks
        # devPresentLastScan=0 (offline) — verify this is intentional
        "new_online": "WHERE devIsArchived=0 AND devIsNew=1 AND devPresentLastScan=0",
    }

    return conditions
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
# Return the SQL WHERE clause for filtering devices based on their status.
|
||||
|
||||
|
||||
def get_device_condition_by_status(device_status):
|
||||
"""
|
||||
Return the SQL WHERE clause for filtering devices based on their status.
|
||||
@@ -32,17 +55,8 @@ def get_device_condition_by_status(device_status):
|
||||
str: SQL WHERE clause corresponding to the device status.
|
||||
Defaults to 'WHERE 1=0' for unrecognized statuses.
|
||||
"""
|
||||
conditions = {
|
||||
"all": "WHERE devIsArchived=0",
|
||||
"my": "WHERE devIsArchived=0",
|
||||
"connected": "WHERE devIsArchived=0 AND devPresentLastScan=1",
|
||||
"favorites": "WHERE devIsArchived=0 AND devFavorite=1",
|
||||
"new": "WHERE devIsArchived=0 AND devIsNew=1",
|
||||
"down": "WHERE devIsArchived=0 AND devAlertDown != 0 AND devPresentLastScan=0",
|
||||
"offline": "WHERE devIsArchived=0 AND devPresentLastScan=0",
|
||||
"archived": "WHERE devIsArchived=1",
|
||||
}
|
||||
return conditions.get(device_status, "WHERE 1=0")
|
||||
|
||||
return get_device_conditions().get(device_status, "WHERE 1=0")
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
|
||||
@@ -1,10 +1,6 @@
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Register NetAlertX directories
|
||||
INSTALL_PATH = os.getenv("NETALERTX_APP", "/app")
|
||||
sys.path.extend([f"{INSTALL_PATH}/server"])
|
||||
|
||||
import conf
|
||||
from zoneinfo import ZoneInfo
|
||||
import datetime as dt
|
||||
from logger import mylog # noqa: E402 [flake8 lint suppression]
|
||||
from messaging.in_app import write_notification # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
@@ -147,7 +143,7 @@ def ensure_mac_lowercase_triggers(sql):
|
||||
except Exception as e:
|
||||
mylog("none", [f"[db_upgrade] ERROR while ensuring MAC triggers: {e}"])
|
||||
return False
|
||||
|
||||
|
||||
|
||||
def ensure_views(sql) -> bool:
|
||||
"""
|
||||
@@ -228,7 +224,7 @@ def ensure_views(sql) -> bool:
|
||||
)
|
||||
SELECT
|
||||
d.*, -- all Device fields
|
||||
r.* -- all CurrentScan fields (cur_*)
|
||||
r.* -- all CurrentScan fields
|
||||
FROM Devices d
|
||||
LEFT JOIN RankedScans r
|
||||
ON d.devMac = r.scanMac
|
||||
@@ -246,6 +242,23 @@ def ensure_Indexes(sql) -> bool:
|
||||
Parameters:
|
||||
- sql: database cursor or connection wrapper (must support execute()).
|
||||
"""
|
||||
|
||||
    # Remove after 12/12/2026 - prevents idx_events_unique from failing - dedupe
|
||||
DELETE FROM Events
|
||||
WHERE rowid NOT IN (
|
||||
SELECT MIN(rowid)
|
||||
FROM Events
|
||||
GROUP BY
|
||||
eve_MAC,
|
||||
eve_IP,
|
||||
eve_EventType,
|
||||
eve_DateTime
|
||||
);
|
||||
"""
|
||||
|
||||
sql.execute(clean_duplicate_events)
|
||||
|
||||
indexes = [
|
||||
# Sessions
|
||||
(
|
||||
@@ -273,6 +286,10 @@ def ensure_Indexes(sql) -> bool:
|
||||
"idx_eve_type_date",
|
||||
"CREATE INDEX idx_eve_type_date ON Events(eve_EventType, eve_DateTime)",
|
||||
),
|
||||
(
|
||||
"idx_events_unique",
|
||||
"CREATE UNIQUE INDEX idx_events_unique ON Events (eve_MAC, eve_IP, eve_EventType, eve_DateTime)",
|
||||
),
|
||||
# Devices
|
||||
("idx_dev_mac", "CREATE INDEX idx_dev_mac ON Devices(devMac)"),
|
||||
(
|
||||
@@ -361,8 +378,8 @@ def ensure_Parameters(sql) -> bool:
|
||||
|
||||
sql.execute("""
|
||||
CREATE TABLE "Parameters" (
|
||||
"par_ID" TEXT PRIMARY KEY,
|
||||
"par_Value" TEXT
|
||||
"parID" TEXT PRIMARY KEY,
|
||||
"parValue" TEXT
|
||||
);
|
||||
""")
|
||||
|
||||
@@ -494,3 +511,245 @@ def ensure_plugins_tables(sql) -> bool:
|
||||
); """)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
# UTC Timestamp Migration (added 2026-02-10)
|
||||
# ===============================================================================
|
||||
|
||||
def is_timestamps_in_utc(sql) -> bool:
    """
    Check if existing timestamps in Devices table are already in UTC format.

    Strategy:
    1. Sample 10 non-NULL devFirstConnection timestamps from Devices
    2. For each timestamp, assume it's UTC and calculate what it would be in local time
    3. Check if timestamps have a consistent offset pattern (indicating local time storage)
    4. If offset is consistently > 0, they're likely local timestamps (need migration)
    5. If offset is ~0 or inconsistent, they're likely already UTC (skip migration)

    NOTE(review): the implemented detection only checks for explicit
    timezone markers ('+' or 'Z') in the sampled strings; the computed
    offset is used for logging only, not for the decision. Confirm that
    matches the intended strategy described above.

    Parameters:
        sql: database cursor or connection wrapper (must support
             execute()/fetchall()).

    Returns:
        bool: True if timestamps appear to be in UTC already, False if they need migration
    """
    try:
        # Get timezone offset in seconds
        # Local imports avoid import-time cycles with conf
        import conf
        import datetime as dt

        # dt.UTC requires Python 3.11+
        now = dt.datetime.now(dt.UTC).replace(microsecond=0)
        current_offset_seconds = 0

        try:
            if isinstance(conf.tz, dt.tzinfo):
                tz = conf.tz
            elif conf.tz:
                # NOTE(review): ZoneInfo must be imported at module level —
                # the surrounding diff suggests top-level imports changed; verify it is still in scope
                tz = ZoneInfo(conf.tz)
            else:
                tz = None
        except Exception:
            tz = None

        if tz:
            # Offset between configured local time and UTC; used only in the debug log below
            local_now = dt.datetime.now(tz).replace(microsecond=0)
            local_offset = local_now.utcoffset().total_seconds()
            utc_offset = now.utcoffset().total_seconds() if now.utcoffset() else 0
            current_offset_seconds = int(local_offset - utc_offset)

        # Sample timestamps from Devices table
        sql.execute("""
            SELECT devFirstConnection, devLastConnection, devLastNotification
            FROM Devices
            WHERE devFirstConnection IS NOT NULL
            LIMIT 10
        """)

        samples = []
        for row in sql.fetchall():
            for ts in row:
                if ts:
                    samples.append(ts)

        if not samples:
            # NOTE(review): mylog is called with a plain string here, while other
            # call sites in this codebase pass a list — confirm both forms are supported
            mylog("verbose", "[db_upgrade] No timestamp samples found in Devices - assuming UTC")
            return True  # Empty DB, assume UTC

        # Parse samples and check if they have timezone info (which would indicate migration already done)
        has_tz_marker = any('+' in str(ts) or 'Z' in str(ts) for ts in samples)
        if has_tz_marker:
            mylog("verbose", "[db_upgrade] Timestamps have timezone markers - already migrated to UTC")
            return True

        mylog("debug", f"[db_upgrade] Sampled {len(samples)} timestamps. Current TZ offset: {current_offset_seconds}s")
        mylog("verbose", "[db_upgrade] Timestamps appear to be in system local time - migration needed")
        return False

    except Exception as e:
        # Fail-open: on any detection error, report UTC so the caller skips migration
        mylog("warn", f"[db_upgrade] Error checking UTC status: {e} - assuming UTC")
        return True
|
||||
|
||||
|
||||
def migrate_timestamps_to_utc(sql) -> bool:
    """
    Safely migrate timestamp columns from local time to UTC.

    Migration rules (fail-safe):
    - Default behaviour: RUN migration unless proven safe to skip
    - Version > 26.2.6 → timestamps already UTC → skip
    - Missing / unknown / unparsable version → migrate
    - Migration flag present → skip
    - Detection says already UTC → skip

    NOTE(review): the DB_TIMESTAMPS_UTC_MIGRATED flag is read here but
    never written in this function — presumably it is set elsewhere
    after a successful run; verify, otherwise the migration could be
    re-applied and shift timestamps twice.

    Parameters:
        sql: database cursor or connection wrapper (must support
             execute()/fetchone()/rowcount).

    Returns:
        bool: True if migration completed or not needed, False on error
    """

    try:
        # -------------------------------------------------
        # Check migration flag (idempotency protection)
        # -------------------------------------------------
        try:
            sql.execute("SELECT setValue FROM Settings WHERE setKey='DB_TIMESTAMPS_UTC_MIGRATED'")
            result = sql.fetchone()
            if result and str(result[0]) == "1":
                mylog("verbose", "[db_upgrade] UTC timestamp migration already completed - skipping")
                return True
        except Exception:
            # Settings table may not exist yet on a fresh install — fall through
            pass

        # -------------------------------------------------
        # Read previous version
        # -------------------------------------------------
        sql.execute("SELECT setValue FROM Settings WHERE setKey='VERSION'")
        result = sql.fetchone()
        prev_version = result[0] if result else ""

        mylog("verbose", f"[db_upgrade] Version '{prev_version}' detected.")

        # Default behaviour: migrate unless proven safe
        should_migrate = True

        # -------------------------------------------------
        # Version-based safety check
        # -------------------------------------------------
        if prev_version and str(prev_version).lower() != "unknown":
            try:
                # Tolerates a leading 'v' and short versions like "26.2"
                version_parts = prev_version.lstrip('v').split('.')
                major = int(version_parts[0]) if len(version_parts) > 0 else 0
                minor = int(version_parts[1]) if len(version_parts) > 1 else 0
                patch = int(version_parts[2]) if len(version_parts) > 2 else 0

                # UTC timestamps introduced AFTER v26.2.6
                if (major, minor, patch) > (26, 2, 6):
                    should_migrate = False
                    mylog(
                        "verbose",
                        f"[db_upgrade] Version {prev_version} confirmed UTC timestamps - skipping migration",
                    )

            except (ValueError, IndexError) as e:
                mylog(
                    "warn",
                    f"[db_upgrade] Could not parse version '{prev_version}': {e} - running migration as safety measure",
                )
        else:
            mylog(
                "warn",
                "[db_upgrade] VERSION missing/unknown - running migration as safety measure",
            )

        # -------------------------------------------------
        # Detection fallback
        # -------------------------------------------------
        if should_migrate:
            try:
                if is_timestamps_in_utc(sql):
                    mylog(
                        "verbose",
                        "[db_upgrade] Timestamps appear already UTC - skipping migration",
                    )
                    return True
            except Exception as e:
                # Fail-safe: if detection itself crashes, still migrate
                mylog(
                    "warn",
                    f"[db_upgrade] UTC detection failed ({e}) - continuing with migration",
                )
        else:
            return True

        # Get timezone offset
        # NOTE(review): conf, dt and ZoneInfo are expected at module scope here
        # (unlike is_timestamps_in_utc, which imports conf/dt locally) — verify
        try:
            if isinstance(conf.tz, dt.tzinfo):
                tz = conf.tz
            elif conf.tz:
                tz = ZoneInfo(conf.tz)
            else:
                tz = None
        except Exception:
            tz = None

        if tz:
            now_local = dt.datetime.now(tz)
            offset_hours = (now_local.utcoffset().total_seconds()) / 3600
        else:
            offset_hours = 0

        mylog("verbose", f"[db_upgrade] Starting UTC timestamp migration (offset: {offset_hours} hours)")

        # List of tables and their datetime columns.
        # Table/column names below are hard-coded, so the f-string SQL
        # interpolation further down cannot be injected into.
        timestamp_columns = {
            'Devices': ['devFirstConnection', 'devLastConnection', 'devLastNotification'],
            'Events': ['eve_DateTime'],
            'Sessions': ['ses_DateTimeConnection', 'ses_DateTimeDisconnection'],
            'Notifications': ['DateTimeCreated', 'DateTimePushed'],
            'Online_History': ['Scan_Date'],
            'Plugins_Objects': ['DateTimeCreated', 'DateTimeChanged'],
            'Plugins_Events': ['DateTimeCreated', 'DateTimeChanged'],
            'Plugins_History': ['DateTimeCreated', 'DateTimeChanged'],
            'AppEvents': ['DateTimeCreated'],
        }

        for table, columns in timestamp_columns.items():
            try:
                # Check if table exists
                sql.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table}'")
                if not sql.fetchone():
                    mylog("debug", f"[db_upgrade] Table '{table}' does not exist - skipping")
                    continue

                for column in columns:
                    try:
                        # Update non-NULL timestamps
                        if offset_hours > 0:
                            # Convert local to UTC (subtract offset);
                            # fractional hours are carried into the minutes modifier
                            sql.execute(f"""
                                UPDATE {table}
                                SET {column} = DATETIME({column}, '-{int(offset_hours)} hours', '-{int((offset_hours % 1) * 60)} minutes')
                                WHERE {column} IS NOT NULL
                            """)
                        elif offset_hours < 0:
                            # Convert local to UTC (add offset absolute value)
                            abs_hours = abs(int(offset_hours))
                            abs_mins = int((abs(offset_hours) % 1) * 60)
                            sql.execute(f"""
                                UPDATE {table}
                                SET {column} = DATETIME({column}, '+{abs_hours} hours', '+{abs_mins} minutes')
                                WHERE {column} IS NOT NULL
                            """)

                        # NOTE(review): when offset_hours == 0 no UPDATE runs, so
                        # rowcount reflects the earlier SELECT (often -1) — harmless
                        # for the log below, but confirm
                        row_count = sql.rowcount
                        if row_count > 0:
                            mylog("verbose", f"[db_upgrade] Migrated {row_count} timestamps in {table}.{column}")
                    except Exception as e:
                        # Best-effort per column: log and keep migrating the rest
                        mylog("warn", f"[db_upgrade] Error updating {table}.{column}: {e}")
                        continue

            except Exception as e:
                # Best-effort per table: log and keep migrating the rest
                mylog("warn", f"[db_upgrade] Error processing table {table}: {e}")
                continue

        mylog("none", "[db_upgrade] ✓ UTC timestamp migration completed successfully")
        return True

    except Exception as e:
        mylog("none", f"[db_upgrade] ERROR during timestamp migration: {e}")
        return False
|
||||
|
||||
@@ -22,6 +22,10 @@ CREATE TABLE Devices (
|
||||
devFirstConnection DATETIME NOT NULL,
|
||||
devLastConnection DATETIME NOT NULL,
|
||||
devLastIP STRING (50) NOT NULL COLLATE NOCASE,
|
||||
devPrimaryIPv4 TEXT,
|
||||
devPrimaryIPv6 TEXT,
|
||||
devVlan TEXT,
|
||||
devForceStatus TEXT,
|
||||
devStaticIP BOOLEAN DEFAULT (0) NOT NULL CHECK (devStaticIP IN (0, 1)),
|
||||
devScan INTEGER DEFAULT (1) NOT NULL,
|
||||
devLogEvents BOOLEAN NOT NULL DEFAULT (1) CHECK (devLogEvents IN (0, 1)),
|
||||
@@ -42,7 +46,17 @@ CREATE TABLE Devices (
|
||||
devSSID TEXT,
|
||||
devSyncHubNode TEXT,
|
||||
devSourcePlugin TEXT,
|
||||
devFQDN TEXT,
|
||||
devFQDN TEXT,
|
||||
devMacSource TEXT,
|
||||
devNameSource TEXT,
|
||||
devFQDNSource TEXT,
|
||||
devLastIPSource TEXT,
|
||||
devVendorSource TEXT,
|
||||
devSSIDSource TEXT,
|
||||
devParentMACSource TEXT,
|
||||
devParentPortSource TEXT,
|
||||
devParentRelTypeSource TEXT,
|
||||
devVlanSource TEXT,
|
||||
"devCustomProps" TEXT);
|
||||
CREATE TABLE IF NOT EXISTS "Settings" (
|
||||
"setKey" TEXT,
|
||||
@@ -56,8 +70,8 @@ CREATE TABLE IF NOT EXISTS "Settings" (
|
||||
"setOverriddenByEnv" INTEGER
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "Parameters" (
|
||||
"par_ID" TEXT PRIMARY KEY,
|
||||
"par_Value" TEXT
|
||||
"parID" TEXT PRIMARY KEY,
|
||||
"parValue" TEXT
|
||||
);
|
||||
CREATE TABLE Plugins_Objects(
|
||||
"Index" INTEGER,
|
||||
@@ -145,6 +159,7 @@ CREATE TABLE CurrentScan (
|
||||
scanSyncHubNode STRING(50),
|
||||
scanSite STRING(250),
|
||||
scanSSID STRING(250),
|
||||
scanVlan STRING(250),
|
||||
scanParentMAC STRING(250),
|
||||
scanParentPort STRING(250),
|
||||
scanType STRING(250),
|
||||
@@ -203,6 +218,13 @@ CREATE INDEX IDX_dev_Favorite ON Devices (devFavorite);
|
||||
CREATE INDEX IDX_dev_LastIP ON Devices (devLastIP);
|
||||
CREATE INDEX IDX_dev_NewDevice ON Devices (devIsNew);
|
||||
CREATE INDEX IDX_dev_Archived ON Devices (devIsArchived);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_events_unique
|
||||
ON Events (
|
||||
eve_MAC,
|
||||
eve_IP,
|
||||
eve_EventType,
|
||||
eve_DateTime
|
||||
);
|
||||
CREATE VIEW Events_Devices AS
|
||||
SELECT *
|
||||
FROM Events
|
||||
@@ -408,4 +430,4 @@ CREATE TRIGGER "trg_delete_devices"
|
||||
'DEVICES', -- ObjectForeignKey
|
||||
'delete'
|
||||
);
|
||||
END;
|
||||
END;
|
||||
|
||||
@@ -12,7 +12,7 @@ import uuid
|
||||
import conf
|
||||
from const import fullConfPath, fullConfFolder, default_tz
|
||||
from helper import getBuildTimeStampAndVersion, collect_lang_strings, updateSubnets, generate_random_string
|
||||
from utils.datetime_utils import timeNowDB
|
||||
from utils.datetime_utils import timeNowUTC
|
||||
from app_state import updateState
|
||||
from logger import mylog
|
||||
from api import update_api
|
||||
@@ -401,7 +401,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
c_d,
|
||||
"Language Interface",
|
||||
'{"dataType":"string", "elements": [{"elementType" : "select", "elementOptions" : [] ,"transformers": []}]}',
|
||||
"['English (en_us)', 'Arabic (ar_ar)', 'Catalan (ca_ca)', 'Czech (cs_cz)', 'German (de_de)', 'Spanish (es_es)', 'Farsi (fa_fa)', 'French (fr_fr)', 'Italian (it_it)', 'Japanese (ja_jp)', 'Norwegian (nb_no)', 'Polish (pl_pl)', 'Portuguese (pt_br)', 'Portuguese (pt_pt)', 'Russian (ru_ru)', 'Swedish (sv_sv)', 'Turkish (tr_tr)', 'Ukrainian (uk_ua)', 'Chinese (zh_cn)']", # noqa: E501 - inline JSON
|
||||
"['English (en_us)', 'Arabic (ar_ar)', 'Catalan (ca_ca)', 'Czech (cs_cz)', 'German (de_de)', 'Spanish (es_es)', 'Farsi (fa_fa)', 'French (fr_fr)', 'Italian (it_it)', 'Japanese (ja_jp)', 'Norwegian (nb_no)', 'Polish (pl_pl)', 'Portuguese (pt_br)', 'Portuguese (pt_pt)', 'Russian (ru_ru)', 'Swedish (sv_sv)', 'Turkish (tr_tr)', 'Ukrainian (uk_ua)', 'Vietnamese (vi_vn)', 'Chinese (zh_cn)']", # noqa: E501 - inline JSON
|
||||
"UI",
|
||||
)
|
||||
|
||||
@@ -419,7 +419,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
|
||||
# TODO cleanup later ----------------------------------------------------------------------------------
|
||||
# init all time values as we have timezone - all this shoudl be moved into plugin/plugin settings
|
||||
conf.time_started = datetime.datetime.now(conf.tz)
|
||||
conf.time_started = timeNowUTC(as_string=False)
|
||||
conf.plugins_once_run = False
|
||||
|
||||
# timestamps of last execution times
|
||||
@@ -645,7 +645,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
|
||||
if run_val == "schedule":
|
||||
newSchedule = Cron(run_sch).schedule(
|
||||
start_date=datetime.datetime.now(conf.tz)
|
||||
start_date=timeNowUTC(as_string=False)
|
||||
)
|
||||
conf.mySchedules.append(
|
||||
schedule_class(
|
||||
@@ -682,7 +682,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
Check out new features and what has changed in the \
|
||||
<a href="https://github.com/jokob-sk/NetAlertX/releases" target="_blank">📓 release notes</a>.""",
|
||||
'interrupt',
|
||||
timeNowDB()
|
||||
timeNowUTC()
|
||||
)
|
||||
|
||||
# -----------------
|
||||
@@ -721,7 +721,7 @@ def importConfigs(pm, db, all_plugins):
|
||||
mylog('minimal', msg)
|
||||
|
||||
# front end app log loggging
|
||||
write_notification(msg, 'info', timeNowDB())
|
||||
write_notification(msg, 'info', timeNowUTC())
|
||||
|
||||
return pm, all_plugins, True
|
||||
|
||||
@@ -770,7 +770,7 @@ def renameSettings(config_file):
|
||||
# If the file contains old settings, proceed with renaming and backup
|
||||
if contains_old_settings:
|
||||
# Create a backup file with the suffix "_old_setting_names" and timestamp
|
||||
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
|
||||
timestamp = timeNowUTC(as_string=False).strftime("%Y%m%d%H%M%S")
|
||||
backup_file = f"{config_file}_old_setting_names_{timestamp}.bak"
|
||||
|
||||
mylog("debug", f"[Config] Old setting names will be replaced and a backup ({backup_file}) of the config created.",)
|
||||
|
||||
@@ -124,12 +124,12 @@ def start_log_writer_thread():
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
def file_print(*args):
|
||||
result = timeNowTZ().strftime("%H:%M:%S") + " "
|
||||
result = timeNowTZ(as_string=False).strftime("%H:%M:%S") + " "
|
||||
for arg in args:
|
||||
if isinstance(arg, list):
|
||||
arg = " ".join(
|
||||
str(a) for a in arg
|
||||
) # so taht new lines are handled correctly also when passing a list
|
||||
) # so that new lines are handled correctly also when passing a list
|
||||
result += str(arg)
|
||||
|
||||
logging.log(custom_to_logging_levels.get(currentLevel, logging.NOTSET), result)
|
||||
|
||||
@@ -13,7 +13,7 @@ sys.path.extend([f"{INSTALL_PATH}/server"])
|
||||
|
||||
from const import apiPath # noqa: E402 [flake8 lint suppression]
|
||||
from logger import mylog # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowDB # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import timeNowUTC # noqa: E402 [flake8 lint suppression]
|
||||
from api_server.sse_broadcast import broadcast_unread_notifications_count # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
|
||||
@@ -64,7 +64,7 @@ def write_notification(content, level="alert", timestamp=None):
|
||||
None
|
||||
"""
|
||||
if timestamp is None:
|
||||
timestamp = timeNowDB()
|
||||
timestamp = timeNowUTC()
|
||||
|
||||
notification = {
|
||||
"timestamp": str(timestamp),
|
||||
|
||||
@@ -10,9 +10,10 @@
|
||||
# cvc90 2023 https://github.com/cvc90 GNU GPLv3 #
|
||||
# ---------------------------------------------------------------------------------#
|
||||
|
||||
import json
|
||||
import os
|
||||
import json
|
||||
import sys
|
||||
from zoneinfo import ZoneInfo
|
||||
|
||||
# Register NetAlertX directories
|
||||
INSTALL_PATH = os.getenv("NETALERTX_APP", "/app")
|
||||
@@ -23,231 +24,295 @@ from helper import ( # noqa: E402 [flake8 lint suppression]
|
||||
)
|
||||
from logger import mylog # noqa: E402 [flake8 lint suppression]
|
||||
from db.sql_safe_builder import create_safe_condition_builder # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import get_timezone_offset # noqa: E402 [flake8 lint suppression]
|
||||
from utils.datetime_utils import format_date_iso # noqa: E402 [flake8 lint suppression]
|
||||
import conf # noqa: E402 [flake8 lint suppression]
|
||||
|
||||
# ===============================================================================
|
||||
# Timezone conversion
|
||||
# ===============================================================================
|
||||
|
||||
DATETIME_FIELDS = {
|
||||
"new_devices": ["Datetime"],
|
||||
"down_devices": ["eve_DateTime"],
|
||||
"down_reconnected": ["eve_DateTime"],
|
||||
"events": ["Datetime"],
|
||||
"plugins": ["DateTimeChanged"],
|
||||
}
|
||||
|
||||
|
||||
def get_datetime_fields_from_columns(column_names):
|
||||
return [
|
||||
col for col in column_names
|
||||
if "date" in col.lower() or "time" in col.lower()
|
||||
]
|
||||
|
||||
|
||||
def apply_timezone_to_json(json_obj, section=None):
|
||||
data = json_obj.json["data"]
|
||||
columns = json_obj.columnNames
|
||||
|
||||
fields = DATETIME_FIELDS.get(section) or get_datetime_fields_from_columns(columns)
|
||||
|
||||
return apply_timezone(data, fields)
|
||||
|
||||
|
||||
def apply_timezone(data, fields):
|
||||
"""
|
||||
Convert UTC datetime fields in a list of dicts to the configured timezone.
|
||||
|
||||
Args:
|
||||
data (list[dict]): Rows returned from DB
|
||||
fields (list[str]): Field names to convert
|
||||
|
||||
Returns:
|
||||
list[dict]: Modified data with timezone-aware ISO strings
|
||||
"""
|
||||
if not data or not fields:
|
||||
return data
|
||||
|
||||
# Determine local timezone
|
||||
tz = conf.tz
|
||||
if isinstance(tz, str):
|
||||
tz = ZoneInfo(tz)
|
||||
|
||||
for row in data:
|
||||
if not isinstance(row, dict):
|
||||
continue
|
||||
|
||||
for field in fields:
|
||||
value = row.get(field)
|
||||
if not value:
|
||||
continue
|
||||
|
||||
try:
|
||||
# Convert DB UTC string → local timezone ISO
|
||||
# format_date_iso already assumes UTC if naive
|
||||
row[field] = format_date_iso(value)
|
||||
except Exception:
|
||||
# Never crash, leave original value if conversion fails
|
||||
continue
|
||||
|
||||
return data
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
# REPORTING
|
||||
# ===============================================================================
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
def get_notifications(db):
|
||||
sql = db.sql # TO-DO
|
||||
"""
|
||||
Fetch notifications for all configured sections.
|
||||
|
||||
# Reporting section
|
||||
mylog("verbose", ["[Notification] Check if something to report"])
|
||||
Args:
|
||||
db: Database object with `.sql` for executing queries.
|
||||
|
||||
# prepare variables for JSON construction
|
||||
json_new_devices = []
|
||||
json_new_devices_meta = {}
|
||||
json_down_devices = []
|
||||
json_down_devices_meta = {}
|
||||
json_down_reconnected = []
|
||||
json_down_reconnected_meta = {}
|
||||
json_events = []
|
||||
json_events_meta = {}
|
||||
json_plugins = []
|
||||
json_plugins_meta = {}
|
||||
Returns:
|
||||
dict: JSON-ready dict with data and metadata for each section.
|
||||
"""
|
||||
sql = db.sql
|
||||
|
||||
# Disable reporting on events for devices where reporting is disabled based on the MAC address
|
||||
mylog("verbose", "[Notification] Check if something to report")
|
||||
|
||||
# Disable notifications (except down/down reconnected) on devices where devAlertEvents is disabled
|
||||
sql.execute("""UPDATE Events SET eve_PendingAlertEmail = 0
|
||||
WHERE eve_PendingAlertEmail = 1 AND eve_EventType not in ('Device Down', 'Down Reconnected', 'New Device' ) AND eve_MAC IN
|
||||
(
|
||||
SELECT devMac FROM Devices WHERE devAlertEvents = 0
|
||||
)""")
|
||||
|
||||
# Disable down/down reconnected notifications on devices where devAlertDown is disabled
|
||||
sql.execute("""UPDATE Events SET eve_PendingAlertEmail = 0
|
||||
WHERE eve_PendingAlertEmail = 1 AND eve_EventType in ('Device Down', 'Down Reconnected') AND eve_MAC IN
|
||||
(
|
||||
SELECT devMac FROM Devices WHERE devAlertDown = 0
|
||||
)""")
|
||||
|
||||
sections = get_setting_value("NTFPRCS_INCLUDED_SECTIONS")
|
||||
# Disable events where reporting is disabled
|
||||
sql.execute("""
|
||||
UPDATE Events SET eve_PendingAlertEmail = 0
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND eve_EventType NOT IN ('Device Down', 'Down Reconnected', 'New Device')
|
||||
AND eve_MAC IN (SELECT devMac FROM Devices WHERE devAlertEvents = 0)
|
||||
""")
|
||||
sql.execute("""
|
||||
UPDATE Events SET eve_PendingAlertEmail = 0
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND eve_EventType IN ('Device Down', 'Down Reconnected')
|
||||
AND eve_MAC IN (SELECT devMac FROM Devices WHERE devAlertDown = 0)
|
||||
""")
|
||||
|
||||
sections = get_setting_value("NTFPRCS_INCLUDED_SECTIONS") or []
|
||||
mylog("verbose", ["[Notification] Included sections: ", sections])
|
||||
|
||||
if "new_devices" in sections:
|
||||
# Compose New Devices Section (no empty lines in SQL queries!)
|
||||
# Use SafeConditionBuilder to prevent SQL injection vulnerabilities
|
||||
condition_builder = create_safe_condition_builder()
|
||||
new_dev_condition_setting = get_setting_value("NTFPRCS_new_dev_condition")
|
||||
# -------------------------
|
||||
# Helper: condition mapping
|
||||
# -------------------------
|
||||
def get_section_condition(section):
|
||||
"""
|
||||
Resolve condition setting key with backward compatibility.
|
||||
"""
|
||||
# New format
|
||||
key = f"NTFPRCS_{section}_condition"
|
||||
value = get_setting_value(key)
|
||||
|
||||
try:
|
||||
safe_condition, parameters = condition_builder.get_safe_condition_legacy(
|
||||
new_dev_condition_setting
|
||||
)
|
||||
sqlQuery = """SELECT
|
||||
eve_MAC as MAC,
|
||||
eve_DateTime as Datetime,
|
||||
devLastIP as IP,
|
||||
eve_EventType as "Event Type",
|
||||
devName as "Device name",
|
||||
devComments as Comments FROM Events_Devices
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND eve_EventType = 'New Device' {}
|
||||
ORDER BY eve_DateTime""".format(safe_condition)
|
||||
except (ValueError, KeyError, TypeError) as e:
|
||||
mylog("verbose", ["[Notification] Error building safe condition for new devices: ", e])
|
||||
# Fall back to safe default (no additional conditions)
|
||||
sqlQuery = """SELECT
|
||||
eve_MAC as MAC,
|
||||
eve_DateTime as Datetime,
|
||||
devLastIP as IP,
|
||||
eve_EventType as "Event Type",
|
||||
devName as "Device name",
|
||||
devComments as Comments FROM Events_Devices
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND eve_EventType = 'New Device'
|
||||
ORDER BY eve_DateTime"""
|
||||
parameters = {}
|
||||
if value:
|
||||
return value
|
||||
|
||||
mylog("debug", ["[Notification] new_devices SQL query: ", sqlQuery])
|
||||
mylog("debug", ["[Notification] new_devices parameters: ", parameters])
|
||||
|
||||
# Get the events as JSON using parameterized query
|
||||
json_obj = db.get_table_as_json(sqlQuery, parameters)
|
||||
|
||||
json_new_devices_meta = {
|
||||
"title": "🆕 New devices",
|
||||
"columnNames": json_obj.columnNames,
|
||||
# Legacy keys
|
||||
legacy_map = {
|
||||
"new_devices": "NTFPRCS_new_dev_condition",
|
||||
"events": "NTFPRCS_event_condition",
|
||||
}
|
||||
|
||||
json_new_devices = json_obj.json["data"]
|
||||
legacy_key = legacy_map.get(section)
|
||||
if legacy_key:
|
||||
return get_setting_value(legacy_key)
|
||||
|
||||
if "down_devices" in sections:
|
||||
# Compose Devices Down Section
|
||||
# - select only Down Alerts with pending email of devices that didn't reconnect within the specified time window
|
||||
minutes = int(get_setting_value("NTFPRCS_alert_down_time") or 0)
|
||||
tz_offset = get_timezone_offset()
|
||||
sqlQuery = f"""
|
||||
SELECT devName, eve_MAC, devVendor, eve_IP, eve_DateTime, eve_EventType
|
||||
FROM Events_Devices AS down_events
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND down_events.eve_EventType = 'Device Down'
|
||||
AND eve_DateTime < datetime('now', '-{minutes} minutes', '{tz_offset}')
|
||||
AND NOT EXISTS (
|
||||
SELECT 1
|
||||
FROM Events AS connected_events
|
||||
WHERE connected_events.eve_MAC = down_events.eve_MAC
|
||||
AND connected_events.eve_EventType = 'Connected'
|
||||
AND connected_events.eve_DateTime > down_events.eve_DateTime
|
||||
)
|
||||
ORDER BY down_events.eve_DateTime;
|
||||
"""
|
||||
return ""
|
||||
|
||||
# Get the events as JSON
|
||||
json_obj = db.get_table_as_json(sqlQuery)
|
||||
|
||||
json_down_devices_meta = {
|
||||
"title": "🔴 Down devices",
|
||||
"columnNames": json_obj.columnNames,
|
||||
}
|
||||
json_down_devices = json_obj.json["data"]
|
||||
|
||||
mylog("debug", f"[Notification] json_down_devices: {json.dumps(json_down_devices)}")
|
||||
|
||||
if "down_reconnected" in sections:
|
||||
# Compose Reconnected Down Section
|
||||
# - select only Devices, that were previously down and now are Connected
|
||||
sqlQuery = """
|
||||
SELECT devName, eve_MAC, devVendor, eve_IP, eve_DateTime, eve_EventType
|
||||
FROM Events_Devices AS reconnected_devices
|
||||
WHERE reconnected_devices.eve_EventType = 'Down Reconnected'
|
||||
AND reconnected_devices.eve_PendingAlertEmail = 1
|
||||
ORDER BY reconnected_devices.eve_DateTime;
|
||||
"""
|
||||
|
||||
# Get the events as JSON
|
||||
json_obj = db.get_table_as_json(sqlQuery)
|
||||
|
||||
json_down_reconnected_meta = {
|
||||
"title": "🔁 Reconnected down devices",
|
||||
"columnNames": json_obj.columnNames,
|
||||
}
|
||||
json_down_reconnected = json_obj.json["data"]
|
||||
|
||||
mylog("debug", f"[Notification] json_down_reconnected: {json.dumps(json_down_reconnected)}")
|
||||
|
||||
if "events" in sections:
|
||||
# Compose Events Section (no empty lines in SQL queries!)
|
||||
# Use SafeConditionBuilder to prevent SQL injection vulnerabilities
|
||||
condition_builder = create_safe_condition_builder()
|
||||
event_condition_setting = get_setting_value("NTFPRCS_event_condition")
|
||||
|
||||
try:
|
||||
safe_condition, parameters = condition_builder.get_safe_condition_legacy(
|
||||
event_condition_setting
|
||||
)
|
||||
sqlQuery = """SELECT
|
||||
eve_MAC as MAC,
|
||||
eve_DateTime as Datetime,
|
||||
devLastIP as IP,
|
||||
eve_EventType as "Event Type",
|
||||
devName as "Device name",
|
||||
devComments as Comments FROM Events_Devices
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND eve_EventType IN ('Connected', 'Down Reconnected', 'Disconnected','IP Changed') {}
|
||||
ORDER BY eve_DateTime""".format(safe_condition)
|
||||
except Exception as e:
|
||||
mylog("verbose", f"[Notification] Error building safe condition for events: {e}")
|
||||
# Fall back to safe default (no additional conditions)
|
||||
sqlQuery = """SELECT
|
||||
eve_MAC as MAC,
|
||||
eve_DateTime as Datetime,
|
||||
devLastIP as IP,
|
||||
eve_EventType as "Event Type",
|
||||
devName as "Device name",
|
||||
devComments as Comments FROM Events_Devices
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND eve_EventType IN ('Connected', 'Down Reconnected', 'Disconnected','IP Changed')
|
||||
ORDER BY eve_DateTime"""
|
||||
parameters = {}
|
||||
|
||||
mylog("debug", ["[Notification] events SQL query: ", sqlQuery])
|
||||
mylog("debug", ["[Notification] events parameters: ", parameters])
|
||||
|
||||
# Get the events as JSON using parameterized query
|
||||
json_obj = db.get_table_as_json(sqlQuery, parameters)
|
||||
|
||||
json_events_meta = {"title": "⚡ Events", "columnNames": json_obj.columnNames}
|
||||
json_events = json_obj.json["data"]
|
||||
|
||||
if "plugins" in sections:
|
||||
# Compose Plugins Section
|
||||
sqlQuery = """SELECT
|
||||
Plugin,
|
||||
Object_PrimaryId,
|
||||
Object_SecondaryId,
|
||||
DateTimeChanged,
|
||||
Watched_Value1,
|
||||
Watched_Value2,
|
||||
Watched_Value3,
|
||||
Watched_Value4,
|
||||
Status
|
||||
from Plugins_Events"""
|
||||
|
||||
# Get the events as JSON
|
||||
json_obj = db.get_table_as_json(sqlQuery)
|
||||
|
||||
json_plugins_meta = {"title": "🔌 Plugins", "columnNames": json_obj.columnNames}
|
||||
json_plugins = json_obj.json["data"]
|
||||
|
||||
final_json = {
|
||||
"new_devices": json_new_devices,
|
||||
"new_devices_meta": json_new_devices_meta,
|
||||
"down_devices": json_down_devices,
|
||||
"down_devices_meta": json_down_devices_meta,
|
||||
"down_reconnected": json_down_reconnected,
|
||||
"down_reconnected_meta": json_down_reconnected_meta,
|
||||
"events": json_events,
|
||||
"events_meta": json_events_meta,
|
||||
"plugins": json_plugins,
|
||||
"plugins_meta": json_plugins_meta,
|
||||
# -------------------------
|
||||
# SQL templates
|
||||
# -------------------------
|
||||
sql_templates = {
|
||||
"new_devices": """
|
||||
SELECT
|
||||
eve_MAC as MAC,
|
||||
eve_DateTime as Datetime,
|
||||
devLastIP as IP,
|
||||
eve_EventType as "Event Type",
|
||||
devName as "Device name",
|
||||
devComments as Comments
|
||||
FROM Events_Devices
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND eve_EventType = 'New Device' {condition}
|
||||
ORDER BY eve_DateTime
|
||||
""",
|
||||
"down_devices": """
|
||||
SELECT
|
||||
devName,
|
||||
eve_MAC,
|
||||
devVendor,
|
||||
eve_IP,
|
||||
eve_DateTime,
|
||||
eve_EventType
|
||||
FROM Events_Devices AS down_events
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND down_events.eve_EventType = 'Device Down'
|
||||
AND eve_DateTime < datetime('now', '-0 minutes')
|
||||
AND NOT EXISTS (
|
||||
SELECT 1
|
||||
FROM Events AS connected_events
|
||||
WHERE connected_events.eve_MAC = down_events.eve_MAC
|
||||
AND connected_events.eve_EventType = 'Connected'
|
||||
AND connected_events.eve_DateTime > down_events.eve_DateTime
|
||||
)
|
||||
ORDER BY down_events.eve_DateTime
|
||||
""",
|
||||
"down_reconnected": """
|
||||
SELECT
|
||||
devName,
|
||||
eve_MAC,
|
||||
devVendor,
|
||||
eve_IP,
|
||||
eve_DateTime,
|
||||
eve_EventType
|
||||
FROM Events_Devices AS reconnected_devices
|
||||
WHERE reconnected_devices.eve_EventType = 'Down Reconnected'
|
||||
AND reconnected_devices.eve_PendingAlertEmail = 1
|
||||
ORDER BY reconnected_devices.eve_DateTime
|
||||
""",
|
||||
"events": """
|
||||
SELECT
|
||||
eve_MAC as MAC,
|
||||
eve_DateTime as Datetime,
|
||||
devLastIP as IP,
|
||||
eve_EventType as "Event Type",
|
||||
devName as "Device name",
|
||||
devComments as Comments
|
||||
FROM Events_Devices
|
||||
WHERE eve_PendingAlertEmail = 1
|
||||
AND eve_EventType IN ('Connected', 'Down Reconnected', 'Disconnected','IP Changed') {condition}
|
||||
ORDER BY eve_DateTime
|
||||
""",
|
||||
"plugins": """
|
||||
SELECT
|
||||
Plugin,
|
||||
Object_PrimaryId,
|
||||
Object_SecondaryId,
|
||||
DateTimeChanged,
|
||||
Watched_Value1,
|
||||
Watched_Value2,
|
||||
Watched_Value3,
|
||||
Watched_Value4,
|
||||
Status
|
||||
FROM Plugins_Events
|
||||
"""
|
||||
}
|
||||
|
||||
# Titles for metadata
|
||||
section_titles = {
|
||||
"new_devices": "🆕 New devices",
|
||||
"down_devices": "🔴 Down devices",
|
||||
"down_reconnected": "🔁 Reconnected down devices",
|
||||
"events": "⚡ Events",
|
||||
"plugins": "🔌 Plugins"
|
||||
}
|
||||
|
||||
# Sections that support dynamic conditions
|
||||
sections_with_conditions = {"new_devices", "events"}
|
||||
|
||||
# Initialize final structure
|
||||
final_json = {}
|
||||
for section in ["new_devices", "down_devices", "down_reconnected", "events", "plugins"]:
|
||||
final_json[section] = []
|
||||
final_json[f"{section}_meta"] = {
|
||||
"title": section_titles.get(section, section),
|
||||
"columnNames": []
|
||||
}
|
||||
|
||||
condition_builder = create_safe_condition_builder()
|
||||
|
||||
# -------------------------
|
||||
# Main loop
|
||||
# -------------------------
|
||||
condition_builder = create_safe_condition_builder()
|
||||
|
||||
SECTION_CONDITION_MAP = {
|
||||
"new_devices": "NTFPRCS_new_dev_condition",
|
||||
"events": "NTFPRCS_event_condition",
|
||||
}
|
||||
|
||||
sections_with_conditions = set(SECTION_CONDITION_MAP.keys())
|
||||
|
||||
for section in sections:
|
||||
template = sql_templates.get(section)
|
||||
|
||||
if not template:
|
||||
mylog("verbose", ["[Notification] Unknown section: ", section])
|
||||
continue
|
||||
|
||||
safe_condition = ""
|
||||
parameters = {}
|
||||
|
||||
try:
|
||||
if section in sections_with_conditions:
|
||||
condition_key = SECTION_CONDITION_MAP.get(section)
|
||||
condition_setting = get_setting_value(condition_key)
|
||||
|
||||
if condition_setting:
|
||||
safe_condition, parameters = condition_builder.get_safe_condition_legacy(
|
||||
condition_setting
|
||||
)
|
||||
|
||||
sqlQuery = template.format(condition=safe_condition)
|
||||
|
||||
except Exception as e:
|
||||
mylog("verbose", [f"[Notification] Error building condition for {section}: ", e])
|
||||
sqlQuery = template.format(condition="")
|
||||
parameters = {}
|
||||
|
||||
mylog("debug", [f"[Notification] {section} SQL query: ", sqlQuery])
|
||||
mylog("debug", [f"[Notification] {section} parameters: ", parameters])
|
||||
|
||||
try:
|
||||
json_obj = db.get_table_as_json(sqlQuery, parameters)
|
||||
except Exception as e:
|
||||
mylog("minimal", [f"[Notification] DB error in section {section}: ", e])
|
||||
continue
|
||||
|
||||
final_json[section] = json_obj.json.get("data", [])
|
||||
final_json[f"{section}_meta"] = {
|
||||
"title": section_titles.get(section, section),
|
||||
"columnNames": getattr(json_obj, "columnNames", [])
|
||||
}
|
||||
|
||||
mylog("debug", [f"[Notification] final_json: {json.dumps(final_json)}"])
|
||||
|
||||
return final_json
|
||||
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ from front.plugins.plugin_helper import is_mac, normalize_mac
|
||||
from logger import mylog
|
||||
from models.plugin_object_instance import PluginObjectInstance
|
||||
from database import get_temp_db_connection
|
||||
from db.db_helper import get_table_json, get_device_condition_by_status, row_to_json, get_date_from_period
|
||||
from db.db_helper import get_table_json, get_device_conditions, get_device_condition_by_status, row_to_json, get_date_from_period
|
||||
from db.authoritative_handler import (
|
||||
enforce_source_on_user_update,
|
||||
get_locked_field_overrides,
|
||||
@@ -18,7 +18,7 @@ from db.authoritative_handler import (
|
||||
unlock_fields
|
||||
)
|
||||
from helper import is_random_mac, get_setting_value
|
||||
from utils.datetime_utils import timeNowDB
|
||||
from utils.datetime_utils import timeNowUTC
|
||||
|
||||
|
||||
class DeviceInstance:
|
||||
@@ -331,22 +331,45 @@ class DeviceInstance:
|
||||
conn = get_temp_db_connection()
|
||||
sql = conn.cursor()
|
||||
|
||||
# Build a combined query with sub-selects for each status
|
||||
query = f"""
|
||||
SELECT
|
||||
(SELECT COUNT(*) FROM Devices {get_device_condition_by_status("my")}) AS devices,
|
||||
(SELECT COUNT(*) FROM Devices {get_device_condition_by_status("connected")}) AS connected,
|
||||
(SELECT COUNT(*) FROM Devices {get_device_condition_by_status("favorites")}) AS favorites,
|
||||
(SELECT COUNT(*) FROM Devices {get_device_condition_by_status("new")}) AS new,
|
||||
(SELECT COUNT(*) FROM Devices {get_device_condition_by_status("down")}) AS down,
|
||||
(SELECT COUNT(*) FROM Devices {get_device_condition_by_status("archived")}) AS archived
|
||||
"""
|
||||
conditions = get_device_conditions()
|
||||
|
||||
# Build sub-selects dynamically for all dictionary entries
|
||||
sub_queries = []
|
||||
for key, condition in conditions.items():
|
||||
# Make sure the alias is SQL-safe (no spaces or special chars)
|
||||
alias = key.replace(" ", "_").lower()
|
||||
sub_queries.append(f'(SELECT COUNT(*) FROM Devices {condition}) AS "{alias}"')
|
||||
|
||||
# Join all sub-selects with commas
|
||||
query = "SELECT\n " + ",\n ".join(sub_queries)
|
||||
sql.execute(query)
|
||||
row = sql.fetchone()
|
||||
conn.close()
|
||||
|
||||
return list(row) if row else []
|
||||
|
||||
def getNamedTotals(self):
|
||||
"""Get device totals by status."""
|
||||
conn = get_temp_db_connection()
|
||||
sql = conn.cursor()
|
||||
|
||||
conditions = get_device_conditions()
|
||||
|
||||
# Build sub-selects dynamically for all dictionary entries
|
||||
sub_queries = []
|
||||
for key, condition in conditions.items():
|
||||
# Make sure the alias is SQL-safe (no spaces or special chars)
|
||||
alias = key.replace(" ", "_").lower()
|
||||
sub_queries.append(f'(SELECT COUNT(*) FROM Devices {condition}) AS "{alias}"')
|
||||
|
||||
# Join all sub-selects with commas
|
||||
query = "SELECT\n " + ",\n ".join(sub_queries)
|
||||
|
||||
mylog('none', [f'[getNamedTotals] query {query}'])
|
||||
json_obj = get_table_json(sql, query, parameters=None)
|
||||
|
||||
return json_obj
|
||||
|
||||
def getByStatus(self, status=None):
|
||||
"""
|
||||
Return devices filtered by status. Returns all if no status provided.
|
||||
@@ -384,7 +407,7 @@ class DeviceInstance:
|
||||
|
||||
def getDeviceData(self, mac, period=""):
|
||||
"""Fetch device info with children, event stats, and presence calculation."""
|
||||
now = timeNowDB()
|
||||
now = timeNowUTC()
|
||||
|
||||
# Special case for new device
|
||||
if mac.lower() == "new":
|
||||
@@ -513,6 +536,12 @@ class DeviceInstance:
|
||||
normalized_mac = normalize_mac(mac)
|
||||
normalized_parent_mac = normalize_mac(data.get("devParentMAC") or "")
|
||||
|
||||
if normalized_mac == normalized_parent_mac:
|
||||
return {
|
||||
"success": False,
|
||||
"error": "Can't set current node as the node parent."
|
||||
}
|
||||
|
||||
fields_updated_by_set_device_data = {
|
||||
"devName",
|
||||
"devOwner",
|
||||
@@ -616,8 +645,8 @@ class DeviceInstance:
|
||||
data.get("devSkipRepeated") or 0,
|
||||
data.get("devIsNew") or 0,
|
||||
data.get("devIsArchived") or 0,
|
||||
data.get("devLastConnection") or timeNowDB(),
|
||||
data.get("devFirstConnection") or timeNowDB(),
|
||||
data.get("devLastConnection") or timeNowUTC(),
|
||||
data.get("devFirstConnection") or timeNowUTC(),
|
||||
data.get("devLastIP") or "",
|
||||
data.get("devGUID") or "",
|
||||
data.get("devCustomProps") or "",
|
||||
|
||||