From e9e5167addd7d32abe5c5436ca6e437cd34ad141 Mon Sep 17 00:00:00 2001 From: <> Date: Thu, 4 Dec 2025 05:11:57 +0000 Subject: [PATCH] Deployed da9d37c with MkDocs version: 1.6.1 --- .nojekyll | 0 404.html | 3899 ++++++++++ API/index.html | 4210 +++++++++++ API_DBQUERY/index.html | 4632 ++++++++++++ API_DEVICE/index.html | 4410 +++++++++++ API_DEVICES/index.html | 4477 +++++++++++ API_EVENTS/index.html | 4368 +++++++++++ API_GRAPHQL/index.html | 4777 ++++++++++++ API_LOGS/index.html | 4184 ++++++++++ API_MESSAGING_IN_APP/index.html | 4513 +++++++++++ API_METRICS/index.html | 4352 +++++++++++ API_NETTOOLS/index.html | 4446 +++++++++++ API_OLD/index.html | 4979 ++++++++++++ API_ONLINEHISTORY/index.html | 4135 ++++++++++ API_SESSIONS/index.html | 4572 +++++++++++ API_SETTINGS/index.html | 4241 +++++++++++ API_SYNC/index.html | 4351 +++++++++++ API_TESTS/index.html | 4088 ++++++++++ AUTHELIA/index.html | 4349 +++++++++++ BACKUPS/index.html | 4608 +++++++++++ BUILDS/index.html | 4412 +++++++++++ COMMON_ISSUES/index.html | 4968 ++++++++++++ COMMUNITY_GUIDES/index.html | 4024 ++++++++++ CUSTOM_PROPERTIES/index.html | 4332 +++++++++++ DATABASE/index.html | 4357 +++++++++++ DEBUG_API_SERVER/index.html | 4276 +++++++++++ DEBUG_INVALID_JSON/index.html | 4136 ++++++++++ DEBUG_PHP/index.html | 4144 ++++++++++ DEBUG_PLUGINS/index.html | 4258 +++++++++++ DEBUG_TIPS/index.html | 4251 +++++++++++ DEVICES_BULK_EDITING/index.html | 4163 ++++++++++ DEVICE_DISPLAY_SETTINGS/index.html | 4088 ++++++++++ DEVICE_HEURISTICS/index.html | 4457 +++++++++++ DEVICE_MANAGEMENT/index.html | 4156 ++++++++++ DEV_DEVCONTAINER/index.html | 4279 +++++++++++ DEV_ENV_SETUP/index.html | 4537 +++++++++++ DEV_PORTS_HOST_MODE/index.html | 4061 ++++++++++ DOCKER_COMPOSE/index.html | 4437 +++++++++++ DOCKER_INSTALLATION/index.html | 4533 +++++++++++ DOCKER_MAINTENANCE/index.html | 4622 ++++++++++++ DOCKER_PORTAINER/index.html | 4287 +++++++++++ DOCKER_SWARM/index.html | 4200 +++++++++++ FILE_PERMISSIONS/index.html | 4251 +++++++++++ FIX_OFFLINE_DETECTION/index.html | 4328 +++++++++++ FRONTEND_DEVELOPMENT/index.html | 4138 ++++++++++ HELPER_SCRIPTS/index.html | 4126 ++++++++++ HOME_ASSISTANT/index.html | 4258 +++++++++++ HW_INSTALL/index.html | 4358 +++++++++++ ICONS/index.html | 4190 ++++++++++ INITIAL_SETUP/index.html | 4335 +++++++++++ INSTALLATION/index.html | 4117 ++++++++++ LOGGING/index.html | 4190 ++++++++++ MIGRATION/index.html | 4861 ++++++++++++ NAME_RESOLUTION/index.html | 4200 +++++++++++ NETWORK_TREE/index.html | 4315 +++++++++++ NOTIFICATIONS/index.html | 4197 +++++++++++ PERFORMANCE/index.html | 4358 +++++++++++ PIHOLE_GUIDE/index.html | 4385 +++++++++++ PLUGINS/index.html | 4727 ++++++++++++ PLUGINS_DEV/index.html | 5248 +++++++++++++ PLUGINS_DEV_CONFIG/index.html | 4763 ++++++++++++ RANDOM_MAC/index.html | 4146 ++++++++++ REMOTE_NETWORKS/index.html | 4167 ++++++++++ REVERSE_DNS/index.html | 4260 +++++++++++ REVERSE_PROXY/index.html | 4821 ++++++++++++ SECURITY/index.html | 4361 +++++++++++ SECURITY_FEATURES/index.html | 4301 +++++++++++ SESSION_INFO/index.html | 4284 +++++++++++ SETTINGS_SYSTEM/index.html | 4305 +++++++++++ SMTP/index.html | 4183 ++++++++++ SUBNETS/index.html | 4333 +++++++++++ SYNOLOGY_GUIDE/index.html | 4287 +++++++++++ UPDATES/index.html | 4548 +++++++++++ VERSIONS/index.html | 4165 ++++++++++ WEBHOOK_N8N/index.html | 4157 ++++++++++ WEBHOOK_SECRET/index.html | 4193 +++++++++++ WEB_UI_PORT_DEBUG/index.html | 4324 +++++++++++ WORKFLOWS/index.html | 4335 +++++++++++ WORKFLOWS_DEBUGGING/index.html | 
4090 ++++++++++ WORKFLOW_EXAMPLES/index.html | 4586 +++++++++++ assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.e71a0d61.min.js | 16 + assets/javascripts/bundle.e71a0d61.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.7a47a382.min.js | 42 + .../workers/search.7a47a382.min.js.map | 7 + assets/stylesheets/main.618322db.min.css | 1 + assets/stylesheets/main.618322db.min.css.map | 1 + assets/stylesheets/palette.ab4e12ef.min.css | 1 + .../stylesheets/palette.ab4e12ef.min.css.map | 1 + .../excessive-capabilities/index.html | 4026 ++++++++++ .../file-permissions/index.html | 4019 ++++++++++ .../incorrect-user/index.html | 4020 ++++++++++ .../missing-capabilities/index.html | 4026 ++++++++++ .../mount-configuration-issues/index.html | 4026 ++++++++++ .../network-mode/index.html | 4019 ++++++++++ .../nginx-configuration-mount/index.html | 4029 ++++++++++ .../port-conflicts/index.html | 4110 ++++++++++ .../read-only-filesystem/index.html | 4019 ++++++++++ .../running-as-root/index.html | 4020 ++++++++++ .../troubleshooting/index.html | 3935 ++++++++++ img/@eaDir/device_details.png@SynoEAStream | Bin 0 -> 198 bytes img/@eaDir/devices_dark.png@SynoEAStream | Bin 0 -> 198 bytes img/@eaDir/devices_light.png@SynoEAStream | Bin 0 -> 198 bytes img/@eaDir/devices_split.png@SynoEAStream | Bin 0 -> 198 bytes img/@eaDir/events.png@SynoEAStream | Bin 0 -> 198 bytes img/@eaDir/help_faq.png@SynoEAStream | Bin 0 -> 198 bytes img/@eaDir/maintenance.png@SynoEAStream | Bin 0 -> 198 bytes img/@eaDir/network.png@SynoEAStream | Bin 0 -> 198 bytes img/@eaDir/presence.png@SynoEAStream | Bin 0 -> 198 bytes img/@eaDir/settings.png@SynoEAStream | Bin 0 -> 198 bytes img/BACKUPS/Maintenance_Backup_Restore.png | Bin 0 -> 89655 bytes img/BUILDS/build_images_options_tradeoffs.png | Bin 0 -> 81699 bytes .../Device_Custom_Properties.png | Bin 0 -> 39826 bytes 
.../Device_Custom_Properties_vid.gif | Bin 0 -> 1641632 bytes img/DATABASE/CurrentScan.png | Bin 0 -> 58932 bytes img/DATABASE/DHCP_Leases.png | Bin 0 -> 7234 bytes img/DATABASE/Devices.png | Bin 0 -> 87340 bytes img/DATABASE/Events.png | Bin 0 -> 44273 bytes img/DATABASE/Nmap_Scan.png | Bin 0 -> 44901 bytes img/DATABASE/Online_History.png | Bin 0 -> 48491 bytes img/DATABASE/Parameters.png | Bin 0 -> 39636 bytes img/DATABASE/Pholus_Scan.png | Bin 0 -> 94553 bytes img/DATABASE/PiHole_Network.png | Bin 0 -> 6737 bytes img/DATABASE/Plugins_Events.png | Bin 0 -> 17684 bytes img/DATABASE/Plugins_History.png | Bin 0 -> 74032 bytes img/DATABASE/Plugins_Language_Strings.png | Bin 0 -> 43351 bytes img/DATABASE/Plugins_Objects.png | Bin 0 -> 66620 bytes img/DATABASE/ScanCycles.png | Bin 0 -> 36022 bytes img/DATABASE/Sessions.png | Bin 0 -> 62256 bytes img/DATABASE/Settings.png | Bin 0 -> 53462 bytes img/DEBUG/Invalid_JSON_repsonse_debug.png | Bin 0 -> 201983 bytes img/DEBUG/JSON_result_example.png | Bin 0 -> 55781 bytes img/DEBUG/array_result_example.png | Bin 0 -> 7168 bytes img/DEBUG/maintenance_debug_php.png | Bin 0 -> 163421 bytes img/DEBUG_API_SERVER/Init_check.png | Bin 0 -> 138211 bytes .../app_conf_graphql_port.png | Bin 0 -> 11712 bytes .../dev_console_graphql_json.png | Bin 0 -> 32385 bytes img/DEBUG_API_SERVER/graphql_running_logs.png | Bin 0 -> 37034 bytes .../graphql_settings_port_token.png | Bin 0 -> 35212 bytes img/DEBUG_API_SERVER/network_graphql.png | Bin 0 -> 36041 bytes img/DEBUG_PLUGINS/plugin_objects_pihole.png | Bin 0 -> 130042 bytes img/DEV/Maintenance_Logs_Restart_server.png | Bin 0 -> 60525 bytes img/DEV/devcontainer_1.png | Bin 0 -> 11401 bytes img/DEV/devcontainer_2.png | Bin 0 -> 17545 bytes img/DEV/devcontainer_3.png | Bin 0 -> 13944 bytes img/DEV/devcontainer_4.png | Bin 0 -> 46304 bytes .../CSV_BACKUP_SETTINGS.png | Bin 0 -> 134588 bytes .../MAINTENANCE_CSV_EXPORT.png | Bin 0 -> 166611 bytes img/DEVICES_BULK_EDITING/MULTI-EDIT.gif | Bin 0 -> 453391 bytes img/DEVICES_BULK_EDITING/NOTEPAD++.png | Bin 0 -> 56006 bytes .../DeviceDetails_DisplaySettings.png | Bin 0 -> 83941 bytes .../DeviceEdit_SaveDummyDevice.png | Bin 0 -> 79354 bytes .../DeviceManagement_MainInfo.png | Bin 0 -> 84258 bytes .../Devices_CreateDummyDevice.png | Bin 0 -> 7154 bytes .../device_management_status_colors.png | Bin 0 -> 40139 bytes img/DOCKER/DOCKER_PORTAINER.png | Bin 0 -> 184111 bytes .../presence_graph_before_after.png | Bin 0 -> 497862 bytes img/Follow_Releases_and_Star.gif | Bin 0 -> 122282 bytes img/GENERAL/github_social_image.jpg | Bin 0 -> 299401 bytes img/GENERAL/in-app-help.png | Bin 0 -> 8876 bytes .../HomeAssistant-Configuration.png | Bin 0 -> 298208 bytes .../HomeAssistant-Device-Presence-History.png | Bin 0 -> 17514 bytes .../HomeAssistant-Device-as-Sensors.png | Bin 0 -> 14385 bytes .../HomeAssistant-Devices-List.png | Bin 0 -> 21690 bytes .../HomeAssistant-Overview-Card.png | Bin 0 -> 9688 bytes img/ICONS/device-icon.png | Bin 0 -> 49988 bytes img/ICONS/device_add_icon.png | Bin 0 -> 55072 bytes img/ICONS/device_icons_preview.gif | Bin 0 -> 114959 bytes img/ICONS/devices-icons.png | Bin 0 -> 13043 bytes img/ICONS/font_awesome_copy_html.png | Bin 0 -> 52318 bytes img/ICONS/iconify_design_copy_svg.png | Bin 0 -> 68322 bytes img/ICONS/paste-svg.png | Bin 0 -> 59420 bytes img/LOGGING/logging_integrations_plugins.png | Bin 0 -> 112798 bytes img/LOGGING/maintenance_logs.png | Bin 0 -> 95605 bytes .../name_res_nslookup_timeout.png | Bin 0 -> 14907 bytes 
img/NETWORK_TREE/Network_Assign.png | Bin 0 -> 74860 bytes img/NETWORK_TREE/Network_Assigned_Nodes.png | Bin 0 -> 94364 bytes img/NETWORK_TREE/Network_Device_Details.png | Bin 0 -> 59514 bytes .../Network_Device_Details_Parent.png | Bin 0 -> 34918 bytes .../Network_Device_ParentDropdown.png | Bin 0 -> 43187 bytes img/NETWORK_TREE/Network_Device_type.png | Bin 0 -> 96593 bytes img/NETWORK_TREE/Network_Sample.png | Bin 0 -> 83058 bytes img/NETWORK_TREE/Network_tree_details.png | Bin 0 -> 95307 bytes img/NETWORK_TREE/Network_tree_setup_hover.png | Bin 0 -> 86133 bytes .../Device-notification-settings.png | Bin 0 -> 129067 bytes .../Global-notification-settings.png | Bin 0 -> 72805 bytes img/NOTIFICATIONS/NEWDEV_ignores.png | Bin 0 -> 59507 bytes .../Plugin-notification-settings.png | Bin 0 -> 124777 bytes img/NOTIFICATIONS/Schedules_out-of-sync.png | Bin 0 -> 46443 bytes img/NetAlertX_logo.png | Bin 0 -> 6753 bytes img/NetAlertX_logo_b_w_info.png | Bin 0 -> 8457 bytes img/PERFORMANCE/db_size_check.png | Bin 0 -> 62290 bytes img/PIHOLE_GUIDE/DHCPLSS_pihole_settings.png | Bin 0 -> 101341 bytes img/PIHOLE_GUIDE/PIHOLEAPI_settings.png | Bin 0 -> 120211 bytes img/PIHOLE_GUIDE/PIHOLE_settings.png | Bin 0 -> 101924 bytes img/PLUGINS/enable_plugin.gif | Bin 0 -> 137603 bytes img/PLUGINS/loaded_plugins_setting.png | Bin 0 -> 79131 bytes img/RANDOM_MAC/android_random_mac.jpg | Bin 0 -> 564514 bytes img/RANDOM_MAC/ios_random_mac.png | Bin 0 -> 612442 bytes img/RANDOM_MAC/windows_random_mac.png | Bin 0 -> 17107 bytes .../DeviceDetails_SessionInfo.png | Bin 0 -> 54708 bytes img/SESSION_INFO/Monitoring_Presence.png | Bin 0 -> 54220 bytes img/SUBNETS/subnets-setting-location.png | Bin 0 -> 114528 bytes img/SUBNETS/subnets_vlan.png | Bin 0 -> 18066 bytes img/SUBNETS/system_info-network_hardware.png | Bin 0 -> 104515 bytes img/SYNOLOGY/01_Create_folder_structure.png | Bin 0 -> 23105 bytes .../02_Create_folder_structure_db.png | Bin 0 -> 24981 bytes .../03_Create_folder_structure_db.png | Bin 0 -> 28707 bytes .../04_Create_folder_structure_config.png | Bin 0 -> 31815 bytes img/SYNOLOGY/05_Access_folder_properties.png | Bin 0 -> 43356 bytes img/SYNOLOGY/06_Note_location.png | Bin 0 -> 48631 bytes img/SYNOLOGY/07_Create_project.png | Bin 0 -> 23111 bytes .../08_Adjust_docker_compose_volumes.png | Bin 0 -> 10905 bytes img/SYNOLOGY/09_Run_and_build.png | Bin 0 -> 14969 bytes img/SYNOLOGY/10_permissions_before.png | Bin 0 -> 29824 bytes .../11_permissions_create_scheduled_task.png | Bin 0 -> 14303 bytes img/SYNOLOGY/12_permissions_task_general.png | Bin 0 -> 14220 bytes img/SYNOLOGY/13_permissions_task_schedule.png | Bin 0 -> 28656 bytes img/SYNOLOGY/14_permissions_task_settings.png | Bin 0 -> 27483 bytes img/SYNOLOGY/15_permissions_after.png | Bin 0 -> 31439 bytes img/VERSIONS/latest-version-maintenance.png | Bin 0 -> 61842 bytes img/VERSIONS/new-version-available-email.png | Bin 0 -> 33277 bytes .../new-version-available-maintenance.png | Bin 0 -> 62272 bytes img/WEBHOOK_N8N/Webhook_settings.png | Bin 0 -> 62515 bytes img/WEBHOOK_N8N/n8n_send_email_settings.png | Bin 0 -> 38890 bytes img/WEBHOOK_N8N/n8n_webhook_settings.png | Bin 0 -> 31376 bytes img/WEBHOOK_N8N/n8n_workflow.png | Bin 0 -> 6442 bytes img/WEB_UI_PORT_DEBUG/container_port.png | Bin 0 -> 106660 bytes img/WORKFLOWS/actions.jpg | Bin 0 -> 17578 bytes img/WORKFLOWS/conditions.png | Bin 0 -> 22697 bytes img/WORKFLOWS/trigger.jpg | Bin 0 -> 6854 bytes img/WORKFLOWS/workflows.png | Bin 0 -> 131669 bytes img/WORKFLOWS/workflows_app_events_search.png | Bin 
0 -> 170440 bytes img/WORKFLOWS/workflows_diagram.png | Bin 0 -> 50185 bytes img/WORKFLOWS/workflows_logs_search.png | Bin 0 -> 167576 bytes img/YouTube_thumbnail.png | Bin 0 -> 187193 bytes img/device_details.png | Bin 0 -> 173614 bytes img/device_nmap.png | Bin 0 -> 177305 bytes img/devices_dark.png | Bin 0 -> 201327 bytes img/devices_light.png | Bin 0 -> 193498 bytes img/devices_split.png | Bin 0 -> 215548 bytes img/events.png | Bin 0 -> 245464 bytes img/help_faq.png | Bin 0 -> 131922 bytes img/maintenance.png | Bin 0 -> 149100 bytes img/multi_edit.png | Bin 0 -> 151696 bytes img/netalertx_docs.png | Bin 0 -> 8146 bytes img/netalertx_docs_old.png | Bin 0 -> 7716 bytes img/network.png | Bin 0 -> 220697 bytes img/network_setup.gif | Bin 0 -> 1143759 bytes img/notification_center.png | Bin 0 -> 114227 bytes img/plugins.png | Bin 0 -> 172504 bytes img/plugins_device_details.png | Bin 0 -> 205670 bytes img/plugins_json_settings.png | Bin 0 -> 189282 bytes img/plugins_json_ui.png | Bin 0 -> 177304 bytes img/plugins_settings.png | Bin 0 -> 161874 bytes img/plugins_webmon.png | Bin 0 -> 136774 bytes img/presence.png | Bin 0 -> 184430 bytes img/report_sample.png | Bin 0 -> 207090 bytes img/sent_reports_text.png | Bin 0 -> 102938 bytes img/settings.png | Bin 0 -> 171139 bytes img/showcase.gif | Bin 0 -> 5328822 bytes img/size_h_1250_w_1000.txt | 1 + img/sync_hub.png | Bin 0 -> 85774 bytes index.html | 4321 +++++++++++ overrides/main.html | 28 + samples/API/Grafana_Dashboard.json | 1110 +++ search/search_index.json | 1 + sitemap.xml | 363 + sitemap.xml.gz | Bin 0 -> 1007 bytes 303 files changed, 401110 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 API/index.html create mode 100644 API_DBQUERY/index.html create mode 100644 API_DEVICE/index.html create mode 100644 API_DEVICES/index.html create mode 100644 API_EVENTS/index.html create mode 100644 API_GRAPHQL/index.html create mode 100644 API_LOGS/index.html create mode 100644 API_MESSAGING_IN_APP/index.html create mode 100644 API_METRICS/index.html create mode 100644 API_NETTOOLS/index.html create mode 100644 API_OLD/index.html create mode 100644 API_ONLINEHISTORY/index.html create mode 100644 API_SESSIONS/index.html create mode 100644 API_SETTINGS/index.html create mode 100644 API_SYNC/index.html create mode 100644 API_TESTS/index.html create mode 100644 AUTHELIA/index.html create mode 100644 BACKUPS/index.html create mode 100644 BUILDS/index.html create mode 100644 COMMON_ISSUES/index.html create mode 100644 COMMUNITY_GUIDES/index.html create mode 100644 CUSTOM_PROPERTIES/index.html create mode 100644 DATABASE/index.html create mode 100644 DEBUG_API_SERVER/index.html create mode 100644 DEBUG_INVALID_JSON/index.html create mode 100644 DEBUG_PHP/index.html create mode 100644 DEBUG_PLUGINS/index.html create mode 100644 DEBUG_TIPS/index.html create mode 100644 DEVICES_BULK_EDITING/index.html create mode 100644 DEVICE_DISPLAY_SETTINGS/index.html create mode 100644 DEVICE_HEURISTICS/index.html create mode 100644 DEVICE_MANAGEMENT/index.html create mode 100644 DEV_DEVCONTAINER/index.html create mode 100644 DEV_ENV_SETUP/index.html create mode 100644 DEV_PORTS_HOST_MODE/index.html create mode 100644 DOCKER_COMPOSE/index.html create mode 100644 DOCKER_INSTALLATION/index.html create mode 100644 DOCKER_MAINTENANCE/index.html create mode 100644 DOCKER_PORTAINER/index.html create mode 100644 DOCKER_SWARM/index.html create mode 100644 FILE_PERMISSIONS/index.html create mode 100644 FIX_OFFLINE_DETECTION/index.html 
create mode 100644 FRONTEND_DEVELOPMENT/index.html create mode 100644 HELPER_SCRIPTS/index.html create mode 100644 HOME_ASSISTANT/index.html create mode 100644 HW_INSTALL/index.html create mode 100644 ICONS/index.html create mode 100644 INITIAL_SETUP/index.html create mode 100644 INSTALLATION/index.html create mode 100644 LOGGING/index.html create mode 100644 MIGRATION/index.html create mode 100644 NAME_RESOLUTION/index.html create mode 100644 NETWORK_TREE/index.html create mode 100644 NOTIFICATIONS/index.html create mode 100644 PERFORMANCE/index.html create mode 100644 PIHOLE_GUIDE/index.html create mode 100644 PLUGINS/index.html create mode 100644 PLUGINS_DEV/index.html create mode 100644 PLUGINS_DEV_CONFIG/index.html create mode 100644 RANDOM_MAC/index.html create mode 100644 REMOTE_NETWORKS/index.html create mode 100644 REVERSE_DNS/index.html create mode 100644 REVERSE_PROXY/index.html create mode 100644 SECURITY/index.html create mode 100644 SECURITY_FEATURES/index.html create mode 100644 SESSION_INFO/index.html create mode 100644 SETTINGS_SYSTEM/index.html create mode 100644 SMTP/index.html create mode 100644 SUBNETS/index.html create mode 100644 SYNOLOGY_GUIDE/index.html create mode 100644 UPDATES/index.html create mode 100644 VERSIONS/index.html create mode 100644 WEBHOOK_N8N/index.html create mode 100644 WEBHOOK_SECRET/index.html create mode 100644 WEB_UI_PORT_DEBUG/index.html create mode 100644 WORKFLOWS/index.html create mode 100644 WORKFLOWS_DEBUGGING/index.html create mode 100644 WORKFLOW_EXAMPLES/index.html create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.e71a0d61.min.js create mode 100644 assets/javascripts/bundle.e71a0d61.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 
assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.7a47a382.min.js create mode 100644 assets/javascripts/workers/search.7a47a382.min.js.map create mode 100644 assets/stylesheets/main.618322db.min.css create mode 100644 assets/stylesheets/main.618322db.min.css.map create mode 100644 assets/stylesheets/palette.ab4e12ef.min.css create mode 100644 assets/stylesheets/palette.ab4e12ef.min.css.map create mode 100644 docker-troubleshooting/excessive-capabilities/index.html create mode 100644 docker-troubleshooting/file-permissions/index.html create mode 100644 docker-troubleshooting/incorrect-user/index.html create mode 100644 docker-troubleshooting/missing-capabilities/index.html create mode 100644 docker-troubleshooting/mount-configuration-issues/index.html create mode 100644 docker-troubleshooting/network-mode/index.html create mode 100644 docker-troubleshooting/nginx-configuration-mount/index.html create mode 100644 docker-troubleshooting/port-conflicts/index.html create mode 100644 docker-troubleshooting/read-only-filesystem/index.html create mode 100644 docker-troubleshooting/running-as-root/index.html create mode 100644 docker-troubleshooting/troubleshooting/index.html create mode 100644 img/@eaDir/device_details.png@SynoEAStream create mode 100644 img/@eaDir/devices_dark.png@SynoEAStream create mode 100644 img/@eaDir/devices_light.png@SynoEAStream create mode 100644 img/@eaDir/devices_split.png@SynoEAStream create mode 100644 img/@eaDir/events.png@SynoEAStream create mode 100644 img/@eaDir/help_faq.png@SynoEAStream create mode 100644 img/@eaDir/maintenance.png@SynoEAStream create mode 100644 img/@eaDir/network.png@SynoEAStream create mode 100644 img/@eaDir/presence.png@SynoEAStream create mode 100644 img/@eaDir/settings.png@SynoEAStream create mode 100644 img/BACKUPS/Maintenance_Backup_Restore.png create mode 100644 img/BUILDS/build_images_options_tradeoffs.png create mode 100644 img/CUSTOM_PROPERTIES/Device_Custom_Properties.png create mode 100644 img/CUSTOM_PROPERTIES/Device_Custom_Properties_vid.gif create mode 100644 img/DATABASE/CurrentScan.png create mode 100644 img/DATABASE/DHCP_Leases.png create mode 100644 img/DATABASE/Devices.png create mode 100644 img/DATABASE/Events.png create mode 100644 img/DATABASE/Nmap_Scan.png create mode 100644 img/DATABASE/Online_History.png create mode 100644 img/DATABASE/Parameters.png create mode 100644 img/DATABASE/Pholus_Scan.png create mode 100644 img/DATABASE/PiHole_Network.png create mode 100644 img/DATABASE/Plugins_Events.png create mode 100644 img/DATABASE/Plugins_History.png create mode 100644 img/DATABASE/Plugins_Language_Strings.png create mode 100644 img/DATABASE/Plugins_Objects.png create mode 100644 img/DATABASE/ScanCycles.png create mode 100644 img/DATABASE/Sessions.png create mode 100644 img/DATABASE/Settings.png create mode 100644 img/DEBUG/Invalid_JSON_repsonse_debug.png create mode 100644 img/DEBUG/JSON_result_example.png create mode 100644 img/DEBUG/array_result_example.png create mode 100644 img/DEBUG/maintenance_debug_php.png create mode 100644 img/DEBUG_API_SERVER/Init_check.png create mode 100644 img/DEBUG_API_SERVER/app_conf_graphql_port.png create mode 100644 img/DEBUG_API_SERVER/dev_console_graphql_json.png create mode 100644 
img/DEBUG_API_SERVER/graphql_running_logs.png create mode 100644 img/DEBUG_API_SERVER/graphql_settings_port_token.png create mode 100644 img/DEBUG_API_SERVER/network_graphql.png create mode 100644 img/DEBUG_PLUGINS/plugin_objects_pihole.png create mode 100644 img/DEV/Maintenance_Logs_Restart_server.png create mode 100644 img/DEV/devcontainer_1.png create mode 100644 img/DEV/devcontainer_2.png create mode 100644 img/DEV/devcontainer_3.png create mode 100644 img/DEV/devcontainer_4.png create mode 100644 img/DEVICES_BULK_EDITING/CSV_BACKUP_SETTINGS.png create mode 100644 img/DEVICES_BULK_EDITING/MAINTENANCE_CSV_EXPORT.png create mode 100644 img/DEVICES_BULK_EDITING/MULTI-EDIT.gif create mode 100644 img/DEVICES_BULK_EDITING/NOTEPAD++.png create mode 100644 img/DEVICE_MANAGEMENT/DeviceDetails_DisplaySettings.png create mode 100644 img/DEVICE_MANAGEMENT/DeviceEdit_SaveDummyDevice.png create mode 100644 img/DEVICE_MANAGEMENT/DeviceManagement_MainInfo.png create mode 100644 img/DEVICE_MANAGEMENT/Devices_CreateDummyDevice.png create mode 100644 img/DEVICE_MANAGEMENT/device_management_status_colors.png create mode 100644 img/DOCKER/DOCKER_PORTAINER.png create mode 100644 img/FIX_OFFLINE_DETECTION/presence_graph_before_after.png create mode 100644 img/Follow_Releases_and_Star.gif create mode 100644 img/GENERAL/github_social_image.jpg create mode 100644 img/GENERAL/in-app-help.png create mode 100644 img/HOME_ASISSTANT/HomeAssistant-Configuration.png create mode 100644 img/HOME_ASISSTANT/HomeAssistant-Device-Presence-History.png create mode 100644 img/HOME_ASISSTANT/HomeAssistant-Device-as-Sensors.png create mode 100644 img/HOME_ASISSTANT/HomeAssistant-Devices-List.png create mode 100644 img/HOME_ASISSTANT/HomeAssistant-Overview-Card.png create mode 100644 img/ICONS/device-icon.png create mode 100644 img/ICONS/device_add_icon.png create mode 100644 img/ICONS/device_icons_preview.gif create mode 100644 img/ICONS/devices-icons.png create mode 100644 img/ICONS/font_awesome_copy_html.png create mode 100644 img/ICONS/iconify_design_copy_svg.png create mode 100644 img/ICONS/paste-svg.png create mode 100644 img/LOGGING/logging_integrations_plugins.png create mode 100644 img/LOGGING/maintenance_logs.png create mode 100644 img/NAME_RESOLUTION/name_res_nslookup_timeout.png create mode 100644 img/NETWORK_TREE/Network_Assign.png create mode 100644 img/NETWORK_TREE/Network_Assigned_Nodes.png create mode 100644 img/NETWORK_TREE/Network_Device_Details.png create mode 100644 img/NETWORK_TREE/Network_Device_Details_Parent.png create mode 100644 img/NETWORK_TREE/Network_Device_ParentDropdown.png create mode 100644 img/NETWORK_TREE/Network_Device_type.png create mode 100644 img/NETWORK_TREE/Network_Sample.png create mode 100644 img/NETWORK_TREE/Network_tree_details.png create mode 100644 img/NETWORK_TREE/Network_tree_setup_hover.png create mode 100644 img/NOTIFICATIONS/Device-notification-settings.png create mode 100644 img/NOTIFICATIONS/Global-notification-settings.png create mode 100644 img/NOTIFICATIONS/NEWDEV_ignores.png create mode 100644 img/NOTIFICATIONS/Plugin-notification-settings.png create mode 100644 img/NOTIFICATIONS/Schedules_out-of-sync.png create mode 100644 img/NetAlertX_logo.png create mode 100644 img/NetAlertX_logo_b_w_info.png create mode 100644 img/PERFORMANCE/db_size_check.png create mode 100644 img/PIHOLE_GUIDE/DHCPLSS_pihole_settings.png create mode 100644 img/PIHOLE_GUIDE/PIHOLEAPI_settings.png create mode 100644 img/PIHOLE_GUIDE/PIHOLE_settings.png create mode 100644 
img/PLUGINS/enable_plugin.gif create mode 100644 img/PLUGINS/loaded_plugins_setting.png create mode 100644 img/RANDOM_MAC/android_random_mac.jpg create mode 100644 img/RANDOM_MAC/ios_random_mac.png create mode 100644 img/RANDOM_MAC/windows_random_mac.png create mode 100644 img/SESSION_INFO/DeviceDetails_SessionInfo.png create mode 100644 img/SESSION_INFO/Monitoring_Presence.png create mode 100644 img/SUBNETS/subnets-setting-location.png create mode 100644 img/SUBNETS/subnets_vlan.png create mode 100644 img/SUBNETS/system_info-network_hardware.png create mode 100644 img/SYNOLOGY/01_Create_folder_structure.png create mode 100644 img/SYNOLOGY/02_Create_folder_structure_db.png create mode 100644 img/SYNOLOGY/03_Create_folder_structure_db.png create mode 100644 img/SYNOLOGY/04_Create_folder_structure_config.png create mode 100644 img/SYNOLOGY/05_Access_folder_properties.png create mode 100644 img/SYNOLOGY/06_Note_location.png create mode 100644 img/SYNOLOGY/07_Create_project.png create mode 100644 img/SYNOLOGY/08_Adjust_docker_compose_volumes.png create mode 100644 img/SYNOLOGY/09_Run_and_build.png create mode 100644 img/SYNOLOGY/10_permissions_before.png create mode 100644 img/SYNOLOGY/11_permissions_create_scheduled_task.png create mode 100644 img/SYNOLOGY/12_permissions_task_general.png create mode 100644 img/SYNOLOGY/13_permissions_task_schedule.png create mode 100644 img/SYNOLOGY/14_permissions_task_settings.png create mode 100644 img/SYNOLOGY/15_permissions_after.png create mode 100644 img/VERSIONS/latest-version-maintenance.png create mode 100644 img/VERSIONS/new-version-available-email.png create mode 100644 img/VERSIONS/new-version-available-maintenance.png create mode 100644 img/WEBHOOK_N8N/Webhook_settings.png create mode 100644 img/WEBHOOK_N8N/n8n_send_email_settings.png create mode 100644 img/WEBHOOK_N8N/n8n_webhook_settings.png create mode 100644 img/WEBHOOK_N8N/n8n_workflow.png create mode 100644 img/WEB_UI_PORT_DEBUG/container_port.png create mode 100644 img/WORKFLOWS/actions.jpg create mode 100644 img/WORKFLOWS/conditions.png create mode 100644 img/WORKFLOWS/trigger.jpg create mode 100644 img/WORKFLOWS/workflows.png create mode 100644 img/WORKFLOWS/workflows_app_events_search.png create mode 100644 img/WORKFLOWS/workflows_diagram.png create mode 100644 img/WORKFLOWS/workflows_logs_search.png create mode 100644 img/YouTube_thumbnail.png create mode 100644 img/device_details.png create mode 100644 img/device_nmap.png create mode 100644 img/devices_dark.png create mode 100644 img/devices_light.png create mode 100644 img/devices_split.png create mode 100644 img/events.png create mode 100644 img/help_faq.png create mode 100644 img/maintenance.png create mode 100644 img/multi_edit.png create mode 100644 img/netalertx_docs.png create mode 100644 img/netalertx_docs_old.png create mode 100644 img/network.png create mode 100644 img/network_setup.gif create mode 100644 img/notification_center.png create mode 100644 img/plugins.png create mode 100644 img/plugins_device_details.png create mode 100644 img/plugins_json_settings.png create mode 100644 img/plugins_json_ui.png create mode 100644 img/plugins_settings.png create mode 100644 img/plugins_webmon.png create mode 100644 img/presence.png create mode 100644 img/report_sample.png create mode 100644 img/sent_reports_text.png create mode 100644 img/settings.png create mode 100644 img/showcase.gif create mode 100644 img/size_h_1250_w_1000.txt create mode 100644 img/sync_hub.png create mode 100644 index.html create mode 100644 
overrides/main.html create mode 100644 samples/API/Grafana_Dashboard.json create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..bec49267 --- /dev/null +++ b/404.html @@ -0,0 +1,3899 @@ + + + +
+ This API provides programmatic access to devices, events, sessions, metrics, network tools, and sync in NetAlertX. It is implemented as a REST and GraphQL server. All requests require authentication via an API token (the API_TOKEN setting) unless explicitly noted. For example, to authorize a GraphQL request, send an Authorization: Bearer API_TOKEN header, as in the example below:
curl 'http://host:GRAPHQL_PORT/graphql' \
+ -X POST \
+ -H 'Authorization: Bearer API_TOKEN' \
+ -H 'Content-Type: application/json' \
+ --data '{
+ "query": "query GetDevices($options: PageQueryOptionsInput) { devices(options: $options) { devices { rowid devMac devName devOwner devType devVendor devLastConnection devStatus } count } }",
+ "variables": {
+ "options": {
+ "page": 1,
+ "limit": 10,
+ "sort": [{ "field": "devName", "order": "asc" }],
+ "search": "",
+ "status": "connected"
+ }
+ }
+ }'
+
+The API server runs on 0.0.0.0:<graphql_port> with CORS enabled for all main endpoints.
All endpoints require an API token provided in the HTTP headers:
+Authorization: Bearer <API_TOKEN>
+
+If the token is missing or invalid, the server will return:
+{ "error": "Forbidden" }
+
+http://<server>:<GRAPHQL_PORT>/
+
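A quick way to confirm the base URL and token are working is a small request from the shell. The snippet below is a minimal sketch with hypothetical values for the host, port, and token; adjust them to your environment.

# Hypothetical values - replace with your NetAlertX host, GRAPHQL_PORT and API_TOKEN
NAX_HOST=192.168.1.10
NAX_PORT=20212
NAX_TOKEN=changeme

# Authorized request: prints the HTTP status code (expect 200)
curl -s -o /dev/null -w "%{http_code}\n" \
  -H "Authorization: Bearer ${NAX_TOKEN}" \
  "http://${NAX_HOST}:${NAX_PORT}/graphql"

# The same style of request without a token is rejected with { "error": "Forbidden" }
curl -s "http://${NAX_HOST}:${NAX_PORT}/devices"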
+Tip
+When retrieving devices or settings, try the GraphQL API endpoint first, as it is read-optimized.
+See Testing for example requests and usage.
+On failure, endpoints return success: False and an error message.
+The server runs on 0.0.0.0:<GRAPHQL_PORT> with CORS enabled.
+Use /sync to ensure data integrity.
+
+
+
+ The Database Query API provides direct, low-level access to the NetAlertX database. It allows read, write, update, and delete operations against tables, using base64-encoded SQL or structured parameters.
+Warning
+This API is primarily used internally to generate and render the application UI. These endpoints are low-level and powerful, and should be used with caution. Wherever possible, prefer the standard API endpoints. Invalid or unsafe queries can corrupt data.
+If you need data in a specific format that is not already provided, please open an issue or pull request with a clear, broadly useful use case. This helps ensure new endpoints benefit the wider community rather than relying on raw database queries.
+All /dbquery/* endpoints require an API token in the HTTP headers:
Authorization: Bearer <API_TOKEN>
+
+If the token is missing or invalid:
+{ "error": "Forbidden" }
+
+POST /dbquery/read
+Execute a read-only SQL query (e.g., SELECT).
{
+ "rawSql": "U0VMRUNUICogRlJPTSBERVZJQ0VT" // base64 encoded SQL
+}
+
+Decoded SQL:
+SELECT * FROM Devices;
+
+{
+ "success": true,
+ "results": [
+ { "devMac": "AA:BB:CC:DD:EE:FF", "devName": "Phone" }
+ ]
+}
+
+curl Example
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/dbquery/read" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "rawSql": "U0VMRUNUICogRlJPTSBERVZJQ0VT"
+ }'
+
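Because rawSql must be base64 encoded, it is often easiest to encode the query inline. A minimal sketch, assuming the GNU coreutils base64 utility and the same placeholder host/token values:

# Encode a read-only query and send it in one request
SQL="SELECT devMac, devName FROM Devices;"
RAW_SQL=$(printf '%s' "$SQL" | base64 -w0)   # -w0 disables line wrapping (GNU coreutils; omit on macOS)

curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/dbquery/read" \
  -H "Authorization: Bearer <API_TOKEN>" \
  -H "Content-Type: application/json" \
  -d "{\"rawSql\": \"${RAW_SQL}\"}"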
+POST /dbquery/update (safer than /dbquery/write)
+Update rows in a table by columnName + id. /dbquery/update is parameterized to reduce the risk of SQL injection, while /dbquery/write executes raw SQL directly.
{
+ "columnName": "devMac",
+ "id": ["AA:BB:CC:DD:EE:FF"],
+ "dbtable": "Devices",
+ "columns": ["devName", "devOwner"],
+ "values": ["Laptop", "Alice"]
+}
+
+{ "success": true, "updated_count": 1 }
+
+curl Example
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/dbquery/update" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "columnName": "devMac",
+ "id": ["AA:BB:CC:DD:EE:FF"],
+ "dbtable": "Devices",
+ "columns": ["devName", "devOwner"],
+ "values": ["Laptop", "Alice"]
+ }'
+
+POST /dbquery/write
+Execute a write query (INSERT, UPDATE, DELETE).
{
+ "rawSql": "SU5TRVJUIElOVE8gRGV2aWNlcyAoZGV2TWFjLCBkZXYgTmFtZSwgZGV2Rmlyc3RDb25uZWN0aW9uLCBkZXZMYXN0Q29ubmVjdGlvbiwgZGV2TGFzdElQKSBWQUxVRVMgKCc2QTpCQjo0Qzo1RDo2RTonLCAnVGVzdERldmljZScsICcyMDI1LTA4LTMwIDEyOjAwOjAwJywgJzIwMjUtMDgtMzAgMTI6MDA6MDAnLCAnMTAuMC4wLjEwJyk="
+}
+
+Decoded SQL:
+INSERT INTO Devices (devMac, devName, devFirstConnection, devLastConnection, devLastIP)
+VALUES ('6A:BB:4C:5D:6E', 'TestDevice', '2025-08-30 12:00:00', '2025-08-30 12:00:00', '10.0.0.10');
+
+{ "success": true, "affected_rows": 1 }
+
+curl Example
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/dbquery/write" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "rawSql": "SU5TRVJUIElOVE8gRGV2aWNlcyAoZGV2TWFjLCBkZXYgTmFtZSwgZGV2Rmlyc3RDb25uZWN0aW9uLCBkZXZMYXN0Q29ubmVjdGlvbiwgZGV2TGFzdElQKSBWQUxVRVMgKCc2QTpCQjo0Qzo1RDo2RTonLCAnVGVzdERldmljZScsICcyMDI1LTA4LTMwIDEyOjAwOjAwJywgJzIwMjUtMDgtMzAgMTI6MDA6MDAnLCAnMTAuMC4wLjEwJyk="
+ }'
+
+POST /dbquery/delete
+Delete rows in a table by columnName + id.
{
+ "columnName": "devMac",
+ "id": ["AA:BB:CC:DD:EE:FF"],
+ "dbtable": "Devices"
+}
+
+{ "success": true, "deleted_count": 1 }
+
+curl Example
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/dbquery/delete" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "columnName": "devMac",
+ "id": ["AA:BB:CC:DD:EE:FF"],
+ "dbtable": "Devices"
+ }'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Manage a single device by its MAC address. Operations include retrieval, updates, deletion, resetting properties, and copying data between devices. All endpoints require authorization via Bearer token.
+GET /device/<mac>
+ Fetch all details for a single device, including:
Computed status (devStatus) → On-line, Off-line, or Down
Session, event, and down-alert details (devSessions, devEvents, devDownAlerts)
Presence hours (devPresenceHours)
Dynamic children (devChildrenDynamic) and NIC children (devChildrenNicsDynamic)
Special case: mac=new returns a template for a new device with default values.
Response (success):
+{
+ "devMac": "AA:BB:CC:DD:EE:FF",
+ "devName": "Net - Huawei",
+ "devOwner": "Admin",
+ "devType": "Router",
+ "devVendor": "Huawei",
+ "devStatus": "On-line",
+ "devSessions": 12,
+ "devEvents": 5,
+ "devDownAlerts": 1,
+ "devPresenceHours": 32,
+ "devChildrenDynamic": [...],
+ "devChildrenNicsDynamic": [...],
+ ...
+}
+
+Error Responses:
+/device/<mac>
+ Create or update a device record.
+Request Body:
+{
+ "devName": "New Device",
+ "devOwner": "Admin",
+ "createNew": true
+}
+
+Behavior:
+createNew=true → creates a new device
+Response:
+{
+ "success": true
+}
+
+Error Responses:
+/device/<mac>/delete
+ Deletes the device with the given MAC.
+Response:
+{
+ "success": true
+}
+
+Error Responses:
+/device/<mac>/events/delete
+ Removes all events associated with a device.
+Response:
+{
+ "success": true
+}
+
+/device/<mac>/reset-props
+ Resets the device's custom properties to default values.
+Request Body: Optional JSON for additional parameters.
+Response:
+{
+ "success": true
+}
+
+/device/copy
+ Copy all data from one device to another. If a device exists with macTo, it is replaced.
+Request Body:
+{
+ "macFrom": "AA:BB:CC:DD:EE:FF",
+ "macTo": "11:22:33:44:55:66"
+}
+
+Response:
+{
+ "success": true,
+ "message": "Device copied from AA:BB:CC:DD:EE:FF to 11:22:33:44:55:66"
+}
+
+Error Responses:
+Missing macFrom or macTo → HTTP 400
+/device/<mac>/update-column
+ Update one specific column for a device.
+Request Body:
+{
+ "columnName": "devName",
+ "columnValue": "Updated Device Name"
+}
+
+Response (success):
+{
+ "success": true
+}
+
+Error Responses:
+Missing columnName or columnValue → HTTP 400
+curl Requests
+Get Device Details:
+curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/device/AA:BB:CC:DD:EE:FF" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
+Update Device Fields:
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/device/AA:BB:CC:DD:EE:FF" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ --data '{"devName": "New Device Name"}'
+
+Delete Device:
+curl -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/device/AA:BB:CC:DD:EE:FF/delete" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
+Copy Device Data:
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/device/copy" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ --data '{"macFrom":"AA:BB:CC:DD:EE:FF","macTo":"11:22:33:44:55:66"}'
+
+Update Single Column:
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/device/AA:BB:CC:DD:EE:FF/update-column" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ --data '{"columnName":"devName","columnValue":"Updated Device"}'
+
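The update-column and detail endpoints combine naturally into a small rename-and-verify script. A rough sketch with a hypothetical MAC and name (jq assumed for parsing the response):

MAC="AA:BB:CC:DD:EE:FF"
NEW_NAME="Office Printer"

# Rename the device
curl -s -X POST "http://<server_ip>:<GRAPHQL_PORT>/device/${MAC}/update-column" \
  -H "Authorization: Bearer <API_TOKEN>" \
  -H "Content-Type: application/json" \
  --data "{\"columnName\":\"devName\",\"columnValue\":\"${NEW_NAME}\"}"

# Read the device back and print the stored name
curl -s "http://<server_ip>:<GRAPHQL_PORT>/device/${MAC}" \
  -H "Authorization: Bearer <API_TOKEN>" | jq -r '.devName'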
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The Devices Collection API provides operations to retrieve, manage, import/export, and filter devices in bulk. All endpoints require authorization via Bearer token.
+/devices
+ Retrieves all devices from the database.
+Response (success):
+{
+ "success": true,
+ "devices": [
+ {
+ "devName": "Net - Huawei",
+ "devMAC": "AA:BB:CC:DD:EE:FF",
+ "devIP": "192.168.1.1",
+ "devType": "Router",
+ "devFavorite": 0,
+ "devStatus": "online"
+ },
+ ...
+ ]
+}
+
+Error Responses:
+/devices
+ Deletes devices by MAC address. Supports exact matches or wildcard *.
+Request Body:
+{
+ "macs": ["AA:BB:CC:DD:EE:FF", "11:22:33:*"]
+}
+
+Behavior:
+If macs is omitted or null → deletes all devices.
+Wildcard patterns such as 11:22:33:* match multiple devices.
+Response:
+{
+ "success": true,
+ "deleted_count": 5
+}
+
+Error Responses:
+/devices/empty-macs
+ Removes all devices where MAC address is null or empty.
+Response:
+{
+ "success": true,
+ "deleted": 3
+}
+
+/devices/unknown
+ Deletes devices with names marked as (unknown) or (name not found).
+Response:
+{
+ "success": true,
+ "deleted": 2
+}
+
+/devices/export or /devices/export/<format>
+ Exports all devices in CSV (default) or JSON format.
+Query Parameter / URL Parameter:
+format (optional) → csv (default) or json
+CSV Response:
+Content-Disposition: attachment; filename=devices.csv
+JSON Response:
+{
+ "data": [
+ { "devName": "Net - Huawei", "devMAC": "AA:BB:CC:DD:EE:FF", ... },
+ ...
+ ],
+ "columns": ["devName", "devMAC", "devIP", "devType", "devFavorite", "devStatus"]
+}
+
+Error Responses:
+/devices/import
+ Imports devices from an uploaded CSV or base64-encoded CSV content.
+Request Body (multipart file or JSON with content field):
{
+ "content": "<base64-encoded CSV content>"
+}
+
+Response:
+{
+ "success": true,
+ "inserted": 25,
+ "skipped_lines": [3, 7]
+}
+
+Error Responses:
+/devices/totals
+ Returns counts of devices by various categories.
+Response:
+[
+ 120, // Total devices
+ 85, // Connected
+ 5, // Favorites
+ 10, // New
+ 8, // Down
+ 12 // Archived
+]
+
+Order: [all, connected, favorites, new, down, archived]
/devices/by-status?status=<status>
+ Returns devices filtered by status.
+Query Parameter:
+status → Supported values: online, offline, down, archived, favorites, new, my
+Response (success):
+[
+ { "id": "AA:BB:CC:DD:EE:FF", "title": "Net - Huawei", "favorite": 0 },
+ { "id": "11:22:33:44:55:66", "title": "★ USG Firewall", "favorite": 1 }
+]
+
+If devFavorite=1, the title is prepended with a star ★.
+curl Requests
+Get All Devices:
+curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/devices" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
+Delete Devices by MAC:
+curl -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/devices" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ --data '{"macs":["AA:BB:CC:DD:EE:FF","11:22:33:*"]}'
+
+Export Devices CSV:
+curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/devices/export?format=csv" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
+Import Devices from CSV:
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/devices/import" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -F "file=@devices.csv"
+
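The import endpoint also accepts the CSV as base64 in a JSON body (the content field shown above). A minimal sketch of that variant, assuming a local devices.csv and GNU base64:

# Send devices.csv as base64-encoded JSON instead of a multipart upload
CSV_B64=$(base64 -w0 devices.csv)   # on macOS: base64 -i devices.csv

curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/devices/import" \
  -H "Authorization: Bearer <API_TOKEN>" \
  -H "Content-Type: application/json" \
  --data "{\"content\": \"${CSV_B64}\"}"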
+Get Devices by Status:
+curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/devices/by-status?status=online" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The Events API provides access to device event logs, allowing creation, retrieval, deletion, and summaries of events over time.
+/events/create/<mac>
+ Create an event for a device identified by its MAC address.
+Request Body (JSON):
+{
+ "ip": "192.168.1.10",
+ "event_type": "Device Down",
+ "additional_info": "Optional info about the event",
+ "pending_alert": 1,
+ "event_time": "2025-08-24T12:00:00Z"
+}
+
+Parameters:
+ip (string, optional): IP address of the device
event_type (string, optional): Type of event (default "Device Down")
additional_info (string, optional): Extra information
pending_alert (int, optional): 1 if alert email is pending (default 1)
event_time (ISO datetime, optional): Event timestamp; defaults to current time
Response (JSON):
+{
+ "success": true,
+ "message": "Event created for 00:11:22:33:44:55"
+}
+
+/events
+ Retrieve all events, optionally filtered by MAC address:
+/events?mac=<mac>
+
+Response:
+{
+ "success": true,
+ "events": [
+ {
+ "eve_MAC": "00:11:22:33:44:55",
+ "eve_IP": "192.168.1.10",
+ "eve_DateTime": "2025-08-24T12:00:00Z",
+ "eve_EventType": "Device Down",
+ "eve_AdditionalInfo": "",
+ "eve_PendingAlertEmail": 1
+ }
+ ]
+}
+
+/events/<mac> → Delete events for a specific MAC
+/events → Delete all events
+/events/<days> → Delete events older than N days
+Response:
+{
+ "success": true,
+ "message": "Deleted events older than <days> days"
+}
+
+/sessions/totals?period=<period>
+ Return event and session totals over a given period.
+Query Parameters:
| Parameter | Description |
|---|---|
| period | Time period for totals, e.g., "7 days", "1 month", "1 year", "100 years" |
+
Sample Response (JSON Array):
+[120, 85, 5, 10, 3, 7]
+
+Meaning of Values:
+The counts are filtered by event type, for example voided events (eve_EventType LIKE 'VOIDED%'), new devices (eve_EventType LIKE 'New Device'), and down events (eve_EventType LIKE 'Device Down').
+If the token is missing or invalid, the server returns:
+{ "error": "Forbidden" }
+
+Events are stored in the Events table with the following fields:
+ eve_MAC, eve_IP, eve_DateTime, eve_EventType, eve_AdditionalInfo, eve_PendingAlertEmail.
Event creation automatically logs activity for debugging.
+curl Requests
+Create Event:
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/events/create/00:11:22:33:44:55" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ --data '{
+ "ip": "192.168.1.10",
+ "event_type": "Device Down",
+ "additional_info": "Power outage",
+ "pending_alert": 1
+ }'
+
+Get Events for a Device:
+curl "http://<server_ip>:<GRAPHQL_PORT>/events?mac=00:11:22:33:44:55" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
+Delete Events Older Than 30 Days:
+curl -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/events/30" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
+Get Event Totals for 7 Days:
+curl "http://<server_ip>:<GRAPHQL_PORT>/sessions/totals?period=7 days" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
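The create and list endpoints can be chained to record a custom event and confirm it was stored. A rough sketch with placeholder values (jq assumed):

MAC="00:11:22:33:44:55"

# Record a custom event for the device
curl -s -X POST "http://<server_ip>:<GRAPHQL_PORT>/events/create/${MAC}" \
  -H "Authorization: Bearer <API_TOKEN>" \
  -H "Content-Type: application/json" \
  --data '{"event_type": "Device Down", "additional_info": "Created from a script"}'

# Count the stored "Device Down" events for the same device
curl -s "http://<server_ip>:<GRAPHQL_PORT>/events?mac=${MAC}" \
  -H "Authorization: Bearer <API_TOKEN>" |
  jq '[.events[] | select(.eve_EventType == "Device Down")] | length'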
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ GraphQL queries are read-optimized for speed. Data may be slightly out of date until the file system cache refreshes. The GraphQL endpoints provide access to devices, settings, and language strings (langStrings).
+GET /graphql
+ Returns a simple status message (useful for browser or debugging).
POST /graphql
+ Execute GraphQL queries against the devicesSchema.
query GetDevices($options: PageQueryOptionsInput) {
+ devices(options: $options) {
+ devices {
+ rowid
+ devMac
+ devName
+ devOwner
+ devType
+ devVendor
+ devLastConnection
+ devStatus
+ }
+ count
+ }
+}
+
| Parameter | Description |
|---|---|
| page | Page number of results to fetch. |
| limit | Number of results per page. |
| sort | Sorting options (field = field name, order = asc or desc). |
| search | Term to filter devices. |
| status | Filter devices by status: my_devices, connected, favorites, new, down, archived, offline. |
| filters | Additional filters (array of { filterColumn, filterValue }). |
+
+curl Example
+curl 'http://host:GRAPHQL_PORT/graphql' \
+ -X POST \
+ -H 'Authorization: Bearer API_TOKEN' \
+ -H 'Content-Type: application/json' \
+ --data '{
+ "query": "query GetDevices($options: PageQueryOptionsInput) { devices(options: $options) { devices { rowid devMac devName devOwner devType devVendor devLastConnection devStatus } count } }",
+ "variables": {
+ "options": {
+ "page": 1,
+ "limit": 10,
+ "sort": [{ "field": "devName", "order": "asc" }],
+ "search": "",
+ "status": "connected"
+ }
+ }
+ }'
+
+{
+ "data": {
+ "devices": {
+ "devices": [
+ {
+ "rowid": 1,
+ "devMac": "00:11:22:33:44:55",
+ "devName": "Device 1",
+ "devOwner": "Owner 1",
+ "devType": "Type 1",
+ "devVendor": "Vendor 1",
+ "devLastConnection": "2025-01-01T00:00:00Z",
+ "devStatus": "connected"
+ }
+ ],
+ "count": 1
+ }
+ }
+}
+
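Because results are paged, listing every device means increasing page until the number of rows fetched reaches count. A rough sketch of that loop, assuming the optional sort/search/status fields can be omitted and jq is available:

QUERY='query GetDevices($options: PageQueryOptionsInput) { devices(options: $options) { devices { devMac devName } count } }'
PAGE=1
LIMIT=50
FETCHED=0

while : ; do
  RESP=$(curl -s 'http://host:GRAPHQL_PORT/graphql' \
    -H 'Authorization: Bearer API_TOKEN' \
    -H 'Content-Type: application/json' \
    --data "$(jq -n --arg q "$QUERY" --argjson p "$PAGE" --argjson l "$LIMIT" \
      '{query: $q, variables: {options: {page: $p, limit: $l}}}')")

  # Print one device per line: MAC, then name
  echo "$RESP" | jq -r '.data.devices.devices[] | "\(.devMac)\t\(.devName)"'

  TOTAL=$(echo "$RESP" | jq '.data.devices.count // 0')
  FETCHED=$((FETCHED + $(echo "$RESP" | jq '.data.devices.devices | length')))
  [ "$FETCHED" -ge "$TOTAL" ] && break
  PAGE=$((PAGE + 1))
done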
+The settings query provides access to NetAlertX configuration stored in the settings table.
+query GetSettings {
+ settings {
+ settings {
+ setKey
+ setName
+ setDescription
+ setType
+ setOptions
+ setGroup
+ setValue
+ setEvents
+ setOverriddenByEnv
+ }
+ count
+ }
+}
+
| Field | Type | Description |
|---|---|---|
| setKey | String | Unique key identifier for the setting. |
| setName | String | Human-readable name. |
| setDescription | String | Description or documentation of the setting. |
| setType | String | Data type (string, int, bool, json, etc.). |
| setOptions | String | Available options (for dropdown/select-type settings). |
| setGroup | String | Group/category the setting belongs to. |
| setValue | String | Current value of the setting. |
| setEvents | String | Events or triggers related to this setting. |
| setOverriddenByEnv | Boolean | Whether the setting is overridden by an environment variable at runtime. |
+curl Example
+curl 'http://host:GRAPHQL_PORT/graphql' \
+ -X POST \
+ -H 'Authorization: Bearer API_TOKEN' \
+ -H 'Content-Type: application/json' \
+ --data '{
+ "query": "query GetSettings { settings { settings { setKey setName setDescription setType setOptions setGroup setValue setEvents setOverriddenByEnv } count } }"
+ }'
+
+{
+ "data": {
+ "settings": {
+ "settings": [
+ {
+ "setKey": "UI_MY_DEVICES",
+ "setName": "My Devices Filter",
+ "setDescription": "Defines which statuses to include in the 'My Devices' view.",
+ "setType": "list",
+ "setOptions": "[\"online\",\"new\",\"down\",\"offline\",\"archived\"]",
+ "setGroup": "UI",
+ "setValue": "[\"online\",\"new\"]",
+ "setEvents": null,
+ "setOverriddenByEnv": false
+ },
+ {
+ "setKey": "NETWORK_DEVICE_TYPES",
+ "setName": "Network Device Types",
+ "setDescription": "Types of devices considered as network infrastructure.",
+ "setType": "list",
+ "setOptions": "[\"Router\",\"Switch\",\"AP\"]",
+ "setGroup": "Network",
+ "setValue": "[\"Router\",\"Switch\"]",
+ "setEvents": null,
+ "setOverriddenByEnv": true
+ }
+ ],
+ "count": 2
+ }
+ }
+}
+
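To read a single configuration value from a script, the settings query can be filtered client-side, for example with jq. A small sketch using the UI_MY_DEVICES key from the example above:

curl -s 'http://host:GRAPHQL_PORT/graphql' \
  -H 'Authorization: Bearer API_TOKEN' \
  -H 'Content-Type: application/json' \
  --data '{"query": "query GetSettings { settings { settings { setKey setValue } } }"}' |
  jq -r '.data.settings.settings[] | select(.setKey == "UI_MY_DEVICES") | .setValue'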
+The LangStrings query provides access to localized strings. It supports filtering by langCode and langStringKey. If the requested string is missing or empty, you can optionally fall back to en_us.
query GetLangStrings {
+ langStrings(langCode: "de_de", langStringKey: "settings_other_scanners") {
+ langStrings {
+ langCode
+ langStringKey
+ langStringText
+ }
+ count
+ }
+}
+
| Parameter | Type | Description |
|---|---|---|
| langCode | String | Optional language code (e.g., en_us, de_de). If omitted, all languages are returned. |
| langStringKey | String | Optional string key to retrieve a specific entry. |
| fallback_to_en | Boolean | Optional (default true). If true, empty or missing strings fall back to en_us. |
+curl Example
+curl 'http://host:GRAPHQL_PORT/graphql' \
+ -X POST \
+ -H 'Authorization: Bearer API_TOKEN' \
+ -H 'Content-Type: application/json' \
+ --data '{
+ "query": "query GetLangStrings { langStrings(langCode: \"de_de\", langStringKey: \"settings_other_scanners\") { langStrings { langCode langStringKey langStringText } count } }"
+ }'
+
+{
+ "data": {
+ "langStrings": {
+ "count": 1,
+ "langStrings": [
+ {
+ "langCode": "de_de",
+ "langStringKey": "settings_other_scanners",
+ "langStringText": "Other, non-device scanner plugins that are currently enabled." // falls back to en_us if empty
+ }
+ ]
+ }
+ }
+}
+
+The fallback_to_en feature ensures the UI always has a value even if a translation is missing.
+The setOverriddenByEnv flag helps identify setting values that are locked at container runtime.
+
+
+
+ Purge application log files stored under /app/log and manage the execution queue. These endpoints are primarily used for maintenance tasks, such as clearing accumulated logs or queuing system actions without restarting the container.
Only specific, pre-approved log files can be purged for security and stability reasons.
+/logs?file=<log_file> → Purge the contents of an allowed log file.Query Parameter:
+file → The name of the log file to purge (e.g., app.log, stdout.log)Allowed Files:
+app.log
+app_front.log
+IP_changes.log
+stdout.log
+stderr.log
+app.php_errors.log
+execution_queue.log
+db_is_locked.log
+
+Authorization:
+Requires a valid API token in the Authorization header.
+curl Example (Success)
+curl -X DELETE 'http://<server_ip>:<GRAPHQL_PORT>/logs?file=app.log' \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -H 'Accept: application/json'
+
+Response:
+{
+ "success": true,
+ "message": "[clean_log] File app.log purged successfully"
+}
+
+curl Example (Not Allowed)
+curl -X DELETE 'http://<server_ip>:<GRAPHQL_PORT>/logs?file=not_allowed.log' \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -H 'Accept: application/json'
+
+Response:
+{
+ "success": false,
+ "message": "[clean_log] File not_allowed.log is not allowed to be purged"
+}
+
+curl Example (Unauthorized)
+curl -X DELETE 'http://<server_ip>:<GRAPHQL_PORT>/logs?file=app.log' \
+ -H 'Accept: application/json'
+
+Response:
+{
+ "error": "Forbidden"
+}
+
+/logs/add-to-execution-queue → Add a system action to the execution queue.Request Body (JSON):
+{
+ "action": "update_api|devices"
+}
+
+Authorization:
+Requires a valid API token in the Authorization header.
+curl Example (Success)
+The request below updates the API cache for Devices:
+curl -X POST 'http://<server_ip>:<GRAPHQL_PORT>/logs/add-to-execution-queue' \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -H 'Content-Type: application/json' \
+ --data '{"action": "update_api|devices"}'
+
+Response:
+{
+ "success": true,
+ "message": "[UserEventsQueueInstance] Action \"update_api|devices\" added to the execution queue."
+}
+
+curl Example (Missing Parameter)
+curl -X POST 'http://<server_ip>:<GRAPHQL_PORT>/logs/add-to-execution-queue' \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -H 'Content-Type: application/json' \
+ --data '{}'
+
+Response:
+{
+ "success": false,
+ "message": "Missing parameters",
+ "error": "Missing required 'action' field in JSON body"
+}
+
+curl Example (Unauthorized)
+curl -X POST 'http://<server_ip>:<GRAPHQL_PORT>/logs/add-to-execution-queue' \
+ -H 'Content-Type: application/json' \
+ --data '{"action": "update_api|devices"}'
+
+Response:
+{
+ "error": "Forbidden"
+}
+
+Only the allowed files under /app/log can be purged — arbitrary paths are not permitted.
+When a log file is purged:
+Its content is replaced with a short marker text: "File manually purged".
+The purge is recorded via mylog() and a notification is written via write_notification().
+Queued actions are appended to execution_queue.log and can be processed asynchronously by background tasks or workflows.
+
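For routine cleanup, several of the allowed files can be purged in one pass. A minimal sketch looping over a few entries from the allowed list above:

for LOG in app.log app_front.log stderr.log; do
  curl -s -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/logs?file=${LOG}" \
    -H "Authorization: Bearer <API_TOKEN>"
  echo   # newline between JSON responses
done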
+
+
+ Manage in-app notifications for users. Notifications can be written, retrieved, marked as read, or deleted.
+/messaging/in-app/write → Create a new in-app notification.
+Request Body:
+json
+ {
+ "content": "This is a test notification",
+ "level": "alert" // optional, ["interrupt","info","alert"] default: "alert"
+ }
Response:
+json
+ {
+ "success": true
+ }
+curl Example
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/messaging/in-app/write" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "content": "This is a test notification",
+ "level": "alert"
+ }'
+
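The write and read endpoints chain together when a script needs to post a notification and immediately acknowledge it. A rough sketch (jq assumed; the GUID is taken from the unread list):

# Post a notification, then mark the first unread one as read
curl -s -X POST "http://<server_ip>:<GRAPHQL_PORT>/messaging/in-app/write" \
  -H "Authorization: Bearer <API_TOKEN>" \
  -H "Content-Type: application/json" \
  -d '{"content": "Backup finished", "level": "info"}'

GUID=$(curl -s "http://<server_ip>:<GRAPHQL_PORT>/messaging/in-app/unread" \
  -H "Authorization: Bearer <API_TOKEN>" | jq -r '.[0].guid')

curl -s -X POST "http://<server_ip>:<GRAPHQL_PORT>/messaging/in-app/read/${GUID}" \
  -H "Authorization: Bearer <API_TOKEN>"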
+/messaging/in-app/unread → Retrieve all unread notifications.
+Response:
+json
+ [
+ {
+ "timestamp": "2025-10-10T12:34:56",
+ "guid": "f47ac10b-58cc-4372-a567-0e02b2c3d479",
+ "read": 0,
+ "level": "alert",
+ "content": "This is a test notification"
+ }
+ ]
+curl Example
+curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/messaging/in-app/unread" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json"
+
+/messaging/in-app/read/all → Mark all notifications as read.
+Response:
+json
+ {
+ "success": true
+ }
+curl Example
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/messaging/in-app/read/all" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json"
+
+/messaging/in-app/read/<guid> → Mark a single notification as read using its GUID.
+Response (success):
+json
+ {
+ "success": true
+ }
Response (failure):
+json
+ {
+ "success": false,
+ "error": "Notification not found"
+ }
+curl Example
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/messaging/in-app/read/f47ac10b-58cc-4372-a567-0e02b2c3d479" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json"
+
+/messaging/in-app/delete → Remove all notifications from the system.
+Response:
+json
+ {
+ "success": true
+ }
curl Examplecurl -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/messaging/in-app/delete" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json"
+
+/messaging/in-app/delete/<guid> → Remove a single notification by its GUID.
+Response (success):
+json
+ {
+ "success": true
+ }
Response (failure):
+json
+ {
+ "success": false,
+ "error": "Notification not found"
+ }
curl Examplecurl -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/messaging/in-app/delete/f47ac10b-58cc-4372-a567-0e02b2c3d479" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json"
+
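+A minimal Python sketch tying several of these endpoints together — create a notification, then fetch the unread ones and mark each as read (assumes the requests library and the same placeholders as the curl examples):
+import requests
+
+BASE_URL = "http://<server_ip>:<GRAPHQL_PORT>"   # placeholders – replace before running
+HEADERS = {"Authorization": "Bearer <API_TOKEN>", "Accept": "application/json"}
+
+# Create a notification
+requests.post(
+    f"{BASE_URL}/messaging/in-app/write",
+    headers=HEADERS,
+    json={"content": "Disk space running low", "level": "info"},
+    timeout=10,
+)
+
+# Fetch all unread notifications and mark each one as read by its GUID
+unread = requests.get(f"{BASE_URL}/messaging/in-app/unread", headers=HEADERS, timeout=10).json()
+for note in unread:
+    print(note["timestamp"], note["level"], note["content"])
+    requests.post(f"{BASE_URL}/messaging/in-app/read/{note['guid']}", headers=HEADERS, timeout=10)
+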
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The /metrics endpoint exposes Prometheus-compatible metrics for NetAlertX, including aggregate device counts and per-device status.
/metrics → Returns metrics in plain text.
+Port: GRAPHQL_PORT (default: 20212)
+netalertx_connected_devices 31
+netalertx_offline_devices 54
+netalertx_down_devices 0
+netalertx_new_devices 0
+netalertx_archived_devices 31
+netalertx_favorite_devices 2
+netalertx_my_devices 54
+
+netalertx_device_status{device="Net - Huawei", mac="Internet", ip="1111.111.111.111", vendor="None", first_connection="2021-01-01 00:00:00", last_connection="2025-08-04 17:57:00", dev_type="Router", device_status="Online"} 1
+netalertx_device_status{device="Net - USG", mac="74:ac:74:ac:74:ac", ip="192.168.1.1", vendor="Ubiquiti Networks Inc.", first_connection="2022-02-12 22:05:00", last_connection="2025-06-07 08:16:49", dev_type="Firewall", device_status="Archived"} 1
+netalertx_device_status{device="Raspberry Pi 4 LAN", mac="74:ac:74:ac:74:74", ip="192.168.1.9", vendor="Raspberry Pi Trading Ltd", first_connection="2022-02-12 22:05:00", last_connection="2025-08-04 17:57:00", dev_type="Singleboard Computer (SBC)", device_status="Online"} 1
+...
+
+| Metric | Description |
+|---|---|
+| netalertx_connected_devices | Devices currently connected |
+| netalertx_offline_devices | Devices currently offline |
+| netalertx_down_devices | Down/unreachable devices |
+| netalertx_new_devices | Recently detected devices |
+| netalertx_archived_devices | Archived devices |
+| netalertx_favorite_devices | User-marked favorites |
+| netalertx_my_devices | Devices associated with the current user |
Metric: netalertx_device_status
+Each device has labels:
device: friendly name
+mac: MAC address (or placeholder)
+ip: last recorded IP
+vendor: manufacturer or "None"
+first_connection: timestamp of first detection
+last_connection: most recent contact
+dev_type: device type/category
+device_status: current status (Online, Offline, Archived, Down, …)
+Metric value is always 1 (presence indicator).
curl
+curl 'http://<server_ip>:<GRAPHQL_PORT>/metrics' \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -H 'Accept: text/plain'
+
+Replace placeholders:
+<server_ip> – NetAlertX host IP/hostname
+<GRAPHQL_PORT> – configured port (default 20212)
+<API_TOKEN> – your API token
+
+scrape_configs:
+ - job_name: 'netalertx'
+ metrics_path: /metrics
+ scheme: http
+ scrape_interval: 60s
+ static_configs:
+ - targets: ['<server_ip>:<GRAPHQL_PORT>']
+ authorization:
+ type: Bearer
+ credentials: <API_TOKEN>
+
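+Outside of Prometheus, the endpoint can also be scraped ad hoc. A minimal Python sketch (assuming requests is installed and the placeholders are replaced) that prints only the aggregate counters:
+import requests
+
+resp = requests.get(
+    "http://<server_ip>:<GRAPHQL_PORT>/metrics",                      # placeholders
+    headers={"Authorization": "Bearer <API_TOKEN>", "Accept": "text/plain"},
+    timeout=10,
+)
+
+# Aggregate counters are the lines without labels (no "{" in them)
+for line in resp.text.splitlines():
+    if line.startswith("netalertx_") and "{" not in line:
+        name, value = line.rsplit(" ", 1)
+        print(f"{name}: {value}")
+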
+Sample template JSON: Download
+
+
+
+ The Net Tools API provides network diagnostic utilities, including Wake-on-LAN, traceroute, speed testing, DNS resolution, nmap scanning, and internet connection information.
+All endpoints require authorization via Bearer token.
+/nettools/wakeonlan
+Sends a Wake-on-LAN packet to wake a device.
+Request Body (JSON):
+{
+ "devMac": "AA:BB:CC:DD:EE:FF"
+}
+
+Response (success):
+{
+ "success": true,
+ "message": "WOL packet sent",
+ "output": "Sent magic packet to AA:BB:CC:DD:EE:FF"
+}
+
+Error Responses:
+/nettools/traceroute
+Performs a traceroute to a specified IP address.
+Request Body:
+{
+ "devLastIP": "192.168.1.1"
+}
+
+Response (success):
+{
+ "success": true,
+ "output": "traceroute output as string"
+}
+
+Error Responses:
+/nettools/speedtest
+Runs an internet speed test using speedtest-cli.
+Response (success):
+{
+ "success": true,
+ "output": [
+ "Ping: 15 ms",
+ "Download: 120.5 Mbit/s",
+ "Upload: 22.4 Mbit/s"
+ ]
+}
+
+Error Responses:
+/nettools/nslookup
+Resolves an IP address or hostname using nslookup.
+Request Body:
+{
+ "devLastIP": "8.8.8.8"
+}
+
+Response (success):
+{
+ "success": true,
+ "output": [
+ "Server: 8.8.8.8",
+ "Address: 8.8.8.8#53",
+ "Name: google-public-dns-a.google.com"
+ ]
+}
+
+Error Responses:
+devLastIP → HTTP 400
+/nettools/nmap
+Runs an nmap scan on a target IP address or range.
+Request Body:
+{
+ "scan": "192.168.1.0/24",
+ "mode": "fast"
+}
+
+Supported Modes:
+| Mode | nmap Arguments |
+|---|---|
+| fast | -F |
+| normal | default |
+| detail | -A |
+| skipdiscovery | -Pn |
Response (success):
+{
+ "success": true,
+ "mode": "fast",
+ "ip": "192.168.1.0/24",
+ "output": [
+ "Starting Nmap 7.91",
+ "Host 192.168.1.1 is up",
+ "... scan results ..."
+ ]
+}
+
+Error Responses:
+/nettools/internetinfo
+Fetches public internet connection information using ipinfo.io.
+Response (success):
+{
+ "success": true,
+ "output": "IP: 203.0.113.5 City: Sydney Country: AU Org: Example ISP"
+}
+
+Error Responses:
+curl Requests
+Wake-on-LAN:
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/nettools/wakeonlan" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ --data '{"devMac":"AA:BB:CC:DD:EE:FF"}'
+
+Traceroute:
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/nettools/traceroute" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ --data '{"devLastIP":"192.168.1.1"}'
+
+Speedtest:
+curl "http://<server_ip>:<GRAPHQL_PORT>/nettools/speedtest" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
+Nslookup:
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/nettools/nslookup" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ --data '{"devLastIP":"8.8.8.8"}'
+
+Nmap Scan:
+curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/nettools/nmap" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Content-Type: application/json" \
+ --data '{"scan":"192.168.1.0/24","mode":"fast"}'
+
+Internet Info:
+curl "http://<server_ip>:<GRAPHQL_PORT>/nettools/internetinfo" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
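+The same calls can be scripted. A minimal Python sketch (assuming requests and the usual placeholders) sending a Wake-on-LAN packet and running an nslookup:
+import requests
+
+BASE_URL = "http://<server_ip>:<GRAPHQL_PORT>"   # placeholders – replace before running
+HEADERS = {"Authorization": "Bearer <API_TOKEN>", "Content-Type": "application/json"}
+
+# Send a Wake-on-LAN packet
+wol = requests.post(f"{BASE_URL}/nettools/wakeonlan",
+                    headers=HEADERS,
+                    json={"devMac": "AA:BB:CC:DD:EE:FF"},
+                    timeout=30)
+print(wol.json())
+
+# Resolve a host with nslookup
+dns = requests.post(f"{BASE_URL}/nettools/nslookup",
+                    headers=HEADERS,
+                    json={"devLastIP": "8.8.8.8"},
+                    timeout=30)
+print(dns.json())
+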
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Warning
+Some of these endpoints will be deprecated soon. Please refer to the new API endpoints docs for details on the new API layer.
+NetAlertX comes with a couple of API endpoints. All requests need to be authorized — either executed in a logged-in browser session, or by passing the value of the API_TOKEN setting as an authorization bearer token, for example:
curl 'http://host:GRAPHQL_PORT/graphql' \
+ -X POST \
+ -H 'Authorization: Bearer API_TOKEN' \
+ -H 'Content-Type: application/json' \
+ --data '{
+ "query": "query GetDevices($options: PageQueryOptionsInput) { devices(options: $options) { devices { rowid devMac devName devOwner devType devVendor devLastConnection devStatus } count } }",
+ "variables": {
+ "options": {
+ "page": 1,
+ "limit": 10,
+ "sort": [{ "field": "devName", "order": "asc" }],
+ "search": "",
+ "status": "connected"
+ }
+ }
+ }'
+
+Endpoint: php/server/query_graphql.php
+Authorization: same as front end (web UI)
+Port: 20212, or as defined by the GRAPHQL_PORT setting
+First, let's define the GraphQL query to fetch devices with pagination and sorting options.
+query GetDevices($options: PageQueryOptionsInput) {
+ devices(options: $options) {
+ devices {
+ rowid
+ devMac
+ devName
+ devOwner
+ devType
+ devVendor
+ devLastConnection
+ devStatus
+ }
+ count
+ }
+}
+
+See also: Debugging GraphQL issues
+curl CommandYou can use the following curl command to execute the query.
curl 'http://host:GRAPHQL_PORT/graphql' -X POST -H 'Authorization: Bearer API_TOKEN' -H 'Content-Type: application/json' --data '{
+ "query": "query GetDevices($options: PageQueryOptionsInput) { devices(options: $options) { devices { rowid devMac devName devOwner devType devVendor devLastConnection devStatus } count } }",
+ "variables": {
+ "options": {
+ "page": 1,
+ "limit": 10,
+ "sort": [{ "field": "devName", "order": "asc" }],
+ "search": "",
+ "status": "connected"
+ }
+ }
+ }'
+
+The query parameter contains the GraphQL query as a string. The variables parameter contains the input variables for the query.
Query Variables:
+page: Specifies the page number of results to fetch.
+limit: Specifies the number of results per page.
+sort: Specifies the sorting options, with field being the field to sort by and order being the sort order (asc for ascending or desc for descending).
+search: A search term to filter the devices.
+status: The status filter to apply (valid values are my_devices (determined by the UI_MY_DEVICES setting), connected, favorites, new, down, archived, offline).
curl Command:
The -X POST option specifies that we are making a POST request.
+The -H "Content-Type: application/json" option sets the content type of the request to JSON.
+The -d option provides the request payload, which includes the GraphQL query and variables.
+The response will be in JSON format, similar to the following:
+{
+ "data": {
+ "devices": {
+ "devices": [
+ {
+ "rowid": 1,
+ "devMac": "00:11:22:33:44:55",
+ "devName": "Device 1",
+ "devOwner": "Owner 1",
+ "devType": "Type 1",
+ "devVendor": "Vendor 1",
+ "devLastConnection": "2025-01-01T00:00:00Z",
+ "devStatus": "connected"
+ },
+ {
+ "rowid": 2,
+ "devMac": "66:77:88:99:AA:BB",
+ "devName": "Device 2",
+ "devOwner": "Owner 2",
+ "devType": "Type 2",
+ "devVendor": "Vendor 2",
+ "devLastConnection": "2025-01-02T00:00:00Z",
+ "devStatus": "connected"
+ }
+ ],
+ "count": 2
+ }
+ }
+}
+
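+The same query can be sent from Python. A minimal sketch using the requests library (placeholders as above); it reads the count and a few fields from the documented response shape:
+import requests
+
+GRAPHQL_URL = "http://<server_ip>:<GRAPHQL_PORT>/graphql"   # placeholders
+QUERY = """
+query GetDevices($options: PageQueryOptionsInput) {
+  devices(options: $options) {
+    devices { rowid devMac devName devLastConnection devStatus }
+    count
+  }
+}
+"""
+
+resp = requests.post(
+    GRAPHQL_URL,
+    headers={"Authorization": "Bearer <API_TOKEN>"},
+    json={
+        "query": QUERY,
+        "variables": {"options": {"page": 1, "limit": 10,
+                                  "sort": [{"field": "devName", "order": "asc"}],
+                                  "search": "", "status": "connected"}},
+    },
+    timeout=10,
+)
+
+data = resp.json()["data"]["devices"]
+print(f"{data['count']} connected devices")
+for dev in data["devices"]:
+    print(dev["devName"], dev["devMac"], dev["devStatus"])
+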
+This API endpoint retrieves static files that are periodically updated.
+Endpoint: php/server/query_json.php?file=<file name>
+Authorization: same as front end (web UI)
+Port: 20211, or as defined by the $PORT docker environment variable (same as the port for the web UI)
+The endpoints are updated when objects in the API endpoints are changed.
+In the container, these files are located under the API directory (default: /tmp/api/, configurable via NETALERTX_API environment variable). You can access them via the /php/server/query_json.php?file=user_notifications.json endpoint.
You can access the following files:
+| File name | +Description | +
|---|---|
notification_json_final.json |
+The json version of the last notification (e.g. used for webhooks - sample JSON). | +
table_devices.json |
+All of the available Devices detected by the app. | +
table_plugins_events.json |
+The list of the unprocessed (pending) notification events (plugins_events DB table). | +
table_plugins_history.json |
+The list of notification events history. | +
table_plugins_objects.json |
+The content of the plugins_objects table. Find more info on the Plugin system here | +
language_strings.json |
+The content of the language_strings table, which in turn is loaded from the plugins config.json definitions. |
+
table_custom_endpoint.json |
+A custom endpoint generated by the SQL query specified by the API_CUSTOM_SQL setting. |
+
table_settings.json |
+The content of the settings table. | +
app_state.json |
+Contains the current application state. | +
The endpoints starting with the table_ prefix contain most, if not all, data contained in the corresponding database table. The common format for those is:
{
+ "data": [
+ {
+ "db_column_name": "data",
+ "db_column_name2": "data2"
+ },
+ {
+ "db_column_name": "data3",
+ "db_column_name2": "data4"
+ }
+ ]
+}
+
+
+Example JSON of the table_devices.json endpoint with two Devices (database rows):
{
+ "data": [
+ {
+ "devMac": "Internet",
+ "devName": "Net - Huawei",
+ "devType": "Router",
+ "devVendor": null,
+ "devGroup": "Always on",
+ "devFirstConnection": "2021-01-01 00:00:00",
+ "devLastConnection": "2021-01-28 22:22:11",
+ "devLastIP": "192.168.1.24",
+ "devStaticIP": 0,
+ "devPresentLastScan": 1,
+ "devLastNotification": "2023-01-28 22:22:28.998715",
+ "devIsNew": 0,
+ "devParentMAC": "",
+ "devParentPort": "",
+ "devIcon": "globe"
+ },
+ {
+ "devMac": "a4:8f:ff:aa:ba:1f",
+ "devName": "Net - USG",
+ "devType": "Firewall",
+ "devVendor": "Ubiquiti Inc",
+ "devGroup": "",
+ "devFirstConnection": "2021-02-12 22:05:00",
+ "devLastConnection": "2021-07-17 15:40:00",
+ "devLastIP": "192.168.1.1",
+ "devStaticIP": 1,
+ "devPresentLastScan": 1,
+ "devLastNotification": "2021-07-17 15:40:10.667717",
+ "devIsNew": 0,
+ "devParentMAC": "Internet",
+ "devParentPort": 1,
+ "devIcon": "shield-halved"
+ }
+ ]
+}
+
+
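+A minimal Python sketch fetching table_devices.json through this endpoint (assuming requests is installed, the web UI port is the default 20211, and the API_TOKEN bearer header is accepted as described above):
+import requests
+
+URL = "http://<server_ip>:20211/php/server/query_json.php"   # placeholders
+HEADERS = {"Authorization": "Bearer <API_TOKEN>"}
+
+resp = requests.get(URL, params={"file": "table_devices.json"}, headers=HEADERS, timeout=10)
+devices = resp.json()["data"]
+
+# Print a few columns from each device row
+for dev in devices:
+    print(dev["devName"], dev["devMac"], dev["devLastIP"])
+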
+/metrics
+Port: GRAPHQL_PORT setting (20212 by default)
+/metrics Endpoint
+Below is a representative snippet of the metrics you may find when querying the /metrics endpoint for NetAlertX. It includes both aggregate counters and device_status labels per device.
netalertx_connected_devices 31
+netalertx_offline_devices 54
+netalertx_down_devices 0
+netalertx_new_devices 0
+netalertx_archived_devices 31
+netalertx_favorite_devices 2
+netalertx_my_devices 54
+
+netalertx_device_status{device="Net - Huawei", mac="Internet", ip="1111.111.111.111", vendor="None", first_connection="2021-01-01 00:00:00", last_connection="2025-08-04 17:57:00", dev_type="Router", device_status="Online"} 1
+netalertx_device_status{device="Net - USG", mac="74:ac:74:ac:74:ac", ip="192.168.1.1", vendor="Ubiquiti Networks Inc.", first_connection="2022-02-12 22:05:00", last_connection="2025-06-07 08:16:49", dev_type="Firewall", device_status="Archived"} 1
+netalertx_device_status{device="Raspberry Pi 4 LAN", mac="74:ac:74:ac:74:74", ip="192.168.1.9", vendor="Raspberry Pi Trading Ltd", first_connection="2022-02-12 22:05:00", last_connection="2025-08-04 17:57:00", dev_type="Singleboard Computer (SBC)", device_status="Online"} 1
+...
+
+Metric names prefixed with netalertx_ provide aggregated counts by device status:
netalertx_connected_devices: number of devices currently connected
+netalertx_offline_devices: devices currently offline
+netalertx_down_devices: down/unreachable devices
+netalertx_new_devices: devices recently detected
+netalertx_archived_devices: archived devices
+netalertx_favorite_devices: user-marked favorite devices
+netalertx_my_devices: devices associated with the current user context
+These numeric values give a high-level overview of device distribution.
+Each individual device is represented by a netalertx_device_status metric, with descriptive labels:
device: friendly name of the device
+mac: MAC address (or placeholder)
+ip: last recorded IP address
+vendor: manufacturer or "None" if unknown
+first_connection: timestamp when the device was first observed
+last_connection: most recent contact timestamp
+dev_type: device category or type
+device_status: current status (Online / Offline / Archived / Down / ...)
+The metric value is always 1 (indicating presence or active state) and the combination of labels identifies the device.
curlTo fetch the metrics from the NetAlertX exporter:
+curl 'http://<server_ip>:<GRAPHQL_PORT>/metrics' \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -H 'Accept: text/plain'
+
+Replace:
+<server_ip>: IP or hostname of the NetAlertX server
+<GRAPHQL_PORT>: port specified in your GRAPHQL_PORT setting (default: 20212)
+<API_TOKEN>: your Bearer token from the API_TOKEN setting
+/metrics provides both summary counters and per-device status entries.
+
+scrape_configs:
+ - job_name: 'netalertx'
+ metrics_path: /metrics
+ scheme: http
+ scrape_interval: 60s
+ static_configs:
+ - targets: ['<server_ip>:<GRAPHQL_PORT>']
+ authorization:
+ type: Bearer
+ credentials: <API_TOKEN>
+
+Grafana template sample: Download json
+This API endpoint retrieves files from the /tmp/log folder.
Endpoint: php/server/query_logs.php?file=<file name>
+Authorization: same as front end (web UI)
+Port: 20211, or as defined by the $PORT docker environment variable (same as the port for the web UI)
+
+| File | Description |
+|---|---|
+| IP_changes.log | Logs of IP address changes |
+| app.log | Main application log |
+| app.php_errors.log | PHP error log |
+| app_front.log | Frontend application log |
+| app_nmap.log | Logs of Nmap scan results |
+| db_is_locked.log | Logs when the database is locked |
+| execution_queue.log | Logs of execution queue activities |
+| plugins/ | Directory for temporary plugin-related files (not accessible) |
+| report_output.html | HTML report output |
+| report_output.json | JSON format report output |
+| report_output.txt | Text format report output |
+| stderr.log | Logs of standard error output |
+| stdout.log | Logs of standard output |
This API endpoint retrieves files from the /data/config folder.
+Endpoint: php/server/query_config.php?file=<file name>
+Authorization: same as front end (web UI)
+Port: 20211, or as defined by the $PORT docker environment variable (same as the port for the web UI)
+
+| File | Description |
+|---|---|
+| devices.csv | Devices CSV file |
+| app.conf | Application config file |
+
+
+
+ Manage the online history records of devices. Currently, the API supports deletion of all history entries. All endpoints require authorization.
+/history
+Remove all records from the online history table (Online_History). This operation cannot be undone.
+Response (success):
+{
+ "success": true,
+ "message": "Deleted online history"
+}
+
+Error Responses:
+curl Request:
+curl -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/history" \
+ -H "Authorization: Bearer <API_TOKEN>"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Track and manage device connection sessions. Sessions record when a device connects or disconnects on the network.
+/sessions/create → Create a new session for a device
+Request Body:
+json
+ {
+ "mac": "AA:BB:CC:DD:EE:FF",
+ "ip": "192.168.1.10",
+ "start_time": "2025-08-01T10:00:00",
+ "end_time": "2025-08-01T12:00:00", // optional
+ "event_type_conn": "Connected", // optional, default "Connected"
+ "event_type_disc": "Disconnected" // optional, default "Disconnected"
+ }
Response:
+json
+ {
+ "success": true,
+ "message": "Session created for MAC AA:BB:CC:DD:EE:FF"
+ }
curl Examplecurl -X POST "http://<server_ip>:<GRAPHQL_PORT>/sessions/create" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "mac": "AA:BB:CC:DD:EE:FF",
+ "ip": "192.168.1.10",
+ "start_time": "2025-08-01T10:00:00",
+ "end_time": "2025-08-01T12:00:00",
+ "event_type_conn": "Connected",
+ "event_type_disc": "Disconnected"
+ }'
+
+
+/sessions/delete → Delete all sessions for a given MAC
+Request Body:
+json
+ {
+ "mac": "AA:BB:CC:DD:EE:FF"
+ }
Response:
+json
+ {
+ "success": true,
+ "message": "Deleted sessions for MAC AA:BB:CC:DD:EE:FF"
+ }
curl Examplecurl -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/sessions/delete" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "mac": "AA:BB:CC:DD:EE:FF"
+ }'
+
+/sessions/list → Retrieve sessions, optionally filtered by device and date range
+Query Parameters:
+mac (optional) → Filter by device MAC address
+start_date (optional) → Filter sessions starting from this date (YYYY-MM-DD)
+end_date (optional) → Filter sessions ending by this date (YYYY-MM-DD)
+Example:
+/sessions/list?mac=AA:BB:CC:DD:EE:FF&start_date=2025-08-01&end_date=2025-08-21
Response:
+json
+ {
+ "success": true,
+ "sessions": [
+ {
+ "ses_MAC": "AA:BB:CC:DD:EE:FF",
+ "ses_Connection": "2025-08-01 10:00",
+ "ses_Disconnection": "2025-08-01 12:00",
+ "ses_Duration": "2h 0m",
+ "ses_IP": "192.168.1.10",
+ "ses_Info": ""
+ }
+ ]
+ }
curl Examplecurl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/list?mac=AA:BB:CC:DD:EE:FF&start_date=2025-08-01&end_date=2025-08-21" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json"
+
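+A minimal Python sketch for the same listing call (assuming requests and the usual placeholders):
+import requests
+
+BASE_URL = "http://<server_ip>:<GRAPHQL_PORT>"   # placeholders
+HEADERS = {"Authorization": "Bearer <API_TOKEN>", "Accept": "application/json"}
+
+resp = requests.get(
+    f"{BASE_URL}/sessions/list",
+    headers=HEADERS,
+    params={"mac": "AA:BB:CC:DD:EE:FF", "start_date": "2025-08-01", "end_date": "2025-08-21"},
+    timeout=10,
+)
+
+for ses in resp.json().get("sessions", []):
+    print(ses["ses_Connection"], "→", ses["ses_Disconnection"], ses["ses_IP"])
+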
+/sessions/calendar → View sessions in calendar format
+Query Parameters:
+start → Start date (YYYY-MM-DD)
+end → End date (YYYY-MM-DD)
+Example:
+/sessions/calendar?start=2025-08-01&end=2025-08-21
Response:
+json
+ {
+ "success": true,
+ "sessions": [
+ {
+ "resourceId": "AA:BB:CC:DD:EE:FF",
+ "title": "",
+ "start": "2025-08-01T10:00:00",
+ "end": "2025-08-01T12:00:00",
+ "color": "#00a659",
+ "tooltip": "Connection: 2025-08-01 10:00\nDisconnection: 2025-08-01 12:00\nIP: 192.168.1.10",
+ "className": "no-border"
+ }
+ ]
+ }
curl Examplecurl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/calendar?start=2025-08-01&end=2025-08-21" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json"
+
+/sessions/<mac> → Retrieve sessions for a specific device
+Query Parameters:
+period → Period to retrieve sessions (1 day, 7 days, 1 month, etc.). Default: 1 day
+Example:
+/sessions/AA:BB:CC:DD:EE:FF?period=7 days
Response:
+json
+ {
+ "success": true,
+ "sessions": [
+ {
+ "ses_MAC": "AA:BB:CC:DD:EE:FF",
+ "ses_Connection": "2025-08-01 10:00",
+ "ses_Disconnection": "2025-08-01 12:00",
+ "ses_Duration": "2h 0m",
+ "ses_IP": "192.168.1.10",
+ "ses_Info": ""
+ }
+ ]
+ }
curl Examplecurl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/AA:BB:CC:DD:EE:FF?period=7%20days" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json"
+
+/sessions/session-events → Retrieve a summary of session events
+Query Parameters:
+type → Event type (all, sessions, missing, voided, new, down). Default: all
+period → Period to retrieve events (7 days, 1 month, etc.)
+Example:
+/sessions/session-events?type=all&period=7 days
Response: + Returns a list of events or sessions with formatted connection, disconnection, duration, and IP information.
+curl Examplecurl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/session-events?type=all&period=7%20days" \
+ -H "Authorization: Bearer <API_TOKEN>" \
+ -H "Accept: application/json"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Retrieve application settings stored in the configuration system. This endpoint is useful for quickly fetching individual settings such as API_TOKEN or TIMEZONE.
For bulk or structured access (all settings, schema details, or filtering), use the GraphQL API Endpoint.
+/settings/<key> → Retrieve the value of a specific setting
+Path Parameter:
+key → The setting key to retrieve (e.g., API_TOKEN, TIMEZONE)
+Authorization:
+Requires a valid API token in the Authorization header.
curl Example (Success)curl 'http://<server_ip>:<GRAPHQL_PORT>/settings/API_TOKEN' \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -H 'Accept: application/json'
+
+Response:
+{
+ "success": true,
+ "value": "my-secret-token"
+}
+
+curl Example (Invalid Key)curl 'http://<server_ip>:<GRAPHQL_PORT>/settings/DOES_NOT_EXIST' \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -H 'Accept: application/json'
+
+Response:
+{
+ "success": true,
+ "value": null
+}
+
+curl Example (Unauthorized)curl 'http://<server_ip>:<GRAPHQL_PORT>/settings/API_TOKEN' \
+ -H 'Accept: application/json'
+
+Response:
+{
+ "error": "Forbidden"
+}
+
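+For scripting, a minimal Python helper around this endpoint (assuming requests and the usual placeholders); it returns None for unknown keys, mirroring the responses above:
+import requests
+
+BASE_URL = "http://<server_ip>:<GRAPHQL_PORT>"   # placeholders
+HEADERS = {"Authorization": "Bearer <API_TOKEN>", "Accept": "application/json"}
+
+def get_setting(key):
+    """Return the value of a single setting, or None if it does not exist."""
+    resp = requests.get(f"{BASE_URL}/settings/{key}", headers=HEADERS, timeout=10)
+    body = resp.json()
+    return body.get("value") if body.get("success") else None
+
+print(get_setting("TIMEZONE"))
+print(get_setting("DOES_NOT_EXIST"))   # prints None
+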
+To retrieve setting metadata (such as setName, setDescription, setType, or checking if a setting is overridden by environment variables), use the GraphQL Settings Query:
+curl 'http://<server_ip>:<GRAPHQL_PORT>/graphql' \
+ -X POST \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -H 'Content-Type: application/json' \
+ --data '{
+ "query": "query GetSettings { settings { settings { setKey setName setDescription setType setOptions setGroup setValue setEvents setOverriddenByEnv } count } }"
+ }'
+
+See the GraphQL API Endpoint for more details.
+
+
+
+ The /sync endpoint is used by the SYNC plugin to synchronize data between multiple NetAlertX instances (e.g., from a node to a hub). It supports both GET and POST requests.
GET /sync
+Fetches data from a node to the hub. The data is returned as a base64-encoded JSON file.
+Example Request:
+curl 'http://<server>:<GRAPHQL_PORT>/sync' \
+ -H 'Authorization: Bearer <API_TOKEN>'
+
+Response Example:
+{
+ "node_name": "NODE-01",
+ "status": 200,
+ "message": "OK",
+ "data_base64": "eyJkZXZpY2VzIjogW3siZGV2TWFjIjogIjAwOjExOjIyOjMzOjQ0OjU1IiwiZGV2TmFtZSI6ICJEZXZpY2UgMSJ9XSwgImNvdW50Ijog1fQ==",
+ "timestamp": "2025-08-24T10:15:00+10:00"
+}
+
+Notes:
+data_base64 contains the full JSON data encoded in Base64.
+node_name corresponds to the SYNC_node_name setting on the node.
+
+POST /sync
+The POST endpoint is used by nodes to send data to the hub. The hub expects the data as form-encoded fields (application/x-www-form-urlencoded or multipart/form-data). The hub then stores the data in the plugin log folder for processing.
+| Field | +Type | +Description | +
|---|---|---|
data |
+string | +The payload from the plugin or devices. Typically plain text, JSON, or encrypted Base64 data. In your Python script, encrypt_data() is applied before sending. |
+
node_name |
+string | +The name of the node sending the data. Matches the node’s SYNC_node_name setting. Used to generate the filename on the hub. |
+
plugin |
+string | +The name of the plugin sending the data. Determines the filename prefix (last_result.<plugin>...). |
+
file_path |
+string (optional) | +Path of the local file being sent. Used only for logging/debugging purposes on the hub; not required for processing. | +
INSTALL_PATH/log/plugins/last_result.<plugin>.encoded.<node_name>.<sequence>.log
+
+<plugin> → plugin name from the POST request.
+<node_name> → node name from the POST request.
+<sequence> → incremented number for each submission.
Decodes / decrypts the data if necessary (Base64 or encrypted) before processing.
+Processes JSON payloads (e.g., device info) to:
+Avoid duplicates by tracking devMac.
devSyncHubNode.processed_last_result.<plugin>.<node_name>.<sequence>.log
+
+If a node is sending device data:
+curl -X POST 'http://<hub>:<PORT>/sync' \
+ -H 'Authorization: Bearer <API_TOKEN>' \
+ -F 'data={"data":[{"devMac":"00:11:22:33:44:55","devName":"Device 1","devVendor":"Vendor A","devLastIP":"192.168.1.10"}]}' \
+ -F 'node_name=NODE-01' \
+ -F 'plugin=SYNC'
+
+The data field contains JSON with a data array, where each element is a device object or plugin data object.
+The plugin and node_name fields allow the hub to organize and store the file correctly.
+Use consistent plugin and node_name values for consistent storage.
+Data can be encrypted with encrypt_data() before sending, and the hub decodes it before processing.
+Send the payload as multipart/form-data (cURL -F) or application/x-www-form-urlencoded.
+Storage Details:
+INSTALL_PATH/log/plugins with filenames following the pattern:last_result.<plugin>.encoded.<node_name>.<sequence>.log
+
+Ensure node_name and plugin are consistent to avoid overwriting files.
+Each submission is logged (via write_notification), which can be used for alerting or auditing.
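+A minimal Python sketch of a node pushing device data to the hub (assuming requests and the usual placeholders; encryption via encrypt_data() is omitted here, so the hub receives plain JSON in the data field):
+import json
+import requests
+
+HUB_URL = "http://<hub>:<PORT>/sync"          # placeholders
+HEADERS = {"Authorization": "Bearer <API_TOKEN>"}
+
+payload = {"data": [{"devMac": "00:11:22:33:44:55",
+                     "devName": "Device 1",
+                     "devVendor": "Vendor A",
+                     "devLastIP": "192.168.1.10"}]}
+
+# Sent as form-encoded fields, matching the table above
+resp = requests.post(
+    HUB_URL,
+    headers=HEADERS,
+    data={"data": json.dumps(payload), "node_name": "NODE-01", "plugin": "SYNC"},
+    timeout=30,
+)
+print(resp.status_code, resp.text)
+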
+
+
+
+ Warning
+Please note these tests modify data in the database.
+Check the /test directory for available test cases. These are not exhaustive but cover the main API endpoints.
+sudo docker exec -it netalertx /bin/bash
+pip install pytest
+pytest /app/test/TESTFILE.py
+
+
+
+ Warning
+This is community contributed content and work in progress. Contributions are welcome.
+theme: dark
+
+default_2fa_method: "totp"
+
+server:
+ address: 0.0.0.0:9091
+ endpoints:
+ enable_expvars: false
+ enable_pprof: false
+ authz:
+ forward-auth:
+ implementation: 'ForwardAuth'
+ authn_strategies:
+ - name: 'HeaderAuthorization'
+ schemes:
+ - 'Basic'
+ - name: 'CookieSession'
+ ext-authz:
+ implementation: 'ExtAuthz'
+ authn_strategies:
+ - name: 'HeaderAuthorization'
+ schemes:
+ - 'Basic'
+ - name: 'CookieSession'
+ auth-request:
+ implementation: 'AuthRequest'
+ authn_strategies:
+ - name: 'HeaderAuthRequestProxyAuthorization'
+ schemes:
+ - 'Basic'
+ - name: 'CookieSession'
+ legacy:
+ implementation: 'Legacy'
+ authn_strategies:
+ - name: 'HeaderLegacy'
+ - name: 'CookieSession'
+ disable_healthcheck: false
+ tls:
+ key: ""
+ certificate: ""
+ client_certificates: []
+ headers:
+ csp_template: ""
+
+log:
+ ## Level of verbosity for logs: info, debug, trace.
+ level: info
+
+###############################################################
+# The most important section
+###############################################################
+access_control:
+ ## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'.
+ default_policy: deny
+ networks:
+ - name: internal
+ networks:
+ - '192.168.0.0/18'
+ - '10.10.10.0/8' # Zerotier
+ - name: private
+ networks:
+ - '172.16.0.0/12'
+ rules:
+ - networks:
+ - private
+ domain:
+ - '*'
+ policy: bypass
+ - networks:
+ - internal
+ domain:
+ - '*'
+ policy: bypass
+ - domain:
+      # exclude itself from auth, should not happen as we use Traefik middleware on a case-by-case scenario
+ - 'auth.MYDOMAIN1.TLD'
+ - 'authelia.MYDOMAIN1.TLD'
+ - 'auth.MYDOMAIN2.TLD'
+ - 'authelia.MYDOMAIN2.TLD'
+ policy: bypass
+ - domain:
+ #All subdomains match
+ - 'MYDOMAIN1.TLD'
+ - '*.MYDOMAIN1.TLD'
+ policy: two_factor
+ - domain:
+      # This will not work yet as Authelia does not support multi-domain authentication
+ - 'MYDOMAIN2.TLD'
+ - '*.MYDOMAIN2.TLD'
+ policy: two_factor
+
+
+############################################################
+identity_validation:
+ reset_password:
+ jwt_secret: "[REDACTED]"
+
+identity_providers:
+ oidc:
+ enable_client_debug_messages: true
+ enforce_pkce: public_clients_only
+ hmac_secret: [REDACTED]
+ lifespans:
+ authorize_code: 1m
+ id_token: 1h
+ refresh_token: 90m
+ access_token: 1h
+ cors:
+ endpoints:
+ - authorization
+ - token
+ - revocation
+ - introspection
+ - userinfo
+ allowed_origins:
+ - "*"
+ allowed_origins_from_client_redirect_uris: false
+ jwks:
+ - key: [REDACTED]
+ certificate_chain:
+ clients:
+ - client_id: portainer
+ client_name: Portainer
+ # generate secret with "authelia crypto hash generate pbkdf2 --random --random.length 32 --random.charset alphanumeric"
+ # Random Password: [REDACTED]
+ # Digest: [REDACTED]
+ client_secret: [REDACTED]
+ token_endpoint_auth_method: 'client_secret_post'
+ public: false
+ authorization_policy: two_factor
+ consent_mode: pre-configured #explicit
+ pre_configured_consent_duration: '6M' #Must be re-authorised every 6 Months
+ scopes:
+ - openid
+ #- groups #Currently not supported in Authelia V
+ - email
+ - profile
+ redirect_uris:
+ - https://portainer.MYDOMAIN1.LTD
+ userinfo_signed_response_alg: none
+
+ - client_id: openproject
+ client_name: OpenProject
+ # generate secret with "authelia crypto hash generate pbkdf2 --random --random.length 32 --random.charset alphanumeric"
+ # Random Password: [REDACTED]
+ # Digest: [REDACTED]
+ client_secret: [REDACTED]
+ token_endpoint_auth_method: 'client_secret_basic'
+ public: false
+ authorization_policy: two_factor
+ consent_mode: pre-configured #explicit
+ pre_configured_consent_duration: '6M' #Must be re-authorised every 6 Months
+ scopes:
+ - openid
+ #- groups #Currently not supported in Authelia V
+ - email
+ - profile
+ redirect_uris:
+ - https://op.MYDOMAIN.TLD
+ #grant_types:
+ # - refresh_token
+ # - authorization_code
+ #response_types:
+ # - code
+ #response_modes:
+ # - form_post
+ # - query
+ # - fragment
+ userinfo_signed_response_alg: none
+##################################################################
+
+
+telemetry:
+ metrics:
+ enabled: false
+ address: tcp://0.0.0.0:9959
+
+totp:
+ disable: false
+ issuer: authelia.com
+ algorithm: sha1
+ digits: 6
+ period: 30 ## The period in seconds a one-time password is valid for.
+ skew: 1
+ secret_size: 32
+
+webauthn:
+ disable: false
+ timeout: 60s ## Adjust the interaction timeout for Webauthn dialogues.
+ display_name: Authelia
+ attestation_conveyance_preference: indirect
+ user_verification: preferred
+
+ntp:
+ address: "pool.ntp.org"
+ version: 4
+ max_desync: 5s
+ disable_startup_check: false
+ disable_failure: false
+
+authentication_backend:
+ password_reset:
+ disable: false
+ custom_url: ""
+ refresh_interval: 5m
+ file:
+ path: /config/users_database.yml
+ watch: true
+ password:
+ algorithm: argon2
+ argon2:
+ variant: argon2id
+ iterations: 3
+ memory: 65536
+ parallelism: 4
+ key_length: 32
+ salt_length: 16
+
+password_policy:
+ standard:
+ enabled: false
+ min_length: 8
+ max_length: 0
+ require_uppercase: true
+ require_lowercase: true
+ require_number: true
+ require_special: true
+ ## zxcvbn is a well known and used password strength algorithm. It does not have tunable settings.
+ zxcvbn:
+ enabled: false
+ min_score: 3
+
+regulation:
+ max_retries: 3
+ find_time: 2m
+ ban_time: 5m
+
+session:
+ name: authelia_session
+ secret: [REDACTED]
+ expiration: 60m
+ inactivity: 15m
+ cookies:
+ - domain: 'MYDOMAIN1.LTD'
+ authelia_url: 'https://auth.MYDOMAIN1.LTD'
+ name: 'authelia_session'
+ default_redirection_url: 'https://MYDOMAIN1.LTD'
+ - domain: 'MYDOMAIN2.LTD'
+ authelia_url: 'https://auth.MYDOMAIN2.LTD'
+ name: 'authelia_session_other'
+ default_redirection_url: 'https://MYDOMAIN2.LTD'
+
+storage:
+ encryption_key: [REDACTED]
+ local:
+ path: /config/db.sqlite3
+
+notifier:
+ disable_startup_check: true
+ smtp:
+ address: MYOTHERDOMAIN.LTD:465
+ timeout: 5s
+ username: "USER@DOMAIN"
+ password: "[REDACTED]"
+ sender: "Authelia <postmaster@MYOTHERDOMAIN.LTD>"
+ identifier: NAME@MYOTHERDOMAIN.LTD
+ subject: "[Authelia] {title}"
+ startup_check_address: postmaster@MYOTHERDOMAIN.LTD
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Note
+To back up 99% of your configuration, back up at least the /data/config folder.
+Database definitions can change between releases, so the safest method is to restore backups using the same app version they were taken from, then upgrade incrementally.
There are four key artifacts you can use to back up your NetAlertX configuration:
+| File | Description | Limitations |
+|---|---|---|
+| /db/app.db | The application database | Might be in an uncommitted state or corrupted |
+| /config/app.conf | Configuration file | Can be overridden using the APP_CONF_OVERRIDE variable |
+| /config/devices.csv | CSV file containing device data | Does not include historical data |
+| /config/workflows.json | JSON file containing your workflows | N/A |
Understanding where your data is stored helps you plan your backup strategy.
+Stored in /data/config/app.conf.
+This includes settings for:
(See Settings System for details.)
+Stored in /data/config/devices_<timestamp>.csv or /data/config/devices.csv, created by the CSV Backup CSVBCKP Plugin.
+Contains:
Stored in /data/db/app.db (see Database Overview).
+Contains:
The safest approach is to back up both the /db and /config folders regularly. Tools like Kopia make this simple and efficient.
If you can only keep a few files, prioritize:
+devices_<timestamp>.csv or devices.csv
+app.conf
+workflows.json
+You can also download the app.conf and devices.csv files from the Maintenance section:

Goal: Full recovery of your configuration and data.
+/data/db/app.db (uncorrupted)
+/data/config/app.conf
+/data/config/workflows.json
+Map these files into your container as described in the Setup documentation.
+Goal: Recover configuration and device data when the database is lost or corrupted.
+Files needed:
+/data/config/app.conf
+/data/config/workflows.json
+/data/config/devices_<timestamp>.csv (rename to devices.csv during restore)
+Restore steps: copy app.conf and workflows.json into /data/config, then rename /data/config/devices_<timestamp>.csv → /data/config/devices.csv
+This recovers nearly all configuration, workflows, and device metadata.
+For users running NetAlertX via Docker, you can back up or restore directly from your host system — a convenient and scriptable option.
Backup:
+docker stop netalertx
+docker run --rm -v local_path/config:/config -v local_path/db:/db alpine tar -cz /config /db > netalertx-backup.tar.gz
+docker start netalertx
+
+Restore:
+docker stop netalertx
+docker run --rm -i -v local_path/config:/config -v local_path/db:/db alpine tar -C / -xz < netalertx-backup.tar.gz
+docker start netalertx
++This approach uses a temporary, minimal
+alpinecontainer to access Docker-managed volumes. Thetarcommand creates or extracts an archive directly from your host’s filesystem, making it fast, clean, and reliable for both automation and manual recovery.
+Back up /data/config for configuration and devices; /data/db for history.
+Prefer the alpine-based backup method for consistency and portability.
+
+
+
+ NetAlertX provides different installation methods for different needs. This guide helps you choose the right path for security, experimentation, or development.
+Note
+Use this image if: You want to use NetAlertX securely.
+All users who want a stable, secure, "set-it-and-forget-it" appliance.
+Dockerfile (hardened target)
Note
+Use this image if: You want to experiment with NetAlertX.
+Power users, developers, and "tinkerers" wanting a familiar "VM-like" experience.
+apt, sudo, gitDockerfile.debian
Note
+Use this image if: You want to develop NetAlertX itself.
+Project contributors who are actively writing and debugging code for NetAlertX.
+FROM runner stagexdebug, pytestDockerfile (devcontainer target)
This chart compares the three builds across key attributes. A higher score means "more of" that attribute. Notice the clear trade-offs between security and development features.
+
The final images originate from two different files and build paths. The main Dockerfile uses stages to create both the hardened and development container images.
Dockerfile -> builder (Stage 1) -> runner (Stage 2) -> hardened (Final Stage) (Production Image) + devcontainer (Final Stage) (Developer Image)
+Dockerfile.debian -> "Tinkerer's" Image (Insecure VM-Style Image)
+
+
+
+ Tip
+Before troubleshooting, ensure you have set the correct Debugging and LOG_LEVEL.
+Initial setup issues are often caused by missing permissions or incorrectly mapped volumes. Always double-check your docker run or docker-compose.yml against the official setup guide before proceeding.
Make sure your file permissions are correctly set:
+/tmp/log and app.db, as described in the File Permissions guide.
+Start the container without -d to see errors directly in the terminal:
+docker run --rm -it <your_image>
+
+If the container starts but the app shows unexpected behavior, the cause is often data corruption, incorrect configuration, or unexpected input data.
+A misconfigured application may display a persistent Loading... dialog. This is usually caused by the backend failing to start.
Steps to troubleshoot:
+trace or debug logging for detailed output (see Debug Tips).GRAPHQL_PORT is correctly configured.Check browser logs (press F12):
Console tab → refresh the page
+If you are unsure how to resolve errors, provide screenshots or log excerpts in your issue report or Discord discussion.
+SCAN_SUBNETS
+If SCAN_SUBNETS is misconfigured, you may see only a few devices in your device list after a scan. See the Subnets Documentation for proper configuration.
Make sure SCAN_SUBNETS uses the correct mask and --interface.
+sudo: unexpected child termination condition: 0
+
+Resolution:
+wget ftp.us.debian.org/debian/pool/main/libs/libseccomp/libseccomp2_2.5.3-2_armhf.deb
+sudo dpkg -i libseccomp2_2.5.3-2_armhf.deb
+
+++⚠️ The link may break over time. Check Debian Packages for the latest version.
+
+Check SCAN_SUBNETS and change eth0 to the correct interface.
+Make sure /data/db and /data/config are mapped to persistent storage.
+Slowness can be caused by:
+app.log.++See Performance Tips for detailed optimization steps.
+
With ARPSCAN scans, some devices might flip IP addresses after each scan, triggering false notifications. This happens because some devices respond to broadcast calls, so different IPs are logged after each scan.
See how to prevent IP flipping in the ARPSCAN plugin guide.
+Alternatively adjust your notification settings to prevent false positives by filtering out events or devices.
+
+
+
+ Use the official installation guides at first and use community content as supplementary material. Open an issue or PR if you'd like to add your link to the list 🙏 (Ordered by last update time)
+
+
+
+
+ 
This functionality allows you to define custom properties for devices, which can store and display additional information on the device listing page. By marking properties as "Show", you can enhance the user interface with quick actions, notes, or external links.
+Custom properties are structured as a list of objects, where each property includes the following fields:
+| Field | +Description | +
|---|---|
CUSTPROP_icon |
+The icon (Base64-encoded HTML) displayed for the property. | +
CUSTPROP_type |
+The action type (e.g., show_notes, link, delete_dev). |
+
CUSTPROP_name |
+A short name or title for the property. | +
CUSTPROP_args |
+Arguments for the action (e.g., URL or modal text). | +
CUSTPROP_notes |
+Additional notes or details displayed when applicable. | +
CUSTPROP_show |
+A boolean to control visibility (true to show on the listing page). |
+

Visible properties (CUSTPROP_show: true) are displayed as interactive icons in the device listing. Each icon can perform one of the following actions based on the CUSTPROP_type:
Example: Firmware version details.
+Links:
+Example: Open a device's documentation or external site.
+Device Actions:
+Example: Quickly remove a device from the network.
+Plugins:
+Add a custom property with CUSTPROP_type set to link or link_new_tab to allow quick navigation to the external documentation of the device.
Firmware Details:
+Use CUSTPROP_type: show_notes to display firmware versions or upgrade instructions in a modal.
Device Removal:
+Use CUSTPROP_type: delete_dev.
+The run_plugin action type is currently not implemented and will show an alert if used.
+Only properties with CUSTPROP_show: true will appear on the listing page; their icon is taken from the CUSTPROP_icon setting.
+This feature provides a flexible way to enhance device management and display with interactive elements tailored to your needs.
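+As an illustration only, a minimal Python sketch assembling one such property and Base64-encoding it the way devCustomProps is described in the Database docs (a Base64-encoded JSON object); the URL is hypothetical:
+import base64
+import json
+
+# One custom property matching the fields described above; the icon is the
+# Base64-encoded HTML of a Font Awesome tag.
+icon_html = '<i class="fa-solid fa-book"></i>'
+prop = {
+    "CUSTPROP_icon": base64.b64encode(icon_html.encode()).decode(),
+    "CUSTPROP_type": "link_new_tab",
+    "CUSTPROP_name": "Documentation",
+    "CUSTPROP_args": "https://docs.example.com/my-device",   # hypothetical URL
+    "CUSTPROP_notes": "Opens the vendor documentation",
+    "CUSTPROP_show": True,
+}
+
+# The device's custom properties are stored as Base64-encoded JSON
+dev_custom_props = base64.b64encode(json.dumps([prop]).encode()).decode()
+print(dev_custom_props)
+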
+
+
+
+An overview of the most important database tables, as well as a detailed overview of the Devices table. The MAC address is used as a foreign key in most cases.
+| Field Name | +Description | +Sample Value | +
|---|---|---|
devMac |
+MAC address of the device. | +00:1A:2B:3C:4D:5E |
+
devName |
+Name of the device. | +iPhone 12 |
+
devOwner |
+Owner of the device. | +John Doe |
+
devType |
+Type of the device (e.g., phone, laptop, etc.). If set to a network type (e.g., switch), it will become selectable as a Network Parent Node. | +Laptop |
+
devVendor |
+Vendor/manufacturer of the device. | +Apple |
+
devFavorite |
+Whether the device is marked as a favorite. | +1 |
+
devGroup |
+Group the device belongs to. | +Home Devices |
+
devComments |
+User comments or notes about the device. | +Used for work purposes |
+
devFirstConnection |
+Timestamp of the device's first connection. | +2025-03-22 12:07:26+11:00 |
+
devLastConnection |
+Timestamp of the device's last connection. | +2025-03-22 12:07:26+11:00 |
+
devLastIP |
+Last known IP address of the device. | +192.168.1.5 |
+
devStaticIP |
+Whether the device has a static IP address. | +0 |
+
devScan |
+Whether the device should be scanned. | +1 |
+
devLogEvents |
+Whether events related to the device should be logged. | +0 |
+
devAlertEvents |
+Whether alerts should be generated for events. | +1 |
+
devAlertDown |
+Whether an alert should be sent when the device goes down. | +0 |
+
devSkipRepeated |
+Whether to skip repeated alerts for this device. | +1 |
+
devLastNotification |
+Timestamp of the last notification sent for this device. | +2025-03-22 12:07:26+11:00 |
+
devPresentLastScan |
+Whether the device was present during the last scan. | +1 |
+
devIsNew |
+Whether the device is marked as new. | +0 |
+
devLocation |
+Physical or logical location of the device. | +Living Room |
+
devIsArchived |
+Whether the device is archived. | +0 |
+
devParentMAC |
+MAC address of the parent device (if applicable) to build the Network Tree. | +00:1A:2B:3C:4D:5F |
+
devParentPort |
+Port of the parent device to which this device is connected. | +Port 3 |
+
devIcon |
+Icon representing the device. The value is a base64-encoded SVG or Font Awesome HTML tag. | +PHN2ZyB... |
+
devGUID |
+Unique identifier for the device. | +a2f4b5d6-7a8c-9d10-11e1-f12345678901 |
+
devSite |
+Site or location where the device is registered. | +Office |
+
devSSID |
+SSID of the Wi-Fi network the device is connected to. | +HomeNetwork |
+
devSyncHubNode |
+The NetAlertX node ID used for synchronization between NetAlertX instances. | +node_1 |
+
devSourcePlugin |
+Source plugin that discovered the device. | +ARPSCAN |
+
devCustomProps |
+Custom properties related to the device. The value is a base64-encoded JSON object. | +PHN2ZyB... |
+
devFQDN |
+Fully qualified domain name. | +raspberrypi.local |
+
devParentRelType |
+The type of relationship between the current device and its parent node. By default, selecting nic will hide it from lists. |
+nic |
+
devReqNicsOnline |
+Whether all NICs are required to be online to mark the current device as online. |
+
+To understand how the values of these fields influence application behavior, such as Notifications or Network topology, see also:
+ +| Table name | +Description | +Sample data | +
|---|---|---|
| CurrentScan | +Result of the current scan | +![]() |
+
| Devices | +The main devices database that also contains the Network tree mappings. If ScanCycle is set to 0 device is not scanned. |
+![]() |
+
| Events | +Used to collect connection/disconnection events. | +![]() |
+
| Online_History | +Used to display the Device presence chart |
+![]() |
+
| Parameters | +Used to pass values between the frontend and backend. | +![]() |
+
| Plugins_Events | +For capturing events exposed by a plugin via the last_result.log file. If unique then saved into the Plugins_Objects table. Entries are deleted once processed and stored in the Plugins_History and/or Plugins_Objects tables. |
+![]() |
+
| Plugins_History | +History of all entries from the Plugins_Events table |
+![]() |
+
| Plugins_Language_Strings | +Language strings collected from the plugin config.json files used for string resolution in the frontend. |
+![]() |
+
| Plugins_Objects | +Unique objects detected by individual plugins. | +![]() |
+
| Sessions | +Used to display sessions in the charts | +![]() |
+
| Settings | +Database representation of the sum of all settings from app.conf and plugins coming from config.json files. |
+![]() |
+
+
+
+
+The GraphQL server is an API middle layer, running on its own port specified by GRAPHQL_PORT, to retrieve and show the data in the UI. It can also be used to retrieve data for custom third-party integrations. Check the API documentation for details.
The most common issue is that the GraphQL server doesn't start properly, usually due to a port conflict. If you are running multiple NetAlertX instances, make sure to use unique ports by changing the GRAPHQL_PORT setting. The default is 20212.
GRAPHQL_PORT in case of issuesAs a first troubleshooting step try changing the default GRAPHQL_PORT setting. Please remember NetAlertX is running on the host so any application uising the same port will cause issues.
Ideally use the Settings UI to update the setting under General -> Core -> GraphQL port:
+
You might need to temporarily stop other applications or NetAlertX instances causing conflicts to update the setting. The API_TOKEN is used to authenticate any API calls, including GraphQL requests.
app.conf fileIf the UI is not accessible, you can directly edit the app.conf file in your /config folder:

All application settings can also be initialized via the APP_CONF_OVERRIDE docker env variable.
...
+ environment:
+ - PORT=20213
+ - APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"}
+...
+
+There are several ways to check if the GraphQL server is running.
+You can navigate to Maintenance -> Init Check to see if isGraphQLServerRunning is ticked:

You can navigate to Maintenance -> Logs and search for graphql to see if it started correctly and serving requests:

In your browser open the dev console (usually F12) and navigate to the Network tab where you can filter GraphQL requests (e.g., reload the Devices page).
+
You can then inspect any of the POST requests by opening them in a new tab.
+
+
+
+
+Check the HTTP response of the failing backend call by following these steps:
+
http://<server>:20211/api/table_devices.json?nocache=1704141103121
+http://<server>:20211/php/server/devices.php?action=getDevicesTotals
Post the error response in the existing issue thread on GitHub or create a new issue and include the redacted response of the failing query.
+For reference, the above queries should return results in the following format:
+

You can copy and paste any JSON result (result of the First and Third query) into an online JSON checker, such as this one to check if it's valid.
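+If you prefer to validate locally instead of using an online checker, here is a minimal Python sketch (the response.json filename is hypothetical — paste the copied API response into it first):
+import json
+import sys
+
+try:
+    with open("response.json", "r", encoding="utf-8") as f:
+        json.load(f)
+    print("Valid JSON")
+except json.JSONDecodeError as e:
+    print(f"Invalid JSON at line {e.lineno}, column {e.colno}: {e.msg}")
+    sys.exit(1)
+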
+
+
+
+ 
You can view recent backend PHP errors directly in the Maintenance > Logs section of the UI. This provides quick access to logs without needing terminal access.
+Sometimes, the UI might not be accessible. In that case, you can access the logs directly inside the container.
+bash
+ docker exec -it netalertx /bin/sh
bash
+ cat /var/log/nginx/error.log
bash
+ cat /tmp/log/app.php_errors.log
These logs will help identify syntax issues, fatal errors, or startup problems when the UI fails to load properly.
+
+
+
+ Tip
+Before troubleshooting, please ensure you have the right Debugging and LOG_LEVEL set.
+If a Plugin supplies data to the main app, it's done either via a SQL query or via a script that updates the last_result.log file in the plugin log folder (app/log/plugins/).
For a more in-depth overview on how plugins work check the Plugins development docs.
+Input data from the plugin might cause mapping issues in specific edge cases. Look for a corresponding section in the app.log file, for example notice the first line of the execution run of the PIHOLE plugin below:
17:31:05 [Scheduler] - Scheduler run for PIHOLE: YES
+17:31:05 [Plugin utils] ---------------------------------------------
+17:31:05 [Plugin utils] display_name: PiHole (Device sync)
+17:31:05 [Plugins] CMD: SELECT n.hwaddr AS Object_PrimaryID, {s-quote}null{s-quote} AS Object_SecondaryID, datetime() AS DateTime, na.ip AS Watched_Value1, n.lastQuery AS Watched_Value2, na.name AS Watched_Value3, n.macVendor AS Watched_Value4, {s-quote}null{s-quote} AS Extra, n.hwaddr AS ForeignKey FROM EXTERNAL_PIHOLE.Network AS n LEFT JOIN EXTERNAL_PIHOLE.Network_Addresses AS na ON na.network_id = n.id WHERE n.hwaddr NOT LIKE {s-quote}ip-%{s-quote} AND n.hwaddr is not {s-quote}00:00:00:00:00:00{s-quote} AND na.ip is not null
+17:31:05 [Plugins] setTyp: subnets
+17:31:05 [Plugin utils] Flattening the below array
+17:31:05 ['192.168.1.0/24 --interface=eth1']
+17:31:05 [Plugin utils] isinstance(arr, list) : False | isinstance(arr, str) : True
+17:31:05 [Plugins] Resolved value: 192.168.1.0/24 --interface=eth1
+17:31:05 [Plugins] Convert to Base64: True
+17:31:05 [Plugins] base64 value: b'MTkyLjE2OC4xLjAvMjQgLS1pbnRlcmZhY2U9ZXRoMQ=='
+17:31:05 [Plugins] Timeout: 10
+17:31:05 [Plugins] Executing: SELECT n.hwaddr AS Object_PrimaryID, 'null' AS Object_SecondaryID, datetime() AS DateTime, na.ip AS Watched_Value1, n.lastQuery AS Watched_Value2, na.name AS Watched_Value3, n.macVendor AS Watched_Value4, 'null' AS Extra, n.hwaddr AS ForeignKey FROM EXTERNAL_PIHOLE.Network AS n LEFT JOIN EXTERNAL_PIHOLE.Network_Addresses AS na ON na.network_id = n.id WHERE n.hwaddr NOT LIKE 'ip-%' AND n.hwaddr is not '00:00:00:00:00:00' AND na.ip is not null
+🔻
+17:31:05 [Plugins] SUCCESS, received 2 entries
+17:31:05 [Plugins] sqlParam entries: [(0, 'PIHOLE', '01:01:01:01:01:01', 'null', 'null', '2023-12-25 06:31:05', '172.30.0.1', 0, 'aaaa', 'vvvvvvvvv', 'not-processed', 'null', 'null', '01:01:01:01:01:01'), (0, 'PIHOLE', '02:42:ac:1e:00:02', 'null', 'null', '2023-12-25 06:31:05', '172.30.0.2', 0, 'dddd', 'vvvvv2222', 'not-processed', 'null', 'null', '02:42:ac:1e:00:02')]
+17:31:05 [Plugins] Processing : PIHOLE
+17:31:05 [Plugins] Existing objects from Plugins_Objects: 4
+17:31:05 [Plugins] Logged events from the plugin run : 2
+17:31:05 [Plugins] pluginEvents count: 2
+17:31:05 [Plugins] pluginObjects count: 4
+17:31:05 [Plugins] events_to_insert count: 0
+17:31:05 [Plugins] history_to_insert count: 4
+17:31:05 [Plugins] objects_to_insert count: 0
+17:31:05 [Plugins] objects_to_update count: 4
+17:31:05 [Plugin utils] In pluginEvents there are 2 events with the status "watched-not-changed"
+17:31:05 [Plugin utils] In pluginObjects there are 2 events with the status "missing-in-last-scan"
+17:31:05 [Plugin utils] In pluginObjects there are 2 events with the status "watched-not-changed"
+17:31:05 [Plugins] Mapping objects to database table: CurrentScan
+17:31:05 [Plugins] SQL query for mapping: INSERT into CurrentScan ( "cur_MAC", "cur_IP", "cur_LastQuery", "cur_Name", "cur_Vendor", "cur_ScanMethod") VALUES ( ?, ?, ?, ?, ?, ?)
+17:31:05 [Plugins] SQL sqlParams for mapping: [('01:01:01:01:01:01', '172.30.0.1', 0, 'aaaa', 'vvvvvvvvv', 'PIHOLE'), ('02:42:ac:1e:00:02', '172.30.0.2', 0, 'dddd', 'vvvvv2222', 'PIHOLE')]
+🔺
+17:31:05 [API] Update API starting
+17:31:06 [API] Updating table_plugins_history.json file in /api
+
+++The debug output between the 🔻red arrows🔺 is important for debugging (arrows added only to highlight the section on this page, they are not available in the actual debug log)
+
In the above output notice the section logging how many events are produced by the plugin:
+17:31:05 [Plugins] Existing objects from Plugins_Objects: 4
+17:31:05 [Plugins] Logged events from the plugin run : 2
+17:31:05 [Plugins] pluginEvents count: 2
+17:31:05 [Plugins] pluginObjects count: 4
+17:31:05 [Plugins] events_to_insert count: 0
+17:31:05 [Plugins] history_to_insert count: 4
+17:31:05 [Plugins] objects_to_insert count: 0
+17:31:05 [Plugins] objects_to_update count: 4
+
+These values, if formatted correctly, will also show up in the UI:
+
Sometimes specific log sections are needed to debug issues. The Devices and CurrentScan table data is sometimes needed to figure out what's wrong.
+Set LOG_LEVEL to trace (disable it once you have the info, as this produces big log files).
+Search for ================ DEVICES table content ================ in your logs.
+Search for ================ CurrentScan table content ================ in your logs.
+Set LOG_LEVEL back to debug or lower.
+
+
+
+ Please follow tips 1 - 4 to get a more detailed error.
+When debugging an issue always set the highest log level:
+LOG_LEVEL='trace'
Start the container via the terminal with a command similar to this one:
+docker run \
+ --network=host \
+ --restart unless-stopped \
+ -v /local_data_dir:/data \
+ -v /etc/localtime:/etc/localtime:ro \
+ --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
+ -e PORT=20211 \
+ -e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
+ ghcr.io/jokob-sk/netalertx:latest
+
+
+Note: Your /local_data_dir should contain a config and db folder.
Note
+⚠ The most important part is NOT to use the -d parameter so you see the error when the container crashes. Use this error in your issue description.
If possible, check if your issue got fixed in the _dev image before opening a new issue. The container is:
ghcr.io/jokob-sk/netalertx-dev:latest
++⚠ Please backup your DB and config beforehand!
+
Please also search open issues.
+To prevent a Docker container from automatically restarting in a Docker Compose file, specify the restart policy as no:
version: '3'
+
+services:
+ your-service:
+ image: your-image:tag
+    restart: "no"
+ # Other service configurations...
+
+Try starting the container with all data in non-persistent volumes. If this works, the issue might be related to the permissions of your persistent data mount locations on your server. See the Permissions guide for details.
+Sometimes specific log sections are needed to debug issues. The Devices and CurrentScan table data is sometimes needed to figure out what's wrong.
+Set LOG_LEVEL to trace (disable it once you have the info, as this produces big log files).
+Search for ================ DEVICES table content ================ in your logs.
+Search for ================ CurrentScan table content ================ in your logs.
+Set LOG_LEVEL back to debug or lower.
+See Common issues for additional troubleshooting tips.
+
+
+
+ NetAlertX allows you to mass-edit devices via a CSV export and import feature, or directly in the UI.
+Note
+Make sure you have your backups saved and restorable before doing any mass edits. Check Backup strategies.
+You can mass-edit devices in the Devices view by selecting the devices to edit and then clicking the Multi-edit button, or via the Maintenance > Multi-Edit section.
+
The database and device structure may change with new releases. When using the CSV import functionality, ensure the format matches what the application expects. To avoid issues, you can first export the devices and review the column formats before importing any custom data.
+Note
+As always, backup everything, just in case.
+A devices.csv file is generated in the /config folder.
+Edit the devices.csv file however you like.
Note
+The file contains a list of Devices, including the network relationships between Network Nodes and connected devices. You can also trigger this by accessing this URL: <server>:20211/php/server/devices.php?action=ExportCSV or via the CSV Backup plugin. (💡 You can schedule this)

Note
+Keep Linux line endings (suggested editors: Nano, Notepad++)
+
+
+
+
+ This set of settings allows you to group Devices under different views. The Archived toggle allows you to exclude a Device from most listings and notifications.
+

NTFPRCS_alert_down_time. See also Notification guide.
+
+
+
+ This module is responsible for inferring the most likely device type and icon based on minimal identifying data like MAC address, vendor, IP, or device name.
+It does this using a set of heuristics defined in an external JSON rules file, which it evaluates in priority order.
+Note
+You can find the full source code of the heuristics module in the device_heuristics.py file.
Rules are defined in a file called device_heuristics_rules.json (located under /back), structured like:
[
+ {
+ "dev_type": "Phone",
+ "icon_html": "<i class=\"fa-brands fa-apple\"></i>",
+ "matching_pattern": [
+ { "mac_prefix": "001A79", "vendor": "Apple" }
+ ],
+ "name_pattern": ["iphone", "pixel"]
+ }
+]
+
+Note
+Feel free to raise a PR in case you'd like to add any rules into the device_heuristics_rules.json file. Please place new rules into the correct position and consider the priority of already available rules.
| Field | Type | Description |
|---|---|---|
| dev_type | string | Type to assign if rule matches (e.g. "Gateway", "Phone") |
| icon_html | string | Icon (HTML string) to assign if rule matches. Encoded to base64 at load time. |
| matching_pattern | array | List of { mac_prefix, vendor } objects for first strict and then loose matching |
| name_pattern | array (optional) | List of lowercase substrings (used with regex) |
| ip_pattern | array (optional) | Regex patterns to match IPs |
Order in this array defines priority — rules are checked top-down and short-circuit on first match.
+The function guess_device_attributes(...) runs a series of matching functions in strict order:
1. match_mac_and_vendor()
2. match_vendor()
3. match_name()
4. match_ip()
If nothing matches, the device falls back to the NEWDEV_devIcon and NEWDEV_devType settings.
Note
+The app will try guessing the device type or icon if devType or devIcon are "" or "null".
The guessing process runs for every device as long as the current type or icon still matches the default values. Even if earlier heuristics return a match, the system continues evaluating additional clues — like name or IP — to try and replace placeholders.
+# Still considered a match attempt if current values are defaults
+if (not type_ or type_ == default_type) or (not icon or icon == default_icon):
+ type_, icon = match_ip(ip, default_type, default_icon)
+
+In other words: if the type or icon is still "unknown" (or matches the default), the system assumes the match isn’t final — and keeps looking. It stops only when both values are non-default (defaults are defined in the NEWDEV_devIcon and NEWDEV_devType settings).
These functions are executed in the following order:
+1. match_mac_and_vendor(mac_clean, vendor, ...): strict match on MAC prefix and vendor.
+2. match_vendor(vendor, ...): vendor-only match, used when no mac_prefix is present (ensures this is really a fallback).
+3. match_name(name, ...): matches the device name against name_pattern values using regex.
+4. match_ip(ip, ...): matches the IP against ip_pattern regex patterns.
+A matching rule supplies the icon_html, which is converted to an icon_base64 on load; if no rule matches, the default_icon (NEWDEV_devIcon setting) is used.
+TL;DR: Type and icon must both be matched. If only one is matched, the other falls back to the default.
+
+
+
+
+ The Main Info section is where most of the device identifiable information is stored and edited. Some of the information is autodetected via various plugins. Initial values for most of the fields can be specified in the NEWDEV plugin.
Note
+You can multi-edit devices by selecting them in the main Devices view, from the Maintenance section, or via the CSV Export functionality under Maintenance. More info can be found in the Devices Bulk-editing docs.
+
- Name: marked with (IP match) if the name is discovered via an IP match and not a MAC match, which could mean the name is incorrect as IPs might change.
- Type: select one of the existing device types (Smartphone, Tablet, Laptop, TV, router, etc.) or add a new device type. If you want the device to act as a Network device (and be able to be a network node in the Network view), select a type under Network Devices or add a new Network Device type in Settings. More information can be found in the Network Setup docs.
- Group: select one of the existing groups (Always on, Personal, Friends, etc.) or type your own Group name.
- Location: select one of the existing locations (Kitchen, Attic, Living room, etc.) or add a custom Location.
Note
+Please note the above usage of the fields are only suggestions. You can use most of these fields for other purposes, such as storing the network interface, company owning a device, or similar.
+You can create dummy devices from the Devices listing screen.
+
The MAC field and the Last IP field will then become editable.
+
Note
+You can couple this with the ICMP plugin which can be used to monitor the status of these devices, if they are actual devices reachable with the ping command. If not, you can use a loopback IP address so they appear online, such as 0.0.0.0 or 127.0.0.1.
To speed up device population you can also copy data from an existing device. This can be done from the Tools tab on the Device details.
+
+
+
+ This devcontainer is designed to mirror the production container environment as closely as possible, while providing a rich set of tools for development.
+Prerequisites:
+Launch the Devcontainer:
+Once you're inside the container, everything is set up for you.
+
The container's startup script (.devcontainer/scripts/setup.sh) automatically starts the Nginx/PHP frontend and the Python backend. You can restart them at any time using the built-in tasks.

Debugging for both the Python backend and PHP frontend is pre-configured and ready to go.
+- Python backend: debugpy listens on port 5678. Simply open a Python file (e.g., server/__main__.py), set a breakpoint, and press F5 (or select "Python Backend Debug: Attach") to connect the debugger.
+- PHP frontend: Xdebug listens on port 9003. In VS Code, start listening for Xdebug connections and use a browser extension (like "Xdebug helper") to start a debugging session for the web UI.
We've created several VS Code Tasks to simplify common operations. Access them by pressing F1 and typing "Tasks: Run Task".
- Generate Dockerfile: This is important. The actual .devcontainer/Dockerfile is auto-generated. If you need to change the container environment, edit .devcontainer/resources/devcontainer-Dockerfile and then run this task.
- Re-Run Startup Script: Manually re-runs the .devcontainer/scripts/setup.sh script to re-link files and restart services.
- Start Backend (Python) / Start Frontend (nginx and PHP-FPM): Manually restart the services if needed.
The environment includes pytest. You can run tests directly from the VS Code Test Explorer UI or by running pytest -q in the integrated terminal. The necessary PYTHONPATH is already configured so that tests can correctly import the server modules.
The setup is designed to be easy to manage. Here are the core principles:
+- Do not edit the Dockerfile directly: The main .devcontainer/Dockerfile is a combination of the project's root Dockerfile and a special dev-only stage. To add new tools or dependencies, edit .devcontainer/resources/devcontainer-Dockerfile and then run the Generate Dockerfile task.
+- If you need extra packages (installed via apk add), add them to the resource Dockerfile.
+- Startup and file-linking logic lives in .devcontainer/scripts/setup.sh.
+- The .github/copilot-instructions.md file is an excellent resource to help AI and humans understand the project's architecture, conventions, and how to use existing helper functions instead of hardcoding values.
+This setup provides a powerful and consistent foundation for all current and future contributors to NetAlertX.
+
+
+
+ I truly appreciate all contributions! To help keep this project maintainable, this guide provides an overview of project priorities, key design considerations, and overall philosophy. It also includes instructions for setting up your environment so you can start contributing right away.
+Before starting development, please review the following guidelines.
+The application architecture is designed for extensibility and maintainability. It relies heavily on configuration manifests via plugins and settings to dynamically build the UI and populate the application with data from various sources.
+For details, see:
+- Plugins Development (includes video)
+- Settings System
Focus on core functionality and integrate with existing tools rather than reinventing the wheel.
+Examples:
+- Using Apprise for notifications instead of implementing multiple separate gateways
+- Implementing regex-based validation instead of one-off validation for each setting
Note
+UI changes have lower priority. PRs are welcome, but please keep them small and focused.
+Tip
+There is also a ready to use devcontainer available.
+The following steps will guide you to set up your environment for local development and to run a custom Docker build on your system. For most changes the container doesn't need to be rebuilt, which speeds up development significantly.
+Note
+Replace /development with the path where your code files will be stored. The default container name is netalertx so there might be a conflict with your running containers.
1. mkdir /development
2. cd /development && git clone https://github.com/jokob-sk/NetAlertX.git
3. touch /development/.env_dev && sudo nano /development/.env_dev
+The file content should be the following, with your custom values.
+#--------------------------------
+#NETALERTX
+#--------------------------------
+PORT=22222 # make sure this port is unique on your whole network
+DEV_LOCATION=/development/NetAlertX
+APP_DATA_LOCATION=/volume/docker_appdata
+# Make sure your GRAPHQL_PORT setting has a port that is unique on your whole host network
+APP_CONF_OVERRIDE={"GRAPHQL_PORT":"22223"}
+# ALWAYS_FRESH_INSTALL=true # uncommenting this will always delete the content of /config and /db dirs on boot to simulate a fresh install
+
+Create a folder netalertx in the APP_DATA_LOCATION (in this example in /volume/docker_appdata) with 2 subfolders db and config.
- mkdir /volume/docker_appdata/netalertx
- mkdir /volume/docker_appdata/netalertx/db
- mkdir /volume/docker_appdata/netalertx/config
Then start the container:
- cd /development/NetAlertX && sudo docker-compose --env-file ../.env_dev up
You can then modify the python script without restarting/rebuilding the container every time. Additionally, you can trigger a plugin run via the UI:
+A quick cheat sheet of useful commands.
+A command to stop, remove the container and the image (replace netalertx and netalertx-netalertx with the appropriate values)
sudo docker container stop netalertx ; sudo docker container rm netalertx ; sudo docker image rm netalertx-netalertx
Most code changes can be tested without rebuilding the container. When working on the python server backend, you only need to restart the server.
+
+If the above doesn't work, SSH into the container and kill & restart the main script loop:
+sudo docker exec -it netalertx /bin/bash
pkill -f "python /app/server" && python /app/server &
If none of the above work, restart the docker container.
+This is usually the last resort as sometimes the Docker engine becomes unresponsive and the whole engine needs to be restarted.
+✔ Changes are backward-compatible with existing installs.
+✔ No unnecessary changes are made.
+✔ New features are reusable, not narrowly scoped.
+✔ Features are implemented via plugins if possible.
Notification testing:
+Updating Settings and their persistence.
+Note
+Always run all available tests as per the Testing documentation.
+
+
+
+
+ When using "--network=host" in the devcontainer, VS Code's normal port forwarding model doesn't apply. All container ports are already on the host network namespace, so:
- Adding application ports to forwardPorts can cause VS Code to pre-bind or reserve them (conflicts with startup scripts waiting for a free port).
- Debug ports (PHP Xdebug 9003, Python debugpy 5678) can still be listed safely in forwardPorts:
  "forwardPorts": [5678, 9003]
To see what is actually listening, run Tasks: Run Task → [Dev Container] List NetAlertX Ports (script: scripts/list-ports.sh).
It outputs binding address, PID (if resolvable) and process name for key ports.
You can edit the PORTS variable inside that script to add/remove watched ports.
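+If you prefer a manual check, a standard socket tool on the host works as well; for example (assuming ss is available), the following lists who is bound to the application and debug ports mentioned above:
+ss -ltnp | grep -E '20211|20212|5678|9003'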
+Set in 99-xdebug.ini:
xdebug.client_host=127.0.0.1
+xdebug.client_port=9003
+xdebug.discover_client_host=1
+
+Ensure your IDE is listening on 9003.
| Symptom | Cause | Fix |
|---|---|---|
| Waiting for port 20211 to free... repeats | VS Code pre-bound the port via forwardPorts | Remove the port from forwardPorts, rebuild, retry |
| PHP request hangs at start | Xdebug trying to connect to unresolved host (host.docker.internal) | Use 127.0.0.1 or rely on discovery |
| PORTS panel empty | Expected in host mode | Use the port enumeration task |
A possible improvement is to adjust setup.sh to skip the wait loop if the PID using the port is the intended process.
+
+
+
+ Warning
+⚠️ Important: The docker-compose has recently changed. Carefully read the Migration guide for detailed instructions.
+Great care is taken to ensure NetAlertX meets the needs of everyone while being flexible enough for anyone. This document outlines how you can configure your docker-compose. There are many settings, so we recommend using the Baseline Docker Compose as-is, or modifying it for your system.
+Note
+The container needs to run in network_mode:"host" to access Layer 2 networking such as arp, nmap and others. Due to lack of support for this feature, Windows host is not a supported operating system.
There is one baseline for NetAlertX. That's the default security-enabled official distribution.
+services:
+ netalertx:
+ #use an environmental variable to set host networking mode if needed
+    container_name: netalertx # The name shown when you run docker container ls
+ image: ghcr.io/jokob-sk/netalertx-dev:latest
+ network_mode: ${NETALERTX_NETWORK_MODE:-host} # Use host networking for ARP scanning and other services
+
+ read_only: true # Make the container filesystem read-only
+ cap_drop: # Drop all capabilities for enhanced security
+ - ALL
+ cap_add: # Add only the necessary capabilities
+ - NET_ADMIN # Required for ARP scanning
+ - NET_RAW # Required for raw socket operations
+ - NET_BIND_SERVICE # Required to bind to privileged ports (nbtscan)
+
+ volumes:
+ - type: volume # Persistent Docker-managed named volume for config + database
+ source: netalertx_data
+ target: /data # `/data/config` and `/data/db` live inside this mount
+ read_only: false
+
+ # Example custom local folder called /home/user/netalertx_data
+ # - type: bind
+ # source: /home/user/netalertx_data
+ # target: /data
+ # read_only: false
+ # ... or use the alternative format
+ # - /home/user/netalertx_data:/data:rw
+
+ - type: bind # Bind mount for timezone consistency
+ source: /etc/localtime
+ target: /etc/localtime
+ read_only: true
+
+ # Mount your DHCP server file into NetAlertX for a plugin to access
+ # - path/on/host/to/dhcp.file:/resources/dhcp.file
+
+ # tmpfs mount consolidates writable state for a read-only container and improves performance
+ # uid=20211 and gid=20211 is the netalertx user inside the container
+ # mode=1700 grants rwx------ permissions to the netalertx user only
+ tmpfs:
+ # Comment out to retain logs between container restarts - this has a server performance impact.
+ - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+
+ # Retain logs - comment out tmpfs /tmp if you want to retain logs between container restarts
+ # Please note if you remove the /tmp mount, you must create and maintain sub-folder mounts.
+ # - /path/on/host/log:/tmp/log
+ # - "/tmp/api:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+ # - "/tmp/nginx:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+ # - "/tmp/run:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+
+ environment:
+ LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces
+ PORT: ${PORT:-20211} # Application port
+ GRAPHQL_PORT: ${GRAPHQL_PORT:-20212} # GraphQL API port (passed into APP_CONF_OVERRIDE at runtime)
+ # NETALERTX_DEBUG: ${NETALERTX_DEBUG:-0} # 0=kill all services and restart if any dies. 1 keeps running dead services.
+
+ # Resource limits to prevent resource exhaustion
+ mem_limit: 2048m # Maximum memory usage
+ mem_reservation: 1024m # Soft memory limit
+ cpu_shares: 512 # Relative CPU weight for CPU contention scenarios
+ pids_limit: 512 # Limit the number of processes/threads to prevent fork bombs
+ logging:
+ driver: "json-file" # Use JSON file logging driver
+ options:
+ max-size: "10m" # Rotate log files after they reach 10MB
+ max-file: "3" # Keep a maximum of 3 log files
+
+ # Always restart the container unless explicitly stopped
+ restart: unless-stopped
+
+volumes: # Persistent volume for configuration and database storage
+ netalertx_data:
+
+Run or re-run it:
+docker compose up --force-recreate
+
+You can override the default settings by passing environmental variables to the docker compose up command.
Example using a single variable:
+This command runs NetAlertX on port 8080 instead of the default 20211.
+PORT=8080 docker compose up
+
+Example using all available variables:
+This command demonstrates overriding all primary environmental variables: running with host networking, on port 20211, GraphQL on 20212, and listening on all IPs.
+NETALERTX_NETWORK_MODE=host \
+LISTEN_ADDR=0.0.0.0 \
+PORT=20211 \
+GRAPHQL_PORT=20212 \
+NETALERTX_DEBUG=0 \
+docker compose up
+
+docker-compose.yaml ModificationsBy default, the baseline compose file uses a single named volume (netalertx_data) mounted at /data. This single-volume layout is preferred because NetAlertX manages both configuration and the database under /data (for example, /data/config and /data/db) via its web UI. Using one named volume simplifies permissions and portability: Docker manages the storage and NetAlertX manages the files inside /data.
A two-volume layout that mounts /data/config and /data/db separately (for example, netalertx_config and netalertx_db) is supported for backward compatibility and some advanced workflows, but it is an abnormal/legacy layout and not recommended for new deployments.
However, if you prefer to have direct, file-level access to your configuration for manual editing, a "bind mount" is a simple alternative. This tells Docker to use a specific folder from your computer (the "host") inside the container.
+How to make the change:
+Choose a location on your computer. For example, /local_data_dir.
Create the subfolders: mkdir -p /local_data_dir/config and mkdir -p /local_data_dir/db.
Edit your docker-compose.yml and find the volumes: section (the one inside the netalertx: service).
Comment out (add a # in front) or delete the type: volume blocks for netalertx_config and netalertx_db.
Add new lines pointing to your local folders.
+Before (Using Named Volumes - Preferred):
+...
+ volumes:
+ - netalertx_config:/data/config:rw #short-form volume (no /path is a short volume)
+ - netalertx_db:/data/db:rw
+...
+
+After (Using a Local Folder / Bind Mount):
+Make sure to replace /local_data_dir with your actual path. The format is <path_on_your_computer>:<path_inside_container>:<options>.
...
+ volumes:
+# - netalertx_config:/data/config:rw
+# - netalertx_db:/data/db:rw
+ - /local_data_dir/config:/data/config:rw
+ - /local_data_dir/db:/data/db:rw
+...
+
+Now, any files created by NetAlertX in /data/config will appear in your /local_data_dir/config folder.
This same method works for mounting other things, like custom plugins or enterprise NGINX files, as shown in the commented-out examples in the baseline file.
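+A quick way to verify the mapping (assuming the container is named netalertx as in the baseline file) is to compare the two listings, which should show the same files:
+docker exec netalertx ls -la /data/config
+ls -la /local_data_dir/config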
+Here are the essential modifications for common alternative setups.
+.env File for PathsThis method is useful for keeping your paths and other settings separate from your main compose file, making it more portable.
+docker-compose.yml changes:
...
+services:
+ netalertx:
+ environment:
+ - PORT=${PORT}
+ - GRAPHQL_PORT=${GRAPHQL_PORT}
+
+...
+
+.env file contents:
PORT=20211
+NETALERTX_NETWORK_MODE=host
+LISTEN_ADDR=0.0.0.0
+GRAPHQL_PORT=20212
+
+Run with: sudo docker-compose --env-file /path/to/.env up
This is for deploying on a Docker Swarm cluster. The key differences from the baseline are the removal of network_mode: from the service, and the addition of deploy: and networks: blocks at both the service and top-level.
Here are the only changes you need to make to the baseline compose file to make it Swarm-compatible.
+services:
+ netalertx:
+ ...
+ # network_mode: ${NETALERTX_NETWORK_MODE:-host} # <-- DELETE THIS LINE
+ ...
+
+ # 2. ADD a 'networks:' block INSIDE the service to connect to the external host network.
+ networks:
+ - outside
+ # 3. ADD a 'deploy:' block to manage the service as a swarm replica.
+ deploy:
+ mode: replicated
+ replicas: 1
+ restart_policy:
+ condition: on-failure
+
+
+# 4. ADD a new top-level 'networks:' block at the end of the file to define 'outside' as the external 'host' network.
+networks:
+ outside:
+ external:
+ name: "host"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ | 📑 Docker guide | +🚀 Releases | +📚 Docs | +🔌 Plugins | +🤖 Ask AI | +
|---|---|---|---|---|
Head to https://netalertx.com/ for more gifs and screenshots 📷.
+Note
+There is also an experimental 🧪 bare-metal install method available.
+Warning
+You will have to run the container on the host network and specify SCAN_SUBNETS unless you use other plugin scanners. The initial scan can take a few minutes, so please wait 5-10 minutes for the initial discovery to finish.
docker run -d --rm --network=host \
+ -v /local_data_dir:/data \
+ -v /etc/localtime:/etc/localtime \
+ --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
+ -e PORT=20211 \
+ -e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
+ ghcr.io/jokob-sk/netalertx:latest
+
+See alternative docker-compose examples.
| Default | Description | How to override |
|---|---|---|
| 20211 | Port of the web interface | -e PORT=20222 |
| 20212 | Port of the backend API server | -e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' or via the GRAPHQL_PORT Setting |
| Variable | Description | Example Value |
|---|---|---|
| PORT | Port of the web interface | 20211 |
| LISTEN_ADDR | Set the specific IP Address for the listener address for the nginx webserver (web interface). This could be useful when using multiple subnets to hide the web interface from all untrusted networks. | 0.0.0.0 |
| LOADED_PLUGINS | Default plugins to load. Plugins cannot be loaded with APP_CONF_OVERRIDE, you need to use this variable instead and then specify the plugin settings with APP_CONF_OVERRIDE. | ["PIHOLE","ASUSWRT"] |
| APP_CONF_OVERRIDE | JSON override for settings (except LOADED_PLUGINS). | {"SCAN_SUBNETS":"['192.168.1.0/24 --interface=eth1']","GRAPHQL_PORT":"20212"} |
| ALWAYS_FRESH_INSTALL | ⚠ If true will delete the content of the /db & /config folders. For testing purposes. Can be coupled with watchtower to have an always freshly installed netalertx/netalertx-dev image. | true |
+You can override the default GraphQL port setting GRAPHQL_PORT (set to 20212) by using the APP_CONF_OVERRIDE env variable. LOADED_PLUGINS and settings in APP_CONF_OVERRIDE can be specified via the UI as well.
Note
+See also Backup strategies.
| Required | Path | Description |
|---|---|---|
| ✅ | :/data | Folder which needs to contain the /db and /config sub-folders. |
| ✅ | /etc/localtime:/etc/localtime:ro | Ensures the timezone is the same as on the server. |
|  | :/tmp/log | Logs folder useful for debugging if you have issues setting up the container |
|  | :/tmp/api | The API endpoint containing static (but regularly updated) json and other files. Path configurable via the NETALERTX_API environment variable. |
|  | :/app/front/plugins/<plugin>/ignore_plugin | Map a file ignore_plugin to ignore a plugin. Plugins can be soft-disabled via settings. More in the Plugin docs. |
|  | :/etc/resolv.conf | Use a custom resolv.conf file for better name resolution. |
Use separate db and config directories, do not nest them:
data
+├── config
+└── db
+
+If you are facing permissions issues run the following commands on your server. This will change the owner and assure sufficient access to the database and config files that are stored in the /local_data_dir/db and /local_data_dir/config folders (replace local_data_dir with the location where your /db and /config folders are located).
sudo chown -R 20211:20211 /local_data_dir
+sudo chmod -R a+rwx /local_data_dir
+
+- The app generates the app.conf and app.db files on the first run.
+- You can edit app.conf in the /data/config/ folder directly.
+- You have to specify which network(s) should be scanned. This is done by entering subnets that are accessible from the host. If you use the default ARPSCAN plugin, you have to specify at least one valid subnet and interface in the SCAN_SUBNETS setting. See the documentation on How to set up multiple SUBNETS, VLANs and what are limitations for troubleshooting and more advanced scenarios.
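+As a sketch, the subnet and interface can also be supplied at container start via the APP_CONF_OVERRIDE variable described above (adjust the subnet, interface, and data path to your environment; note the escaped quotes required by the shell):
+docker run -d --network=host \
+  -v /local_data_dir:/data \
+  -e PORT=20211 \
+  -e APP_CONF_OVERRIDE="{\"SCAN_SUBNETS\":\"['192.168.1.0/24 --interface=eth1']\"}" \
+  ghcr.io/jokob-sk/netalertx:latest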
If you are running PiHole you can synchronize devices directly. Check the PiHole configuration guide for details.
+Note
+You can bulk-import devices via the CSV import method.
+You can read or watch several community configuration guides in Chinese, Korean, German, or French.
+++Please note these might be outdated. Rely on official documentation first.
+
1N8tupjeCK12qRVU2XrV17WvKK7LCawyZM
0x6e2749Cb42F4411bc98501406BdcD82244e3f9C7
📧 Email me at netalertx@gmail.com if you want to get in touch or if I should add other sponsorship platforms.
+
+
+
+
+ Warning
+⚠️ Important: The docker-compose has recently changed. Carefully read the Migration guide for detailed instructions.
+This guide assumes you are starting with the official docker-compose.yml file provided with the project. We strongly recommend you start with or migrate to this file as your baseline and modify it to suit your specific needs (e.g., changing file paths). While there are many ways to configure NetAlertX, the default file is designed to meet the mandatory security baseline with layer-2 networking capabilities while operating securely and without startup warnings.
This guide provides direct, concise solutions for common NetAlertX administrative tasks. It is structured to help you identify a problem, implement the solution, and understand the details.
+Note
+Other relevant resources:
+- Fixing Permission Issues
+- Handling Backups
+- Accessing Application Logs
+You want to edit your app.conf and other configuration files directly from your host machine, instead of using a Docker-managed volume.
+1. Stop the container:
+ docker-compose down
+2. (Optional but Recommended) Back up your data using the method in Part 1.
+3. Create a local folder on your host machine (e.g., /data/netalertx_config).
+4. Edit docker-compose.yml:
Comment out the netalertx_config volume entry and add the bind mount instead:
+ ...
+ volumes:
+ # - type: volume
+ # source: netalertx_config
+ # target: /data/config
+ # read_only: false
+ ...
+ # Example custom local folder called /data/netalertx_config
+ - type: bind
+ source: /data/netalertx_config
+ target: /data/config
+ read_only: false
+ ...
+5. (Optional) Restore your backup.
+6. Restart the container:
+ docker-compose up -d
This replaces the Docker-managed volume with a "bind mount." This is a direct mapping between a folder on your host computer (/data/netalertx_config) and a folder inside the container (/data/config), allowing you to edit the files directly.
You are currently using a local folder (bind mount) for your configuration (e.g., /data/netalertx_config) and want to switch to the recommended Docker-managed volume (netalertx_config).
+1. Stop the container:
+ docker-compose down
+2. Edit docker-compose.yml:
Re-enable the netalertx_config volume entry and comment out the bind mount:
+ ...
+ volumes:
+ - type: volume
+ source: netalertx_config
+ target: /data/config
+ read_only: false
+ ...
+ # Example custom local folder called /data/netalertx_config
+ # - type: bind
+ # source: /data/netalertx_config
+ # target: /data/config
+ # read_only: false
+ ...
+3. (Optional) Initialize the volume:
+ docker-compose up -d && docker-compose down
+4. Run the migration command (replace /data/netalertx_config with your actual path):
+ docker run --rm -v netalertx_config:/config -v /data/netalertx_config:/local-config alpine \
+ sh -c "tar -C /local-config -c . | tar -C /config -x"
+5. Restart the container:
+ docker-compose up -d
This uses a temporary alpine container that mounts both your source folder (/local-config) and destination volume (/config). The tar ... | tar ... command safely copies all files, including hidden ones, preserving structure.
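+To confirm the copy worked, you can list the volume's contents with another throwaway container, for example:
+docker run --rm -v netalertx_config:/config alpine ls -la /config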
You need to override the default Nginx configuration to add features like LDAP, SSO, or custom SSL settings.
+1. Stop the container:
+ docker-compose down
+2. Create your custom config file on your host (e.g., /data/my-netalertx.conf).
+3. Edit docker-compose.yml:
+ ...
+ # Use a custom Enterprise-configured nginx config for ldap or other settings
+ - /data/my-netalertx.conf:/tmp/nginx/active-config/netalertx.conf:ro
+ ...
+4. Restart the container:
+ docker-compose up -d
Docker’s bind mount overlays your host file (my-netalertx.conf) on top of the default file inside the container. The container remains read-only, but Nginx reads your file as if it were the default.
A plugin (like DHCPLSS) needs to read a file from your host machine (e.g., /var/lib/dhcp/dhcpd.leases).
+1. Stop the container:
+ docker-compose down
+2. Edit docker-compose.yml and add a new line under the volumes: section:
+ ...
+ volumes:
+ ...
+ # Mount for DHCPLSS plugin
+ - /var/lib/dhcp/dhcpd.leases:/mnt/dhcpd.leases:ro
+ ...
+3. Restart the container:
+ docker-compose up -d
+4. In the NetAlertX web UI, configure the plugin to read from:
/mnt/dhcpd.leases
This maps your host file to a new, read-only (:ro) location inside the container. The plugin can then safely read this file without exposing anything else on your host filesystem.
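+Before configuring the plugin, you can verify the file is actually visible inside the container (assuming the container is named netalertx):
+docker exec netalertx head /mnt/dhcpd.leases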
+
+
+
+ This guide shows you how to set up NetAlertX using Portainer’s Stacks feature.
+
Before deploying, make sure you have a folder on your Docker host for NetAlertX data. Replace APP_FOLDER with your preferred location, for example /local_data_dir here:
mkdir -p /local_data_dir/netalertx/config
+mkdir -p /local_data_dir/netalertx/db
+mkdir -p /local_data_dir/netalertx/log
+
+In Portainer, go to Stacks and add a new stack, giving it a name (e.g. netalertx). Copy and paste the following YAML into the Web editor:
+services:
+ netalertx:
+ container_name: netalertx
+ # Use this line for stable release
+ image: "ghcr.io/jokob-sk/netalertx:latest"
+ # Or, use this for the latest development build
+ # image: "ghcr.io/jokob-sk/netalertx-dev:latest"
+ network_mode: "host"
+ restart: unless-stopped
+ cap_drop: # Drop all capabilities for enhanced security
+ - ALL
+ cap_add: # Re-add necessary capabilities
+ - NET_RAW
+ - NET_ADMIN
+ - NET_BIND_SERVICE
+ volumes:
+ - ${APP_FOLDER}/netalertx/config:/data/config
+ - ${APP_FOLDER}/netalertx/db:/data/db
+ # to sync with system time
+ - /etc/localtime:/etc/localtime:ro
+ tmpfs:
+ # All writable runtime state resides under /tmp; comment out to persist logs between restarts
+ - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+ environment:
+ - PORT=${PORT}
+ - APP_CONF_OVERRIDE=${APP_CONF_OVERRIDE}
+
+In the Environment variables section of Portainer, add the following:
+- APP_FOLDER=/local_data_dir (or wherever you created the directories in step 1)
+- PORT=22022 (or another port if needed)
+- APP_CONF_OVERRIDE={"GRAPHQL_PORT":"22023"} (optional advanced settings, otherwise the backend API server port defaults to 20212)
+Tip
+If you are facing permissions issues run the following commands on your server. This will change the owner and assure sufficient access to the database and config files that are stored in the /local_data_dir/db and /local_data_dir/config folders (replace local_data_dir with the location where your /db and /config folders are located).
sudo chown -R 20211:20211 /local_data_dir
sudo chmod -R a+rwx /local_data_dir
Once the stack is deployed, open the web UI at http://<your-docker-host-ip>:22022
+
+You can check the container logs in Portainer under Containers → netalertx → Logs, or in ${APP_FOLDER}/netalertx/log if you enabled that volume. Once the application is running, configure it by reading the initial setup guide, or troubleshoot common issues.
+
+
+
+ This guide describes how to deploy NetAlertX in a Docker Swarm environment using an ipvlan network. This enables the container to receive a LAN IP address directly, which is ideal for network monitoring.
++Run this command on each node in the Swarm.
+
docker network create -d ipvlan \
+ --subnet=192.168.1.0/24 \ # 🔧 Replace with your LAN subnet
+ --gateway=192.168.1.1 \ # 🔧 Replace with your LAN gateway
+ -o ipvlan_mode=l2 \
+ -o parent=eno1 \ # 🔧 Replace with your network interface (e.g., eth0, eno1)
+ --config-only \
+ ipvlan-swarm-config
+
+++Run this on one Swarm manager node only.
+
docker network create -d ipvlan \
+ --scope swarm \
+ --config-from ipvlan-swarm-config \
+ swarm-ipvlan
+
+Use the following Compose snippet to deploy NetAlertX with a static LAN IP assigned via the swarm-ipvlan network.
services:
+ netalertx:
+ image: ghcr.io/jokob-sk/netalertx:latest
+...
+ networks:
+ swarm-ipvlan:
+ ipv4_address: 192.168.1.240 # ⚠️ Choose a free IP from your LAN
+ deploy:
+ mode: replicated
+ replicas: 1
+ restart_policy:
+ condition: on-failure
+ placement:
+ constraints:
+ - node.role == manager # 🔄 Or use: node.labels.netalertx == true
+
+networks:
+ swarm-ipvlan:
+ external: true
+
+- The ipvlan setup allows NetAlertX to have a direct IP on your LAN.
+- Replace eno1 with your interface, and adjust IP addresses and volume paths to match your environment.
+- Make sure the chosen static IP (192.168.1.240 above) is not in use or managed by DHCP.
+- Adjust the placement constraint node.role == manager (or use a node label) for more control.
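+With the network in place and the compose file saved (for example as docker-compose.yml), deploy it from a manager node under a stack name of your choice, e.g.:
+docker stack deploy -c docker-compose.yml netalertx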
+
+
+
+ Sometimes, permission issues arise if your existing host directories were created by a previous container running as root or another UID. The container will fail to start with "Permission Denied" errors.
+Tip
+NetAlertX runs in a secure, read-only Alpine-based container under a dedicated netalertx user (UID 20211, GID 20211). All writable paths are either mounted as persistent volumes or tmpfs filesystems. This ensures consistent file ownership and prevents privilege escalation.
Try starting the container with all data to be in non-persistent volumes. If this works, the issue might be related to the permissions of your persistent data mount locations on your server.
+docker run --rm --network=host \
+ -v /etc/localtime:/etc/localtime:ro \
+ --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
+ -e PORT=20211 \
+ ghcr.io/jokob-sk/netalertx:latest
+
+Warning
+The above should be only used as a test - once the container restarts, all data is lost.
+NetAlertX requires certain paths to be writable at runtime. These paths should be mounted either as host volumes or tmpfs in your docker-compose.yml or docker run command:
| Path | Purpose | Notes |
|---|---|---|
| /data/config | Application configuration | Persistent volume recommended |
| /data/db | Database files | Persistent volume recommended |
| /tmp/log | Logs | Lives under /tmp; optional host bind to retain logs |
| /tmp/api | API cache | Subdirectory of /tmp |
| /tmp/nginx/active-config | Active nginx configuration override | Mount /tmp (or override specific file) |
| /tmp/run | Runtime directories for nginx & PHP | Subdirectory of /tmp |
| /tmp | PHP session save directory | Backed by tmpfs for runtime writes |
+Mounting /tmp as tmpfs automatically covers all of its subdirectories (log, api, run, nginx/active-config, etc.). All these paths will have UID 20211 / GID 20211 inside the container. Files on the host will appear owned by
+20211:20211.
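+To check the current ownership of your host data folder (replace /local_data_dir with your data location), the numeric owner and group should read 20211:
+ls -ln /local_data_dir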
--user "0") to allow it to correct permissions automatically:docker run -it --rm --name netalertx --user "0" \
+ -v /local_data_dir:/data \
+ --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
+ ghcr.io/jokob-sk/netalertx:latest
+
Once permissions are fixed, stop the container and start it again with your normal docker-compose.yml or docker run command.
The container startup script detects
+rootand runschown -R 20211:20211on all volumes, fixing ownership for the securenetalertxuser.
Tip
+If you are facing permissions issues run the following commands on your server. This will change the owner and assure sufficient access to the database and config files that are stored in the /local_data_dir/db and /local_data_dir/config folders (replace local_data_dir with the location where your /db and /config folders are located).
sudo chown -R 20211:20211 /local_data_dir
sudo chmod -R a+rwx /local_data_dir
Example docker-compose with tmpfs:
services:
+ netalertx:
+ container_name: netalertx
+ image: "ghcr.io/jokob-sk/netalertx"
+ network_mode: "host"
+ cap_drop: # Drop all capabilities for enhanced security
+ - ALL
+ cap_add: # Add only the necessary capabilities
+ - NET_ADMIN # Required for ARP scanning
+ - NET_RAW # Required for raw socket operations
+ - NET_BIND_SERVICE # Required to bind to privileged ports (nbtscan)
+ restart: unless-stopped
+ volumes:
+ - /local_data_dir:/data
+ - /etc/localtime:/etc/localtime
+ environment:
+ - PORT=20211
+ tmpfs:
+ - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+
+++ + + + + + + + + + + + + +This setup ensures all writable paths are either in
+tmpfsor host-mounted, and the container never writes outside of controlled volumes.
+
+
+
+ In some network setups, certain devices may intermittently appear as offline in NetAlertX, even though they are connected and responsive. This issue is often more noticeable with devices that have higher IP addresses within the subnet.
+Note
+Network presence graph showing increased drop outs before enabling additional ICMP scans and continuous online presence after following this guide. This graph shows a sudden spike in drop outs probably caused by a device software update.
+
Affected devices often have higher IP addresses in the subnet (e.g., 192.168.1.240+).
This issue is typically related to scanning limitations:
+Sole reliance on ARP can result in missed detections:
+Some devices (like iPhones) suppress or reject frequent ARP requests.
+ARP responses may be blocked or delayed due to power-saving features or OS behavior.
+Scanning frequency conflicts, where devices ignore repeated scans within a short period.
+To improve presence accuracy and reduce false offline states:
+Extend the ARP scanner timeout and DURATION to ensure full subnet coverage:
+ARPSCAN_RUN_TIMEOUT=360
+ARPSCAN_DURATION=30
+
+++Adjust based on your network size and device count.
+
Enable the ICMP scan plugin to complement ARP detection. ICMP is often more reliable for detecting active hosts, especially when ARP fails.
A combined approach greatly improves detection robustness:
+- ARPSCAN (default)
+- ICMP (ping)
+- NMAPDEV (nmap)
+This hybrid strategy increases reliability, especially for down detection and alerting. See other plugins that might be compatible with your setup. See benefits and drawbacks of individual scan methods in their respective docs.
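+As a sketch, these scanners can be enabled at container start via the LOADED_PLUGINS variable covered in the Docker installation docs (plugin-specific settings are then supplied via APP_CONF_OVERRIDE or the UI):
+docker run -d --network=host \
+  -v /local_data_dir:/data \
+  -e PORT=20211 \
+  -e LOADED_PLUGINS='["ARPSCAN","ICMP","NMAPDEV"]' \
+  ghcr.io/jokob-sk/netalertx:latest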
+After increasing the ARP timeout and adding ICMP scanning (on select IP ranges), users typically report:
| Setting | Recommendation |
|---|---|
| ARPSCAN_RUN_TIMEOUT | Increase to ensure scans reach all IPs |
| ICMP Scan | Enable to detect devices ARP might miss |
| Multi-method Scanning | Use a mix of ARP, ICMP, and NMAP-based methods |
Tip: Each environment is unique. Consider fine-tuning scan settings based on your network size, device behavior, and desired detection accuracy.
+Let us know in the NetAlertX Discussions if you have further feedback or edge cases.
+See also Remote Networks for more advanced setups.
+
+
+
+ This page contains tips for frontend development when extending NetAlertX. Guiding principles are:
+That means that, when writing code, focus on reusing what's available instead of writing quick fixes, and on creating reusable functions instead of bespoke functionality.
+Some examples of how to apply the above:
+++Example 1
+I want to implement a scan function. Options would be:
++
+- To add a manual scan functionality to the
+deviceDetails.phppage.- To create a separate page that handles the execution of the scan.
+- To create a configurable Plugin.
+From the above, number 3 would be the most appropriate solution. Then followed by number 2. Number 1 would be approved only in special circumstances.
+Example 2
+I want to change the behavior of the application. Options to implement this could be:
++
+- Hard-code the changes in the code.
+- Implement the changes and add settings to influence the behavior in the
+initialize.pyfile so the user can adjust these.- Implement the changes and add settings via a setting-only plugin.
+- Implement the changes in a way so the behavior can be toggled on each plugin so the core capabilities of Plugins get extended.
+From the above, number 4 would be the most appropriate solution. Then followed by number 3. Number 1 or 2 would be approved only in special circumstances.
+
Some useful frontend JavaScript functions:
- getDevDataByMac(macAddress, devicesColumn) - method to retrieve any device data (database column) based on MAC address in the frontend
- getString(string stringKey) - method to retrieve translated strings in the frontend
- getSetting(string stringKey) - method to retrieve settings in the frontend
Check the common.js file for more frontend functions.
+
+
+
+ This page provides an overview of community-contributed scripts for NetAlertX. These scripts are not actively maintained and are provided as-is.
+You can find all scripts in this scripts GitHub folder.
| Script Name | Description | Author | Version | Release Date |
|---|---|---|---|---|
| New Devices Checkmk Script | Checks for new devices in NetAlertX and reports status to Checkmk. | N/A | 1.0 | 08-Jan-2025 |
| DB Cleanup Script | Queries and removes old device-related entries from the database. | laxduke | 1.0 | 23-Dec-2024 |
| OPNsense DHCP Lease Converter | Retrieves DHCP lease data from OPNsense and converts it to dnsmasq format. | im-redactd | 1.0 | 24-Feb-2025 |
Note
+These scripts are community-supplied and not actively maintained. Use at your own discretion.
+For detailed usage instructions, refer to each script's documentation in each scripts GitHub folder.
+
+
+
+ NetAlertX comes with MQTT support, allowing you to show all detected devices as devices in Home Assistant. It also supplies a collection of stats, such as number of online devices.
+Tip
+You can install NetAlertX also as a Home Assistant addon via the alexbelgium/hassio-addons repository. This is only possible if you run a supervised instance of Home Assistant. If not, you can still run NetAlertX in a separate Docker container and follow this guide to configure MQTT.
++💡 This guide was tested only with the Mosquitto MQTT broker
+
Enable Mosquitto MQTT in Home Assistant by following the documentation
+Configure a user name and password on your broker.
+Note down the following details that you will need to configure NetAlertX:
+Open the NetAlertX > Settings > MQTT settings group
+
+If you can't see all devices detected, run sudo arp-scan --interface=eth0 192.168.1.0/24 (change these based on your setup, read Subnets docs for details). This command has to be executed in the NetAlertX container, not in the Home Assistant container.
You can access the NetAlertX container via Portainer on your host or via ssh. The container name will be something like addon_db21ed7f_netalertx (you can copy the db21ed7f_netalertx part from the browser when accessing the UI of NetAlertX).
local@local:~ $ ssh pi@192.168.1.9
+
+Find the container (named something like addon_db21ed7f_netalertx):
+pi@raspberrypi:~ $ sudo docker container ls | grep netalertx
+06c540d97f67 ghcr.io/alexbelgium/netalertx-armv7:25.3.1 "/init" 6 days ago Up 6 days (healthy) addon_db21ed7f_netalertx
+
+pi@raspberrypi:~ $ sudo docker exec -it addon_db21ed7f_netalertx /bin/sh
+/ #
+
+Run the arp-scan scan:
+/ # sudo arp-scan --ignoredups --retry=6 192.168.1.0/24 --interface=eth0
+Interface: eth0, type: EN10MB, MAC: dc:a6:32:73:8a:b1, IPv4: 192.168.1.9
+Starting arp-scan 1.10.0 with 256 hosts (https://github.com/royhills/arp-scan)
+192.168.1.1 74:ac:b9:54:09:fb Ubiquiti Networks Inc.
+192.168.1.21 74:ac:b9:ad:c3:30 Ubiquiti Networks Inc.
+192.168.1.58 1c:69:7a:a2:34:7b EliteGroup Computer Systems Co., LTD
+192.168.1.57 f4:92:bf:a3:f3:56 Ubiquiti Networks Inc.
+...
+
+If your result doesn't contain results similar to the above, double check your subnet, interface and if you are dealing with an inaccessible network segment, read the Remote networks documentation.
+
+
+
+ To download and install NetAlertX on the hardware/server directly use the curl or wget commands at the bottom of this page.
Note
+This is an Experimental feature 🧪 and it relies on community support.
+🙏 Looking for maintainers for this installation method 🙂 Current community volunteers: + - slammingprogramming + - ingoratsdorf
+There is no guarantee that the install script or any other script will gracefully handle other installed software. +Data loss is a possibility, it is recommended to install NetAlertX using the supplied Docker image.
+Warning
+A warning to the installation method below: Piping to bash is controversial and may be dangerous, as you cannot see the code that's about to be executed on your system.
+If you trust this repo, you can download the install script via one of the methods (curl/wget) below and it will do its best to install NetAlertX on your system.
+Alternatively you can download the installation script from the repository and check the code yourself.
+NetAlertX will be installed in /app and run on port number 20211.
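+Once one of the install scripts below has completed, a quick sanity check (assuming the default port) is to confirm the web UI responds locally:
+curl -I http://localhost:20211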
Some facts about what and where something will be changed/installed by the HW install setup (may not contain everything!):
- The /app directory will be deleted and newly created
- /app will contain the whole repository (downloaded by the install script)
- /etc/nginx/sites-enabled/default will be disabled (sym-link deleted or backed up to sites-available)
- The /var/www/html/netalertx directory will be deleted and newly created
- /etc/nginx/conf.d/netalertx.conf will be sym-linked to the appropriate installer location (depending on your system installer script)
- The app is started via /app/install/<system>/start.<system>.sh.
+If the below fails try grabbing and installing one of the previous releases and run the installation from the zip package.
+These commands will download the install.debian12.sh script from the GitHub repository, make it executable with chmod, and then run it using ./install.debian12.sh.
Make sure you have the necessary permissions to execute the script.
+curl -o install.debian12.sh https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/install/debian12/install.debian12.sh && sudo chmod +x install.debian12.sh && sudo ./install.debian12.sh
+
+wget https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/install/debian12/install.debian12.sh -O install.debian12.sh && sudo chmod +x install.debian12.sh && sudo ./install.debian12.sh
+
+Note
+Maintained by ingoratsdorf
+curl -o install.sh https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/install/ubuntu24/install.sh && sudo chmod +x install.sh && sudo ./install.sh
+
+wget https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/install/ubuntu24/install.sh -O install.sh && sudo chmod +x install.sh && sudo ./install.sh
+
+Note
+Use this on a clean LXC/VM for Debian 13 OR Ubuntu 24.
+The script will detect the OS and build accordingly.
+Maintained by JVKeller
+wget https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/install/proxmox/proxmox-install-netalertx.sh -O proxmox-install-netalertx.sh && chmod +x proxmox-install-netalertx.sh && ./proxmox-install-netalertx.sh
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Icons are used to visually distinguish devices in the app in most of the device listing tables and the network tree.
+![]()
Two types of icons are supported:
+You can assign icons individually on each device in the Details tab.
+Copying the SVG (for example from iconify.design):
+![]()
Copying the HTML code from Font Awesome.
+![]()
![]()
![]()
Note
+If you want to mass-apply an icon to all devices of the same device type (Field: Type), you can click the mass-copy button (next to the "+" button). A confirmation prompt is displayed. If you proceed, icons of all devices set to the same device type as the current device, will be overwritten with the current device's icon.
+If you own the premium package of Font Awesome icons you can mount it in your Docker container the following way:
+/font-awesome:/app/front/lib/font-awesome:ro
+
+You can use the full range of Font Awesome icons afterwards.
+
+
+
+ Get NetAlertX up and running in a few simple steps.
+Tip
+Enable additional plugins under Settings → LOADED_PLUGINS.
+Make sure to save your changes and reload the page to activate them.
+
Initial configuration: ARPSCAN, INTRNT
Note
+ARPSCAN and INTRNT scan the current network. You can complement them with other 🔍 dev scanner plugins like NMAPDEV, or import devices using 📥 importer plugins.
+See the Subnet & VLAN Setup Guide and Remote Networks for advanced configurations.
Initial configuration: SMTP
Note
+Configure your SMTP settings or enable additional ▶️ publisher plugins to send alerts.
+For more flexibility, try 📚 _publisher_apprise, which supports over 80 notification services.

Initial configuration: The app auto-selects a root node (MAC internet) and attempts to identify other network devices by vendor or name.
Note
+Visualize and manage your network using the Network Guide.
+Some plugins (e.g., UNFIMP) build the topology automatically, or you can use Custom Workflows to generate it based on your own rules.

Initial configuration: Notifies on new_devices, down_devices, and events as defined in NTFPRCS_INCLUDED_SECTIONS.
Note
+Notification settings support global, plugin-specific, and per-device rules. +For fine-tuning, refer to the Notification Guide.
+
Initial configuration: N/A
+Note
+Automate responses to device status changes, group management, topology updates, and more. +See the Workflows Guide to simplify your network operations.
+
Initial configuration: The CSVBCKP plugin creates a daily backup to /config/devices.csv.
Note
+For a complete backup strategy, follow the Backup Guide.
+Initial configuration: N/A
+Note
+Build your own scanner, importer, or publisher plugin. +See the Plugin Development Guide and included video tutorials.
+Before opening a new issue:
+
+
+
+ NetAlertX can be installed several ways. The best supported option is Docker, followed by a supervised Home Assistant instance, as an Unraid app, and lastly, on bare metal.
+If facing issues, please spend a few minutes searching the documentation and existing issues first.
+Note
+If you can't find a solution anywhere, ask in Discord if you think it's a quick question, otherwise open a new issue. Please fill in as much as possible to speed up the help process.
+
+
+
+
+ NetAlertX comes with several logs that help to identify application issues. These include nginx logs, app, or plugin logs. For plugin-specific log debugging, please read the Debug Plugins guide.
+Note
+When debugging any issue, increase the LOG_LEVEL Setting as per the Debug tips documentation.
You can find most of the logs exposed in the UI under Maintenance -> Logs.
+If the UI is inaccessible, you can access them under /tmp/log.

+In the Maintenance -> Logs section you can Purge logs, download the full log file, or Filter the lines with a substring to narrow down your search.
+If a Plugin supplies data to the main app it's done either via a SQL query or via a script that updates the last_result.log file in the plugin log folder (app/log/plugins/). These files are processed at the end of the scan and deleted on successful processing.
The data is in most of the cases then displayed in the application under Integrations -> Plugins (or Device -> Plugins if the plugin is supplying device-specific data).
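+If you prefer the command line, you can also peek at these plugin log files inside the container (assuming the container is named netalertx; the exact path may differ between versions):
+docker exec netalertx ls -la /app/log/plugins/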
+
You cannot find any log files on the filesystem. The container is read-only and writes logs to a temporary in-memory filesystem (tmpfs) for security and performance. The application follows container best-practices by writing all logs to the standard output (stdout) and standard error (stderr) streams. Docker's logging driver (set in docker-compose.yml) captures this stream automatically, allowing you to access it with the docker logs <image_name> command.
+* To view the logs:
+ docker logs netalertx
+* To watch the logs live (live feed):
+ docker logs -f netalertx
The default logs are erased every time the container restarts because they are stored in temporary in-memory storage (tmpfs). If you need to keep a persistent, file-based log history, follow the steps below.
Note
+This might lead to performance degradation so this approach is only suggested when actively debugging issues. See the Performance optimization documentation for details.
1. Stop the container:
+ docker-compose down
2. Edit your docker-compose.yml file:
   - Comment out the /tmp/log line under the tmpfs: section.
   - Add a bind mount under the volumes: section and set your desired host path:
+ ...
+ tmpfs:
+ # - "/tmp/log:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+ ...
+ volumes:
+ ...
+ # Retain logs - comment out tmpfs /tmp/log if you want to retain logs between container restarts
+ - /home/adam/netalertx_logs:/tmp/log
+ ...
+3. Restart the container:
+ docker-compose up -d
This change stops Docker from mounting a temporary in-memory volume at /tmp/log. Instead, it "bind mounts" a persistent folder from your host computer (e.g., /data/netalertx_logs) to that same location inside the container.
+
+
+
+ When upgrading from older versions of NetAlertX (or PiAlert by jokob-sk), follow the migration steps below to ensure your data and configuration are properly transferred.
+Tip
+It's always important to have a backup strategy in place.
+You are running PiAlert (by jokob-sk)
+ → Read the 1.1 Migration from PiAlert to NetAlertX v25.5.24
You are running NetAlertX (by jokob-sk) 25.5.24 or older
+ → Read the 1.2 Migration from NetAlertX v25.5.24
You are running NetAlertX (by jokob-sk) (v25.6.7 to v25.10.1)
+ → Read the 1.3 Migration from NetAlertX v25.10.1
You can migrate data manually, for example by exporting and importing devices using the CSV import method.
+v25.5.24The application will automatically migrate the database, configuration, and all device information. +A banner message will appear at the top of the web UI reminding you to update your Docker mount points.
+1. Update your docker-compose.yml or docker run command (see New Docker mount locations below).
+2. If you mount files directly, rename them to app.db and app.conf and place them in the appropriate location.
+If you have trouble accessing past backups, config or database files you can copy them into the newly mapped directories, for example by running this command in the container: cp -r /data/config /home/pi/pialert/config/old_backup_files. This should create a folder in the config directory called old_backup_files containing all the files in that location. Another approach is to map the old location and the new one at the same time to copy things over.
The internal application path in the container has changed from /home/pi/pialert to /app. Update your volume mounts as follows:
| Old mount point | +New mount point | +
|---|---|
/home/pi/pialert/config |
+/data/config |
+
/home/pi/pialert/db |
+/data/db |
+
If you were mounting files directly, please note the file names have changed:
+| Old file name | +New file name | +
|---|---|
pialert.conf |
+app.conf |
+
pialert.db |
+app.db |
+
Note
+The application automatically creates symlinks from the old database and config locations to the new ones, so data loss should not occur. Read the backup strategies guide to backup your setup.
+Examples of docker files with the new mount points.
+services:
+ pialert:
+ container_name: pialert
+ # use the below line if you want to test the latest dev image
+ # image: "ghcr.io/jokob-sk/netalertx-dev:latest"
+ image: "jokobsk/pialert:latest"
+ network_mode: "host"
+ restart: unless-stopped
+ volumes:
+ - /local_data_dir/config:/home/pi/pialert/config
+ - /local_data_dir/db:/home/pi/pialert/db
+ # (optional) useful for debugging if you have issues setting up the container
+ - /local_data_dir/logs:/home/pi/pialert/front/log
+ environment:
+ - TZ=Europe/Berlin
+ - PORT=20211
+
+services:
+ netalertx: # 🆕 This has changed
+ container_name: netalertx # 🆕 This has changed
+ image: "ghcr.io/jokob-sk/netalertx:25.5.24" # 🆕 This has changed
+ network_mode: "host"
+ restart: unless-stopped
+ volumes:
+ - /local_data_dir/config:/data/config # 🆕 This has changed
+ - /local_data_dir/db:/data/db # 🆕 This has changed
+ # (optional) useful for debugging if you have issues setting up the container
+ - /local_data_dir/logs:/tmp/log # 🆕 This has changed
+ environment:
+ - TZ=Europe/Berlin
+ - PORT=20211
+
+Note
+The recommendation is to map folders as in Example 1, map files directly only when needed.
+services:
+ pialert:
+ container_name: pialert
+ # use the below line if you want to test the latest dev image
+ # image: "ghcr.io/jokob-sk/netalertx-dev:latest"
+ image: "jokobsk/pialert:latest"
+ network_mode: "host"
+ restart: unless-stopped
+ volumes:
+ - /local_data_dir/config/pialert.conf:/home/pi/pialert/config/pialert.conf
+ - /local_data_dir/db/pialert.db:/home/pi/pialert/db/pialert.db
+ # (optional) useful for debugging if you have issues setting up the container
+ - /local_data_dir/logs:/home/pi/pialert/front/log
+ environment:
+ - TZ=Europe/Berlin
+ - PORT=20211
+
+services:
+ netalertx: # 🆕 This has changed
+ container_name: netalertx # 🆕 This has changed
+ image: "ghcr.io/jokob-sk/netalertx:25.5.24" # 🆕 This has changed
+ network_mode: "host"
+ restart: unless-stopped
+ volumes:
+ - /local_data_dir/config/app.conf:/data/config/app.conf # 🆕 This has changed
+ - /local_data_dir/db/app.db:/data/db/app.db # 🆕 This has changed
+ # (optional) useful for debugging if you have issues setting up the container
+ - /local_data_dir/logs:/tmp/log # 🆕 This has changed
+ environment:
+ - TZ=Europe/Berlin
+ - PORT=20211
+
1.2 Migration from NetAlertX v25.5.24

Versions before v25.10.1 require an intermediate migration through v25.5.24 to ensure database compatibility. Skipping this step may cause compatibility issues due to database schema changes introduced after v25.5.24.
1. First upgrade to v25.5.24 by pinning the release version (See Examples below).
2. Then upgrade to v25.10.1 by pinning the release version (See Examples below).

Examples of docker files with the tagged version.
+services:
+ netalertx:
+ container_name: netalertx
+ image: "ghcr.io/jokob-sk/netalertx:25.5.24" # 🆕 This is important
+ network_mode: "host"
+ restart: unless-stopped
+ volumes:
+ - /local_data_dir/config:/data/config
+ - /local_data_dir/db:/data/db
+ # (optional) useful for debugging if you have issues setting up the container
+ - /local_data_dir/logs:/tmp/log
+ environment:
+ - TZ=Europe/Berlin
+ - PORT=20211
+
+services:
+ netalertx:
+ container_name: netalertx
+ image: "ghcr.io/jokob-sk/netalertx:25.10.1" # 🆕 This is important
+ network_mode: "host"
+ restart: unless-stopped
+ volumes:
+ - /local_data_dir/config:/data/config
+ - /local_data_dir/db:/data/db
+ # (optional) useful for debugging if you have issues setting up the container
+ - /local_data_dir/logs:/tmp/log
+ environment:
+ - TZ=Europe/Berlin
+ - PORT=20211
+
1.3 Migration from NetAlertX v25.10.1

Starting from v25.10.1, the container uses a more secure, read-only runtime environment, which requires all writable paths (e.g., logs, API cache, temporary data) to be mounted as tmpfs or permanent writable volumes, with sufficient access permissions. The data location has also changed from /app/db and /app/config to /data/db and /data/config. See detailed steps below.
1. Upgrade to v25.10.1 by pinning the release version (See the example below):
services:
+ netalertx:
+ container_name: netalertx
+ image: "ghcr.io/jokob-sk/netalertx:25.10.1" # 🆕 This is important
+ network_mode: "host"
+ restart: unless-stopped
+ volumes:
+ - /local_data_dir/config:/app/config
+ - /local_data_dir/db:/app/db
+ # (optional) useful for debugging if you have issues setting up the container
+ - /local_data_dir/logs:/tmp/log
+ environment:
+ - TZ=Europe/Berlin
+ - PORT=20211
+
2. Update your docker-compose.yml as per the example below:
services:
+ netalertx:
+ container_name: netalertx
+ image: "ghcr.io/jokob-sk/netalertx" # 🆕 This has changed
+ network_mode: "host"
+ cap_drop: # 🆕 New line
+ - ALL # 🆕 New line
+ cap_add: # 🆕 New line
+ - NET_RAW # 🆕 New line
+ - NET_ADMIN # 🆕 New line
+ - NET_BIND_SERVICE # 🆕 New line
+ restart: unless-stopped
+ volumes:
+ - /local_data_dir:/data # 🆕 This folder contains your /db and /config directories and the parent changed from /app to /data
+ # Ensuring the timezone is the same as on the server - make sure also the TIMEZONE setting is configured
+ - /etc/localtime:/etc/localtime:ro # 🆕 New line
+ environment:
+ - PORT=20211
+ # 🆕 New "tmpfs" section START 🔽
+ tmpfs:
+ # All writable runtime state resides under /tmp; comment out to persist logs between restarts
+ - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+ # 🆕 New "tmpfs" section END 🔼
+
3. Make sure the mounted folders are readable and writable by the netalertx image and the 20211 user.

Note

The examples below assume your /config and /db folders are stored in local_data_dir.
Replace this path with your actual configuration directory. netalertx is the container name, which might differ from your setup.
Automated approach:
+Run the container with the --user "0" parameter. Please note, some systems will require the manual approach below.
docker run -it --rm --name netalertx --user "0" \
+ -v /local_data_dir/config:/app/config \
+ -v /local_data_dir/db:/app/db \
+ -v /local_data_dir:/data \
+ --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
+ ghcr.io/jokob-sk/netalertx:latest
+
+Stop the container and run it as you would normally.
+Manual approach:
+Use the manual approach if the Automated approach fails. Execute the below commands:
+sudo chown -R 20211:20211 /local_data_dir
+sudo chmod -R a+rwx /local_data_dir
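To verify that the permissions were applied, list the folder with numeric owner IDs. A minimal check, assuming the example path from above:

ls -ln /local_data_dir
# the config and db folders should now show 20211 as owner and group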
+
+
+
+
+
+ Name resolution in NetAlertX relies on multiple plugins to resolve device names from IP addresses. If you are seeing (name not found) as device names, follow these steps to diagnose and fix the issue.
Tip
+Before proceeding, make sure Reverse DNS is enabled on your network.
+You can control how names are handled and cleaned using the NEWDEV_NAME_CLEANUP_REGEX setting.
+To auto-update Fully Qualified Domain Names (FQDN), enable the REFRESH_FQDN setting.
For best results, ensure the core name resolution (🆎) plugins, such as AVAHISCAN, DIGSCAN, NBTSCAN, and NSLOOKUP, are enabled.
+You can check which plugins are active in your Settings section and enable any that are missing.
+There are other plugins that can supply device names as well, but they rely on bespoke hardware and services. See Plugins overview for details and look for plugins with name discovery (🆎) features.
+If names are not resolving, check the logs for errors or timeouts.
+See how to explore logs in the Logging guide.
+Logs will show which plugins attempted resolution and any failures encountered.
If resolution is slow or failing due to timeouts, increase the timeout settings in your configuration, for example:
+NSLOOKUP_RUN_TIMEOUT = 30
+
+Raising the timeout may help if your network has high latency or slow DNS responses.
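It can also help to confirm that reverse DNS answers at all from the machine running NetAlertX. A quick manual check using standard tools (the IP below is only an example):

nslookup 192.168.1.10        # should return a PTR record with the device name
dig -x 192.168.1.10 +short   # alternative check using dig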
+Each plugin stores results in its respective object. You can inspect these objects to see if they contain valid name resolution data.
+See Logging guide and Debug plugins guides for details.
+If the object contains no results, the issue may be with DNS settings or network access.
For more details on how to improve name resolution, refer to the Reverse DNS documentation.
+
+
+
The Network page lets you map how devices connect, both visually and logically. It's especially useful for planning infrastructure, assigning parent-child relationships, and spotting gaps.
+
To get started, you’ll need to define at least one root node and mark certain devices as network nodes (like Switches or Routers).
+Start by creating a root device with the MAC address Internet, if the application didn’t create one already.
+This special MAC address (Internet) is required for the root network node — no other value is currently supported.
+Set its Type to a valid network type — such as Router or Gateway.
Tip
+If you don’t have one, use the Create new device button on the Devices page to add a root device.
Supported network node types: AP, Firewall, Gateway, PLC, Powerline, Router, Switch, USB LAN Adapter, USB WIFI Adapter, WLAN (or add custom types under Settings → General → NETWORK_DEVICE_TYPES).
If the port is 0 or empty, a Wi-Fi icon is shown. Otherwise, an Ethernet icon appears.
Note
+Use bulk editing with CSV Export to fix Internet root assignments or update many devices at once.
Example: raspberrypi as a Switch

Let's walk through setting up a device named raspberrypi to act as a network Switch that other devices connect through.

1. Open the device raspberrypi and set its Type to Switch.
2. The nic relationship type can affect parent notifications; see the setting description and the Notifications documentation for more.
3. If you combine plugins that don't detect the parent node (such as ARPSCAN) with ones that do (such as UNIFIAPI), you must set the setting NEWDEV_devParentMAC to None.
Note
+Only certain device types can act as network nodes:
+AP, Firewall, Gateway, Hypervisor, PLC, Powerline, Router, Switch, USB LAN Adapter, USB WIFI Adapter, WLAN
+You can add custom types via the NETWORK_DEVICE_TYPES setting.
You can confirm that raspberrypi now acts as a network device in two places:
1. On other devices, raspberrypi now appears as an option for a Parent Node.
2. The Network page shows a raspberrypi tab, meaning it's recognized as a network node (Switch).

You can now assign other devices to the raspberrypi switch node.

Hovering over devices in the tree reveals connection details and tooltips for quick inspection.
+
Note
+Selecting certain relationship types hides the device in the default device views.
+You can change this behavior by adjusting the UI_hide_rel_types setting, which by default is set to ["nic","virtual"].
+This means devices with devParentRelType set to nic or virtual will not be shown.
+All devices, regardless of relationship type, are always accessible in the All devices view.
To configure devices on the Network page, first make sure a root device with the MAC Internet is set up as the root, then assign network nodes and connect your devices to them.

Need to reset or undo changes? Use backups or bulk editing to manage devices at scale. You can also automate device assignment with Workflows.
+
+
+
There are four ways to influence notifications, covered in the sections below: device-specific settings, plugin-specific settings, notification-processing settings, and globally ignoring devices.
+Note
+It's recommended to use the same schedule interval for all plugins responsible for scanning devices, otherwise false positives might be reported if different devices are discovered by different plugins. Check the Settings > Enabled settings section for a warning:
+

The following device properties influence notifications. For example, you can require that a device is considered online only when all of its NICs (child devices with the nic relationship type) are online. If disabled, the device is considered online if any NIC is online. If a NIC is online, it sets the parent (this) device's status to online irrespective of the detected device's status. The Relationship type is set on the child device.
Note
+Please read through the NTFPRCS plugin documentation to understand how device and global settings influence the notification processing.
+
On almost all plugins there are 2 core settings, <plugin>_WATCH and <plugin>_REPORT_ON.
<plugin>_WATCH specifies the columns which the app should watch. If watched columns change, the device state is considered changed. This changed status is then used to decide whether to send out notifications based on the <plugin>_REPORT_ON setting. <plugin>_REPORT_ON lets you specify on which events the app should notify you. This is related to the <plugin>_WATCH setting: if you select watched-changed and in <plugin>_WATCH you only select Watched_Value1, then a notification is triggered if Watched_Value1 changed from the previous value, but no notification is sent if Watched_Value2 changes. Click the Read more in the docs link at the top of each plugin to get more details on how the given plugin works.
+
In Notification Processing settings, you can specify blanket rules. These allow you to specify exceptions to the Plugin and Device settings and will override those.
- Included sections (NTFPRCS_INCLUDED_SECTIONS) allows you to specify which events trigger notifications. Usual setups will have new_devices, down_devices, and possibly down_reconnected set. Including plugin (dependent on the Plugin <plugin>_WATCH and <plugin>_REPORT_ON settings) and events (dependent on the on-device Alert Events setting) might be too noisy for most setups. More info in the NTFPRCS plugin on what events these selections include.
- Alert down time (NTFPRCS_alert_down_time) is useful if you want to wait for some time before the system sends out a down notification for a device. This is related to the on-device Alert down setting and only devices with this checked will trigger a down notification.

You can filter out unwanted notifications globally. This could be because of a misbehaving device (GoogleNest/GoogleHub (see also the ARPSCAN docs and the --exclude-broadcast flag)) which flips between IP addresses, or because you want to ignore new device notifications of a certain pattern.
- Event condition (NTFPRCS_event_condition) - Filter out Events from notifications.
- New device condition (NTFPRCS_new_dev_condition) - Filter out New Devices from notifications, but log and keep a new device in the system.
You can completely ignore detected devices globally. This could be because your instance detects docker containers, you want to ignore devices from a specific manufacturer via MAC rules or you want to ignore devices on a specific IP range.
- Ignored MACs (NEWDEV_ignored_MACs) - List of MACs to ignore.
- Ignored IPs (NEWDEV_ignored_IPs) - List of IPs to ignore.
+
+
+
+ There are several ways to improve the application's performance. The application has been tested on a range of devices, from Raspberry Pi 4 units to NAS and NUC systems. If you are running the application on a lower-end device, fine-tuning the performance settings can significantly improve the user experience.
Performance issues are usually caused by failing maintenance tasks, an oversized database or logs, or overly frequent scans; check app.log under Maintenance → Logs for details.

The application performs regular maintenance and database cleanup. If these tasks are failing, you will see slowdowns.
+A large database or oversized log files can impact performance. You can check database and table sizes on the Maintenance page.
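You can also check the sizes directly on the host. A quick sketch, assuming the example mount paths used elsewhere in this documentation:

ls -lh /local_data_dir/db/app.db    # database file size
du -sh /local_data_dir/logs         # log folder size, if you persist logs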
+
Note
+Two plugins help maintain the system’s performance:
- Database cleanup (DBCLNP): adjust its schedule (DBCLNP_RUN_SCHD) and timeout (DBCLNP_RUN_TIMEOUT) if necessary.
- Maintenance (MAINT): adjust its schedule (MAINT_RUN_SCHD) and timeout (MAINT_RUN_TIMEOUT) if needed.

Frequent scans increase resource usage, network traffic, and database read/write cycles.
- Reduce the scan frequency (<PLUGIN>_RUN_SCHD) on busy networks or low-end hardware.
- Increase plugin timeouts (<PLUGIN>_RUN_TIMEOUT) to avoid plugin failures.
- Scan smaller subnets, e.g. /24 instead of /16, to reduce scan load.

Some plugins also include options to limit which devices are scanned. If certain plugins consistently run long, consider narrowing their scope.
+For example, the ICMP plugin allows scanning only IPs that match a specific regular expression.
+On devices with slower I/O, you can improve performance by storing temporary files (and optionally the database) in memory using tmpfs.
Warning
+Storing the database in tmpfs is generally discouraged. Use this only if device data and historical records are not required to persist. If needed, you can pair this setup with the SYNC plugin to store important persistent data on another node. See the Plugins docs for details.
Using tmpfs reduces disk writes and speeds up I/O, but all data stored in memory will be lost on restart.
Below is an optimized docker-compose.yml snippet using non-persistent logs, API data, and DB:
services:
+ netalertx:
+ container_name: netalertx
+ # Use this line for the stable release
+ image: "ghcr.io/jokob-sk/netalertx:latest"
+ # Or use this line for the latest development build
+ # image: "ghcr.io/jokob-sk/netalertx-dev:latest"
+ network_mode: "host"
+ restart: unless-stopped
+
+ cap_drop: # Drop all capabilities for enhanced security
+ - ALL
+ cap_add: # Re-add necessary capabilities
+ - NET_RAW
+ - NET_ADMIN
+ - NET_BIND_SERVICE
+
+ volumes:
+ - ${APP_FOLDER}/netalertx/config:/data/config
+ - /etc/localtime:/etc/localtime:ro
+
+ tmpfs:
+ # All writable runtime state resides under /tmp; comment out to persist logs between restarts
+ - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+ - "/data/db:uid=20211,gid=20211,mode=1700" # ⚠ You will lose historical data on restart
+
+ environment:
+ - PORT=${PORT}
+ - APP_CONF_OVERRIDE=${APP_CONF_OVERRIDE}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
NetAlertX comes with 3 plugins suitable for integrating with your existing PiHole instance. One plugin uses the v6 API, another uses a direct SQLite DB connection, and a third leverages the dhcp.leases file generated by PiHole. You can combine multiple approaches and also supplement scans with other plugins.
PIHOLEAPI Plugin - Import devices directly from PiHole v6 API
To use this approach make sure the Web UI password in Pi-hole is set.
| Setting | Description | Recommended value |
|---|---|---|
| PIHOLEAPI_URL | Your Pi-hole base URL including port. | http://192.168.1.82:9880/ |
| PIHOLEAPI_RUN_SCHD | If you run multiple device scanner plugins, align the schedules of all plugins to the same value. | */5 * * * * |
| PIHOLEAPI_PASSWORD | The Web UI base64 encoded (en-/decoding handled by the app) admin password. | passw0rd |
| PIHOLEAPI_SSL_VERIFY | Whether to verify HTTPS certificates. Disable only for self-signed certificates. | False |
| PIHOLEAPI_API_MAXCLIENTS | Maximum number of devices to request from Pi-hole. Defaults are usually fine. | 500 |
| PIHOLEAPI_FAKE_MAC | Generate FAKE MAC from IP. | False |
+
Check the PiHole API plugin readme for details and troubleshooting.
No docker-compose.yml changes are needed for this plugin.
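Before enabling the plugin, it can help to confirm that the Pi-hole base URL is reachable from the machine running NetAlertX. A minimal sketch; the URL is the example value from the table above:

curl -s -o /dev/null -w "%{http_code}\n" http://192.168.1.82:9880/
# any HTTP status code (e.g. 200 or 403) means the host and port are reachable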
+DHCPLSS Plugin - Import devices from the PiHole DHCP leases file
| Setting | Description | Recommended value |
|---|---|---|
| DHCPLSS_RUN | When the plugin should run. | schedule |
| DHCPLSS_RUN_SCHD | If you run multiple device scanner plugins, align the schedules of all plugins to the same value. | */5 * * * * |
| DHCPLSS_paths_to_check | You need to map the value in this setting in the docker-compose.yml file. The in-container path must contain pihole so it's parsed correctly. | ['/etc/pihole/dhcp.leases'] |
+
Check the DHCPLSS plugin readme for details
| Path | Description |
|---|---|
| :/etc/pihole/dhcp.leases | PiHole's dhcp.leases file. Required if you want to use the PiHole dhcp.leases file. This has to be matched with a corresponding DHCPLSS_paths_to_check setting entry (the path in the container must contain pihole). |
+
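If Pi-hole runs on the same host, you can expose the leases file to the NetAlertX container with an extra volume mapping. This is only a sketch; the host-side path depends on your Pi-hole installation:

docker run ... \
  -v /etc/pihole/dhcp.leases:/etc/pihole/dhcp.leases:ro \
  ...
# the in-container path must then be listed in the DHCPLSS_paths_to_check setting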
PIHOLE Plugin - Import devices directly from the PiHole database
| Setting | Description | Recommended value |
|---|---|---|
| PIHOLE_RUN | When the plugin should run. | schedule |
| PIHOLE_RUN_SCHD | If you run multiple device scanner plugins, align the schedules of all plugins to the same value. | */5 * * * * |
| PIHOLE_DB_PATH | You need to map the value in this setting in the docker-compose.yml file. | /etc/pihole/pihole-FTL.db |
+
Check the PiHole plugin readme for details
| Path | Description |
|---|---|
| :/etc/pihole/pihole-FTL.db | PiHole's pihole-FTL.db database file. |
+
Check out other plugins that can help you discover more about your network or check how to scan Remote networks.
+
+
+
+ NetAlertX supports additional plugins to extend its functionality, each with its own settings and options. Plugins can be loaded via the General -> LOADED_PLUGINS setting. For custom plugin development, refer to the Plugin development guide.
Note
+Please check this Plugins debugging guide and the corresponding Plugin documentation in the below table if you are facing issues.
+Tip
+You can load additional Plugins via the General -> LOADED_PLUGINS setting. You need to save the settings for the new plugins to load (cache/page reload may be necessary).
+
To get the most out of the app, enable at least one 🔍 dev scanner plugin (e.g. ARPSCAN or NMAPDEV), or import devices into the application with an 📥 importer plugin (See Enabling plugins below). Enable a ▶️ publisher plugin if you want to send notifications. If you don't see a publisher you'd like to use, look at the _publisher_apprise plugin, which is a proxy for over 80 notification services.

| Plugin type | Icon | Description | When to run | Required | Data source |
|---|---|---|---|---|---|
| publisher | ▶️ | Sending notifications to services. | on_notification | ✖ | Script |
| dev scanner | 🔍 | Create devices in the app, manages online/offline device status. | schedule | ✖ | Script / SQLite DB |
| name discovery | 🆎 | Discovers names of devices via various protocols. | before_name_updates, schedule | ✖ | Script |
| importer | 📥 | Importing devices from another service. | schedule | ✖ | Script / SQLite DB |
| system | ⚙ | Providing core system functionality. | schedule / always on | ✖/✔ | Script / Template |
| other | ♻ | Other plugins | misc | ✖ | Script / Template |
| Icon | Description |
|---|---|
| 🖧 | Auto-imports the network topology diagram |
| 🔄 | Has the option to sync some data back into the plugin source |
Device-detecting plugins insert values into the CurrentScan database table. The plugins that are not required are safe to ignore, however, it makes sense to have at least some device-detecting plugins enabled, such as ARPSCAN or NMAPDEV.
| ID | Plugin docs | Type | Description | Features | Required |
|---|---|---|---|---|---|
| APPRISE | _publisher_apprise | ▶️ | Apprise notification proxy | | |
| ARPSCAN | arp_scan | 🔍 | ARP-scan on current network | | |
| AVAHISCAN | avahi_scan | 🆎 | Avahi (mDNS-based) name resolution | | |
| ASUSWRT | asuswrt_import | 🔍 | Import connected devices from AsusWRT | | |
| CSVBCKP | csv_backup | ⚙ | CSV devices backup | | |
| CUSTPROP | custom_props | ⚙ | Managing custom device properties values | | Yes |
| DBCLNP | db_cleanup | ⚙ | Database cleanup | | Yes* |
| DDNS | ddns_update | ⚙ | DDNS update | | |
| DHCPLSS | dhcp_leases | 🔍/📥/🆎 | Import devices from DHCP leases | | |
| DHCPSRVS | dhcp_servers | ♻ | DHCP servers | | |
| DIGSCAN | dig_scan | 🆎 | Dig (DNS) Name resolution | | |
| FREEBOX | freebox | 🔍/♻/🆎 | Pull data and names from Freebox/Iliadbox | | |
| ICMP | icmp_scan | ♻ | ICMP (ping) status checker | | |
| INTRNT | internet_ip | 🔍 | Internet IP scanner | | |
| INTRSPD | internet_speedtest | ♻ | Internet speed test | | |
| IPNEIGH | ipneigh | 🔍 | Scan ARP (IPv4) and NDP (IPv6) tables | | |
| LUCIRPC | luci_import | 🔍 | Import connected devices from OpenWRT | | |
| MAINT | maintenance | ⚙ | Maintenance of logs, etc. | | |
| MQTT | _publisher_mqtt | ▶️ | MQTT for synching to Home Assistant | | |
| MTSCAN | mikrotik_scan | 🔍 | Mikrotik device import & sync | | |
| NBTSCAN | nbtscan_scan | 🆎 | Nbtscan (NetBIOS-based) name resolution | | |
| NEWDEV | newdev_template | ⚙ | New device template | | Yes |
| NMAP | nmap_scan | ♻ | Nmap port scanning & discovery | | |
| NMAPDEV | nmap_dev_scan | 🔍 | Nmap dev scan on current network | | |
| NSLOOKUP | nslookup_scan | 🆎 | NSLookup (DNS-based) name resolution | | |
| NTFPRCS | notification_processing | ⚙ | Notification processing | | Yes |
| NTFY | _publisher_ntfy | ▶️ | NTFY notifications | | |
| OMDSDN | omada_sdn_imp | 📥/🆎 ❌ | UNMAINTAINED use OMDSDNOPENAPI | 🖧 🔄 | |
| OMDSDNOPENAPI | omada_sdn_openapi | 📥/🆎 | OMADA TP-Link import via OpenAPI | 🖧 | |
| PIHOLE | pihole_scan | 🔍/🆎/📥 | Pi-hole device import & sync | | |
| PIHOLEAPI | pihole_api_scan | 🔍/🆎/📥 | Pi-hole device import & sync via API v6+ | | |
| PUSHSAFER | _publisher_pushsafer | ▶️ | Pushsafer notifications | | |
| PUSHOVER | _publisher_pushover | ▶️ | Pushover notifications | | |
| SETPWD | set_password | ⚙ | Set password | | Yes |
| SMTP | _publisher_email | ▶️ | Email notifications | | |
| SNMPDSC | snmp_discovery | 🔍/📥 | SNMP device import & sync | | |
| SYNC | sync | 🔍/⚙/📥 | Sync & import from NetAlertX instances | 🖧 🔄 | Yes |
| TELEGRAM | _publisher_telegram | ▶️ | Telegram notifications | | |
| UI | ui_settings | ♻ | UI specific settings | | Yes |
| UNFIMP | unifi_import | 🔍/📥/🆎 | UniFi device import & sync | 🖧 | |
| UNIFIAPI | unifi_api_import | 🔍/📥/🆎 | UniFi device import (SM API, multi-site) | | |
| VNDRPDT | vendor_update | ⚙ | Vendor database update | | |
| WEBHOOK | _publisher_webhook | ▶️ | Webhook notifications | | |
| WEBMON | website_monitor | ♻ | Website down monitoring | | |
| WOL | wake_on_lan | ♻ | Automatic wake-on-lan | | |
* The database cleanup plugin (DBCLNP) is not required, but the app will become unusable after a while if it is not executed.
❌ Marked for removal/unmaintained - looking for help.
⌚ It's recommended to use the same schedule interval for all plugins responsible for discovering new devices.
Plugins can be enabled via Settings, and can be disabled as needed.
- Enable plugin discovery via DISCOVER_PLUGINS and load the required plugins in Settings via the LOADED_PLUGINS setting.
- Set the <prefix>_RUN setting to the recommended or custom value as per the documentation of the given setting.
- If you select schedule on a 🔍 dev scanner plugin, make sure the schedules are the same across all 🔍 dev scanner plugins.
- Set the <prefix>_RUN setting to disabled if you want to disable the plugin, but keep the settings.
- To unload a plugin completely, remove it from the LOADED_PLUGINS setting.
- If needed, remove leftover plugin settings from your app.conf (app.conf files are kept in the /config folder).
- To hide a plugin permanently, place an ignore_plugin file into the plugin directory. Ignored plugins won't show up in the LOADED_PLUGINS setting.

If you want to develop a custom plugin, please read this Plugin development guide.
+
+
+
NetAlertX comes with a plugin system to feed events from third-party scripts into the UI and then send notifications, if desired. The highlighted core functionality this plugin system supports is described below.
+++(Currently, update/overwriting of existing objects is only supported for devices via the
+CurrentScantable.)
Note
For a high-level overview of how the config.json is used and its lifecycle, check the config.json Lifecycle in NetAlertX guide.
Tip
Read the Development environment setup guide to set up your local environment for development. 👩💻
Example use cases for plugins range from device importers and scanners, through monitoring of websites or services, to custom notification gateways.
+If you wish to develop a plugin, please check the existing plugin structure. Once the settings are saved by the user they need to be removed from the app.conf file manually if you want to re-initialize them from the config.json of the plugin.
Please read the below carefully if you'd like to contribute with a plugin yourself. This documentation file might be outdated, so double-check the sample plugins as well.
⚠️ The folder name must be the same as the code name value in "code_name": "<value>". The unique prefix needs to be unique compared to the other settings prefixes, e.g.: the prefix APPRISE is already in use.
| File | Required (plugin type) | Description |
|---|---|---|
| config.json | yes | Contains the plugin configuration (manifest) including the settings available to the user. |
| script.py | no | The Python script itself. You may call any valid linux command. |
| last_result.<prefix>.log | no | The file used to interface between NetAlertX and the plugin. Required for a script plugin if you want to feed data into the app. Stored in the /api/log/plugins/ folder. |
| script.log | no | Logging output (recommended) |
| README.md | yes | Any setup considerations or overview |
More on specifics below.
+Important
Spend some time reading and trying to understand the below table. This is the interface between the Plugins and the core application. The application expects 9 or 13 values. The first 9 values are mandatory. The next 4 values (HelpVal1 to HelpVal4) are optional. However, if you use any of these optional values (e.g., HelpVal1), you need to supply all optional values (e.g., HelpVal2, HelpVal3, and HelpVal4). If a value is not used, it should be padded with null.
| Order | Represented Column | Value Required | Description |
|---|---|---|---|
| 0 | Object_PrimaryID | yes | The primary ID used to group Events under. |
| 1 | Object_SecondaryID | no | Optional secondary ID to create a relationship between other entities, such as a MAC address |
| 2 | DateTime | yes | When the event occurred, in the format 2023-01-02 15:56:30 |
| 3 | Watched_Value1 | yes | A value that is watched and users can receive notifications if it changed compared to the previously saved entry. For example an IP address |
| 4 | Watched_Value2 | no | As above |
| 5 | Watched_Value3 | no | As above |
| 6 | Watched_Value4 | no | As above |
| 7 | Extra | no | Any other data you want to pass and display in NetAlertX and the notifications |
| 8 | ForeignKey | no | A foreign key that can be used to link to the parent object (usually a MAC address) |
| 9 | HelpVal1 | no | (optional) A helper value |
| 10 | HelpVal2 | no | (optional) A helper value |
| 11 | HelpVal3 | no | (optional) A helper value |
| 12 | HelpVal4 | no | (optional) A helper value |
Note
+De-duplication is run once an hour on the Plugins_Objects database table and duplicate entries with the same value in columns Object_PrimaryID, Object_SecondaryID, Plugin (auto-filled based on unique_prefix of the plugin), UserData (can be populated with the "type": "textbox_save" column type) are removed.
The config.json file is the manifest of the plugin. It contains mainly settings definitions and the mapping of Plugin objects to NetAlertX objects.
The execution order is used to specify when a plugin is executed. This is useful if a plugin has access to and surfaces more information than others. If a device is detected by 2 plugins and inserted into the CurrentScan table, the plugin with the higher priority (e.g. Layer_0 is a higher priority than Layer_1) will insert its values first. These values (devices) will then be prioritized over any values inserted later.
{
+ "execution_order" : "Layer_0"
+}
+
+Currently, these data sources are supported (valid data_source value).
| Name | data_source value | Needs to return a "table"* | Overview (more details on this page below) |
|---|---|---|---|
| Script | script | no | Executes any linux command in the CMD setting. |
| NetAlertX DB query | app-db-query | yes | Executes a SQL query on the NetAlertX database in the CMD setting. |
| Template | template | no | Used to generate internal settings, such as default values. |
| External SQLite DB query | sqlite-db-query | yes | Executes a SQL query from the CMD setting on an external SQLite database mapped in the DB_PATH setting. |
| Plugin type | plugin_type | no | Specifies the type of the plugin and in which section the Plugin settings are displayed (one of general/system/scanner/other/publisher). |
+++
+- "Needs to return a "table" means that the application expects a
+last_result.<prefix>.logfile with some results. It's not a blocker, however warnings in theapp.logmight be logged.🔎Example +
+json +"data_source": "app-db-query"+If you want to display plugin objects or import devices into the app, data sources have to return a "table" of the exact structure as outlined above.
You can show or hide the UI on the "Plugins" page and "Plugins" tab for a plugin on devices via the show_ui property:
🔎 Example:
"show_ui": true,
If the data_source is set to script the CMD setting (that you specify in the settings array section in the config.json) contains an executable Linux command, that usually generates a last_result.<prefix>.log file (not required if you don't import any data into the app). The last_result.<prefix>.log file needs to be saved in /api/log/plugins.
Important
+A lot of the work is taken care of by the plugin_helper.py library. You don't need to manage the last_result.<prefix>.log file if using the helper objects. Check other script.py of other plugins for details.
The content of the last_result.<prefix>.log file needs to contain the columns as defined in the "Column order and values" section above. The order of columns can't be changed. After every scan it should contain only the results from the latest scan/execution.
The last_result.<prefix>.log file is a CSV-like file with the pipe | as a separator. Empty or unused values must be supplied as null. Valid CSV:
+
+https://www.google.com|null|2023-01-02 15:56:30|200|0.7898|null|null|null|null
+https://www.duckduckgo.com|192.168.0.1|2023-01-02 15:56:30|200|0.9898|null|null|Best search engine|ff:ee:ff:11:ff:11
+
+
+Invalid CSV with different errors on each line:
+
+https://www.google.com|null|2023-01-02 15:56:30|200|0.7898||null|null|null
+https://www.duckduckgo.com|null|2023-01-02 15:56:30|200|0.9898|null|null|Best search engine|
+|https://www.duckduckgo.com|null|2023-01-02 15:56:30|200|0.9898|null|null|Best search engine|null
+null|192.168.1.1|2023-01-02 15:56:30|200|0.9898|null|null|Best search engine
+https://www.duckduckgo.com|192.168.1.1|2023-01-02 15:56:30|null|0.9898|null|null|Best search engine
+https://www.google.com|null|2023-01-02 15:56:30|200|0.7898|||
+https://www.google.com|null|2023-01-02 15:56:30|200|0.7898|
+
+
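A shell-based plugin could produce such a file directly. The sketch below is not an official example; the prefix (webmon) and the target path are assumptions you would adapt to your own plugin:

NOW=$(date '+%Y-%m-%d %H:%M:%S')
RESULT_FILE=/app/api/log/plugins/last_result.webmon.log   # assumed absolute path, see the files table above
# one 9-column, pipe-separated result line; unused values are padded with null
echo "https://example.com|null|${NOW}|200|0.42|null|null|null|null" > "$RESULT_FILE"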
+If the data_source is set to app-db-query, the CMD setting needs to contain a SQL query rendering the columns as defined in the "Column order and values" section above. The order of columns is important.
This SQL query is executed on the app.db SQLite database file.
++🔎Example
+SQL query example:
++
SELECT dv.devName as Object_PrimaryID,
       cast(dv.devLastIP as VARCHAR(100)) || ':' || cast( SUBSTR(ns.Port ,0, INSTR(ns.Port , '/')) as VARCHAR(100)) as Object_SecondaryID,
       datetime() as DateTime,
       ns.Service as Watched_Value1,
       ns.State as Watched_Value2,
       'null' as Watched_Value3,
       'null' as Watched_Value4,
       ns.Extra as Extra,
       dv.devMac as ForeignKey
FROM
  (SELECT * FROM Nmap_Scan) ns
LEFT JOIN
  (SELECT devName, devMac, devLastIP FROM Devices) dv
ON ns.MAC = dv.devMac

Required CMD setting example with the above query (you can set "type": "label" if you want to make it uneditable in the UI):
json +{ + "function": "CMD", + "type": {"dataType":"string", "elements": [{"elementType" : "input", "elementOptions" : [] ,"transformers": []}]}, + "default_value":"SELECT dv.devName as Object_PrimaryID, cast(dv.devLastIP as VARCHAR(100)) || ':' || cast( SUBSTR(ns.Port ,0, INSTR(ns.Port , '/')) as VARCHAR(100)) as Object_SecondaryID, datetime() as DateTime, ns.Service as Watched_Value1, ns.State as Watched_Value2, 'null' as Watched_Value3, 'null' as Watched_Value4, ns.Extra as Extra FROM (SELECT * FROM Nmap_Scan) ns LEFT JOIN (SELECT devName, devMac, devLastIP FROM Devices) dv ON ns.MAC = dv.devMac", + "options": [], + "localized": ["name", "description"], + "name" : [{ + "language_code":"en_us", + "string" : "SQL to run" + }], + "description": [{ + "language_code":"en_us", + "string" : "This SQL query is used to populate the coresponding UI tables under the Plugins section." + }] + }
In most cases, it is used to initialize settings. Check the newdev_template plugin for details.
You can execute a SQL query on an external database connected to the current NetAlertX database via a temporary EXTERNAL_<unique prefix>. prefix.
For example for PIHOLE ("unique_prefix": "PIHOLE") it is EXTERNAL_PIHOLE.. The external SQLite database file has to be mapped in the container to the path specified in the DB_PATH setting:
++🔎Example
++
json + ... +{ + "function": "DB_PATH", + "type": {"dataType":"string", "elements": [{"elementType" : "input", "elementOptions" : [{"readonly": "true"}] ,"transformers": []}]}, + "default_value":"/etc/pihole/pihole-FTL.db", + "options": [], + "localized": ["name", "description"], + "name" : [{ + "language_code":"en_us", + "string" : "DB Path" + }], + "description": [{ + "language_code":"en_us", + "string" : "Required setting for the <code>sqlite-db-query</code> plugin type. Is used to mount an external SQLite database and execute the SQL query stored in the <code>CMD</code> setting." + }] + } + ...
The actual SQL query you want to execute is then stored as a CMD setting, similar to a Plugin of the app-db-query plugin type. The format has to adhere to the format outlined in the "Column order and values" section above.
++🔎Example
+Notice the
+EXTERNAL_PIHOLE.prefix.+
json +{ + "function": "CMD", + "type": {"dataType":"string", "elements": [{"elementType" : "input", "elementOptions" : [] ,"transformers": []}]}, + "default_value":"SELECT hwaddr as Object_PrimaryID, cast('http://' || (SELECT ip FROM EXTERNAL_PIHOLE.network_addresses WHERE network_id = id ORDER BY lastseen DESC, ip LIMIT 1) as VARCHAR(100)) || ':' || cast( SUBSTR((SELECT name FROM EXTERNAL_PIHOLE.network_addresses WHERE network_id = id ORDER BY lastseen DESC, ip LIMIT 1), 0, INSTR((SELECT name FROM EXTERNAL_PIHOLE.network_addresses WHERE network_id = id ORDER BY lastseen DESC, ip LIMIT 1), '/')) as VARCHAR(100)) as Object_SecondaryID, datetime() as DateTime, macVendor as Watched_Value1, lastQuery as Watched_Value2, (SELECT name FROM EXTERNAL_PIHOLE.network_addresses WHERE network_id = id ORDER BY lastseen DESC, ip LIMIT 1) as Watched_Value3, 'null' as Watched_Value4, '' as Extra, hwaddr as ForeignKey FROM EXTERNAL_PIHOLE.network WHERE hwaddr NOT LIKE 'ip-%' AND hwaddr <> '00:00:00:00:00:00'; ", + "options": [], + "localized": ["name", "description"], + "name" : [{ + "language_code":"en_us", + "string" : "SQL to run" + }], + "description": [{ + "language_code":"en_us", + "string" : "This SQL query is used to populate the coresponding UI tables under the Plugins section. This particular one selects data from a mapped PiHole SQLite database and maps it to the corresponding Plugin columns." + }] + }
Plugin entries can be filtered in the UI based on values entered into filter fields. The txtMacFilter textbox/field contains the Mac address of the currently viewed device, or simply a Mac address that's available in the mac query string (<url>?mac=aa:22:aa:22:aa:22:aa).
| Property | Required | Description |
|---|---|---|
| compare_column | yes | Plugin column name whose value is used for comparison (left side of the equation) |
| compare_operator | yes | JavaScript comparison operator |
| compare_field_id | yes | The id of an input text field containing the value used for comparison (right side of the equation) |
| compare_js_template | yes | JavaScript code used to convert the left and right side of the equation. {value} is replaced with input values. |
| compare_use_quotes | yes | If true then the end result of the compare_js_template is wrapped in " quotes. Use to compare strings. |
+
Filters are only applied if a filter is specified, and the txtMacFilter is not undefined, or empty (--).
++🔎Example:
++
json + "data_filters": [ + { + "compare_column" : "Object_PrimaryID", + "compare_operator" : "==", + "compare_field_id": "txtMacFilter", + "compare_js_template": "'{value}'.toString()", + "compare_use_quotes": true + } + ],+
- On the pluginsCore.php page there is an input field with the id txtMacFilter:
  <input class="form-control" id="txtMacFilter" type="text" value="--">
- This input field is initialized via the &mac= query string.
- The app then proceeds to use this Mac value from this field and compares it to the value of the Object_PrimaryID database field. The compare_operator is ==.
- Both values, from the database field Object_PrimaryID and from the txtMacFilter, are wrapped and evaluated with the compare_js_template, that is '{value}'.toString().
- compare_use_quotes is set to true, so '{value}'.toString() is wrapped into " quotes.
+This results in for example this code:
++
// left part of the expression coming from compare_column and right from the input field
// notice the added quotes (") around the left and right part of the expression
"eval('ac:82:ac:82:ac:82".toString()')" == "eval('ac:82:ac:82:ac:82".toString()')"
Plugin results are always inserted into the standard Plugin_Objects database table. Optionally, NetAlertX can take the results of the plugin execution and insert these results into an additional database table. This is enabled with the property "mapped_to_table" in the config.json file. The mapping of the columns is defined in the database_column_definitions array.
Note
If results are mapped to the CurrentScan table, the data is then included in the regular scan loop, so, for example, notifications for devices are sent out.
++🔍 Example:
+For example, this approach is used to implement the
+DHCPLSSplugin. The script parses all supplied "dhcp.leases" files, gets the results in the generic table format outlined in the "Column order and values" section above, takes individual values, and inserts them into theCurrentScandatabase table in the NetAlertX database. All this is achieved by:+
+- Specifying the database table into which the results are inserted by defining
+"mapped_to_table": "CurrentScan"in the root of theconfig.jsonfile as shown below:+
json +{ + "code_name": "dhcp_leases", + "unique_prefix": "DHCPLSS", + ... + "data_source": "script", + "localized": ["display_name", "description", "icon"], + "mapped_to_table": "CurrentScan", + ... +}+2. Defining the target column with themapped_to_columnproperty for individual columns in thedatabase_column_definitionsarray of theconfig.jsonfile. For example in theDHCPLSSplugin, I needed to map the value of theObject_PrimaryIDcolumn returned by the plugin, to thecur_MACcolumn in the NetAlertX database tableCurrentScan. Notice the"mapped_to_column": "cur_MAC"key-value pair in the sample below.+
json +{ + "column": "Object_PrimaryID", + "mapped_to_column": "cur_MAC", + "css_classes": "col-sm-2", + "show": true, + "type": "device_mac", + "default_value":"", + "options": [], + "localized": ["name"], + "name":[{ + "language_code":"en_us", + "string" : "MAC address" + }] + }+
+- That's it. The app takes care of the rest. It loops thru the objects discovered by the plugin, takes the results line-by-line, and inserts them into the database table specified in
+"mapped_to_table". The columns are translated from the generic plugin columns to the target table columns via the"mapped_to_column"property in the column definitions.
Note
+You can create a column mapping with a default value via the mapped_to_column_data property. This means that the value of the given column will always be this value. That also means that the "column": "NameDoesntMatter" is not important as there is no database source column.
++🔍 Example:
++
json +{ + "column": "NameDoesntMatter", + "mapped_to_column": "cur_ScanMethod", + "mapped_to_column_data": { + "value": "DHCPLSS" + }, + "css_classes": "col-sm-2", + "show": true, + "type": "device_mac", + "default_value":"", + "options": [], + "localized": ["name"], + "name":[{ + "language_code":"en_us", + "string" : "MAC address" + }] + }
Important
An easier way to access settings in scripts is the get_setting_value method.
```python
from helper import get_setting_value
...
NTFY_TOPIC = get_setting_value('NTFY_TOPIC')
...
```
+The params array in the config.json is used to enable the user to change the parameters of the executed script. For example, the user wants to monitor a specific URL.
++🔎 Example: +Passing user-defined settings to a command. Let's say, you want to have a script, that is called with a user-defined parameter called
+urls:+
root@server# python3 /app/front/plugins/website_monitor/script.py urls=https://google.com,https://duck.com
1. In the plugin's config.json, create a setting with the function property set to a custom name, such as urls_to_check (this is not a reserved name from the section "Supported settings function values" below).
2. Define the urls parameter in the params section of the config.json the following way (WEBMON_ is the plugin prefix automatically added to all the settings):
{
+ "params" : [
+ {
+ "name" : "urls",
+ "type" : "setting",
+ "value" : "WEBMON_urls_to_check"
+ }]
+}
+
3. Use the {urls} wildcard in the CMD setting. Notice urls={urls} in the below json:
{
+ "function": "CMD",
+ "type": {"dataType":"string", "elements": [{"elementType" : "input", "elementOptions" : [] ,"transformers": []}]},
+ "default_value":"python3 /app/front/plugins/website_monitor/script.py urls={urls}",
+ "options": [],
+ "localized": ["name", "description"],
+ "name" : [{
+ "language_code":"en_us",
+ "string" : "Command"
+ }],
+ "description": [{
+ "language_code":"en_us",
+ "string" : "Command to run"
+ }]
+ }
+
+During script execution, the app will take the command "python3 /app/front/plugins/website_monitor/script.py urls={urls}", take the {urls} wildcard and replace it with the value from the WEBMON_urls_to_check setting. This is because:
params entries"name" : "urls"urls params and finds "type" : "setting""value" : "WEBMON_urls_to_check" config.json this setting is identified by "function":"urls_to_check", not "function":"WEBMON_urls_to_check"WEBMON_urls_to_checkWEBMON_urls_to_check contains 2 values entered by the user: WEBMON_urls_to_check=['https://google.com','https://duck.com']WEBMON_urls_to_check and replaces the {urls} wildcard in the setting where "function":"CMD", so you go from:python3 /app/front/plugins/website_monitor/script.py urls={urls}python3 /app/front/plugins/website_monitor/script.py urls=https://google.com,https://duck.com Below are some general additional notes, when defining params:
"name":"name_value" - is used as a wildcard replacement in the CMD setting value by using curly brackets {name_value}. The wildcard is replaced by the result of the "value" : "param_value" and "type":"type_value" combo configuration below."type":"<sql|setting>" - is used to specify the type of the params, currently only 2 supported (sql,setting)."type":"sql" - will execute the SQL query specified in the value property. The sql query needs to return only one column. The column is flattened and separated by commas (,), e.g: SELECT devMac from DEVICES -> Internet,74:ac:74:ac:74:ac,44:44:74:ac:74:ac. This is then used to replace the wildcards in the CMD setting. "type":"setting" - The setting code name. A combination of the value from unique_prefix + _ + function value, or otherwise the code name you can find in the Settings page under the Setting display name, e.g. PIHOLE_RUN. "value": "param_value" - Needs to contain a setting code name or SQL query without wildcards."timeoutMultiplier" : true - used to indicate if the value should multiply the max timeout for the whole script run by the number of values in the given parameter."base64": true - use base64 encoding to pass the value to the script (e.g. if there are spaces)++🔎Example:
++
json +{ + "params" : [{ + "name" : "ips", + "type" : "sql", + "value" : "SELECT devLastIP from DEVICES", + "timeoutMultiplier" : true + }, + { + "name" : "macs", + "type" : "sql", + "value" : "SELECT devMac from DEVICES" + }, + { + "name" : "timeout", + "type" : "setting", + "value" : "NMAP_RUN_TIMEOUT" + }, + { + "name" : "args", + "type" : "setting", + "value" : "NMAP_ARGS", + "base64" : true + }] +}
Note
+The settings flow and when Plugin specific settings are applied is described under the Settings system.
+Required attributes are:
| Property | Description |
|---|---|
| "function" | Specifies the function the setting drives or a simple unique code name. See Supported settings function values for options. |
| "type" | Specifies the form control used for the setting displayed in the Settings page and what values are accepted. Supported options include: |
| | - {"dataType":"string", "elements": [{"elementType" : "input", "elementOptions" : [{"type":"password"}] ,"transformers": ["sha256"]}]} |
| "localized" | A list of properties on the current JSON level that need to be localized. |
| "name" | Displayed on the Settings page. An array of localized strings. See Localized strings below. |
| "description" | Displayed on the Settings page. An array of localized strings. See Localized strings below. |
| (optional) "events" | Specifies whether to generate an execution button next to the input field of the setting. Supported values: |
| | - "test" - For notification plugins testing |
| | - "run" - Regular plugins testing |
| (optional) "override_value" | Used to determine a user-defined override for the setting. Useful for template-based plugins, where you can choose to leave the current value or override it with the value defined in the setting. (Work in progress) |
| (optional) "events" | Used to trigger the plugin. Usually used on the RUN setting. Not fully tested in all scenarios. Will show a play button next to the setting. After clicking, an event is generated for the backend in the Parameters database table to process the front-end event on the next run. |
+
This section outlines the structure and types of UI components, primarily used to build HTML forms or interactive elements dynamically. Each UI component has a "type" which defines its structure, behavior, and rendering options.
The UI component is defined as a JSON object containing a list of elements. Each element specifies how it should behave, with properties like elementType, elementOptions, and any associated transformers to modify the data. The example below demonstrates how a component with two elements (span and select) is structured:
{
+ "function": "devIcon",
+ "type": {
+ "dataType": "string",
+ "elements": [
+ {
+ "elementType": "span",
+ "elementOptions": [
+ { "cssClasses": "input-group-addon iconPreview" },
+ { "getStringKey": "Gen_SelectToPreview" },
+ { "customId": "NEWDEV_devIcon_preview" }
+ ],
+ "transformers": []
+ },
+ {
+ "elementType": "select",
+ "elementHasInputValue": 1,
+ "elementOptions": [
+ { "cssClasses": "col-xs-12" },
+ {
+ "onChange": "updateIconPreview(this)"
+ },
+ { "customParams": "NEWDEV_devIcon,NEWDEV_devIcon_preview" }
+ ],
+ "transformers": []
+ }
+ ]
+ }
+}
+
+
+The code snippet provided demonstrates how the elements are iterated over to generate their corresponding HTML. Depending on the elementType, different HTML tags (like <select>, <input>, <textarea>, <button>, etc.) are created with the respective attributes such as onChange, my-data-type, and class based on the provided elementOptions. Events can also be attached to elements like buttons or select inputs.
- select: Renders a dropdown list. Additional options like isMultiSelect and event handlers (e.g., onChange) can be attached.
- input: Handles various types of input fields, including checkboxes, text, and others, with customizable attributes like readOnly, placeholder, etc.
- button: Generates clickable buttons with custom event handlers (onClick), icons, or labels.
- textarea: Creates a multi-line input box for text input.
- span: Used for inline text or content with customizable classes and data attributes.

Each element may also have associated events (e.g., running a scan or triggering a notification) defined under Events.
Supported settings function values

You can have any "function": "my_custom_name" custom name; however, the ones listed below have a specific functionality attached to them.
| Setting | Description |
|---|---|
| RUN | (required) Specifies when the service is executed. |
| | Supported options: |
| | - "disabled" - do not run |
| | - "once" - run on app start or on settings saved |
| | - "schedule" - if included, then a RUN_SCHD setting needs to be specified to determine the schedule |
| | - "always_after_scan" - run always after a scan is finished |
| | - "before_name_updates" - run before device names are updated (for name discovery plugins) |
| | - "on_new_device" - run when a new device is detected |
| | - "before_config_save" - run before the config is marked as saved. Useful if your plugin needs to modify the app.conf file. |
| RUN_SCHD | (required if you include "schedule" in the above RUN function) Cron-like scheduling is used if the RUN setting is set to schedule. |
| CMD | (required) Specifies the command that should be executed. |
| API_SQL | (not implemented) Generates a table_ + code_name + .json file as per API docs. |
| RUN_TIMEOUT | (optional) Specifies the maximum execution time of the script. If not specified, a default value of 10 seconds is used to prevent hanging. |
| WATCH | (optional) Specifies which database columns are watched for changes for this particular plugin. If not specified, no notifications are sent. |
| REPORT_ON | (optional) Specifies when to send a notification. Supported options are: |
| | - new - means a new unique (unique combination of PrimaryId and SecondaryId) object was discovered. |
| | - watched-changed - means that selected Watched_ValueN columns changed |
| | - watched-not-changed - reports even on events where selected Watched_ValueN did not change |
| | - missing-in-last-scan - if the object is missing compared to previous scans |
++🔎 Example:
++
json +{ + "function": "RUN", + "type": {"dataType":"string", "elements": [{"elementType" : "select", "elementOptions" : [] ,"transformers": []}]}, + "default_value":"disabled", + "options": ["disabled", "once", "schedule", "always_after_scan", "on_new_device"], + "localized": ["name", "description"], + "name" :[{ + "language_code":"en_us", + "string" : "When to run" + }], + "description": [{ + "language_code":"en_us", + "string" : "Enable a regular scan of your services. If you select <code>schedule</code> the scheduling settings from below are applied. If you select <code>once</code> the scan is run only once on start of the application (container) for the time specified in <a href=\"#WEBMON_RUN_TIMEOUT\"><code>WEBMON_RUN_TIMEOUT</code> setting</a>." + }] +}
"language_code":"<en_us|es_es|de_de>" - code name of the language string. Only these three are currently supported. At least the "language_code":"en_us" variant has to be defined. "string" - The string to be displayed in the given language.++🔎 Example:
+```json
++{ + "language_code":"en_us", + "string" : "When to run" +} +```
+
The UI will adjust how columns are displayed in the UI based on the resolvers definition of the database_column_definitions object. These are the supported form controls and related functionality:
"show": true and also with at least an English translation will be shown in the UI.| Supported Types | +Description | +
|---|---|
label |
+Displays a column only. | +
textarea_readonly |
+Generates a read only text area and cleans up the text to display it somewhat formatted with new lines preserved. | +
See below for information on threshold, replace. |
++ |
| + | + |
options Property |
+Used in conjunction with types like threshold, replace, regex. |
+
options_params Property |
+Used in conjunction with a "options": "[{value}]" template and text.select/list.select. Can specify SQL query (needs to return 2 columns SELECT devName as name, devMac as id) or Setting (not tested) to populate the dropdown. Check example below or have a look at the NEWDEV plugin config.json file. |
+
threshold |
+The options array contains objects ordered from the lowest maximum to the highest. The corresponding hexColor is used for the value background color if it's less than the specified maximum but more than the previous one in the options array. |
+
replace |
+The options array contains objects with an equals property, which is compared to the "value." If the values are the same, the string in replacement is displayed in the UI instead of the actual "value". |
+
regex |
+Applies a regex to the value. The options array contains objects with an type (must be set to regex) and param (must contain the regex itself) property. |
+
| + | + |
| Type Definitions | ++ |
device_mac |
+The value is considered to be a MAC address, and a link pointing to the device with the given MAC address is generated. | +
device_ip |
+The value is considered to be an IP address. A link pointing to the device with the given IP is generated. The IP is checked against the last detected IP address and translated into a MAC address, which is then used for the link itself. | +
device_name_mac |
+The value is considered to be a MAC address, and a link pointing to the device with the given MAC is generated. The link label is resolved as the target device name. | +
url |
+The value is considered to be a URL, so a link is generated. | +
textbox_save |
+Generates an editable and saveable text box that saves values in the database. Primarily intended for the UserData database column in the Plugins_Objects table. |
+
url_http_https |
+Generates two links with the https and http prefix as lock icons. |
+
eval |
+Evaluates as JavaScript. Use the variable value to use the given column value as input (e.g. '<b>${value}<b>' (replace ' with ` in your code) ) |
+
Note
+Supports chaining. You can chain multiple resolvers with .. For example regex.url_http_https. This will apply the regex resolver and then the url_http_https resolver.
"function": "devType",
+ "type": {"dataType":"string", "elements": [{"elementType" : "select", "elementOptions" : [] ,"transformers": []}]},
+ "maxLength": 30,
+ "default_value": "",
+ "options": ["{value}"],
+ "options_params" : [
+ {
+ "name" : "value",
+ "type" : "sql",
+ "value" : "SELECT '' as id, '' as name UNION SELECT devType as id, devType as name FROM (SELECT devType FROM Devices UNION SELECT 'Smartphone' UNION SELECT 'Tablet' UNION SELECT 'Laptop' UNION SELECT 'PC' UNION SELECT 'Printer' UNION SELECT 'Server' UNION SELECT 'NAS' UNION SELECT 'Domotic' UNION SELECT 'Game Console' UNION SELECT 'SmartTV' UNION SELECT 'Clock' UNION SELECT 'House Appliance' UNION SELECT 'Phone' UNION SELECT 'AP' UNION SELECT 'Gateway' UNION SELECT 'Firewall' UNION SELECT 'Switch' UNION SELECT 'WLAN' UNION SELECT 'Router' UNION SELECT 'Other') AS all_devices ORDER BY id;"
+ },
+ {
+ "name" : "uilang",
+ "type" : "setting",
+ "value" : "UI_LANG"
+ }
+ ]
+
+{
+ "column": "Watched_Value1",
+ "css_classes": "col-sm-2",
+ "show": true,
+ "type": "threshold",
+ "default_value":"",
+ "options": [
+ {
+ "maximum": 199,
+ "hexColor": "#792D86"
+ },
+ {
+ "maximum": 299,
+ "hexColor": "#5B862D"
+ },
+ {
+ "maximum": 399,
+ "hexColor": "#7D862D"
+ },
+ {
+ "maximum": 499,
+ "hexColor": "#BF6440"
+ },
+ {
+ "maximum": 599,
+ "hexColor": "#D33115"
+ }
+ ],
+ "localized": ["name"],
+ "name":[{
+ "language_code":"en_us",
+ "string" : "Status code"
+ }]
+ },
+ {
+ "column": "Status",
+ "show": true,
+ "type": "replace",
+ "default_value":"",
+ "options": [
+ {
+ "equals": "watched-not-changed",
+ "replacement": "<i class='fa-solid fa-square-check'></i>"
+ },
+ {
+ "equals": "watched-changed",
+ "replacement": "<i class='fa-solid fa-triangle-exclamation'></i>"
+ },
+ {
+ "equals": "new",
+ "replacement": "<i class='fa-solid fa-circle-plus'></i>"
+ }
+ ],
+ "localized": ["name"],
+ "name":[{
+ "language_code":"en_us",
+ "string" : "Status"
+ }]
+ },
+ {
+ "column": "Watched_Value3",
+ "css_classes": "col-sm-1",
+ "show": true,
+ "type": "regex.url_http_https",
+ "default_value":"",
+ "options": [
+ {
+ "type": "regex",
+ "param": "([\\d.:]+)"
+ }
+ ],
+ "localized": ["name"],
+ "name":[{
+ "language_code":"en_us",
+ "string" : "HTTP/s links"
+ },
+ {
+ "language_code":"es_es",
+ "string" : "N/A"
+ }]
+ }
+
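To illustrate how the threshold and replace resolvers configured above behave, here is a minimal Python sketch of the selection logic. It is only an illustration of the documented behavior under the stated assumptions, not the actual UI code:

# Illustrative sketch of the documented threshold / replace behavior.
def resolve_threshold(value, options):
    # options are ordered from the lowest maximum to the highest;
    # the first maximum the value falls under provides the color.
    for opt in options:
        if value <= opt["maximum"]:
            return opt["hexColor"]
    return None  # value exceeds all configured maximums

def resolve_replace(value, options):
    # show the replacement string when the value matches "equals"
    for opt in options:
        if value == opt["equals"]:
            return opt["replacement"]
    return value

options = [{"maximum": 199, "hexColor": "#792D86"},
           {"maximum": 299, "hexColor": "#5B862D"}]
print(resolve_threshold(250, options))                        # "#5B862D"
print(resolve_replace("new", [{"equals": "new",
                               "replacement": "plus icon"}]))  # "plus icon"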
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Plugins provide data to the NetAlertX core, which processes it to detect changes, discover new devices, raise alerts, and apply heuristics.
Plugins write their scan results into the CurrentScan table.
The CurrentScan table is evaluated to detect new devices, changes, and triggers.
Although plugins run independently, they contribute to the shared CurrentScan table.
+To inspect its contents, set LOG_LEVEL=trace and check for the log section:
================ CurrentScan table content ================
+
config.json Lifecycle
This section outlines how each plugin’s config.json manifest is read, validated, and used by the core and plugins.
+It also describes plugin output expectations and the main plugin categories.
Tip
+For detailed schema and examples, see the Plugin Development Guide.
The core loads the config.json for each plugin.
Settings such as the run trigger (RUN) are validated.
Mapping definitions (mapped_to_table, database_column_definitions) are parsed to define how data integrates with the main app.
Plugins may run:
On a fixed schedule.
At a configured interval.
Plugin results are stored in the corresponding Plugins_* table.
Data can be mapped into other tables (e.g., Devices, CurrentScan) as defined by:
database_column_definitions
mapped_to_table
Example: Object_PrimaryID → devMAC
All plugins must follow the Plugin Interface Contract defined in PLUGINS_DEV.md.
+Output values are pipe-delimited in a fixed order.
Object_PrimaryID and Object_SecondaryID uniquely identify records (for example, MAC|IP).
Monitored values are stored in Watched_Value1–4.
Additional data can be stored in Extra.
Helper values are stored in Helper_Value1–3.
Records are keyed by the combination of Object_PrimaryID + Object_SecondaryID.
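As a rough illustration of assembling one pipe-delimited record in Python, a plugin could build a line like the one below. Treat the field order and example values here as assumptions for illustration only; the authoritative column order and output location are defined by the Plugin Interface Contract in PLUGINS_DEV.md.

# Hypothetical sketch of building a single pipe-delimited plugin record.
# The exact column order is defined in PLUGINS_DEV.md.
fields = [
    "00:11:32:EF:A5:6C",    # Object_PrimaryID (e.g. MAC)
    "192.168.1.82",         # Object_SecondaryID (e.g. IP)
    "2025-04-02 05:26:56",  # detection timestamp
    "online",               # Watched_Value1
    "", "", "",             # Watched_Value2-4 (unused here)
    "",                     # Extra
]
print("|".join(fields))
# -> 00:11:32:EF:A5:6C|192.168.1.82|2025-04-02 05:26:56|online||||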
Plugins fall into several functional categories depending on their purpose and expected outputs.
Device scanners provide a MAC and IP for new or updated device records in Devices and CurrentScan (examples: ARPSCAN, NMAPDEV).
Other scanners and importers supply partial data such as MAC, IP, or hostname (examples: NMAP, MQTT).
Name-resolution plugins update devName and devFQDN (examples: NBTSCAN, NSLOOKUP).
Monitoring plugins report on availability and performance (examples: INTRSPD, custom monitoring scripts).
Maintenance plugins handle housekeeping tasks (examples: MAINT, CSVBCKP).
After persistence, post-processing steps are applied (see the lifecycle summary below).
+The lifecycle of a plugin configuration is:
+Load → Validate → Prepare → Execute → Parse → Map → Persist → Post-process
Each plugin must provide a valid config.json that follows the structure described above.
+
+
+
Some operating systems randomize MAC addresses to improve privacy.
This functionality hides the real MAC of the device and assigns a random MAC when connecting to Wi-Fi networks.
This behavior is especially useful when connecting to unknown Wi-Fi networks, but it serves no purpose when connecting to your own or other known networks.
I recommend disabling this on-device functionality when connecting devices to your own Wi-Fi networks. This way, NetAlertX will be able to identify the device and will not flag it as a new device every time iOS or Android randomizes the MAC.
Random MACs are recognized by the characters "2", "6", "A", or "E" as the 2nd character in the MAC address. You can prevent specific prefixes from being detected as random MAC addresses by specifying the UI_NOT_RANDOM_MAC setting.
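The detection rule above can be expressed in a few lines of Python. This is a minimal illustrative sketch of the documented second-character check, not the app's actual implementation:

def is_random_mac(mac: str) -> bool:
    # Locally administered (randomized) MACs have 2, 6, A, or E
    # as the second character of the address.
    return len(mac) > 1 and mac[1].upper() in "26AE"

print(is_random_mac("D2:1A:2B:3C:4D:5E"))  # True
print(is_random_mac("00:11:32:EF:A5:6C"))  # False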



+
+
+
+ By design, local network scanners such as arp-scan use ARP (Address Resolution Protocol) to map IP addresses to MAC addresses on the local network. Since ARP operates at Layer 2 (Data Link Layer), it typically works only within a single broadcast domain, usually limited to a single router or network segment.
Note
Ping and ARPSCAN use different protocols, so even if you can ping devices, it doesn't mean ARPSCAN can detect them.
To scan multiple locally accessible network segments, add them as subnets according to the subnets documentation. If ARPSCAN is not suitable for your setup, read on.
The following network setups might make some devices undetectable with ARPSCAN. Check the specific setup to understand the cause and find potential workarounds to report on these devices.
Wi-Fi extenders typically create a separate network or subnet, which can prevent network scanning tools like arp-scan from detecting devices behind the extender.
++Possible workaround: Scan the specific subnet that the extender uses, if it is separate from the main network.
+
ARP operates at Layer 2 (Data Link Layer) and works only within a local area network (LAN). VPNs, which operate at Layer 3 (Network Layer), route traffic between networks, preventing ARP requests from discovering devices outside the local network.
+VPNs use virtual interfaces (e.g., tun0, tap0) to encapsulate traffic, bypassing ARP-based discovery. Additionally, many VPNs use NAT, which masks individual devices behind a shared IP address.
++Possible workaround: Configure the VPN to bridge networks instead of routing to enable ARP, though this depends on the VPN setup and security requirements.
+
The following workarounds should work for most complex network setups.
+You can use supplementary plugins that employ alternate methods. Protocols used by the SNMPDSC or DHCPLSS plugins are widely supported on different routers and can be effective as workarounds. Check the plugins list to find a plugin that works with your router and network setup.
If you have servers in different networks, you can set up separate NetAlertX instances on those subnets and synchronize the results into one instance using the SYNC plugin.
If you don't need to discover new devices and only need to report on their status (online, offline, down), you can manually enter devices and check their status using the ICMP plugin, which uses the ping command internally.
For more information on how to add devices manually (or dummy devices), refer to the Device Management documentation.
+To create truly dummy devices, you can use a loopback IP address (e.g., 0.0.0.0 or 127.0.0.1) so they appear online.
Scanning remote networks with NMAP is possible (via the NMAPDEV plugin), but since it cannot retrieve the MAC address, you need to enable the NMAPDEV_FAKE_MAC setting. This will generate a fake MAC address based on the IP address, allowing you to track devices. However, this can lead to inconsistencies, especially if the IP address changes or a previously logged device is rediscovered. If this setting is disabled, only the IP address will be discovered, and devices with missing MAC addresses will be skipped.
Check the NMAPDEV plugin for details
+
+
+
If you are running a DNS server, such as AdGuard, set up Private reverse DNS servers for better name resolution on your network. Enabling this setting will enable NetAlertX to execute dig and nslookup commands to automatically resolve device names based on their IP addresses.
+Tip
+Before proceeding, ensure that name resolution plugins are enabled.
+You can customize how names are cleaned using the NEWDEV_NAME_CLEANUP_REGEX setting.
+To auto-update Fully Qualified Domain Names (FQDN), enable the REFRESH_FQDN setting.
++Example 1: Reverse DNS
+disabled+
jokob@Synology-NAS:/$ nslookup 192.168.1.58 +** server can't find 58.1.168.192.in-addr.arpa: NXDOMAINExample 2: Reverse DNS
+enabled+
jokob@Synology-NAS:/$ nslookup 192.168.1.58 +45.1.168.192.in-addr.arpa name = jokob-NUC.localdomain.
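If you want to check reverse DNS resolution programmatically (for example while debugging name resolution), a quick sketch using Python's standard library performs the same PTR lookup as the nslookup examples above; the IP address is just the example value from above:

import socket

ip = "192.168.1.58"  # example IP from the nslookup output above
try:
    hostname, aliases, addresses = socket.gethostbyaddr(ip)
    print(f"{ip} resolves to {hostname}")
except socket.herror:
    print(f"No reverse DNS (PTR) record found for {ip}")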
You can specify the DNS server (e.g., 192.168.1.1) in the docker-compose to improve name resolution on your network.
+services:
+ netalertx:
+ container_name: netalertx
+ image: "ghcr.io/jokob-sk/netalertx:latest"
+...
+ dns: # specifying the DNS servers used for the container
+ - 10.8.0.1
+ - 10.8.0.17
+
+You can configure a custom /etc/resolv.conf file in docker-compose.yml and set the nameserver to your LAN DNS server (e.g.: Pi-Hole). See the relevant resolv.conf man entry for details.
+services:
+ netalertx:
+ container_name: netalertx
+ volumes:
+...
+ - /local_data_dir/config/resolv.conf:/etc/resolv.conf # ⚠ Mapping the /resolv.conf file for better name resolution
+...
+
The most important entry below is the nameserver entry (you can add multiple):
nameserver 192.168.178.11
+options edns0 trust-ad
+search example.com
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ++Submitted by amazing cvc90 🙏
+
Note
+There are various NGINX config files for NetAlertX, some for the bare-metal install, currently Debian 12 and Ubuntu 24 (netalertx.conf), and one for the docker container (netalertx.template.conf).
The first one can be found in the respective bare-metal installer folder /app/install/<system>/netalertx.conf.
+The docker one can be found in the install folder. Map, or use, the one appropriate for your setup.
On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
+In this file, paste the following code:
+ server {
+ listen 80;
+ server_name netalertx;
    location / {
        proxy_set_header Host $host;
        proxy_pass http://localhost:20211/;
    }
+ }
+
+nginx -s reload or systemctl restart nginx
Check your config with nginx -t. If there are any issues, it will tell you.
Once NGINX restarts, you should be able to access the proxy website at http://netalertx/
+On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
+In this file, paste the following code:
+ server {
+ listen 80;
+ server_name netalertx;
    location ^~ /netalertx/ {
        proxy_set_header Host $host;
        proxy_pass http://localhost:20211/;
+ proxy_redirect ~^/(.*)$ /netalertx/$1;
+ rewrite ^/netalertx/?(.*)$ /$1 break;
+ }
+ }
+
+Check your config with nginx -t. If there are any issues, it will tell you.
Activate the new website by running the following command:
+nginx -s reload or systemctl restart nginx
On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
+In this file, paste the following code:
+ server {
+ listen 80;
+ server_name netalertx;
    location ^~ /netalertx/ {
        proxy_set_header Host $host;
        proxy_pass http://localhost:20211/;
+ proxy_redirect ~^/(.*)$ /netalertx/$1;
+ rewrite ^/netalertx/?(.*)$ /$1 break;
+ sub_filter_once off;
+ sub_filter_types *;
+ sub_filter 'href="/' 'href="/netalertx/';
+ sub_filter '(?>$host)/css' '/netalertx/css';
+ sub_filter '(?>$host)/js' '/netalertx/js';
+ sub_filter '/img' '/netalertx/img';
+ sub_filter '/lib' '/netalertx/lib';
+ sub_filter '/php' '/netalertx/php';
+ }
+ }
+
+Check your config with nginx -t. If there are any issues, it will tell you.
Activate the new website by running the following command:
+nginx -s reload or systemctl restart nginx
NGINX HTTPS Configuration (Direct Path)
+On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
+In this file, paste the following code:
+ server {
    listen 443 ssl;
    server_name netalertx;
    ssl_certificate /etc/ssl/certs/netalertx.pem;
    ssl_certificate_key /etc/ssl/private/netalertx.key;
    location / {
        proxy_set_header Host $host;
        proxy_pass http://localhost:20211/;
    }
+ }
+
+Check your config with nginx -t. If there are any issues, it will tell you.
Activate the new website by running the following command:
+nginx -s reload or systemctl restart nginx
NGINX HTTPS Configuration (Sub Path)
+On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
+In this file, paste the following code:
+ server {
    listen 443 ssl;
    server_name netalertx;
    ssl_certificate /etc/ssl/certs/netalertx.pem;
    ssl_certificate_key /etc/ssl/private/netalertx.key;
    location ^~ /netalertx/ {
        proxy_set_header Host $host;
        proxy_pass http://localhost:20211/;
+ proxy_redirect ~^/(.*)$ /netalertx/$1;
+ rewrite ^/netalertx/?(.*)$ /$1 break;
+ }
+ }
+
+Check your config with nginx -t. If there are any issues, it will tell you.
Activate the new website by running the following command:
+nginx -s reload or systemctl restart nginx
On your NGINX server, create a new file called /etc/nginx/sites-available/netalertx
+In this file, paste the following code:
+ server {
    listen 443 ssl;
    server_name netalertx;
    ssl_certificate /etc/ssl/certs/netalertx.pem;
    ssl_certificate_key /etc/ssl/private/netalertx.key;
    location ^~ /netalertx/ {
        proxy_set_header Host $host;
        proxy_pass http://localhost:20211/;
+ proxy_redirect ~^/(.*)$ /netalertx/$1;
+ rewrite ^/netalertx/?(.*)$ /$1 break;
+ sub_filter_once off;
+ sub_filter_types *;
+ sub_filter 'href="/' 'href="/netalertx/';
+ sub_filter '(?>$host)/css' '/netalertx/css';
+ sub_filter '(?>$host)/js' '/netalertx/js';
+ sub_filter '/img' '/netalertx/img';
+ sub_filter '/lib' '/netalertx/lib';
+ sub_filter '/php' '/netalertx/php';
+ }
+ }
+
+Check your config with nginx -t. If there are any issues, it will tell you.
Activate the new website by running the following command:
+nginx -s reload or systemctl restart nginx
On your Apache server, create a new file called /etc/apache2/sites-available/netalertx.conf.
+In this file, paste the following code:
+ <VirtualHost *:80>
+ ServerName netalertx
+ ProxyPreserveHost On
+ ProxyPass / http://localhost:20211/
+ ProxyPassReverse / http://localhost:20211/
+ </VirtualHost>
+
+Check your config with httpd -t (or apache2ctl -t on Debian/Ubuntu). If there are any issues, it will tell you.
Activate the new website by running the following command:
+a2ensite netalertx or service apache2 reload
On your Apache server, create a new file called /etc/apache2/sites-available/netalertx.conf.
+In this file, paste the following code:
+ <VirtualHost *:80>
+ ServerName netalertx
    ProxyPreserveHost On
    <Location "/netalertx/">
        ProxyPass http://localhost:20211/
        ProxyPassReverse http://localhost:20211/
    </Location>
+ </VirtualHost>
+
+Check your config with httpd -t (or apache2ctl -t on Debian/Ubuntu). If there are any issues, it will tell you.
Activate the new website by running the following command:
+a2ensite netalertx or service apache2 reload
On your Apache server, create a new file called /etc/apache2/sites-available/netalertx.conf.
+In this file, paste the following code:
+ <VirtualHost *:443>
+ ServerName netalertx
+ SSLEngine On
+ SSLCertificateFile /etc/ssl/certs/netalertx.pem
+ SSLCertificateKeyFile /etc/ssl/private/netalertx.key
+ ProxyPreserveHost On
+ ProxyPass / http://localhost:20211/
+ ProxyPassReverse / http://localhost:20211/
+ </VirtualHost>
+
+Check your config with httpd -t (or apache2ctl -t on Debian/Ubuntu). If there are any issues, it will tell you.
Activate the new website by running the following command:
+a2ensite netalertx or service apache2 reload
Once Apache restarts, you should be able to access the proxy website at https://netalertx/
+On your Apache server, create a new file called /etc/apache2/sites-available/netalertx.conf.
+In this file, paste the following code:
+ <VirtualHost *:443>
+ ServerName netalertx
+ SSLEngine On
+ SSLCertificateFile /etc/ssl/certs/netalertx.pem
+ SSLCertificateKeyFile /etc/ssl/private/netalertx.key
    ProxyPreserveHost On
    <Location "/netalertx/">
        ProxyPass http://localhost:20211/
        ProxyPassReverse http://localhost:20211/
    </Location>
+ </VirtualHost>
+
+Check your config with httpd -t (or apache2ctl -t on Debian/Ubuntu). If there are any issues, it will tell you.
Activate the new website by running the following command:
+a2ensite netalertx or service apache2 reload
++Submitted by s33d1ing. 🙏
+
In the SWAG container create /config/nginx/proxy-confs/netalertx.subfolder.conf with the following contents:
## Version 2023/02/05
+# make sure that your netalertx container is named netalertx
+# netalertx does not require a base url setting
+
+# Since NetAlertX uses a Host network, you may need to use the IP address of the system running NetAlertX for $upstream_app.
+
+location /netalertx {
+ return 301 $scheme://$host/netalertx/;
+}
+
+location ^~ /netalertx/ {
+ # enable the next two lines for http auth
+ #auth_basic "Restricted";
+ #auth_basic_user_file /config/nginx/.htpasswd;
+
+ # enable for ldap auth (requires ldap-server.conf in the server block)
+ #include /config/nginx/ldap-location.conf;
+
+ # enable for Authelia (requires authelia-server.conf in the server block)
+ #include /config/nginx/authelia-location.conf;
+
+ # enable for Authentik (requires authentik-server.conf in the server block)
+ #include /config/nginx/authentik-location.conf;
+
+ include /config/nginx/proxy.conf;
+ include /config/nginx/resolver.conf;
+
+ set $upstream_app netalertx;
+ set $upstream_port 20211;
+ set $upstream_proto http;
+
+ proxy_pass $upstream_proto://$upstream_app:$upstream_port;
+ proxy_set_header Accept-Encoding "";
+
+ proxy_redirect ~^/(.*)$ /netalertx/$1;
+ rewrite ^/netalertx/?(.*)$ /$1 break;
+
+ sub_filter_once off;
+ sub_filter_types *;
+
+ sub_filter 'href="/' 'href="/netalertx/';
+
+ sub_filter '(?>$host)/css' '/netalertx/css';
+ sub_filter '(?>$host)/js' '/netalertx/js';
+
+ sub_filter '/img' '/netalertx/img';
+ sub_filter '/lib' '/netalertx/lib';
+ sub_filter '/php' '/netalertx/php';
+}
+
+++Submitted by Isegrimm 🙏 (based on this discussion)
+
Assuming the user already has a working Traefik setup, this is what's needed to make NetAlertX work at a URL like www.domain.com/netalertx/.
Note: Everything in these configs assumes 'www.domain.com' as your domain name and 'section31' as an arbitrary name for your certificate setup. You will have to substitute these with your own.
+Also, I use the prefix 'netalertx'. If you want to use another prefix, change it in these files: dynamic.toml and default.
Content of my yaml-file (this is the generic Traefik config, which defines which ports to listen on, redirects http to https, and sets up the certificate process). It also contains Authelia, which I use for authentication. This part contains nothing specific to NetAlertX.
+version: '3.8'
+
+services:
+ traefik:
+ image: traefik
+ container_name: traefik
+ command:
+ - "--api=true"
+ - "--api.insecure=true"
+ - "--api.dashboard=true"
+ - "--entrypoints.web.address=:80"
+ - "--entrypoints.web.http.redirections.entryPoint.to=websecure"
+ - "--entrypoints.web.http.redirections.entryPoint.scheme=https"
+ - "--entrypoints.websecure.address=:443"
+ - "--providers.file.filename=/traefik-config/dynamic.toml"
+ - "--providers.file.watch=true"
+ - "--log.level=ERROR"
+ - "--certificatesresolvers.section31.acme.email=postmaster@domain.com"
+ - "--certificatesresolvers.section31.acme.storage=/traefik-config/acme.json"
+ - "--certificatesresolvers.section31.acme.httpchallenge=true"
+ - "--certificatesresolvers.section31.acme.httpchallenge.entrypoint=web"
+ ports:
+ - "80:80"
+ - "443:443"
+ - "8080:8080"
+ volumes:
+ - "/var/run/docker.sock:/var/run/docker.sock:ro"
+ - /appl/docker/traefik/config:/traefik-config
+ depends_on:
+ - authelia
+ restart: unless-stopped
+ authelia:
+ container_name: authelia
+ image: authelia/authelia:latest
+ ports:
+ - "9091:9091"
+ volumes:
+ - /appl/docker/authelia:/config
    restart: unless-stopped
+
Snippet of the dynamic.toml file (referenced in the yml-file above) that defines the config for NetAlertX:
The following are self-defined keywords, everything else is traefik keywords:
- netalertx-router
- netalertx-service
- auth
- netalertx-stripprefix
+[http.routers]
+ [http.routers.netalertx-router]
+ entryPoints = ["websecure"]
+ rule = "Host(`www.domain.com`) && PathPrefix(`/netalertx`)"
+ service = "netalertx-service"
    middlewares = ["auth", "netalertx-stripprefix"]
+ [http.routers.netalertx-router.tls]
+ certResolver = "section31"
+ [[http.routers.netalertx-router.tls.domains]]
+ main = "www.domain.com"
+
+[http.services]
+ [http.services.netalertx-service]
+ [[http.services.netalertx-service.loadBalancer.servers]]
+ url = "http://internal-ip-address:20211/"
+
+[http.middlewares]
+ [http.middlewares.auth.forwardAuth]
+ address = "http://authelia:9091/api/verify?rd=https://www.domain.com/authelia/"
+ trustForwardHeader = true
+ authResponseHeaders = ["Remote-User", "Remote-Groups", "Remote-Name", "Remote-Email"]
+ [http.middlewares.netalertx-stripprefix.stripprefix]
    prefixes = ["/netalertx"]
+ forceSlash = false
+
+
To make NetAlertX work with this setup, I modified the default file at /etc/nginx/sites-available/default in the docker container by copying it to my local filesystem, adding the changes as specified by cvc90, and mounting the new file into the docker container, overwriting the original one. By mapping the file instead of changing the file in-place, the changes persist if an updated Docker image is pulled. This is also a downside when the default file is updated upstream, so I only use this as a temporary solution until the Docker image is updated with this change.
Default-file:
+server {
+ listen 80 default_server;
+ root /var/www/html;
+ index index.php;
+ #rewrite /netalertx/(.*) / permanent;
+ add_header X-Forwarded-Prefix "/netalertx" always;
+ proxy_set_header X-Forwarded-Prefix "/netalertx";
+
+ location ~* \.php$ {
+ fastcgi_pass unix:/run/php/php8.2-fpm.sock;
+ include fastcgi_params;
+ fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+ fastcgi_param SCRIPT_NAME $fastcgi_script_name;
+ fastcgi_connect_timeout 75;
+ fastcgi_send_timeout 600;
+ fastcgi_read_timeout 600;
+ }
+}
+
+Mapping the updated file (on the local filesystem at /appl/docker/netalertx/default) into the docker container:
...
+ volumes:
+ - /appl/docker/netalertx/default:/etc/nginx/sites-available/default
+...
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ NetAlertX provides powerful tools for network scanning, presence detection, and automation. However, it is up to you—the deployer—to ensure that your instance is properly secured.
This includes (but is not limited to):
- Controlling who has access to the UI and API
- Following network and container security best practices
- Running NetAlertX only on networks where you have legal authorization
- Keeping your deployment up to date with the latest patches
+++NetAlertX is not responsible for misuse, misconfiguration, or unsecure deployments. Always test and secure your setup before exposing it to the outside world.
+
NetAlertX is a powerful network scanning and automation framework. With that power comes responsibility. It is your responsibility to secure your deployment, especially if you're running it outside a trusted local environment.
+NetAlertX is designed to be run on private LANs, not the open internet.
+Recommended: Use a VPN to access NetAlertX from remote locations.
+Tailscale sets up a private mesh network between your devices. It's fast to configure and ideal for NetAlertX.
+👉 Get started with Tailscale
By default, NetAlertX does not require login. Before exposing the UI in any way:
+Enable password protection:
+ SETPWD_enable_password=true
+ SETPWD_password=your_secure_password
Passwords are stored as SHA256 hashes
+Default password (if not changed): 123456 — change it ASAP!
+++To disable authenticated login, set
+SETPWD_enable_password=falseinapp.conf
Firewall / Network Rules
+ Restrict UI/API access to trusted IPs only.
Limit Docker Capabilities
+ Avoid --privileged. Use --cap-add=NET_RAW and others only if required by your scan method.
Keep NetAlertX Updated
+ Regular updates contain bug fixes and security patches.
Plugin Permissions
+ Disable unused plugins. Only install from trusted sources.
Use Read-Only API Keys
+ When integrating NetAlertX with other tools, scope keys tightly.
Use read-only mount options where possible (:ro).
Do not run as root unless absolutely necessary.
Use docker scan or other container image vulnerability scanners.
Use --network host only on trusted networks and only if needed for ARP-based scans.
If you discover a vulnerability or security concern, please report it privately to:
+ +We take security seriously and will work to patch confirmed issues promptly. Your help in responsible disclosure is appreciated!
+By following these recommendations, you can ensure your NetAlertX deployment is both powerful and secure.
+
+
+
+ Your network security monitor has the "keys to the kingdom," making it a prime target for attackers. If it gets compromised, the game is over.
+NetAlertX is engineered from the ground up to prevent this. It's not just an app; it's a purpose-built security appliance. Its core design is built on a zero-trust philosophy, which is a modern way of saying we assume a breach will happen and plan for it. This isn't a single "lock on the door"; it's a "defense-in-depth" strategy, more like a medieval castle with a moat, high walls, and guards at every door.
+Here’s a breakdown of the defensive layers you get, right out of the box using the default configuration.
+Methodology: The core application and its system files are treated as immutable. Once built, the app's code is "set in concrete," preventing attackers from modifying it or planting malware.
+Immutable Filesystem: At runtime, the container's entire filesystem is set to read_only: true. The application code, system libraries, and all other files are literally frozen. This single control neutralizes a massive range of common attacks.
"Ownership-as-a-Lock" Pattern: During the build, all system files are assigned to a special readonly user. This user has no login shell and no power to write to any files, even its own. It’s a clever, defense-in-depth locking mechanism.
Data Segregation: All user-specific data (like configurations and the device database) is stored completely outside the container in Docker volumes. The application is disposable; the data is persistent.
+What's this mean to you: Even if an attacker gets in, they cannot modify the application code or plant malware. It's like the app is set in digital concrete.
+Methodology: The principle of least privilege is strictly enforced. Every process gets only the absolute minimum set of permissions needed for its specific job.
+Non-Privileged Execution: The entire NetAlertX stack runs as a dedicated, low-power, non-root user (netalertx). No "god mode" privileges are available to the application.
Kernel-Level Capability Revocation: The container is launched with cap_drop: - ALL, which tells the Linux kernel to revoke all "root-like" special powers.
Binary-Specific Privileges (setcap): This is the "keycard" metaphor in action. After revoking all powers, the system uses setcap to grant specific, necessary permissions only to the binaries that absolutely require them (like nmap and arp-scan). This means that even if an attacker compromises the web server, they can't start scanning the network. The web server's "keycard" doesn't open the "scanning" door.
What's this mean to you: A security breach is firewalled. An attacker who gets into the web UI does not have the "keycard" to start scanning your network or take over the system. The breach is contained.
+Methodology: The potential attack surface is aggressively minimized by removing every non-essential tool an attacker would want to use.
+Package Manager Removal: The hardened build stage explicitly deletes the Alpine package manager (apk del apk-tools). This makes it impossible for an attacker to simply apk add their malicious toolkit.
sudo Neutralization: All sudo configurations are removed, and the /usr/bin/sudo command is replaced with a non-functional shim. Any attempt to escalate privileges this way will fail.
Build Toolchain Elimination: The Dockerfile uses a multi-stage build. The initial "builder" stage, which contains all the powerful compilers (gcc) and development tools, is completely discarded. The final production image is lean and contains no build tools.
Minimal User & Group Files: The hardened stage scrubs the system's passwd and group files, removing all default system users to minimize potential avenues for privilege escalation.
What's this mean to you: An attacker who breaks in finds themselves in an empty room with no tools. They have no sudo to get more power, no package manager to download weapons, and no compilers to build new ones.
Methodology: All writable locations are treated as untrusted, temporary, and non-executable by default.
+In-Memory Volatile Storage: The docker-compose.yml configuration maps all temporary directories (e.g., /tmp/log, /tmp/api, /tmp) to in-memory tmpfs filesystems. They do not exist on the host's disk.
Volatile Data: Because these locations exist only in RAM, their contents are instantly and irrevocably erased when the container is stopped. This provides a "self-cleaning" mechanism that purges any attacker-dropped files or payloads on every single restart.
+Secure Mount Flags: These in-memory mounts are configured with the noexec flag. This is a critical security control: it prohibits the execution of any binary or script from a location that is writable.
What's this mean to you: Any malicious file an attacker does manage to drop is written in invisible, non-permanent ink. The file is written to RAM, not disk, so it vaporizes the instant you restart the container. Even worse for them, the noexec flag means they can't even run the file in the first place.
Methodology: The container is constrained by resource limits to function as a "good citizen" on the host system. This prevents a compromised or runaway process from consuming excessive resources, a common vector for Denial of Service (DoS) attacks.
+Process Limiting: The docker-compose.yml defines a pids_limit: 512. This directly mitigates "fork bomb" attacks, where a process attempts to crash the host by recursively spawning thousands of new processes.
Memory & CPU Limits: The configuration file defines strict resource limits to prevent any single process from exhausting the host's available system resources.
+What's this mean to you: NetAlertX is a "good neighbor" and can't be used to crash your host machine. Even if a process is compromised, it's in a digital straitjacket and cannot pull a "denial of service" attack by hogging all your CPU or memory.
+Methodology: Before any services start, NetAlertX runs a comprehensive "pre-flight" check to ensure its own security and configuration are sound. It's like a built-in auditor who verifies its own defenses.
+Active Self-Diagnosis: On every single boot, NetAlertX runs a series of startup pre-checks—and it's fast. The entire self-check process typically completes in less than a second, letting you get to the web UI in about three seconds from startup.
+Validates Its Own Security: These checks actively inspect the other security features. For example, check-0-permissions.sh validates that all the "Digital Concrete" files are locked down and all the "Self-Cleaning" areas are writable, just as they should be. It also checks that the correct netalertx user is running the show, not root.
Catches Misconfigurations: This system acts as a "safety inspector" that catches misconfigurations before they can become security holes. If you've made a mistake in your configuration (like a bad folder permission or incorrect network mode), NetAlertX will tell you in the logs why it can't start, rather than just failing silently.
+What's this mean to you: The system is self-aware and checks its own work. You get instant feedback if a setting is wrong, and you get peace of mind on every single boot knowing all these security layers are active and verified, all in about one second.
+No single security control is a silver bullet. The robust security posture of NetAlertX is achieved through defense in depth, layering these methodologies.
+An adversary must not only gain initial access but must also find a way to write a payload to a non-executable, in-memory location, without access to any standard system tools, sudo, or a package manager. And they must do this while operating as an unprivileged user in a resource-limited environment where the application code is immutable and actively checks its own integrity on every boot.
+
+
+
+ The Sessions Section shows a device’s connection history. All data is automatically detected and cannot be edited.
+
| Field | +Description | +Editable? | +
|---|---|---|
| First Connection | +The first time the device was detected on the network. | +❌ Auto-detected | +
| Last Connection | +The most recent time the device was online. | +❌ Auto-detected | +
Captured details include:
+Connection type (wired or wireless)
+Triggers: + Devices are flagged when session data is incomplete, inconsistent, or conflicting. Examples include:
+Missing first or last connection timestamps
+Sessions showing a device as connected and disconnected at the same time
+System response:
+Automatically highlights affected devices in the Sessions Section.
Attempts to infer missing information from available data.
+User impact:
+Users do not need to manually fix session data.
+This session information feeds directly into Monitoring → Presence, providing a live view of which devices are currently online.
+
+
+
+
This is an explanation of how settings are handled, intended for anyone thinking about writing their own plugin or contributing to the project.
+If you are a user of the app, settings have a detailed description in the Settings section of the app. Open an issue if you'd like to clarify any of the settings.
+The source of truth for user-defined values is the app.conf file. Editing the file makes the App overwrite values in the Settings database table and in the table_settings.json file.
The Settings database table contains settings for App run purposes. The table is recreated every time the App restarts. The settings are loaded from the source-of-truth, that is the app.conf file. A high-level overview on the database structure can be found in the database documentation.
This is the API endpoint that reflects the state of the Settings database table. Settings can be accessed with the:
getSetting(key) JavaScript methodThe json file is also cached on the client-side local storage of the browser.
+Note
This is the source of truth for settings. User-defined values in this file always override default values specified in the Plugin definition.
+The App generates two app.conf entries for every setting (Since version 23.8+). One entry is the setting value, the second is the __metadata associated with the setting. This __metadata entry contains the full setting definition in JSON format. Currently unused, but intended to be used in future to extend the Settings system.
Note
This is the preferred way of adding settings going forward. I'll likely be migrating all app settings into plugin-based settings.
+Plugin settings are loaded dynamically from the config.json of individual plugins. If a setting isn't defined in the app.conf file, it is initialized via the default_value property of a setting from the config.json file. Check the Plugins documentation, section ⚙ Setting object structure for details on the structure of the setting.
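Conceptually, the value of a plugin setting is resolved with a simple fallback: a user-defined value from app.conf wins, otherwise the plugin's default_value from config.json is used. The following Python snippet is a simplified sketch of that described behavior, not the actual initialise.py code:

# Simplified sketch: user-defined values from app.conf take precedence,
# otherwise the plugin's default_value from config.json is used.
def resolve_setting(key, user_config, plugin_definition):
    if key in user_config:                       # value from app.conf
        return user_config[key]
    return plugin_definition["default_value"]    # fallback from config.json

user_config = {"ARPSCAN_RUN_TIMEOUT": 300}
plugin_definition = {"function": "RUN_TIMEOUT", "default_value": 120}
print(resolve_setting("ARPSCAN_RUN_TIMEOUT", user_config, plugin_definition))  # 300
print(resolve_setting("NMAPDEV_RUN_TIMEOUT", {}, plugin_definition))           # 120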

The process flow is mostly managed by the initialise.py file.
+The script is responsible for reading user-defined values from a configuration file (app.conf), initializing settings, and importing them into a database. It also handles plugins and their configurations.
Here's a high-level description of the code:
+ccd: This function is used to handle user-defined settings and configurations. It takes several parameters related to the setting's name, default value, input type, options, group, and more. It saves the settings and their metadata in different lists (conf.mySettingsSQLsafe and conf.mySettings).
importConfigs: This function is the main entry point of the script. It imports user settings from a configuration file, processes them, and saves them to the database.
read_config_file: This function reads the configuration file (app.conf) and returns a dictionary containing the key-value pairs from the file.
Importing Configuration and Initializing Settings:
+The importConfigs function starts by checking the modification time of the configuration file to determine if it needs to be re-imported. If the file has not been modified since the last import, the function skips the import process.
The function reads the configuration file using the read_config_file function, which returns a dictionary of settings.
The script then initializes various user-defined settings using the ccd function, based on the values read from the configuration file. These settings are categorized into groups such as "General," "Email," "Webhooks," "Apprise," and more.
Plugin Handling:
+It also handles scheduling for plugins with specific RUN_SCHD settings.
Saving Settings to the Database:
+The script clears the existing settings in the database and inserts the updated settings into the database using SQL queries.
+Updating the API and Performing Cleanup:
+
+
+
+
+ The SMTP plugin supports any SMTP server. Here are some commonly used services to help speed up your configuration.
+Note
If you are using a self-hosted SMTP server, SSH into the container and verify (e.g. via ping) that your server is reachable from within the NetAlertX container. See also how to SSH into the container if you are running it as a Home Assistant addon.
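If ping is inconclusive, a small Python check run from inside the container can confirm that the SMTP port itself is reachable. This is a generic sketch; the host name and port below are placeholders you need to replace with your own server's values:

import smtplib

# Replace with your SMTP server and port (use SMTP_SSL for port 465).
host, port = "smtp.example.com", 587
try:
    with smtplib.SMTP(host, port, timeout=10) as server:
        print(server.noop())  # a (250, ...) reply means the server responded
except OSError as err:
    print(f"Cannot reach {host}:{port} -> {err}")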
Create an app password by following the instructions from Google; you need to enable 2FA for this to work.
https://support.google.com/accounts/answer/185833
+Specify the following settings:
+ SMTP_RUN='on_notification'
+ SMTP_SKIP_TLS=True
+ SMTP_FORCE_SSL=True
+ SMTP_PORT=465
+ SMTP_SERVER='smtp.gmail.com'
+ SMTP_PASS='16-digit passcode from google'
+ SMTP_REPORT_TO='some_target_email@gmail.com'
+
Brevo allows for 300 free emails per day as of the time of writing.
+SMTP_SERVER='smtp-relay.brevo.com'
+SMTP_PORT=587
+SMTP_SKIP_LOGIN=False
+SMTP_USER='user@email.com'
+SMTP_PASS='xsmtpsib-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxx'
+SMTP_SKIP_TLS=False
+SMTP_FORCE_SSL=False
+SMTP_REPORT_TO='some_target_email@gmail.com'
+SMTP_REPORT_FROM='NetAlertX <user@email.com>'
+
+ SMTP_RUN='on_notification'
+ SMTP_SERVER='mail.gmx.com'
+ SMTP_PORT=465
+ SMTP_USER='gmx_email@gmx.com'
+ SMTP_PASS='<your Application-specific password>'
+ SMTP_SKIP_TLS=True
+ SMTP_FORCE_SSL=True
+ SMTP_SKIP_LOGIN=False
+ SMTP_REPORT_FROM='gmx_email@gmx.com' # this has to be the same email as in SMTP_USER
+ SMTP_REPORT_TO='some_target_email@gmail.com'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ You need to specify the network interface and the network mask. You can also configure multiple subnets and specify VLANs (see VLAN exceptions below).
+ARPSCAN can scan multiple networks if the network allows it. To scan networks directly, the subnets must be accessible from the network where NetAlertX is running. This means NetAlertX needs to have access to the interface attached to that subnet.
Warning
+If you don't see all expected devices run the following command in the NetAlertX container (replace the interface and ip mask):
+sudo arp-scan --interface=eth0 192.168.1.0/24
If this command returns no results, the network is not accessible due to your network or firewall restrictions (Wi-Fi Extenders, VPNs and inaccessible networks). If direct scans are not possible, check the remote networks documentation for workarounds.
+Note
+Please use the UI to configure settings as it ensures the config file is in the correct format. Edit app.conf directly only when really necessary.
+
Example for a single subnet: SCAN_SUBNETS = ['192.168.1.0/24 --interface=eth0']
Example for multiple subnets and VLANs: SCAN_SUBNETS = ['192.168.1.0/24 --interface=eth0','192.168.1.0/24 --interface=eth1 --vlan=107']
Tip
+When adding more subnets, you may need to increase both the scan interval (ARPSCAN_RUN_SCHD) and the timeout (ARPSCAN_RUN_TIMEOUT)—as well as similar settings for related plugins.
If the timeout is too short, you may see timeout errors in the log. To prevent the application from hanging due to unresponsive plugins, scans are canceled when they exceed the timeout limit.
+To fix this:
+- Reduce the subnet size (e.g., change /16 to /24).
+- Increase the timeout (e.g., set ARPSCAN_RUN_TIMEOUT to 300 for a 5-minute timeout).
+- Extend the scan interval (e.g., set ARPSCAN_RUN_SCHD to */10 * * * * to scan every 10 minutes).
For more troubleshooting tips, see Debugging Plugins.
+Example value: 192.168.1.0/24
The arp-scan time itself depends on the number of IP addresses to check.
++The number of IPs to check depends on the network mask you set in the
+SCAN_SUBNETSsetting.
+For example, a/24mask results in 256 IPs to check, whereas a/16mask checks around 65,536 IPs. Each IP takes a couple of seconds, so an incorrect configuration could makearp-scantake hours instead of seconds.
Specify the network filter, which significantly speeds up the scan process. For example, the filter 192.168.1.0/24 covers IP ranges from 192.168.1.0 to 192.168.1.255.
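You can quickly check how many addresses a given mask covers with Python's ipaddress module; a /16 is 256 times larger than a /24, which directly affects scan time:

import ipaddress

print(ipaddress.ip_network("192.168.1.0/24").num_addresses)  # 256
print(ipaddress.ip_network("192.168.0.0/16").num_addresses)  # 65536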
Example value: --interface=eth0
The adapter will probably be eth0 or eth1. (Check System Info > Network Hardware, or run iwconfig in the container to find your interface name(s)).

Tip
+As an alternative to iwconfig, run ip -o link show | awk -F': ' '!/lo|vir|docker/ {print $2}' in your container to find your interface name(s) (e.g.: eth0, eth1):
+Synology-NAS:/# ip -o link show | awk -F': ' '!/lo|vir|docker/ {print $2}'
+sit0@NONE
+eth1
+eth0
Example value: --vlan=107
Append --vlan=107 to the SCAN_SUBNETS entry (e.g.: 192.168.1.0/24 --interface=vmbr0 --vlan=107); add one entry per VLAN when scanning multiple VLANs.
Community-sourced content by mscreations from this discussion.
+
Tested Setup: Bare Metal → Hyper-V on Win Server 2019 → Ubuntu 22.04 VM → Docker → NetAlertX.
+Approach 1 (may cause issues):
+Configure multiple network adapters in Hyper-V with distinct VLANs connected to each one using Hyper-V's network setup. However, this action can potentially lead to the Docker host's inability to handle network traffic correctly. This might interfere with other applications such as Authentik.
Approach 2 (working example):
+Network connections to switches are configured as trunk and allow all VLANs access to the server.
+By default, Hyper-V only allows untagged packets through to the VM interface, blocking VLAN-tagged packets. To fix this, follow these steps:
+ Set-VMNetworkAdapterVlan -VMName <Docker VM Name> -Trunk -NativeVlanId 0 -AllowedVlanIdList "<comma separated list of vlans>"
+ network:
+ ethernets:
+ eth0:
+ dhcp4: yes
+ vlans:
+ eth0.2:
+ id: 2
+ link: eth0
+ addresses: [ "192.168.2.2/24" ]
+ routes:
+ - to: 192.168.2.0/24
+ via: 192.168.1.1
Run sudo netplan apply to activate the interfaces for scanning in NetAlertX.
In this case, use 192.168.2.0/24 --interface=eth0.2 in NetAlertX.
Please note that macvlan interfaces are generally not reachable from the host they are configured on. This is general networking behavior, but feel free to clarify via a PR/issue.
+
+
+
+
+ There are different ways to install NetAlertX on a Synology, including SSH-ing into the machine and using the command line. For this guide, we will use the Project option in Container manager.
+The folders you are creating below will contain the configuration and the database. Back them up regularly.
+netalertxdb sub-folder
+
+ 
config sub-folder
+ 
Fill in the details:
+Project name: netalertx
/app_storage/netalertx (will differ from yours)services:
+ netalertx:
+ container_name: netalertx
+ # use the below line if you want to test the latest dev image
+ # image: "ghcr.io/jokob-sk/netalertx-dev:latest"
+ image: "ghcr.io/jokob-sk/netalertx:latest"
+ network_mode: "host"
+ restart: unless-stopped
+ cap_drop: # Drop all capabilities for enhanced security
+ - ALL
+ cap_add: # Re-add necessary capabilities
+ - NET_RAW
+ - NET_ADMIN
+ - NET_BIND_SERVICE
+ volumes:
+ - /app_storage/netalertx:/data
+ # to sync with system time
+ - /etc/localtime:/etc/localtime:ro
+ tmpfs:
+ # All writable runtime state resides under /tmp; comment out to persist logs between restarts
+ - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
+ environment:
+ - PORT=20211
+
+
Replace the paths to your volume and comment out unnecessary line(s):
+This is only an example, your paths will differ.
+volumes:
+ - /volume1/app_storage/netalertx:/data
+
+
Change 20211 to an unused port if this port is already used.
<Synology URL>:20211 (or your custom port).See also the Permission overview guide.
+Tip
+If you are facing permissions issues run the following commands on your server. This will change the owner and assure sufficient access to the database and config files that are stored in the /local_data_dir/db and /local_data_dir/config folders (replace local_data_dir with the location where your /db and /config folders are located).
sudo chown -R 20211:20211 /local_data_dir
sudo chmod -R a+rwx /local_data_dir
You can also execute the above bash commands via the UI by creating a one-off scheduled task.
+


Replace /local_data_dir with the parent folder of your /db and /config folders.

In case of issues, double-check the Permission overview guide.
+
+
+
+ Warning
+For versions prior to v25.6.7 upgrade to version v25.5.24 first (docker pull ghcr.io/jokob-sk/netalertx:25.5.24) as later versions don't support a full upgrade. Alternatively, devices and settings can be migrated manually, e.g. via CSV import.
+See the Migration guide for details.
This guide outlines approaches for updating Docker containers, usually when upgrading to a newer version of NetAlertX. Each method offers different benefits depending on the situation. Here are the methods:
+You can choose any approach that fits your workflow.
+++In the examples I assume that the container name is
+netalertxand the image name isnetalertxas well.
Note
+See also Backup strategies to be on the safe side.
+Use this method when you need precise control over a single container or when dealing with a broken container that needs immediate attention. +Example Commands
+To manually update the netalertx container, stop it, delete it, remove the old image, and start a fresh one with docker-compose.
# Stop the container
+sudo docker container stop netalertx
+
+# Remove the container
+sudo docker container rm netalertx
+
+# Remove the old image
+sudo docker image rm netalertx
+
+# Pull and start a new container
+sudo docker-compose up -d
+
+You can also use --pull always to ensure Docker pulls the latest image before starting the container:
sudo docker-compose up --pull always -d
+
+Always check the Dockcheck docs if encountering issues with the guide below.
+Dockcheck is a useful tool if you have multiple containers to update and some flexibility for handling potential issues that might arise during mass updates. Dockcheck allows you to inspect each container and decide when to update.
+You might use Dockcheck to:
+Dockcheck can help streamline bulk updates, especially if you’re managing multiple containers.
+Below is a script I use to run an update of the Dockcheck script and start a check for new containers:
+cd /path/to/Docker &&
+rm dockcheck.sh &&
+wget https://raw.githubusercontent.com/mag37/dockcheck/main/dockcheck.sh &&
+sudo chmod +x dockcheck.sh &&
+sudo ./dockcheck.sh
+
+Always check the watchtower docs if encountering issues with the guide below.
+Watchtower monitors your Docker containers and automatically updates them when new images are available. This is ideal for ongoing updates without manual intervention.
+docker pull containrrr/watchtower
+
+docker run -d \
+ --name watchtower \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ containrrr/watchtower \
+ --interval 300 # Check for updates every 5 minutes
+
+You can specify which containers to monitor by listing them. For example, to monitor netalertx only:
+docker run -d \
+ --name watchtower \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ containrrr/watchtower netalertx
+
+
+This assumes you're using Portainer to manage Docker (or Docker Swarm) and want to pull the latest version of an image and redeploy the container.
+Note
+If you're using Docker Swarm (under "Stacks" or "Services"):
+:latest if your image tag is latest (not recommended for production).| Method | +Type | +Pros | +Cons | +
|---|---|---|---|
| Manual | +CLI | +Full control, no dependencies | +Tedious for many containers | +
| Dockcheck | +CLI Script | +Great for batch updates | +Needs setup, semi-automated | +
| Watchtower | +Daemonized | +Fully automated updates | +Less control, version drift | +
| Portainer | +UI | +Easy via web interface | +No auto-updates | +
These approaches allow you to maintain flexibility in how you update Docker containers, depending on the urgency and scale of the update.
+
+
+
+ Since version 23.01.14 NetAlertX uses a simple timestamp-based version check to verify if a new version is available. You can check the current and past releases here, or have a look at what I'm currently working on.
If you are not on the latest version, the app will notify you that a new version is available in the following ways:
+If any notification occurs and an email is sent, the email will contain a note that a new version is available. See the sample email below:
+
In the UI via a notification Icon and via a custom message in the Maintenance section.
+
For comparison, this is how the UI looks if you are on the latest stable image:
+
During the build, a /app/front/buildtimestamp.txt file is created. The app then periodically checks GitHub's REST-based JSON endpoint for a release with a newer timestamp (check the def isNewVersion: method for details).
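A simplified sketch of such a timestamp comparison is shown below. The file path comes from the paragraph above; the releases URL, the response field names, and the assumption that the file holds a Unix epoch timestamp are illustrative guesses based on GitHub's public API, not a copy of the actual isNewVersion code:

import datetime
import json
import urllib.request

# Assuming the build timestamp file contains a Unix epoch timestamp.
with open("/app/front/buildtimestamp.txt") as f:
    build_time = datetime.datetime.fromtimestamp(int(f.read().strip()),
                                                 tz=datetime.timezone.utc)

# Assumed endpoint: GitHub's REST API for the project's latest release.
url = "https://api.github.com/repos/jokob-sk/NetAlertX/releases/latest"
with urllib.request.urlopen(url) as resp:
    release = json.load(resp)

published = datetime.datetime.fromisoformat(
    release["published_at"].replace("Z", "+00:00"))
print("New version available!" if published > build_time else "Up to date.")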
+
+
+
+ Note
+You need to enable the WEBHOOK plugin first in order to follow this guide. See the Plugins guide for details.
N8N can be used for more advanced conditional notification use cases. For example, you may want to get notified only if two out of a specified list of devices are down. Or you can use other plugins to process the notifications further. Below is a simple example of sending an email on a webhook.
+
See sample JSON if you want to see the JSON paths used in the email template below
+
Events count: {{ $json["body"]["attachments"][0]["text"]["events"].length }}
+New devices count: {{ $json["body"]["attachments"][0]["text"]["new_devices"].length }}
+
+

+
+
+
+ Note
+You need to enable the WEBHOOK plugin first in order to follow this guide. See the Plugins guide for details.
NetAlertX will use the configured secret to create a hash signature of the request body. This SHA256-HMAC signature will appear in the X-Webhook-Signature header of each request to the webhook target URL. You can use the value of this header to validate the request was sent by NetAlertX.
All you need to do in order to add a signature to the request headers is to set the WEBHOOK_SECRET config value to a non-empty string.
There are a few things to keep in mind when validating the webhook delivery:
The X-Webhook-Signature header always starts with sha256=.
The signature is an HMAC-SHA256 hex digest computed using WEBHOOK_SECRET as the key and the request body as the message.
Never use a plain == operator. Instead, consider using a method like secure_compare or crypto.timingSafeEqual, which performs a "constant time" string comparison to help mitigate certain timing attacks against regular equality operators, or regular loops in JIT-optimized languages.
+secret: 'this is my secret'
payload: '{"test":"this is a test body"}'
If your implementation is correct, the signature you generated should match the following:
+signature: bed21fcc34f98e94fd71c7edb75e51a544b4a3b38b069ebaaeb19bf4be8147e9
X-Webhook-Signature: sha256=bed21fcc34f98e94fd71c7edb75e51a544b4a3b38b069ebaaeb19bf4be8147e9
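As an illustration, a validation in Python could look like this, using the test secret and payload above; hmac.compare_digest provides the constant-time comparison mentioned earlier:

import hashlib
import hmac

secret = "this is my secret"
payload = b'{"test":"this is a test body"}'
received_header = "sha256=bed21fcc34f98e94fd71c7edb75e51a544b4a3b38b069ebaaeb19bf4be8147e9"

# Recompute the HMAC-SHA256 signature of the request body.
expected = "sha256=" + hmac.new(secret.encode(), payload, hashlib.sha256).hexdigest()

# Constant-time comparison to mitigate timing attacks.
print(hmac.compare_digest(expected, received_header))  # True if the signature matches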
If you want to learn more about webhook security, take a look at GitHub's webhook documentation.
+You can find examples for validating a webhook delivery here.
+
+
+
+ The application uses the following default ports:
20211 (web UI)
20212 (GraphQL API backend)
The Web UI is served by an nginx server, while the API backend runs on a Flask (Python) server.
+PORT environment variable in the docker-compose.yml file.GRAPHQL_PORT setting, either directly or via Docker:
+ yaml
+ APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20212"}For more information, check the Docker installation guide.
+Follow all of the below in order to disqualify potential causes of issues and to troubleshoot these problems faster.
+When opening an issue or debugging:
Try accessing the app via HTTP://<your_server>:20211 (or your custom port).
Check for port conflicts inside the container: install lsof (sudo apk add lsof) and run sudo lsof -i.
Check the output of the nginx command in the container: if you see nginx: [emerg] bind() to 0.0.0.0:20211 failed (98: Address in use), try using a different port number.
Check for browser console (F12 browser dev console) errors + check different browsers.
Refresh the browser cache (usually Shift + Refresh), try a private window, or different browsers. Please also refresh the app cache by clicking the 🔃 (reload) button in the header of the application.
+If you have any reverse proxy or similar, try disabling it.
If you are using a firewall, try temporarily disabling it.
+If you haven't, post your docker compose/run command.
+In the container execute and investigate:
+cat /var/log/nginx/error.log
cat /tmp/log/app.php_errors.log
Tip
+You can try to start the container without mapping the /data/config and /data/db dirs and if the UI shows up then the issue is most likely related to your file system permissions or file ownership.
Please read the Permissions troubleshooting guide and provide a screenshot of the permissions and ownership in the /data/db and /data/config directories.
+
+
+
The workflows module allows you to automate repetitive tasks, making network management more efficient. Whether you need to assign newly discovered devices to a specific Network Node, auto-group devices from a given vendor, unarchive a device if detected online, or automatically delete devices, this module provides the flexibility to tailor the automations to your needs.
+
Below are a few examples that demonstrate how this module can be used to simplify network management tasks.
+Note
+In order to apply a workflow change, you must first Save the changes and then reload the application by clicking Restart server.
+
Triggers define the event that activates a workflow. They monitor changes to objects within the system, such as updates to devices or the insertion of new entries. When the specified event occurs, the workflow is executed.
+Tip
+Workflows not running? Check the Workflows debugging guide how to troubleshoot triggers and conditions.
Example trigger: object type Devices, event type update. This trigger will activate when a Device object is updated.

Conditions determine whether a workflow should proceed based on certain criteria. These criteria can be set for specific fields, such as whether a device is from a certain vendor, or whether it is new or archived. You can combine conditions using logical operators (AND, OR).
Tip
+To better understand how to use specific Device fields, please read through the Database overview guide.
Example condition: logic AND, field devVendor, operator contains (case-insensitive), value Google. This condition checks if the device's vendor is Google. The workflow will only proceed if the condition is true.

Actions define the tasks that the workflow will perform once the conditions are met. Actions can include updating fields or deleting devices.
+You can include multiple actions that should execute once the conditions are met.
Example action: type update_field, field devIsNew, value 0. This action updates the devIsNew field to 0, marking the device as no longer new.
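Putting the three building blocks together, the evaluation of a workflow can be pictured roughly like the sketch below. It is a conceptual Python illustration of the documented trigger, condition, and action flow, not the application's actual workflow engine:

# Conceptual sketch of trigger -> conditions -> actions evaluation.
def run_workflow(event, device):
    # Trigger: a Devices object was updated
    if event != {"object_type": "Devices", "event_type": "update"}:
        return device
    # Condition: devVendor contains "Google" (case-insensitive)
    if "google" not in device.get("devVendor", "").lower():
        return device
    # Action: update_field devIsNew -> 0 (mark the device as no longer new)
    device["devIsNew"] = 0
    return device

device = {"devVendor": "Google Inc.", "devIsNew": 1}
print(run_workflow({"object_type": "Devices", "event_type": "update"}, device))
# -> {'devVendor': 'Google Inc.', 'devIsNew': 0}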
You can find a couple of configuration examples in Workflow Examples.
+Tip
+Share your workflows in Discord or GitHub Discussions.
+
+
+
+
+ Tip
+Before troubleshooting, please ensure you have the right Debugging and LOG_LEVEL set.
+Workflows are triggered by various events. These events are captured and listed in the Integrations -> App Events section of the application.
+Note
Workflow events are processed once every 5 seconds. However, if a scan or other background tasks are running, this can cause a delay of up to a few minutes.
+If an event doesn't trigger a workflow as expected, check the App Events section for the event. You can filter these by the ID of the device (devMAC or devGUID).

Once you find the Event GUID and Object GUID, use them to find the relevant debug entries.
Navigate to Maintenance -> Logs, where you can filter the logs by the Event or Object GUID.
+
Below you can find some example app.log entries that will help you understand why a Workflow was or was not triggered.
16:27:03 [WF] Checking if '13f0ce26-1835-4c48-ae03-cdaf38f328fe' triggers the workflow 'Sample Device Update Workflow'
+16:27:03 [WF] self.triggered 'False' for event '[[155], ['13f0ce26-1835-4c48-ae03-cdaf38f328fe'], [0], ['2025-04-02 05:26:56'], ['Devices'], ['050b6980-7af6-4409-950d-08e9786b7b33'], ['DEVICES'], ['00:11:32:ef:a5:6c'], ['192.168.1.82'], ['050b6980-7af6-4409-950d-08e9786b7b33'], [None], [0], [0], ['devPresentLastScan'], ['online'], ['update'], [None], [None], [None], [None]] and trigger {"object_type": "Devices", "event_type": "insert"}'
+16:27:03 [WF] Checking if '13f0ce26-1835-4c48-ae03-cdaf38f328fe' triggers the workflow 'Location Change'
+16:27:03 [WF] self.triggered 'True' for event '[[155], ['13f0ce26-1835-4c48-ae03-cdaf38f328fe'], [0], ['2025-04-02 05:26:56'], ['Devices'], ['050b6980-7af6-4409-950d-08e9786b7b33'], ['DEVICES'], ['00:11:32:ef:a5:6c'], ['192.168.1.82'], ['050b6980-7af6-4409-950d-08e9786b7b33'], [None], [0], [0], ['devPresentLastScan'], ['online'], ['update'], [None], [None], [None], [None]] and trigger {"object_type": "Devices", "event_type": "update"}'
+16:27:03 [WF] Event with GUID '13f0ce26-1835-4c48-ae03-cdaf38f328fe' triggered the workflow 'Location Change'
+
Note how one trigger executed but the other didn't, based on the different "event_type" values: one is "event_type": "insert", the other "event_type": "update".
Given the event is an update event (note ...['online'], ['update'], [None]... in the event structure), the trigger with "event_type": "insert" didn't execute.
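For reference, the triggers of the two workflows in the log above differ only in the event type. A sketch of just their trigger definitions, reconstructed from the log entries (not a complete workflow configuration):

{
    "name": "Sample Device Update Workflow",
    "trigger": {
        "object_type": "Devices",
        "event_type": "insert"
    }
}
{
    "name": "Location Change",
    "trigger": {
        "object_type": "Devices",
        "event_type": "update"
    }
}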
+
+
+
+ Workflows in NetAlertX automate actions based on real-time events and conditions. Below are practical examples that demonstrate how to build automation using triggers, conditions, and actions.
+This workflow automatically unarchives a device if it was previously archived but has now been detected as online.
+Sometimes devices are manually archived (e.g., no longer expected on the network), but they reappear unexpectedly. This workflow reverses the archive status when such devices are detected during a scan.
+{
+ "name": "Un-archive devices if detected online",
+ "trigger": {
+ "object_type": "Devices",
+ "event_type": "update"
+ },
+ "conditions": [
+ {
+ "logic": "AND",
+ "conditions": [
+ {
+ "field": "devIsArchived",
+ "operator": "equals",
+ "value": "1"
+ },
+ {
+ "field": "devPresentLastScan",
+ "operator": "equals",
+ "value": "1"
+ }
+ ]
+ }
+ ],
+ "actions": [
+ {
+ "type": "update_field",
+ "field": "devIsArchived",
+ "value": "0"
+ }
+ ],
+ "enabled": "Yes"
+}
+
+- Trigger: Listens for updates to device records.
+- Conditions:
+ - `devIsArchived` is `1` (archived).
+ - `devPresentLastScan` is `1` (device was detected in the latest scan).
+- Action: Updates the device to set `devIsArchived` to `0` (unarchived).
+
+Whenever a previously archived device shows up during a network scan, it will be automatically unarchived — allowing it to reappear in your device lists and dashboards.
+This workflow assigns newly added devices with IP addresses in the 192.168.1.* range to a specific network node with MAC address 6c:6d:6d:6c:6c:6c.
When new devices join your network, assigning them to the correct network node is important for accurate topology and grouping. This workflow ensures devices in a specific subnet are automatically linked to the intended node.
+{
+ "name": "Assign Device to Network Node Based on IP",
+ "trigger": {
+ "object_type": "Devices",
+ "event_type": "insert"
+ },
+ "conditions": [
+ {
+ "logic": "AND",
+ "conditions": [
+ {
+ "field": "devLastIP",
+ "operator": "contains",
+ "value": "192.168.1."
+ }
+ ]
+ }
+ ],
+ "actions": [
+ {
+ "type": "update_field",
+ "field": "devNetworkNode",
+ "value": "6c:6d:6d:6c:6c:6c"
+ }
+ ],
+ "enabled": "Yes"
+}
+
- Condition: `devLastIP` contains `192.168.1.` (matches the subnet).
- Action: Sets `devNetworkNode` to the specified MAC address.
New devices with IPs in the 192.168.1.* subnet are automatically assigned to the correct network node, streamlining device organization and reducing manual work.
This workflow automatically marks newly detected Google devices as not new and deletes them immediately.
+You may want to automatically clear out newly detected Google devices (such as Chromecast or Google Home) if they’re not needed in your device database. This workflow handles that clean-up automatically.
+{
+ "name": "Mark Device as Not New and Delete If from Google Vendor",
+ "trigger": {
+ "object_type": "Devices",
+ "event_type": "update"
+ },
+ "conditions": [
+ {
+ "logic": "AND",
+ "conditions": [
+ {
+ "field": "devVendor",
+ "operator": "contains",
+ "value": "Google"
+ },
+ {
+ "field": "devIsNew",
+ "operator": "equals",
+ "value": "1"
+ }
+ ]
+ }
+ ],
+ "actions": [
+ {
+ "type": "update_field",
+ "field": "devIsNew",
+ "value": "0"
+ },
+ {
+ "type": "delete_device"
+ }
+ ],
+ "enabled": "Yes"
+}
+
- Conditions:
  - `devVendor` contains `Google`.
  - `devIsNew` is `1`.
- Actions:
  - Set `devIsNew` to `0` (mark as not new).
  - Delete the device.
Any newly detected Google devices are cleaned up instantly — first marked as not new, then deleted — helping you avoid clutter in your device records.