Merge remote-tracking branch 'origin/main' into hardening

This commit is contained in:
Adam Outler
2025-10-18 13:23:57 -04:00
63 changed files with 3718 additions and 730 deletions

View File

@@ -1,36 +1,32 @@
#!/usr/bin/env python
import json
import subprocess
import argparse
import os
import pathlib
import sys
from datetime import datetime
import time
import re
import unicodedata
import paho.mqtt.client as mqtt
# from paho.mqtt import client as mqtt_client
# from paho.mqtt import CallbackAPIVersion as mqtt_CallbackAPIVersion
import hashlib
import sqlite3
from pytz import timezone
# Register NetAlertX directories
INSTALL_PATH="/app"
INSTALL_PATH = "/app"
sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
# NetAlertX modules
import conf
from const import apiPath, confFileName, logPath
from const import confFileName, logPath
from plugin_utils import getPluginObject
from plugin_helper import Plugin_Objects
from logger import mylog, Logger, append_line_to_file
from helper import timeNowTZ, get_setting_value, bytes_to_string, sanitize_string, normalize_string
from models.notification_instance import NotificationInstance
from logger import mylog, Logger
from helper import timeNowTZ, get_setting_value, bytes_to_string, \
sanitize_string, normalize_string
from database import DB, get_device_stats
from pytz import timezone
# Make sure the TIMEZONE for logging is correct
conf.tz = timezone(get_setting_value('TIMEZONE'))
@@ -49,20 +45,19 @@ plugin_objects = Plugin_Objects(RESULT_FILE)
md5_hash = hashlib.md5()
# globals
mqtt_sensors = []
mqtt_connected_to_broker = False
mqtt_client = None # mqtt client
topic_root = get_setting_value('MQTT_topic_root')
def main():
mylog('verbose', [f'[{pluginName}](publisher) In script'])
mylog('verbose', [f'[{pluginName}](publisher) In script'])
# Check if basic config settings supplied
if check_config() == False:
mylog('verbose', [f'[{pluginName}] ⚠ ERROR: Publisher notification gateway not set up correctly. Check your {confFileName} {pluginName}_* variables.'])
if not check_config():
return
# Create a database connection
@@ -70,60 +65,85 @@ def main():
db.open()
mqtt_start(db)
mqtt_client.disconnect()
plugin_objects.write_result_file()
#-------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# MQTT
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def check_config():
    """
    Check whether the required MQTT connection settings are present.

    Returns:
        bool: True if all required MQTT settings
        ('MQTT_BROKER', 'MQTT_PORT', 'MQTT_USER', 'MQTT_PASSWORD')
        are non-empty; False otherwise. Logs a verbose error message
        if any setting is missing.
    """
    required = ('MQTT_BROKER', 'MQTT_PORT', 'MQTT_USER', 'MQTT_PASSWORD')

    # NOTE: the message is built as a single literal — a backslash
    # continuation inside an f-string would embed the indentation
    # whitespace into the logged text.
    if any(get_setting_value(key) == '' for key in required):
        mylog('verbose', [f'[Check Config] ⚠ ERROR: MQTT service not set up correctly. Check your {confFileName} MQTT_* variables.'])
        return False
    return True
#-------------------------------------------------------------------------------
# Sensor configs are tracking which sensors in NetAlertX exist and if a config has changed
# -----------------------------------------------------------------------------
# Sensor configs are tracking which sensors in NetAlertX exist
# and if a config has changed
class sensor_config:
def __init__(self, deviceId, deviceName, sensorType, sensorName, icon, mac):
def __init__(self,
deviceId,
deviceName,
sensorType,
sensorName,
icon,
mac):
"""
Initialize the sensor_config object with provided parameters. Sets up sensor configuration
and generates necessary MQTT topics and messages based on the sensor type.
Initialize the sensor_config object with provided parameters.
Sets up sensor configuration and generates necessary MQTT topics
and messages based on the sensor type.
"""
# Assign initial attributes
self.deviceId = deviceId
self.deviceName = deviceName
self.sensorType = sensorType
self.sensorName = sensorName
self.icon = icon
self.icon = icon
self.mac = mac
self.model = deviceName
self.hash = ''
self.model = deviceName
self.hash = ''
self.state_topic = ''
self.json_attr_topic = ''
self.topic = ''
self.message = {} # Initialize message as an empty dictionary
self.unique_id = ''
# Call helper functions to initialize the message, generate a hash, and handle plugin object
# Call helper functions to initialize the message, generate a hash,
# and handle plugin object
self.initialize_message()
self.generate_hash()
self.handle_plugin_object()
def initialize_message(self):
"""
Initialize the MQTT message payload based on the sensor type. This method handles sensors of types:
Initialize the MQTT message payload based on the sensor type.
This method handles sensors of types:
- 'timestamp'
- 'binary_sensor'
- 'sensor'
- 'device_tracker'
"""
# Ensure self.message is initialized as a dictionary if not already done
# Ensure self.message is initialized as a dictionary
# if not already done
if not isinstance(self.message, dict):
self.message = {}
@@ -153,7 +173,6 @@ class sensor_config:
"icon": f'mdi:{self.icon}'
})
# Handle 'device_tracker' sensor type
elif self.sensorType == 'device_tracker':
self.topic = f'homeassistant/device_tracker/{self.deviceId}/config'
@@ -229,25 +248,36 @@ class sensor_config:
)
#-------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
def publish_mqtt(mqtt_client, topic, message):
"""
Publishes a message to an MQTT topic using the provided MQTT client.
If the message is not a string, it is converted to a JSON-formatted string.
The function retrieves the desired QoS level from settings and logs the publishing process.
If the client is not connected to the broker, the function logs an error and aborts.
It attempts to publish the message, retrying until the publish status indicates success.
Args:
mqtt_client: The MQTT client instance used to publish the message.
topic (str): The MQTT topic to publish to.
message (Any): The message payload to send. Non-string messages are converted to JSON.
Returns:
bool: True if the message was published successfully, False if not connected to the broker.
"""
status = 1
# convert anything but a simple string to json
if not isinstance(message, str):
message = json.dumps(message).replace("'",'"')
message = json.dumps(message).replace("'", '"')
qos = get_setting_value('MQTT_QOS')
mylog('verbose', [f"[{pluginName}] Sending MQTT topic: {topic}"])
mylog('verbose', [f"[{pluginName}] Sending MQTT message: {message}"])
mylog('debug', [f"[{pluginName}] Sending MQTT topic: {topic}",
f"[{pluginName}] Sending MQTT message: {message}"])
# mylog('verbose', [f"[{pluginName}] get_setting_value('MQTT_QOS'): {qos}"])
if mqtt_connected_to_broker == False:
mylog('verbose', [f"[{pluginName}] ⚠ ERROR: Not connected to broker, aborting."])
if not mqtt_connected_to_broker:
mylog('minimal', [f"[{pluginName}] ⚠ ERROR: Not connected to broker, aborting."])
return False
while status != 0:
@@ -267,45 +297,46 @@ def publish_mqtt(mqtt_client, topic, message):
# mylog('verbose', [f"[{pluginName}] status: {status}"])
# mylog('verbose', [f"[{pluginName}] result: {result}"])
if status != 0:
mylog('verbose', [f"[{pluginName}] Waiting to reconnect to MQTT broker"])
time.sleep(0.1)
if status != 0:
mylog('debug', [f"[{pluginName}] Waiting to reconnect to MQTT broker"])
time.sleep(0.1)
return True
#-------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Create a generic device for overall stats
def create_generic_device(mqtt_client, deviceId, deviceName):
    """
    Register a generic NetAlertX device exposing the overall device
    counters (online/down/all/archived/new/unknown) as MQTT sensors.

    Args:
        mqtt_client: Connected MQTT client used for publishing.
        deviceId (str): Home Assistant device identifier.
        deviceName (str): Human-readable device name.
    """
    # (sensor name, mdi icon) pairs for the aggregate counters
    counters = [
        ('online', 'wifi-check'),
        ('down', 'wifi-cancel'),
        ('all', 'wifi'),
        ('archived', 'wifi-lock'),
        ('new', 'wifi-plus'),
        ('unknown', 'wifi-alert'),
    ]
    for sensorName, icon in counters:
        create_sensor(mqtt_client, deviceId, deviceName, 'sensor', sensorName, icon)
#-------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Register sensor config on the broker
def create_sensor(mqtt_client, deviceId, deviceName, sensorType, sensorName, icon, mac=""):
    """
    Register a sensor configuration on the MQTT broker (Home Assistant
    discovery) if it has not been published before.

    Args:
        mqtt_client: Connected MQTT client used for publishing.
        deviceId (str): Device identifier the sensor belongs to.
        deviceName (str): Human-readable device name.
        sensorType (str): One of 'sensor', 'binary_sensor',
            'device_tracker', 'timestamp'.
        sensorName (str): Name of the individual sensor.
        icon (str): mdi icon name (without the 'mdi:' prefix).
        mac (str): Optional device MAC address.

    Returns:
        sensor_config: The (possibly newly published) sensor config.
    """
    global mqtt_sensors

    # check previous configs
    sensorConfig = sensor_config(deviceId, deviceName, sensorType, sensorName, icon, mac)

    # Create the HA sensor config if a new device is discovered
    if sensorConfig.isNew:
        # add the sensor to the global list to keep track of successfully added sensors
        if publish_mqtt(mqtt_client, sensorConfig.topic, sensorConfig.message):
            # hack - delay adding to the queue in case the process is
            # restarted and previous publish processes aborted
            # (it takes ~2s to update a sensor config on the broker)
            time.sleep(get_setting_value('MQTT_DELAY_SEC'))
            mqtt_sensors.append(sensorConfig)

    return sensorConfig
#-------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def mqtt_create_client():
# attempt reconnections on failure, ref https://www.emqx.com/en/blog/how-to-use-mqtt-in-python
@@ -313,11 +344,11 @@ def mqtt_create_client():
RECONNECT_RATE = 2
MAX_RECONNECT_COUNT = 12
MAX_RECONNECT_DELAY = 60
mytransport = 'tcp' # or 'websockets'
mytransport = 'tcp' # or 'websockets'
def on_disconnect(mqtt_client, userdata, rc):
global mqtt_connected_to_broker
mylog('verbose', [f"[{pluginName}] Connection terminated, reason_code: {rc}"])
@@ -328,7 +359,7 @@ def mqtt_create_client():
try:
mqtt_client.reconnect()
mqtt_connected_to_broker = True # Signal connection
mqtt_connected_to_broker = True # Signal connection
mylog('verbose', [f"[{pluginName}] Reconnected successfully"])
return
except Exception as err:
@@ -338,19 +369,18 @@ def mqtt_create_client():
reconnect_delay *= RECONNECT_RATE
reconnect_delay = min(reconnect_delay, MAX_RECONNECT_DELAY)
reconnect_count += 1
mqtt_connected_to_broker = False
def on_connect(mqtt_client, userdata, flags, rc, properties):
global mqtt_connected_to_broker
# REF: Good docu on reason codes: https://www.emqx.com/en/blog/mqtt5-new-features-reason-code-and-ack
if rc == 0:
mylog('verbose', [f"[{pluginName}] Connected to broker"])
mqtt_connected_to_broker = True # Signal connection
else:
if rc == 0:
mylog('verbose', [f"[{pluginName}] Connected to broker"])
mqtt_connected_to_broker = True # Signal connection
else:
mylog('verbose', [f"[{pluginName}] Connection failed, reason_code: {rc}"])
mqtt_connected_to_broker = False
@@ -367,10 +397,12 @@ def mqtt_create_client():
version = mqtt.MQTTv5
# we now hardcode the client id into here.
# TODO: Add config ffor client id
# TODO: Add config for client id (atm, we use a fixed client id,
# so only one instance of NetAlertX can connect to the broker at any given time)
# If you intend to run multiple instances simultaneously, make sure to set unique client IDs for each instance.
mqtt_client = mqtt.Client(
client_id='netalertx',
callback_api_version = mqtt.CallbackAPIVersion.VERSION2,
callback_api_version=mqtt.CallbackAPIVersion.VERSION2,
transport=mytransport,
protocol=version)
mqtt_client.on_connect = on_connect
@@ -379,8 +411,8 @@ def mqtt_create_client():
if get_setting_value('MQTT_TLS'):
mqtt_client.tls_set()
mqtt_client.username_pw_set(username = get_setting_value('MQTT_USER'), password = get_setting_value('MQTT_PASSWORD'))
err_code = mqtt_client.connect(host = get_setting_value('MQTT_BROKER'), port = get_setting_value('MQTT_PORT'))
mqtt_client.username_pw_set(username=get_setting_value('MQTT_USER'), password=get_setting_value('MQTT_PASSWORD'))
err_code = mqtt_client.connect(host=get_setting_value('MQTT_BROKER'), port=get_setting_value('MQTT_PORT'))
if (err_code == mqtt.MQTT_ERR_SUCCESS):
# We (prematurely) set the connection state to connected
# the callback may be delayed
@@ -389,36 +421,36 @@ def mqtt_create_client():
# Mosquitto works straight away
# EMQX has a delay and does not update in loop below, so we cannot rely on it, we wait 1 sec
time.sleep(1)
mqtt_client.loop_start()
mqtt_client.loop_start()
return mqtt_client
#-------------------------------------------------------------------------------
def mqtt_start(db):
# -----------------------------------------------------------------------------
def mqtt_start(db):
global mqtt_client, mqtt_connected_to_broker
if mqtt_connected_to_broker == False:
mqtt_connected_to_broker = True
mqtt_client = mqtt_create_client()
if not mqtt_connected_to_broker:
mqtt_client = mqtt_create_client()
deviceName = get_setting_value('MQTT_DEVICE_NAME')
deviceId = get_setting_value('MQTT_DEVICE_ID')
# General stats
deviceId = get_setting_value('MQTT_DEVICE_ID')
# General stats
# Create a generic device for overall stats
if get_setting_value('MQTT_SEND_STATS') == True:
# Create a new device representing overall stats
if get_setting_value('MQTT_SEND_STATS'):
# Create a new device representing overall stats
create_generic_device(mqtt_client, deviceId, deviceName)
# Get the data
row = get_device_stats(db)
row = get_device_stats(db)
# Publish (wrap into {} and remove last ',' from above)
publish_mqtt(mqtt_client, f"{topic_root}/sensor/{deviceId}/state",
{
publish_mqtt(mqtt_client, f"{topic_root}/sensor/{deviceId}/state",
{
"online": row[0],
"down": row[1],
"all": row[2],
@@ -429,7 +461,7 @@ def mqtt_start(db):
)
# Generate device-specific MQTT messages if enabled
if get_setting_value('MQTT_SEND_DEVICES') == True:
if get_setting_value('MQTT_SEND_DEVICES'):
# Specific devices processing
@@ -438,37 +470,35 @@ def mqtt_start(db):
sec_delay = len(devices) * int(get_setting_value('MQTT_DELAY_SEC'))*5
mylog('verbose', [f"[{pluginName}] Estimated delay: ", (sec_delay), 's ', '(', round(sec_delay/60,1) , 'min)' ])
mylog('verbose', [f"[{pluginName}] Estimated delay: ", (sec_delay), 's ', '(', round(sec_delay/60, 1), 'min)'])
debug_index = 0
for device in devices:
for device in devices:
# # debug statement START 🔻
# if 'Moto' not in device["devName"]:
# mylog('none', [f"[{pluginName}] ALERT - ⚠⚠⚠⚠ DEBUGGING ⚠⚠⚠⚠ - this should not be in uncommented in production"])
# mylog('none', [f"[{pluginName}] ALERT - ⚠⚠⚠⚠ DEBUGGING ⚠⚠⚠⚠ - this should not be in uncommented in production"])
# continue
# # debug statement END 🔺
# Create devices in Home Assistant - send config messages
deviceId = 'mac_' + device["devMac"].replace(" ", "").replace(":", "_").lower()
# Normalize the string and remove unwanted characters
devDisplayName = re.sub('[^a-zA-Z0-9-_\\s]', '', normalize_string(device["devName"]))
devDisplayName = re.sub('[^a-zA-Z0-9-_\\s]', '', normalize_string(device["devName"]))
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'sensor', 'last_ip', 'ip-network', device["devMac"])
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'sensor', 'mac_address', 'folder-key-network', device["devMac"])
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'sensor', 'is_new', 'bell-alert-outline', device["devMac"])
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'sensor', 'vendor', 'cog', device["devMac"])
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'sensor', 'vendor', 'cog', device["devMac"])
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'sensor', 'first_connection', 'calendar-start', device["devMac"])
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'sensor', 'last_connection', 'calendar-end', device["devMac"])
# handle device_tracker
# IMPORTANT: shared payload - device_tracker attributes and individual sensors
devJson = {
"last_ip": device["devLastIP"],
"is_new": str(device["devIsNew"]),
"alert_down": str(device["devAlertDown"]),
"vendor": sanitize_string(device["devVendor"]),
devJson = {
"last_ip": device["devLastIP"],
"is_new": str(device["devIsNew"]),
"alert_down": str(device["devAlertDown"]),
"vendor": sanitize_string(device["devVendor"]),
"mac_address": str(device["devMac"]),
"model": devDisplayName,
"last_connection": prepTimeStamp(str(device["devLastConnection"])),
@@ -480,53 +510,48 @@ def mqtt_start(db):
"network_parent_name": next((dev["devName"] for dev in devices if dev["devMAC"] == device["devParentMAC"]), "")
}
# bulk update device sensors in home assistant
# bulk update device sensors in home assistant
publish_mqtt(mqtt_client, sensorConfig.state_topic, devJson) # REQUIRED, DON'T DELETE
# create and update is_present sensor
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'binary_sensor', 'is_present', 'wifi', device["devMac"])
publish_mqtt(mqtt_client, sensorConfig.state_topic,
{
publish_mqtt(mqtt_client, sensorConfig.state_topic,
{
"is_present": to_binary_sensor(str(device["devPresentLastScan"]))
}
)
)
# handle device_tracker
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'device_tracker', 'is_home', 'home', device["devMac"])
sensorConfig = create_sensor(mqtt_client, deviceId, devDisplayName, 'device_tracker', 'is_home', 'home', device["devMac"])
# <away|home> are only valid states
state = 'away'
if to_binary_sensor(str(device["devPresentLastScan"])) == "ON":
state = 'home'
publish_mqtt(mqtt_client, sensorConfig.state_topic, state)
publish_mqtt(mqtt_client, sensorConfig.state_topic, state)
# publish device_tracker attributes
publish_mqtt(mqtt_client, sensorConfig.json_attr_topic, devJson)
publish_mqtt(mqtt_client, sensorConfig.json_attr_topic, devJson)
#===============================================================================
# =============================================================================
# Home Assistant UTILs
#===============================================================================
# =============================================================================
def to_binary_sensor(input):
    """
    Convert a value to a Home Assistant binary sensor state.

    Args:
        input: Flag value as int/float, bool, str or bytes.

    Returns:
        str: "ON" if the value represents a set flag (numeric >= 1,
        True, "1", or bytes decoding to "1"), otherwise "OFF".
    """
    # bool is a subclass of int, so True matches the numeric branch too
    if isinstance(input, (int, float)) and input >= 1:
        return "ON"
    if isinstance(input, str) and input == "1":
        return "ON"
    if isinstance(input, bytes) and bytes_to_string(input) == "1":
        return "ON"
    return "OFF"
# -------------------------------------
# Convert to format that is interpretable by Home Assistant
@@ -537,7 +562,7 @@ def prepTimeStamp(datetime_str):
# If the parsed datetime is naive (i.e., does not contain timezone info), add UTC timezone
if parsed_datetime.tzinfo is None:
parsed_datetime = parsed_datetime.replace(tzinfo=conf.tz)
parsed_datetime = conf.tz.localize(parsed_datetime)
except ValueError:
mylog('verbose', [f"[{pluginName}] Timestamp conversion failed of string '{datetime_str}'"])
@@ -547,9 +572,7 @@ def prepTimeStamp(datetime_str):
# Convert to the required format with 'T' between date and time and ensure the timezone is included
return parsed_datetime.isoformat() # This will include the timezone offset
# -------------INIT---------------------
if __name__ == '__main__':
sys.exit(main())

View File

@@ -64,7 +64,8 @@
"name": "subnets",
"type": "setting",
"value": "SCAN_SUBNETS",
"base64": true
"base64": true,
"timeoutMultiplier": true
}
],
"settings": [
@@ -387,6 +388,34 @@
"string": "Arguments to run arps-scan with. Recommended and tested only with the setting: <br/> <code>sudo arp-scan --ignoredups --retry=6</code>."
}
]
},
{
"function": "DURATION",
"type": {
"dataType": "integer",
"elements": [
{
"elementType": "input",
"elementOptions": [{ "type": "number" }],
"transformers": []
}
]
},
"default_value": 0,
"options": [],
"localized": ["name", "description"],
"name": [
{
"language_code": "en_us",
"string": "Discovery duration"
}
],
"description": [
{
"language_code": "en_us",
"string": "If <code>DURATION</code> is not <code>0</code>, the scan runs repeatedly per interface for that many seconds. <strong>Important:</strong> <code>RUN_TIMEOUT</code> must be greater than <code>DURATION</code>, otherwise the scan will fail."
}
]
}
],
"database_column_definitions": [

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env python
import os
import time
import pathlib
import argparse
import sys
@@ -46,7 +47,7 @@ def main():
plugin_objects = Plugin_Objects(RESULT_FILE)
# Print a message to indicate that the script is starting.
mylog('verbose', ['[ARP Scan] In script '])
mylog('verbose', [f'[{pluginName}] In script '])
# holds a list of user-submitted subnets.
# mylog('verbose', ['[ARP Scan] values.userSubnets: ', values.userSubnets])
@@ -150,16 +151,28 @@ def execute_arpscan_on_interface(interface):
# Prepare command arguments
arpscan_args = get_setting_value('ARPSCAN_ARGS').split() + interface.split()
# Execute command
# Optional duration in seconds (0 = run once)
try:
# try running a subprocess safely
result = subprocess.check_output(arpscan_args, universal_newlines=True)
except subprocess.CalledProcessError as e:
# An error occurred, handle it
error_type = type(e).__name__ # Capture the error type
result = ""
scan_duration = int(get_setting_value('ARPSCAN_DURATION'))
except Exception:
scan_duration = 0 # default: single run
return result
results = []
start_time = time.time()
while True:
try:
result = subprocess.check_output(arpscan_args, universal_newlines=True)
results.append(result)
except subprocess.CalledProcessError as e:
result = ""
# stop looping if duration not set or expired
if scan_duration == 0 or (time.time() - start_time) > scan_duration:
break
time.sleep(2) # short delay between scans
# concatenate all outputs (for regex parsing)
return "\n".join(results)

View File

@@ -1,232 +0,0 @@
#!/usr/bin/env python
import os
import pathlib
import sys
import json
import subprocess
# Define the installation path and extend the system path for plugin imports
INSTALL_PATH = "/app"
sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
from plugin_helper import Plugin_Object, Plugin_Objects, decodeBase64
from plugin_utils import get_plugins_configs
from logger import mylog, Logger
from const import pluginsPath, fullDbPath, logPath
from helper import timeNowTZ, get_setting_value
from messaging.in_app import write_notification
from database import DB
from models.device_instance import DeviceInstance
import conf
from pytz import timezone
# Make sure the TIMEZONE for logging is correct
conf.tz = timezone(get_setting_value('TIMEZONE'))
# Make sure log level is initialized correctly
Logger(get_setting_value('LOG_LEVEL'))
pluginName = 'AVAHISCAN'
# Define the current path and log file paths
LOG_PATH = logPath + '/plugins'
LOG_FILE = os.path.join(LOG_PATH, f'script.{pluginName}.log')
RESULT_FILE = os.path.join(LOG_PATH, f'last_result.{pluginName}.log')
# Initialize the Plugin obj output file
plugin_objects = Plugin_Objects(RESULT_FILE)
def main():
    """
    Resolve mDNS host names for NetAlertX devices and write the results
    to the plugin result file.

    Looks up every device (all devices when REFRESH_FQDN is set,
    otherwise only unknown ones) via `avahi-resolve` and records each
    resolved name as a plugin object.

    Returns:
        int: 0 on completion (used as the process exit code).
    """
    mylog('verbose', [f'[{pluginName}] In script'])

    # Per-lookup timeout in seconds from AVAHISCAN_RUN_TIMEOUT; fall
    # back to 20 when the setting is missing, empty or not an integer.
    try:
        timeout = int(get_setting_value('AVAHISCAN_RUN_TIMEOUT'))
    except Exception:
        timeout = 20

    # Create a database connection
    db = DB()  # instance of class DB
    db.open()

    # Initialize the Plugin obj output file
    plugin_objects = Plugin_Objects(RESULT_FILE)

    # Create a DeviceInstance instance
    device_handler = DeviceInstance(db)

    # Retrieve devices
    if get_setting_value("REFRESH_FQDN"):
        devices = device_handler.getAll()
    else:
        devices = device_handler.getUnknown()

    mylog('verbose', [f'[{pluginName}] Devices count: {len(devices)}'])

    if len(devices) > 0:
        # ensure the avahi service is running before doing any lookups
        ensure_avahi_running()

    for device in devices:
        domain_name = execute_name_lookup(device['devLastIP'], timeout)

        # check if found and not a timeout ('to')
        if domain_name != '' and domain_name != 'to':
            plugin_objects.add_object(
                # "MAC", "IP", "Server", "Name"
                primaryId=device['devMac'],
                secondaryId=device['devLastIP'],
                watched1='',
                watched2=domain_name,
                watched3='',
                watched4='',
                extra='',
                foreignKey=device['devMac'])

    plugin_objects.write_result_file()

    mylog('verbose', [f'[{pluginName}] Script finished'])

    return 0
#===============================================================================
# Execute scan
#===============================================================================
def execute_name_lookup(ip, timeout):
    """
    Run `avahi-resolve -a` for the given IP and return the resolved
    mDNS host name.

    Args:
        ip (str): IP address to reverse-resolve.
        timeout (int): Maximum seconds to wait for the subprocess.

    Returns:
        str: The resolved host name, '' when nothing was resolved or
        the command failed, or 'to' when the lookup timed out
        (checked by the caller).
    """
    args = ['avahi-resolve', '-a', ip]

    output = ""

    try:
        mylog('debug', [f'[{pluginName}] DEBUG CMD :', args])

        # Run the subprocess with a forced timeout
        output = subprocess.check_output(args, universal_newlines=True, stderr=subprocess.STDOUT, timeout=timeout)

        mylog('debug', [f'[{pluginName}] DEBUG OUTPUT : {output}'])

        resolved = ''

        # Scan every output line mentioning the IP; the host name is
        # the second whitespace-separated field (last match wins).
        for line in output.splitlines():
            if ip not in line:
                continue
            fields = line.split()
            if len(fields) > 1:
                resolved = fields[1]
            else:
                mylog('verbose', [f'[{pluginName}] ⚠ ERROR - Unexpected output format: {line}'])

        mylog('debug', [f'[{pluginName}] Domain Name: {resolved}'])

        return resolved

    except subprocess.CalledProcessError as e:
        mylog('none', [f'[{pluginName}] ⚠ ERROR - {e.output}'])
    except subprocess.TimeoutExpired as e:
        # Return a distinct value that main() checks for when a timeout occurs
        # Keep logging for telemetry/debugging
        mylog('none', [f'[{pluginName}] TIMEOUT - the process forcefully terminated as timeout reached{": " + str(getattr(e, "output", "")) if getattr(e, "output", None) else ""}'])
        return 'to'

    # Only reached after a CalledProcessError (output never assigned)
    if output == "":
        mylog('none', [f'[{pluginName}] Scan: FAIL - check logs'])
    else:
        mylog('debug', [f'[{pluginName}] Scan: SUCCESS'])

    return ''
# Function to ensure Avahi and its dependencies are running
def ensure_avahi_running(attempt=1, max_retries=2):
    """
    Ensure that D-Bus is running and the Avahi daemon is started.

    Repeats the full start sequence until the daemon reports 'started'
    or *max_retries* attempts have been made (iterative form of the
    previous recursive retry logic).

    Args:
        attempt (int): Attempt number to start counting from.
        max_retries (int): Maximum number of attempts.
    """
    current = attempt

    while True:
        mylog('debug', [f'[{pluginName}] Attempt {current} - Ensuring D-Bus and Avahi daemon are running...'])

        # Check rc-status
        try:
            subprocess.run(['rc-status'], check=True)
        except subprocess.CalledProcessError as e:
            mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to check rc-status: {e.output}'])
            return

        # Create OpenRC soft level (wrap in try/except to keep error handling consistent)
        try:
            subprocess.run(['touch', '/run/openrc/softlevel'], check=True, capture_output=True, text=True)
        except subprocess.CalledProcessError as e:
            mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to create OpenRC soft level: {e.stderr if e.stderr else str(e)}'])
            return

        # Add Avahi daemon to runlevel
        try:
            subprocess.run(['rc-update', 'add', 'avahi-daemon'], check=True)
        except subprocess.CalledProcessError as e:
            mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to add Avahi to runlevel: {e.output}'])
            return

        # Start the D-Bus service
        try:
            subprocess.run(['rc-service', 'dbus', 'start'], check=True)
        except subprocess.CalledProcessError as e:
            mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to start D-Bus: {e.output}'])
            return

        # Already running? Nothing more to do.
        status = subprocess.run(['rc-service', 'avahi-daemon', 'status'], capture_output=True, text=True)
        if 'started' in status.stdout:
            mylog('debug', [f'[{pluginName}] Avahi Daemon is already running.'])
            return

        mylog('none', [f'[{pluginName}] Avahi Daemon is not running, attempting to start... (Attempt {current})'])

        # Start the Avahi daemon (a failed start is not fatal — the
        # status re-check below decides whether to retry)
        try:
            subprocess.run(['rc-service', 'avahi-daemon', 'start'], check=True)
        except subprocess.CalledProcessError as e:
            mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to start Avahi daemon: {e.output}'])

        # Check status after starting
        status = subprocess.run(['rc-service', 'avahi-daemon', 'status'], capture_output=True, text=True)
        if 'started' in status.stdout:
            mylog('debug', [f'[{pluginName}] Avahi Daemon successfully started.'])
            return

        # Retry if not started and attempts are left
        if current < max_retries:
            mylog('debug', [f'[{pluginName}] Retrying... ({current + 1}/{max_retries})'])
            current += 1
        else:
            mylog('none', [f'[{pluginName}] ⚠ ERROR - Avahi Daemon failed to start after {max_retries} attempts.'])
            return
# rc-update add avahi-daemon
# rc-service avahi-daemon status
# rc-service avahi-daemon start
# Run the scan when executed directly as a script
# (return value of main() is not used as the exit code here).
if __name__ == '__main__':
    main()

View File

@@ -1,127 +1,142 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import os
import pathlib
import sys
import json
import dns.resolver
import socket
import ipaddress
from zeroconf import Zeroconf, ServiceBrowser, ServiceInfo, InterfaceChoice, IPVersion
from zeroconf.asyncio import AsyncZeroconf
# Define the installation path and extend the system path for plugin imports
INSTALL_PATH = "/app"
sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
from plugin_helper import Plugin_Object, Plugin_Objects, decodeBase64
from plugin_utils import get_plugins_configs
from logger import mylog as write_log, Logger
from const import pluginsPath, fullDbPath, logPath
from helper import timeNowTZ, get_setting_value
from messaging.in_app import write_notification
from plugin_helper import Plugin_Objects
from logger import mylog, Logger
from const import logPath
from helper import get_setting_value
from database import DB
from models.device_instance import DeviceInstance
import conf
from pytz import timezone
# Make sure the TIMEZONE for logging is correct
conf.tz = timezone(get_setting_value('TIMEZONE'))
# Configure timezone and logging
conf.tz = timezone(get_setting_value("TIMEZONE"))
Logger(get_setting_value("LOG_LEVEL"))
# Make sure log level is initialized correctly
Logger(get_setting_value('LOG_LEVEL'))
pluginName = "AVAHISCAN"
pluginName = 'AVAHISCAN'
# Define log paths
LOG_PATH = os.path.join(logPath, "plugins")
LOG_FILE = os.path.join(LOG_PATH, f"script.{pluginName}.log")
RESULT_FILE = os.path.join(LOG_PATH, f"last_result.{pluginName}.log")
# Define the current path and log file paths
LOG_PATH = logPath + '/plugins'
LOG_FILE = os.path.join(LOG_PATH, f'script.{pluginName}.log')
RESULT_FILE = os.path.join(LOG_PATH, f'last_result.{pluginName}.log')
# Initialize the Plugin obj output file
# Initialize plugin results
plugin_objects = Plugin_Objects(RESULT_FILE)
#===============================================================================
# Execute scan using DNS resolver
#===============================================================================
def resolve_ips_with_zeroconf(ips, timeout):
# =============================================================================
# Helper functions
# =============================================================================
def resolve_mdns_name(ip: str, timeout: int = 5) -> str:
"""
Uses DNS resolver to actively query PTR records for reverse DNS lookups on given IP addresses.
Attempts to resolve a hostname via multicast DNS using the Zeroconf library.
Args:
ip (str): The IP address to resolve.
timeout (int): Timeout in seconds for mDNS resolution.
Returns:
str: Resolved hostname (or empty string if not found).
"""
resolved_hosts = {}
for ip in ips:
mylog("debug", [f"[{pluginName}] Resolving mDNS for {ip}"])
# Convert string IP to an address object
try:
addr = ipaddress.ip_address(ip)
except ValueError:
mylog("none", [f"[{pluginName}] Invalid IP: {ip}"])
return ""
# Reverse lookup name, e.g. "121.1.168.192.in-addr.arpa"
if addr.version == 4:
rev_name = ipaddress.ip_address(ip).reverse_pointer
else:
rev_name = ipaddress.ip_address(ip).reverse_pointer
try:
zeroconf = Zeroconf()
hostname = socket.getnameinfo((ip, 0), socket.NI_NAMEREQD)[0]
zeroconf.close()
if hostname and hostname != ip:
mylog("debug", [f"[{pluginName}] Found mDNS name: {hostname}"])
return hostname
except Exception as e:
mylog("debug", [f"[{pluginName}] Zeroconf lookup failed for {ip}: {e}"])
finally:
try:
# Construct the reverse IP for PTR query (e.g., 8.1.168.192.in-addr.arpa.)
reverse_ip = '.'.join(reversed(ip.split('.'))) + '.in-addr.arpa.'
# Query PTR record with timeout; respect the passed timeout per query
answers = dns.resolver.resolve(reverse_ip, 'PTR', lifetime=max(1, timeout))
if answers:
# For PTR records, the hostname is in the target field
hostname = str(answers[0].target).rstrip('.')
resolved_hosts[ip] = hostname
write_log('verbose', [f'[{pluginName}] Resolved {ip} -> {hostname}'])
except Exception as e:
write_log('verbose', [f'[{pluginName}] Error resolving {ip}: {e}'])
write_log('verbose', [f'[{pluginName}] Active resolution finished. Found {len(resolved_hosts)} hosts.'])
return resolved_hosts
zeroconf.close()
except Exception:
pass
return ""
# =============================================================================
# Main logic
# =============================================================================
def main():
write_log('verbose', [f'[{pluginName}] In script'])
mylog("verbose", [f"[{pluginName}] Script started"])
# Get timeout from settings, default to 20s, and subtract a buffer
try:
timeout_setting = int(get_setting_value('AVAHISCAN_RUN_TIMEOUT'))
except (ValueError, TypeError):
timeout_setting = 30 # Default to 30s as a safe value
timeout = get_setting_value("AVAHISCAN_RUN_TIMEOUT")
use_mock = "--mockdata" in sys.argv
# Use a timeout 5 seconds less than the plugin's configured timeout to allow for cleanup
scan_duration = max(5, timeout_setting - 5)
db = DB()
db.open()
plugin_objects = Plugin_Objects(RESULT_FILE)
device_handler = DeviceInstance(db)
# Retrieve devices based on REFRESH_FQDN setting to match original script's logic
if get_setting_value("REFRESH_FQDN"):
devices = device_handler.getAll()
write_log('verbose', [f'[{pluginName}] REFRESH_FQDN is true, getting all devices.'])
if use_mock:
mylog("verbose", [f"[{pluginName}] Running in MOCK mode"])
devices = [
{"devMac": "00:11:22:33:44:55", "devLastIP": "192.168.1.121"},
{"devMac": "00:11:22:33:44:56", "devLastIP": "192.168.1.9"},
{"devMac": "00:11:22:33:44:57", "devLastIP": "192.168.1.82"},
]
else:
devices = device_handler.getUnknown()
write_log('verbose', [f'[{pluginName}] REFRESH_FQDN is false, getting devices with unknown hostnames.'])
db = DB()
db.open()
device_handler = DeviceInstance(db)
devices = (
device_handler.getAll()
if get_setting_value("REFRESH_FQDN")
else device_handler.getUnknown()
)
# db.close() # This was causing the crash, DB object doesn't have a close method.
mylog("verbose", [f"[{pluginName}] Devices count: {len(devices)}"])
write_log('verbose', [f'[{pluginName}] Devices to scan: {len(devices)}'])
for device in devices:
ip = device["devLastIP"]
mac = device["devMac"]
if len(devices) > 0:
ips_to_find = [device['devLastIP'] for device in devices if device['devLastIP']]
if ips_to_find:
write_log('verbose', [f'[{pluginName}] IPs to be scanned: {ips_to_find}'])
resolved_hosts = resolve_ips_with_zeroconf(ips_to_find, scan_duration)
hostname = resolve_mdns_name(ip, timeout)
for device in devices:
domain_name = resolved_hosts.get(device['devLastIP'])
if domain_name:
plugin_objects.add_object(
primaryId = device['devMac'],
secondaryId = device['devLastIP'],
watched1 = '',
watched2 = domain_name,
watched3 = '',
watched4 = '',
extra = '',
foreignKey = device['devMac']
)
else:
write_log('verbose', [f'[{pluginName}] No devices with IP addresses to scan.'])
if hostname:
plugin_objects.add_object(
primaryId=mac,
secondaryId=ip,
watched1="",
watched2=hostname,
watched3="",
watched4="",
extra="",
foreignKey=mac,
)
plugin_objects.write_result_file()
write_log('verbose', [f'[{pluginName}] Script finished'])
mylog("verbose", [f"[{pluginName}] Script finished"])
return 0
if __name__ == '__main__':
# =============================================================================
# Entrypoint
# =============================================================================
if __name__ == "__main__":
main()

View File

@@ -2,6 +2,8 @@
Plugin for pinging existing devices via the [ping](https://linux.die.net/man/8/ping) network utility. The devices have to be accessible from the container. You can use this plugin with other supplementing plugins as described in the [subnets docs](https://github.com/jokob-sk/NetAlertX/blob/main/docs/SUBNETS.md).
This plugin can be used if you are getting false offline positives on specific devices. See the [Fix offline detection guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/FIX_OFFLINE_DETECTION.md) for details.
### Usage
- Check the Settings page for details.

View File

@@ -25,3 +25,11 @@ To assign a meaningful device name, the plugin resolves it in the following orde
- **Comment**: The `comment` field in the MikroTik router's DHCP lease configuration. This is useful for naming static leases of known devices.
- **Hostname**: The hostname provided by the device during DHCP negotiation.
- **"(unknown)"**: as the fallback name, allowing other plugins to resolve the device name later.
### Other info
- Version: 1.0
- Author: [lookflying](https://github.com/lookflying)
- Maintainer(s): [elraro](https://github.com/elraro), [kamil-olszewski-devskiller](https://github.com/kamil-olszewski-devskiller)
- Release Date: 12-Sep-2024

View File

@@ -178,7 +178,7 @@ def main():
if file_name != 'last_result.log':
mylog('verbose', [f'[{pluginName}] Processing: "{file_name}"'])
# make sure the file has teh correct name (e.g last_result.encoded.Node_1.1.log) to skip any otehr plugin files
# make sure the file has the correct name (e.g last_result.encoded.Node_1.1.log) to skip any other plugin files
if len(file_name.split('.')) > 2:
# Store e.g. Node_1 from last_result.encoded.Node_1.1.log
syncHubNodeName = file_name.split('.')[1]
@@ -210,9 +210,10 @@ def main():
existing_mac_addresses = set(row[0] for row in cursor.fetchall())
# insert devices into the lats_result.log to manage state
# insert devices into the last_result.log and thus CurrentScan table to manage state
for device in device_data:
if device['devPresentLastScan'] == 1:
# only insert devices that were online and skip the root node to prevent IP flipping on the hub
if device['devPresentLastScan'] == 1 and str(device['devMac']).lower() != 'internet':
plugin_objects.add_object(
primaryId = device['devMac'],
secondaryId = device['devLastIP'],

View File

@@ -213,6 +213,33 @@
}
]
},
{
"function": "DEFAULT_PAGE_SIZE",
"type": {
"dataType": "integer",
"elements": [
{
"elementType": "input",
"elementOptions": [{ "type": "number" }],
"transformers": []
}
]
},
"maxLength": 50,
"default_value": 20,
"options": [],
"localized": [],
"name": [
{
"string": "Default page size"
}
],
"description": [
{
"string": "Default number of items shown in tables per page, for example in teh Devices lists."
}
]
},
{
"function": "DEV_SECTIONS",
"type": {

View File

@@ -112,7 +112,12 @@ def get_device_data(site, api):
mylog('verbose', [f'[{pluginName}] Site: {site_name} clients: {json.dumps(clients_resp, indent=2)}'])
# Build a lookup for devices by their 'id' to find parent MAC easily
device_id_to_mac = {dev['id']: dev.get('macAddress', '') for dev in unifi_devices}
device_id_to_mac = {}
for dev in unifi_devices:
if "id" not in dev:
mylog("verbose", [f"[{pluginName}] Skipping device without 'id': {json.dumps(dev)}"])
continue
device_id_to_mac[dev["id"]] = dev.get("macAddress", "")
# Helper to resolve uplinkDeviceId to parent MAC, or "Internet" if no uplink
def resolve_parent_mac(uplink_id):