Devcontainer operational, services all down

This commit is contained in:
Adam Outler
2025-09-25 23:03:55 +00:00
parent dfcc375fba
commit c228d45cea
7 changed files with 328 additions and 308 deletions

View File

@@ -1,112 +0,0 @@
# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-dockerfile.sh
# ---/Dockerfile---
FROM alpine:3.22 AS builder
ARG INSTALL_DIR=/app
ENV PYTHONUNBUFFERED=1
# Install build dependencies.
# NOTE: Alpine's python3 package provides only the `python3` binary (no
# `python` symlink), so the venv must be created with `python3` — the
# original `python -m venv` fails with "python: not found".
RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git \
    && python3 -m venv /opt/venv
# Enable venv
ENV PATH="/opt/venv/bin:$PATH"
# --no-cache-dir keeps pip's download cache out of the layer (smaller image).
RUN pip install --no-cache-dir openwrt-luci-rpc asusrouter asyncio aiohttp graphene flask flask-cors unifi-sm-api tplink-omada-client wakeonlan pycryptodome requests paho-mqtt scapy cron-converter pytz json2table dhcp-leases pyunifi speedtest-cli chardet python-nmap dnspython librouteros yattag git+https://github.com/foreign-sub/aiofreepybox.git
# Append Iliadbox certificate to aiofreepybox
# (no command follows in this generated file — presumably handled elsewhere; TODO confirm)
# second stage
FROM alpine:3.22 AS runner
ARG INSTALL_DIR=/app
# Reuse the pre-built virtualenv from the builder stage; usermod/groupmod are
# copied so the runtime image does not need the full `shadow` package.
COPY --from=builder /opt/venv /opt/venv
COPY --from=builder /usr/sbin/usermod /usr/sbin/groupmod /usr/sbin/
# Enable venv
ENV PATH="/opt/venv/bin:$PATH"
# default port and listen address
ENV PORT=20211 LISTEN_ADDR=0.0.0.0
# needed for s6-overlay
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
# ❗ IMPORTANT - if you modify this file modify the /install/install_dependecies.sh file as well ❗
RUN apk update --no-cache \
    && apk add --no-cache bash libbsd zip lsblk gettext-envsubst sudo mtr tzdata s6-overlay \
    && apk add --no-cache curl arp-scan iproute2 iproute2-ss nmap nmap-scripts traceroute nbtscan avahi avahi-tools openrc dbus net-tools net-snmp-tools bind-tools awake ca-certificates \
    && apk add --no-cache sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session \
    && apk add --no-cache python3 nginx \
    && ln -s /usr/bin/awake /usr/bin/wakeonlan \
    && rm -f /etc/nginx/http.d/default.conf
# Add crontab file
COPY --chmod=600 --chown=root:root install/crontab /etc/crontabs/root
# Start all required services
# Shell-form CMD so ${LISTEN_ADDR}/${PORT} expand; on Linux curl treats
# 0.0.0.0 as the local host, so the default works in-container.
HEALTHCHECK --interval=30s --timeout=5s --start-period=15s --retries=2 \
  CMD curl -sf -o /dev/null ${LISTEN_ADDR}:${PORT}/php/server/query_json.php?file=app_state.json
ENTRYPOINT ["/init"]
# ---/resources/devcontainer-Dockerfile---
# Devcontainer build stage (do not build directly)
# This file is combined with the root /Dockerfile by
# .devcontainer/scripts/generate-dockerfile.sh
# The generator appends this stage to produce .devcontainer/Dockerfile.
# Prefer to place dev-only setup here; use setup.sh only for runtime fixes.
FROM runner AS devcontainer
ENV INSTALL_DIR=/app
ENV PYTHONPATH=/workspaces/NetAlertX/test:/workspaces/NetAlertX/server:/app:/app/server:/opt/venv/lib/python3.12/site-packages
# Install common tools, create user, and set up sudo.
# NOTE: Alpine packages pytest as py3-pytest / py3-pytest-cov — the PyPI
# names `pytest`/`pytest-cov` are not valid apk package names and would
# abort the build. (The venv copy is installed separately below via pip.)
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf py3-pytest py3-pytest-cov && \
    adduser -D -s /bin/sh netalertx && \
    addgroup netalertx nginx && \
    addgroup netalertx www-data && \
    echo "netalertx ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-netalertx && \
    chmod 440 /etc/sudoers.d/90-netalertx
# Install debugpy in the virtualenv if present, otherwise into system python3
RUN /bin/sh -c '(/opt/venv/bin/python3 -m pip install --no-cache-dir debugpy) || (python3 -m pip install --no-cache-dir debugpy) || true'
# setup nginx
COPY .devcontainer/resources/netalertx-devcontainer.conf /etc/nginx/http.d/netalert-frontend.conf
# Give the dev user ownership of nginx/php runtime dirs so services can be
# (re)started without root inside the container; logs are reset to empty files.
RUN set -e; \
    chown netalertx:nginx /etc/nginx/http.d/netalert-frontend.conf; \
    install -d -o netalertx -g www-data -m 775 /app; \
    install -d -o netalertx -g www-data -m 755 /run/nginx; \
    install -d -o netalertx -g www-data -m 755 /var/lib/nginx/logs; \
    rm -f /var/lib/nginx/logs/* || true; \
    for f in error access; do : > /var/lib/nginx/logs/$f.log; done; \
    install -d -o netalertx -g www-data -m 777 /run/php; \
    install -d -o netalertx -g www-data -m 775 /var/log/php; \
    chown -R netalertx:www-data /etc/nginx/http.d; \
    chmod -R 775 /etc/nginx/http.d; \
    chown -R netalertx:www-data /var/lib/nginx; \
    chmod -R 755 /var/lib/nginx && \
    chown -R netalertx:www-data /var/log/nginx/ && \
    sed -i '/^user /d' /etc/nginx/nginx.conf; \
    sed -i 's|^error_log .*|error_log /dev/stderr warn;|' /etc/nginx/nginx.conf; \
    sed -i 's|^access_log .*|access_log /dev/stdout main;|' /etc/nginx/nginx.conf; \
    sed -i 's|error_log .*|error_log /dev/stderr warn;|g' /etc/nginx/http.d/*.conf 2>/dev/null || true; \
    sed -i 's|access_log .*|access_log /dev/stdout main;|g' /etc/nginx/http.d/*.conf 2>/dev/null || true; \
    mkdir -p /run/openrc; \
    chown netalertx:nginx /run/openrc/; \
    rm -Rf /run/openrc/*;
# setup pytest in the venv (build RUNs already execute as root — no sudo needed; DL3004)
RUN /opt/venv/bin/python -m pip install --no-cache-dir -U pytest pytest-cov
WORKDIR /workspaces/NetAlertX
# Keep the container alive; VS Code attaches and runs services via setup.sh.
ENTRYPOINT ["/bin/sh","-c","sleep infinity"]

View File

@@ -2,9 +2,9 @@
"name": "NetAlertX DevContainer",
"remoteUser": "netalertx",
"build": {
"dockerfile": "Dockerfile",
"dockerfile": "../Dockerfile",
"context": "..",
"target": "devcontainer"
"target": "runner"
},
"workspaceFolder": "/workspaces/NetAlertX",
"runArgs": [
@@ -20,7 +20,7 @@
"postStartCommand": "${containerWorkspaceFolder}/.devcontainer/scripts/setup.sh",
"postStartCommand": "sudo ${containerWorkspaceFolder}/.devcontainer/scripts/setup.sh",
"customizations": {
"vscode": {

View File

@@ -29,7 +29,7 @@ export TZ=Europe/Paris
export PORT=20211
export SOURCE_DIR="/workspaces/NetAlertX"
apk add git
main() {
echo "=== NetAlertX Development Container Setup ==="
@@ -91,9 +91,7 @@ configure_source() {
echo " -> Copying static files to ${INSTALL_DIR}"
cp -R ${SOURCE_DIR}/CODE_OF_CONDUCT.md ${INSTALL_DIR}/
cp -R ${SOURCE_DIR}/dockerfiles ${INSTALL_DIR}/dockerfiles
sudo cp -na "${INSTALL_DIR}/back/${CONF_FILE}" "${INSTALL_DIR}/config/${CONF_FILE}"
sudo cp -na "${INSTALL_DIR}/back/${DB_FILE}" "${FULL_FILEDB_PATH}"
cp -R ${SOURCE_DIR}/install/ /
if [ -e "${INSTALL_DIR}/api/user_notifications.json" ]; then
echo " -> Removing existing user_notifications.json"
sudo rm "${INSTALL_DIR}"/api/user_notifications.json

2
.vscode/tasks.json vendored
View File

@@ -27,7 +27,7 @@
{
"label": "Re-Run Startup Script",
"type": "shell",
"command": "${workspaceFolder:NetAlertX}/.devcontainer/scripts/setup.sh",
"command": "sudo ${workspaceFolder:NetAlertX}/.devcontainer/scripts/setup.sh",
"presentation": {
"echo": true,
"reveal": "always",

View File

@@ -19,7 +19,7 @@ COPY db ${INSTALL_DIR}/db
COPY front ${INSTALL_DIR}/front
COPY server ${INSTALL_DIR}/server
RUN pip install openwrt-luci-rpc asusrouter asyncio aiohttp graphene flask flask-cors unifi-sm-api tplink-omada-client wakeonlan pycryptodome requests paho-mqtt scapy cron-converter pytz json2table dhcp-leases pyunifi speedtest-cli chardet python-nmap dnspython librouteros yattag git+https://github.com/foreign-sub/aiofreepybox.git
RUN pip install openwrt-luci-rpc asusrouter asyncio aiohttp graphene flask flask-cors unifi-sm-api tplink-omada-client wakeonlan pycryptodome requests paho-mqtt scapy cron-converter pytz json2table dhcp-leases pyunifi speedtest-cli chardet python-nmap dnspython librouteros yattag zeroconf git+https://github.com/foreign-sub/aiofreepybox.git
RUN bash -c "find ${INSTALL_DIR} -type d -exec chmod 750 {} \;" \
&& bash -c "find ${INSTALL_DIR} -type f -exec chmod 640 {} \;" \
@@ -46,7 +46,7 @@ ENV PATH="/opt/venv/bin:$PATH"
ENV PORT=20211 LISTEN_ADDR=0.0.0.0 GRAPHQL_PORT=20212
# NetAlertX app directories
ENV NETALERTX_APP=${INSTALL_DIR}
ENV NETALERTX_APP=/app
ENV NETALERTX_CONFIG=${NETALERTX_APP}/config
ENV NETALERTX_FRONT=${NETALERTX_APP}/front
ENV NETALERTX_SERVER=${NETALERTX_APP}/server
@@ -56,6 +56,7 @@ ENV NETALERTX_BACK=${NETALERTX_APP}/back
ENV NETALERTX_LOG=${NETALERTX_APP}/log
ENV NETALERTX_PLUGINS_LOG=${NETALERTX_LOG}/plugins
ENV NETALERTX_NGINIX_CONFIG=${NETALERTX_APP}/services/nginx
ENV NETALERTX_SERVICES=${NETALERTX_APP}/services
# NetAlertX log files
ENV LOG_IP_CHANGES=${NETALERTX_LOG}/IP_changes.log
@@ -77,16 +78,17 @@ ENV NETALERTX_CONFIG_FILE=${NETALERTX_CONFIG}/app.conf
ENV NETALERTX_DB_FILE=${NETALERTX_DB}/app.db
ENV PHP_FPM_CONFIG_FILE=/etc/php83/php-fpm.conf
ENV PHP_WWW_CONF_FILE=/etc/php83/php-fpm.d/www.conf
ENV SYSTEM_SERVICES=/services
RUN apk update --no-cache \
&& apk add --no-cache bash libbsd zip lsblk gettext-envsubst sudo mtr tzdata \
&& apk add --no-cache curl arp-scan iproute2 iproute2-ss nmap nmap-scripts traceroute nbtscan avahi avahi-tools openrc dbus net-tools net-snmp-tools bind-tools awake ca-certificates \
&& apk add --no-cache curl arp-scan iproute2 iproute2-ss nmap nmap-scripts traceroute nbtscan openrc dbus net-tools net-snmp-tools bind-tools awake ca-certificates \
&& apk add --no-cache sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session \
&& apk add --no-cache python3 nginx
COPY --from=builder --chown=readonly:readonly ${INSTALL_DIR}/ ${INSTALL_DIR}/
COPY --from=builder --chown=netalertx:netalertx ${INSTALL_DIR}/ ${INSTALL_DIR}/
# set this properly to handle recursive ownership changes
RUN ln -s /usr/bin/awake /usr/bin/wakeonlan \
&& rm -f /etc/nginx/http.d/default.conf
@@ -109,11 +111,10 @@ RUN touch ${LOG_APP} \
&& touch ${LOG_REPORT_OUTPUT_TXT} \
&& touch ${LOG_REPORT_OUTPUT_HTML} \
&& touch ${LOG_REPORT_OUTPUT_JSON} \
&& touch ${NETALERTX_API}/user_notifications.json \
&& chown -R netalertx:netalertx ${NETALERTX_LOG} ${NETALERTX_API}
&& touch ${NETALERTX_API}/user_notifications.json
# Setup services
RUN mkdir -p /services
RUN mkdir -p ${SYSTEM_SERVICES}
@@ -128,27 +129,36 @@ RUN chmod +x /build/*.sh \
# Create buildtimestamp.txt
RUN chmod +x /services/*.sh /entrypoint.sh
RUN chmod +x ${SYSTEM_SERVICES}/*.sh /entrypoint.sh
# Setup config and db files
RUN cp ${NETALERTX_BACK}/app.conf ${NETALERTX_CONFIG_FILE} && \
cp ${NETALERTX_BACK}/app.db ${NETALERTX_DB_FILE}
# set netalertx to allow sudoers for any command, no password
RUN echo "netalertx ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN date +%s > ${INSTALL_DIR}/front/buildtimestamp.txt
# Ensure proper permissions
# Skip certain system directories to avoid permission issues
# Also skip log directories to avoid changing log file ownerships
RUN find / -path /proc -prune -o -path /sys -prune -o -path /dev -prune -o -path /run -prune -o -path /var/log -prune -o -path /tmp -prune -o -group 0 -o -user 0 -exec chown readonly:readonly {} +
RUN chmod 555 /app
RUN chown -R readonly:readonly ${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER} ${NETALERTX_APP}/services
RUN chmod -R 004 ${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER} ${NETALERTX_APP}/services
RUN chown -R netalertx:netalertx ${INSTALL_DIR}/config ${INSTALL_DIR}/db ${INSTALL_DIR}/log ${INSTALL_DIR}/api
RUN find ${NETALERTX_APP} -type d -exec chmod 555 {} \;
RUN cp ${NETALERTX_BACK}/app.conf ${NETALERTX_CONFIG}/app.conf && \
cp ${NETALERTX_BACK}/app.db ${NETALERTX_DB}/app.db && \
chmod 600 ${NETALERTX_CONFIG}/app.conf && \
chmod 600 ${NETALERTX_DB}/app.db
RUN chmod -R 700 ${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_LOG} ${NETALERTX_API}
RUN find ${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_LOG} ${NETALERTX_API} -type f -exec chmod 600 {} \;
RUN chmod -R 555 /services
FROM runner AS hardened
# remove netalertx from sudoers
RUN sed -i '/netalertx ALL=(ALL) NOPASSWD: ALL/d
RUN chown -R readonly:readonly ${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER} ${SYSTEM_SERVICES}
RUN chmod -R 004 ${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER}
RUN chmod 005 ${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER}
RUN chmod -R 005 ${SYSTEM_SERVICES}
RUN chown -R netalertx:netalertx ${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_DB} ${NETALERTX_API} ${NETALERTX_LOG} ${NETALERTX_CONFIG_FILE} ${NETALERTX_DB_FILE} && \
chmod -R 600 ${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_LOG} ${NETALERTX_API} && \
chmod 700 ${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_LOG} ${NETALERTX_API}
RUN chown readonly:readonly /
RUN rm /usr/bin/sudo
RUN touch /var/log/nginx/access.log /var/log/nginx/error.log
@@ -156,18 +166,14 @@ RUN chown -R netalertx:netalertx /var/log/nginx /run/
RUN chown -R netalertx:netalertx /var/lib/nginx
RUN echo -ne '#!/bin/bash\nexit 0\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
RUN find / -path /proc -prune -o -path /sys -prune -o -path /dev -prune -o -path /run -prune -o -path /var/log -prune -o -path /tmp -prune -o -group 0 -o -user 0 -exec chown readonly:readonly {} +
USER netalertx
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD /usr/local/bin/healthcheck.sh
CMD /usr/local/bin/healthcheck.sh
CMD /entrypoint.sh
# Assistant, I commented this out while bringing up permissions. this way I can login by specifying the command.
# ok? got it? We're using CMD now instead of ENTRYPOINT so we can override it if needed. Stop specifying the entrypoint.
#
# ENTRYPOINT ["/entrypoint.sh"]
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -0,0 +1,215 @@
#!/usr/bin/env python
"""AVAHISCAN plugin preamble: wires up NetAlertX plugin imports, logging,
timezone, and the plugin result-file writer used by main()."""
import os
import pathlib
import sys
import json
import sqlite3
import subprocess
# Define the installation path and extend the system path for plugin imports
INSTALL_PATH = "/app"
sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
from plugin_helper import Plugin_Object, Plugin_Objects, decodeBase64
from plugin_utils import get_plugins_configs
from logger import mylog, Logger
from const import pluginsPath, fullDbPath, logPath
from helper import timeNowTZ, get_setting_value
from messaging.in_app import write_notification
from database import DB
from models.device_instance import DeviceInstance
import conf
from pytz import timezone
# Make sure the TIMEZONE for logging is correct
conf.tz = timezone(get_setting_value('TIMEZONE'))
# Make sure log level is initialized correctly
Logger(get_setting_value('LOG_LEVEL'))
pluginName = 'AVAHISCAN'
# Define the current path and log file paths
LOG_PATH = logPath + '/plugins'
LOG_FILE = os.path.join(LOG_PATH, f'script.{pluginName}.log')
# Result file the core server polls for this plugin's output
RESULT_FILE = os.path.join(LOG_PATH, f'last_result.{pluginName}.log')
# Initialize the Plugin obj output file
# (main() creates its own Plugin_Objects; this module-level one is unused by it)
plugin_objects = Plugin_Objects(RESULT_FILE)
def main():
    """Resolve hostnames for devices via avahi and write the plugin result file.

    Reads devices from the NetAlertX DB (all devices when REFRESH_FQDN is
    set, otherwise only those with unknown names), runs an mDNS reverse
    lookup per device, and records each successful resolution.
    Returns 0 on completion.
    """
    mylog('verbose', [f'[{pluginName}] In script'])

    # timeout = get_setting_value('AVAHI_RUN_TIMEOUT')
    lookup_timeout = 20  # seconds per avahi-resolve invocation

    # Open a database connection and build the accessors we need.
    database = DB()  # instance of class DB
    database.open()
    results = Plugin_Objects(RESULT_FILE)
    device_handler = DeviceInstance(database)

    # Device selection mirrors the REFRESH_FQDN setting.
    devices = (device_handler.getAll()
               if get_setting_value("REFRESH_FQDN")
               else device_handler.getUnknown())

    mylog('verbose', [f'[{pluginName}] Devices count: {len(devices)}'])

    # Only bother starting avahi/dbus when there is work to do.
    if len(devices) > 0:
        ensure_avahi_running()

    for dev in devices:
        resolved = execute_name_lookup(dev['devLastIP'], lookup_timeout)
        # Skip empty results and the 'to' (timeout) sentinel.
        if resolved not in ('', 'to'):
            results.add_object(
                # "MAC", "IP", "Server", "Name"
                primaryId = dev['devMac'],
                secondaryId = dev['devLastIP'],
                watched1 = '', # You can add any relevant info here if needed
                watched2 = resolved,
                watched3 = '',
                watched4 = '',
                extra = '',
                foreignKey = dev['devMac'])

    results.write_result_file()
    mylog('verbose', [f'[{pluginName}] Script finished'])
    return 0
#===============================================================================
# Execute scan
#===============================================================================
def execute_name_lookup(ip, timeout):
    """
    Execute the avahi-resolve command on the IP.

    Returns the resolved domain name on success, or '' when the lookup
    fails, times out, or produces no usable line for this IP.
    """
    cmd = ['avahi-resolve', '-a', ip]
    raw = ""
    try:
        mylog('debug', [f'[{pluginName}] DEBUG CMD :', cmd])
        # Run the subprocess with a forced timeout
        raw = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT, timeout=timeout)
        mylog('debug', [f'[{pluginName}] DEBUG OUTPUT : {raw}'])

        resolved = ''
        # Scan each output line; the one containing our IP carries the name.
        for entry in raw.splitlines():
            if ip not in entry:
                continue
            tokens = entry.split()
            if len(tokens) > 1:
                resolved = tokens[1]  # Second part is the resolved domain name
            else:
                mylog('verbose', [f'[{pluginName}] ⚠ ERROR - Unexpected output format: {entry}'])
        mylog('debug', [f'[{pluginName}] Domain Name: {resolved}'])
        return resolved
    except subprocess.CalledProcessError as e:
        mylog('none', [f'[{pluginName}] ⚠ ERROR - {e.output}'])
    except subprocess.TimeoutExpired:
        mylog('none', [f'[{pluginName}] TIMEOUT - the process forcefully terminated as timeout reached'])

    # We only reach here on failure; classify it for the log.
    if raw == "":
        mylog('none', [f'[{pluginName}] Scan: FAIL - check logs'])
    else:
        mylog('debug', [f'[{pluginName}] Scan: SUCCESS'])
    return ''
# Function to ensure Avahi and its dependencies are running
def ensure_avahi_running(attempt=1, max_retries=2):
    """
    Ensure that D-Bus is running and the Avahi daemon is started, with recursive retry logic.

    Steps (order matters for OpenRC): check rc-status, create the softlevel
    marker, register avahi-daemon in the runlevel, start dbus, then start
    avahi-daemon and verify. On failure, recurses up to max_retries attempts.
    Logs errors via mylog and returns None in all cases.
    """
    mylog('debug', [f'[{pluginName}] Attempt {attempt} - Ensuring D-Bus and Avahi daemon are running...'])
    # Check rc-status — bail out early if OpenRC itself is unusable
    try:
        subprocess.run(['rc-status'], check=True)
    except subprocess.CalledProcessError as e:
        mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to check rc-status: {e.output}'])
        return
    # Create OpenRC soft level (required before rc-service can start anything)
    subprocess.run(['touch', '/run/openrc/softlevel'], check=True)
    # Add Avahi daemon to runlevel
    try:
        subprocess.run(['rc-update', 'add', 'avahi-daemon'], check=True)
    except subprocess.CalledProcessError as e:
        mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to add Avahi to runlevel: {e.output}'])
        return
    # Start the D-Bus service (Avahi depends on it)
    try:
        subprocess.run(['rc-service', 'dbus', 'start'], check=True)
    except subprocess.CalledProcessError as e:
        mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to start D-Bus: {e.output}'])
        return
    # Check Avahi status — nothing to do if it is already up
    status_output = subprocess.run(['rc-service', 'avahi-daemon', 'status'], capture_output=True, text=True)
    if 'started' in status_output.stdout:
        mylog('debug', [f'[{pluginName}] Avahi Daemon is already running.'])
        return
    mylog('none', [f'[{pluginName}] Avahi Daemon is not running, attempting to start... (Attempt {attempt})'])
    # Start the Avahi daemon (failure here is not fatal; status re-check below decides)
    try:
        subprocess.run(['rc-service', 'avahi-daemon', 'start'], check=True)
    except subprocess.CalledProcessError as e:
        mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to start Avahi daemon: {e.output}'])
    # Check status after starting
    status_output = subprocess.run(['rc-service', 'avahi-daemon', 'status'], capture_output=True, text=True)
    if 'started' in status_output.stdout:
        mylog('debug', [f'[{pluginName}] Avahi Daemon successfully started.'])
        return
    # Retry if not started and attempts are left
    if attempt < max_retries:
        mylog('debug', [f'[{pluginName}] Retrying... ({attempt + 1}/{max_retries})'])
        ensure_avahi_running(attempt + 1, max_retries)
    else:
        mylog('none', [f'[{pluginName}] ⚠ ERROR - Avahi Daemon failed to start after {max_retries} attempts.'])
# Manual equivalents of ensure_avahi_running(), for debugging in a shell:
# rc-update add avahi-daemon
# rc-service avahi-daemon status
# rc-service avahi-daemon start
if __name__ == '__main__':
    main()

View File

@@ -4,8 +4,8 @@ import os
import pathlib
import sys
import json
import sqlite3
import subprocess
import time
import dns.resolver
# Define the installation path and extend the system path for plugin imports
INSTALL_PATH = "/app"
@@ -13,7 +13,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
from plugin_helper import Plugin_Object, Plugin_Objects, decodeBase64
from plugin_utils import get_plugins_configs
from logger import mylog, Logger
from logger import mylog as write_log, Logger
from const import pluginsPath, fullDbPath, logPath
from helper import timeNowTZ, get_setting_value
from messaging.in_app import write_notification
@@ -38,178 +38,91 @@ RESULT_FILE = os.path.join(LOG_PATH, f'last_result.{pluginName}.log')
# Initialize the Plugin obj output file
plugin_objects = Plugin_Objects(RESULT_FILE)
#===============================================================================
# Execute scan using DNS resolver
#===============================================================================
def resolve_ips_with_zeroconf(ips, timeout):
"""
Uses DNS resolver to actively query PTR records for reverse DNS lookups on given IP addresses.
"""
resolved_hosts = {}
for ip in ips:
try:
# Construct the reverse IP for PTR query (e.g., 8.1.168.192.in-addr.arpa.)
reverse_ip = '.'.join(reversed(ip.split('.'))) + '.in-addr.arpa.'
# Query PTR record with timeout
answers = dns.resolver.resolve(reverse_ip, 'PTR', lifetime=max(1, min(timeout, 5)))
if answers:
# For PTR records, the hostname is in the target field
hostname = str(answers[0].target).rstrip('.')
resolved_hosts[ip] = hostname
write_log('verbose', [f'[{pluginName}] Resolved {ip} -> {hostname}'])
except Exception as e:
write_log('verbose', [f'[{pluginName}] Error resolving {ip}: {e}'])
write_log('verbose', [f'[{pluginName}] Active resolution finished. Found {len(resolved_hosts)} hosts.'])
return resolved_hosts
def main():
mylog('verbose', [f'[{pluginName}] In script'])
write_log('verbose', [f'[{pluginName}] In script'])
# timeout = get_setting_value('AVAHI_RUN_TIMEOUT')
timeout = 20
# Get timeout from settings, default to 20s, and subtract a buffer
try:
timeout_setting = int(get_setting_value('AVAHISCAN_RUN_TIMEOUT'))
except (ValueError, TypeError):
timeout_setting = 30 # Default to 30s as a safe value
# Create a database connection
db = DB() # instance of class DB
# Use a timeout 5 seconds less than the plugin's configured timeout to allow for cleanup
scan_duration = max(5, timeout_setting - 5)
db = DB()
db.open()
# Initialize the Plugin obj output file
plugin_objects = Plugin_Objects(RESULT_FILE)
# Create a DeviceInstance instance
device_handler = DeviceInstance(db)
# Retrieve devices
# Retrieve devices based on REFRESH_FQDN setting to match original script's logic
if get_setting_value("REFRESH_FQDN"):
devices = device_handler.getAll()
write_log('verbose', [f'[{pluginName}] REFRESH_FQDN is true, getting all devices.'])
else:
devices = device_handler.getUnknown()
write_log('verbose', [f'[{pluginName}] REFRESH_FQDN is false, getting devices with unknown hostnames.'])
mylog('verbose', [f'[{pluginName}] Devices count: {len(devices)}'])
# db.close() # This was causing the crash, DB object doesn't have a close method.
# Mock list of devices (replace with actual device_handler.getUnknown() in production)
# devices = [
# {'devMac': '00:11:22:33:44:55', 'devLastIP': '192.168.1.121'},
# {'devMac': '00:11:22:33:44:56', 'devLastIP': '192.168.1.9'},
# {'devMac': '00:11:22:33:44:57', 'devLastIP': '192.168.1.82'},
# ]
write_log('verbose', [f'[{pluginName}] Devices to scan: {len(devices)}'])
if len(devices) > 0:
# ensure service is running
ensure_avahi_running()
ips_to_find = [device['devLastIP'] for device in devices if device['devLastIP']]
if ips_to_find:
write_log('verbose', [f'[{pluginName}] IPs to be scanned: {ips_to_find}'])
resolved_hosts = resolve_ips_with_zeroconf(ips_to_find, scan_duration)
for device in devices:
domain_name = execute_name_lookup(device['devLastIP'], timeout)
# check if found and not a timeout ('to')
if domain_name != '' and domain_name != 'to':
domain_name = resolved_hosts.get(device['devLastIP'])
if domain_name:
plugin_objects.add_object(
# "MAC", "IP", "Server", "Name"
primaryId = device['devMac'],
secondaryId = device['devLastIP'],
watched1 = '', # You can add any relevant info here if needed
watched1 = '',
watched2 = domain_name,
watched3 = '',
watched4 = '',
extra = '',
foreignKey = device['devMac'])
foreignKey = device['devMac']
)
else:
write_log('verbose', [f'[{pluginName}] No devices with IP addresses to scan.'])
plugin_objects.write_result_file()
mylog('verbose', [f'[{pluginName}] Script finished'])
write_log('verbose', [f'[{pluginName}] Script finished'])
return 0
#===============================================================================
# Execute scan
#===============================================================================
def execute_name_lookup(ip, timeout):
"""
Execute the avahi-resolve command on the IP.
"""
args = ['avahi-resolve', '-a', ip]
# Execute command
output = ""
try:
mylog('debug', [f'[{pluginName}] DEBUG CMD :', args])
# Run the subprocess with a forced timeout
output = subprocess.check_output(args, universal_newlines=True, stderr=subprocess.STDOUT, timeout=timeout)
mylog('debug', [f'[{pluginName}] DEBUG OUTPUT : {output}'])
domain_name = ''
# Split the output into lines
lines = output.splitlines()
# Look for the resolved IP address
for line in lines:
if ip in line:
parts = line.split()
if len(parts) > 1:
domain_name = parts[1] # Second part is the resolved domain name
else:
mylog('verbose', [f'[{pluginName}] ⚠ ERROR - Unexpected output format: {line}'])
mylog('debug', [f'[{pluginName}] Domain Name: {domain_name}'])
return domain_name
except subprocess.CalledProcessError as e:
mylog('none', [f'[{pluginName}] ⚠ ERROR - {e.output}'])
except subprocess.TimeoutExpired:
mylog('none', [f'[{pluginName}] TIMEOUT - the process forcefully terminated as timeout reached'])
if output == "":
mylog('none', [f'[{pluginName}] Scan: FAIL - check logs'])
else:
mylog('debug', [f'[{pluginName}] Scan: SUCCESS'])
return ''
# Function to ensure Avahi and its dependencies are running
def ensure_avahi_running(attempt=1, max_retries=2):
"""
Ensure that D-Bus is running and the Avahi daemon is started, with recursive retry logic.
"""
mylog('debug', [f'[{pluginName}] Attempt {attempt} - Ensuring D-Bus and Avahi daemon are running...'])
# Check rc-status
try:
subprocess.run(['rc-status'], check=True)
except subprocess.CalledProcessError as e:
mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to check rc-status: {e.output}'])
return
# Create OpenRC soft level
subprocess.run(['touch', '/run/openrc/softlevel'], check=True)
# Add Avahi daemon to runlevel
try:
subprocess.run(['rc-update', 'add', 'avahi-daemon'], check=True)
except subprocess.CalledProcessError as e:
mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to add Avahi to runlevel: {e.output}'])
return
# Start the D-Bus service
try:
subprocess.run(['rc-service', 'dbus', 'start'], check=True)
except subprocess.CalledProcessError as e:
mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to start D-Bus: {e.output}'])
return
# Check Avahi status
status_output = subprocess.run(['rc-service', 'avahi-daemon', 'status'], capture_output=True, text=True)
if 'started' in status_output.stdout:
mylog('debug', [f'[{pluginName}] Avahi Daemon is already running.'])
return
mylog('none', [f'[{pluginName}] Avahi Daemon is not running, attempting to start... (Attempt {attempt})'])
# Start the Avahi daemon
try:
subprocess.run(['rc-service', 'avahi-daemon', 'start'], check=True)
except subprocess.CalledProcessError as e:
mylog('none', [f'[{pluginName}] ⚠ ERROR - Failed to start Avahi daemon: {e.output}'])
# Check status after starting
status_output = subprocess.run(['rc-service', 'avahi-daemon', 'status'], capture_output=True, text=True)
if 'started' in status_output.stdout:
mylog('debug', [f'[{pluginName}] Avahi Daemon successfully started.'])
return
# Retry if not started and attempts are left
if attempt < max_retries:
mylog('debug', [f'[{pluginName}] Retrying... ({attempt + 1}/{max_retries})'])
ensure_avahi_running(attempt + 1, max_retries)
else:
mylog('none', [f'[{pluginName}] ⚠ ERROR - Avahi Daemon failed to start after {max_retries} attempts.'])
# rc-update add avahi-daemon
# rc-service avahi-daemon status
# rc-service avahi-daemon start
if __name__ == '__main__':
main()