Plugins 0.1 - Website monitoring cleanup

Jokob-sk
2023-02-05 15:24:46 +11:00
parent 1cb5375a92
commit da9ca8a1f4
7 changed files with 79 additions and 216 deletions

.gitignore vendored
View File

@@ -3,5 +3,6 @@
config/pialert.conf
db/*
front/log/*
+front/plugins/**/*.log
**/%40eaDir/
**/@eaDir/

View File

@@ -31,6 +31,7 @@ services:
      - ${DEV_LOCATION}/front/network.php:/home/pi/pialert/front/network.php
      - ${DEV_LOCATION}/front/presence.php:/home/pi/pialert/front/presence.php
      - ${DEV_LOCATION}/front/settings.php:/home/pi/pialert/front/settings.php
+      - ${DEV_LOCATION}/front/plugins:/home/pi/pialert/front/plugins
      # DELETE END anyone trying to use this file: comment out / delete ABOVE lines, they are only for development purposes
    environment:
      - TZ=${TZ}

View File

@@ -10,7 +10,7 @@ If you wish to develop a plugin, please check the existing plugin structure.
|----------------------|----------------------|----------------------|
| `config.json` | yes | Contains the plugin configuration, including the settings available to the user. |
| `script.py` | yes | The Python script itself |
-| `last_result.log` | yes | The file used to interface between PiAlert and the plugin (script). Should contain a set of testing data. |
+| `last_result.log` | yes | The file used to interface between PiAlert and the plugin (script). |
| `script.log` | no | Logging output (recommended) |
| `README.md` | no | Any setup considerations or overview |
@@ -30,7 +30,7 @@ Used to interface between PiAlert and the plugin (script). After every scan it s
| Order | Represented Column | Required | Description |
|----------------------|----------------------|----------------------|----------------------|
-| 0 | `Object_PrimaryID` | yes | The primary ID used to group Events under. Should be UNIQUE in the context of the last result (so in `last_result.log`) |
+| 0 | `Object_PrimaryID` | yes | The primary ID used to group Events under. |
| 1 | `Object_SecondaryID` | no | Optional secondary ID to create a relationship between other entities, such as a MAC address |
| 2 | `DateTime` | yes | When the event occurred, in the format `2023-01-02 15:56:30` |
| 3 | `Watched_Value1` | yes | A value that is watched; users can receive notifications if it changes compared to the previously saved entry. For example, an IP address |
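
For illustration, a minimal sketch of how a plugin script could append one row in this pipe-delimited layout (not part of this commit; the helper name, the `'null'` placeholder for empty columns, and the extra trailing columns are assumptions based on the sample `last_result.log` further down this page):

```python
# Hypothetical helper, for illustration only: append one pipe-delimited row to
# a plugin's last_result.log using the column order described above.
from time import strftime
import pathlib

LAST_RESULT = str(pathlib.Path(__file__).parent.resolve()) + '/last_result.log'

def append_result(primary_id, secondary_id=None, watched_value1=None):
    fields = [
        primary_id,                     # 0: Object_PrimaryID
        secondary_id or 'null',         # 1: Object_SecondaryID (optional)
        strftime("%Y-%m-%d %H:%M:%S"),  # 2: DateTime
        watched_value1 or 'null',       # 3: Watched_Value1
    ]
    # The sample last_result.log carries further pipe-separated columns;
    # they are padded with 'null' here since this hunk does not list them.
    fields += ['null'] * 4
    with open(LAST_RESULT, 'a') as f:
        f.write('|'.join(str(v) for v in fields) + '\n')

append_result('https://www.example.com', None, '200')
```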

View File

@@ -38,10 +38,24 @@
"en_us" : "Enable a regular scan of your services. If you select <code>schedule</code> the scheduling settings from below are applied. If you select <code>once</code> the scan is run only once on start of the application (container) for the time specified in <a href=\"#WEBMON_TIMEOUT\"><code>WEBMON_TIMEOUT</code> setting</a>." "en_us" : "Enable a regular scan of your services. If you select <code>schedule</code> the scheduling settings from below are applied. If you select <code>once</code> the scan is run only once on start of the application (container) for the time specified in <a href=\"#WEBMON_TIMEOUT\"><code>WEBMON_TIMEOUT</code> setting</a>."
} }
},
{
"type": "FORCE_REPORT",
"default_value": false,
"options": [],
"name" : {
"en_us" : "Schedule"
},
"description":
{
"en_us" : "Enable a regular scan of your services. If you select <code>schedule</code> the scheduling settings from below are applied. If you select <code>once</code> the scan is run only once on start of the application (container) for the time specified in <a href=\"#WEBMON_TIMEOUT\"><code>WEBMON_TIMEOUT</code> setting</a>."
}
}, },
{ {
"type": "RUN_SCHD", "type": "RUN_SCHD",
"default_value":"0 2 * * *", "default_value":"0 2 * * *",
"options": [],
"name" : { "name" : {
"en_us" : "Schedule" "en_us" : "Schedule"
}, },
@@ -54,6 +68,7 @@
{ {
"type": "API_SQL", "type": "API_SQL",
"default_value":"SELECT * FROM plugin_website_monitor", "default_value":"SELECT * FROM plugin_website_monitor",
"options": [],
"name" : { "name" : {
"en_us" : "API endpoint" "en_us" : "API endpoint"
}, },
@@ -66,6 +81,7 @@
{ {
"type": "TIMEOUT", "type": "TIMEOUT",
"default_value":5, "default_value":5,
"options": [],
"name" : { "name" : {
"en_us" : "Run timeout" "en_us" : "Run timeout"
}, },
@@ -90,6 +106,7 @@
{ {
"type": "ARGS", "type": "ARGS",
"default_value":"", "default_value":"",
"options": [],
"name" : { "name" : {
"en_us" : "Run timeout" "en_us" : "Run timeout"
}, },
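
For illustration, a minimal sketch of how a plugin's `config.json` settings array could be read back and a single entry looked up by its `type` (not part of this commit; the top-level `settings` key and the loader function are assumptions, and only the per-setting keys `type`, `default_value`, `options`, `name` and `description` come from the hunks above):

```python
# Hypothetical loader, for illustration only: read the settings list from a
# plugin's config.json and return the default value and options for one type.
import json
import pathlib

def load_setting(setting_type, config_path=None):
    config_path = config_path or str(pathlib.Path(__file__).parent.resolve()) + '/config.json'
    with open(config_path) as f:
        config = json.load(f)
    # Assumes the setting objects live in a list under a "settings" key.
    for setting in config.get('settings', []):
        if setting.get('type') == setting_type:
            return setting.get('default_value'), setting.get('options', [])
    return None, []

default, options = load_setting('RUN_SCHD')
print('RUN_SCHD default:', default, 'options:', options)
```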

View File

@@ -1,2 +1,2 @@
-https://www.google.com|null|2023-01-02 15:56:30|404|0.7898|null|null|null
-https://www.duckduckgo.com|192.168.0.1|2023-01-02 15:56:30|200|0.9898|null|null|Best search engine
+http://google.com|null|2023-02-05 15:23:04|200|0.407871|null|null|null
+http://bing.com|null|2023-02-05 15:23:04|200|0.196052|null|null|null
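
For illustration, a short sketch of how these rows could be parsed back (not part of this commit; only columns 0-3 are named in the README hunk above, so the remaining pipe-separated values are kept positionally and the literal string `'null'` is mapped to `None`):

```python
# Hypothetical reader, for illustration only: parse last_result.log rows.
def parse_last_result(path='last_result.log'):
    rows = []
    with open(path) as f:
        for line in f:
            values = [None if v == 'null' else v for v in line.rstrip('\n').split('|')]
            rows.append({
                'Object_PrimaryID':   values[0],
                'Object_SecondaryID': values[1],
                'DateTime':           values[2],
                'Watched_Value1':     values[3],
                'rest':               values[4:],  # columns not named in this hunk
            })
    return rows

for row in parse_last_result():
    print(row['Object_PrimaryID'], row['Watched_Value1'], row['rest'])
```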

View File

@@ -0,0 +1,13 @@
+Pi.Alert [Prototype]:
+---------------------------------------------------------
+Current User: root
+
+Monitor Web-Services
+Timestamp: 2023-02-05 15:23:03
+
+Start Services Monitoring
+
+| Timestamp | URL | StatusCode | ResponseTime |
+-----------------------------------------------
+2023-02-05 15:23:04 | http://google.com | 200 | 0.407871
+2023-02-05 15:23:04 | http://bing.com | 200 | 0.196052

View File

@@ -1,174 +1,71 @@
#!/usr/bin/env python
+# Based on the work of https://github.com/leiweibau/Pi.Alert
+# /home/pi/pialert/front/plugins/website_monitoring/script.py urls=http://google.com,http://bing.com
from __future__ import unicode_literals
from time import sleep, time, strftime
import requests
+import pathlib
+import argparse
import io
#import smtplib
import sys
#from smtp_config import sender, password, receivers, host, port
from requests.packages.urllib3.exceptions import InsecureRequestWarning
-import sqlite3
import pwd
import os
-con = sqlite3.connect("monitoring.db")
-cur = con.cursor()
-#DELAY = 60 # Delay between site queries
-EMAIL_INTERVAL = 1800 # Delay between alert emails
-last_email_time = {} # Monitored sites and timestamp of last alert sent
-# Message template for alert
-MESSAGE = """From: {sender}
-To: {receivers}
-Subject: Monitor Service Notification
-You are being notified that {site} is experiencing a {status} status!
-"""
+curPath = str(pathlib.Path(__file__).parent.resolve())
+log_file = curPath + '/script.log'
+last_run = curPath + '/last_result.log'
+print(last_run)
# Workflow
def main():
-    global cur
-    global con
-    prepare_service_monitoring_env()
-    service_monitoring()
-    print_service_monitoring_changes()
-    # prepare_service_monitoring_notification()
+    parser = argparse.ArgumentParser(description='Simple URL monitoring tool')
+    parser.add_argument('urls', action="store", help="urls to check separated by ','")
+    values = parser.parse_args()
+    if values.urls:
+        with open(last_run, 'w') as last_run_logfile:
+            # empty file
+            last_run_logfile.write("")
+        service_monitoring(values.urls.split('=')[1].split(','))
+    else:
+        return
-# -----------------------------------------------------------------------------
-def prepare_service_monitoring_env ():
-    global con
-    global cur
-    sql_create_table = """ CREATE TABLE IF NOT EXISTS Services_Events(
-                            moneve_URL TEXT NOT NULL,
-                            moneve_DateTime TEXT NOT NULL,
-                            moneve_StatusCode NUMERIC NOT NULL,
-                            moneve_Latency TEXT NOT NULL
-                        ); """
-    cur.execute(sql_create_table)
-    sql_create_table = """ CREATE TABLE IF NOT EXISTS Services_CurrentScan(
-                            cur_URL TEXT NOT NULL,
-                            cur_DateTime TEXT NOT NULL,
-                            cur_StatusCode NUMERIC NOT NULL,
-                            cur_Latency TEXT NOT NULL,
-                            cur_AlertEvents INTEGER DEFAULT 0,
-                            cur_AlertDown INTEGER DEFAULT 0,
-                            cur_StatusChanged INTEGER DEFAULT 0,
-                            cur_LatencyChanged INTEGER DEFAULT 0
-                        ); """
-    cur.execute(sql_create_table)
-    sql_create_table = """ CREATE TABLE IF NOT EXISTS Services(
-                            mon_URL TEXT NOT NULL,
-                            mon_MAC TEXT,
-                            mon_LastStatus NUMERIC NOT NULL,
-                            mon_LastLatency TEXT NOT NULL,
-                            mon_LastScan TEXT NOT NULL,
-                            mon_Tags TEXT,
-                            mon_AlertEvents INTEGER DEFAULT 0,
-                            mon_AlertDown INTEGER DEFAULT 0,
-                            PRIMARY KEY(mon_URL)
-                        ); """
-    cur.execute(sql_create_table)
-# Update Service with lastLatence, lastScan and lastStatus
-# -----------------------------------------------------------------------------
-def set_service_update(_mon_URL, _mon_lastScan, _mon_lastStatus, _mon_lastLatence,):
-    global con
-    global cur
-    sqlite_insert = """UPDATE Services SET mon_LastScan=?, mon_LastStatus=?, mon_LastLatency=? WHERE mon_URL=?;"""
-    table_data = (_mon_lastScan, _mon_lastStatus, _mon_lastLatence, _mon_URL)
-    cur.execute(sqlite_insert, table_data)
-    con.commit()
-# Insert Services_Events with moneve_URL, moneve_DateTime, moneve_StatusCode and moneve_Latency
-# -----------------------------------------------------------------------------
-def set_services_events(_moneve_URL, _moneve_DateTime, _moneve_StatusCode, _moneve_Latency):
-    global con
-    global cur
-    sqlite_insert = """INSERT INTO Services_Events
-                       (moneve_URL, moneve_DateTime, moneve_StatusCode, moneve_Latency)
-                       VALUES (?, ?, ?, ?);"""
-    table_data = (_moneve_URL, _moneve_DateTime, _moneve_StatusCode, _moneve_Latency)
-    cur.execute(sqlite_insert, table_data)
-    con.commit()
-# Insert Services_Events with moneve_URL, moneve_DateTime, moneve_StatusCode and moneve_Latency
-# -----------------------------------------------------------------------------
-def set_services_current_scan(_cur_URL, _cur_DateTime, _cur_StatusCode, _cur_Latency):
-    global con
-    global cur
-    cur.execute("SELECT * FROM Services WHERE mon_URL = ?", [_cur_URL])
-    rows = cur.fetchall()
-    for row in rows:
-        _mon_AlertEvents = row[6]
-        _mon_AlertDown = row[7]
-        _mon_StatusCode = row[2]
-        _mon_Latency = row[3]
-        if _mon_StatusCode != _cur_StatusCode:
-            _cur_StatusChanged = 1
-        else:
-            _cur_StatusChanged = 0
-        if _mon_Latency == "99999" and _mon_Latency != _cur_Latency:
-            _cur_LatencyChanged = 1
-        elif _cur_Latency == "99999" and _mon_Latency != _cur_Latency:
-            _cur_LatencyChanged = 1
-        else:
-            _cur_LatencyChanged = 0
-        sqlite_insert = """INSERT INTO Services_CurrentScan
-                           (cur_URL, cur_DateTime, cur_StatusCode, cur_Latency, cur_AlertEvents, cur_AlertDown, cur_StatusChanged, cur_LatencyChanged)
-                           VALUES (?, ?, ?, ?, ?, ?, ?, ?);"""
-        table_data = (_cur_URL, _cur_DateTime, _cur_StatusCode, _cur_Latency, _mon_AlertEvents, _mon_AlertDown, _cur_StatusChanged, _cur_LatencyChanged)
-        cur.execute(sqlite_insert, table_data)
-        con.commit()
# -----------------------------------------------------------------------------
def service_monitoring_log(site, status, latency):
    # global monitor_logfile
    # Log status message to log file
-    with open('monitor.log', 'a') as monitor_logfile:
+    with open(log_file, 'a') as monitor_logfile:
        monitor_logfile.write("{} | {} | {} | {}\n".format(strftime("%Y-%m-%d %H:%M:%S"),
                                                           site,
                                                           status,
                                                           latency,
                                                           )
                              )
+    with open(last_run, 'a') as last_run_logfile:
+        # https://www.duckduckgo.com|192.168.0.1|2023-01-02 15:56:30|200|0.9898|null|null|Best search engine
+        last_run_logfile.write("{}|{}|{}|{}|{}|{}|{}|{}\n".format(
+            site,
+            'null',
+            strftime("%Y-%m-%d %H:%M:%S"),
+            status,
+            latency,
+            'null',
+            'null',
+            'null',
+            )
+        )
-# -----------------------------------------------------------------------------
-def send_alert(site, status):
-    """If more than EMAIL_INTERVAL seconds since last email, resend email"""
-    if (time() - last_email_time[site]) > EMAIL_INTERVAL:
-        try:
-            smtpObj = smtplib.SMTP(host, port) # Set up SMTP object
-            smtpObj.starttls()
-            smtpObj.login(sender, password)
-            smtpObj.sendmail(sender,
-                             receivers,
-                             MESSAGE.format(sender=sender,
-                                            receivers=", ".join(receivers),
-                                            site=site,
-                                            status=status
-                                            )
-                             )
-            last_email_time[site] = time() # Update time of last email
-            print("Successfully sent email")
-        except smtplib.SMTPException:
-            print("Error sending email ({}:{})".format(host, port))
# -----------------------------------------------------------------------------
def check_services_health(site):
@@ -190,73 +87,19 @@ def check_services_health(site):
latency = "99999" latency = "99999"
return 503, latency return 503, latency
# ----------------------------------------------------------------------------- Duplicat # -----------------------------------------------------------------------------
def get_username(): def get_username():
return pwd.getpwuid(os.getuid())[0] return pwd.getpwuid(os.getuid())[0]
# -----------------------------------------------------------------------------
def get_services_list():
global cur
global con
with open('monitor.log', 'a') as monitor_logfile:
monitor_logfile.write("... Get Services List\n")
monitor_logfile.close()
cur.execute("SELECT mon_URL FROM Services")
rows = cur.fetchall()
sites = []
for row in rows:
sites.append(row[0])
return sites
# ----------------------------------------------------------------------------- # -----------------------------------------------------------------------------
def flush_services_current_scan(): def service_monitoring(urls):
global cur
global con
with open('monitor.log', 'a') as monitor_logfile:
monitor_logfile.write("... Flush previous scan results\n")
monitor_logfile.close()
cur.execute("DELETE FROM Services_CurrentScan")
con.commit()
# -----------------------------------------------------------------------------
def print_service_monitoring_changes():
global cur
global con
print("Services Monitoring Changes")
changedStatusCode = cur.execute("SELECT COUNT() FROM Services_CurrentScan WHERE cur_StatusChanged = 1").fetchone()[0]
print("... Changed StatusCodes: ", str(changedStatusCode))
changedLatency = cur.execute("SELECT COUNT() FROM Services_CurrentScan WHERE cur_LatencyChanged = 1").fetchone()[0]
print("... Changed Reachability: ", str(changedLatency))
with open('monitor.log', 'a') as monitor_logfile:
monitor_logfile.write("\nServices Monitoring Changes:\n")
monitor_logfile.write("... Changed StatusCodes: " + str(changedStatusCode))
monitor_logfile.write("\n... Changed Reachability: " + str(changedLatency))
monitor_logfile.write("\n")
monitor_logfile.close()
# -----------------------------------------------------------------------------
# def prepare_service_monitoring_notification():
# global cur
# global con
# -----------------------------------------------------------------------------
def service_monitoring():
global cur
global con
# Empty Log and write new header # Empty Log and write new header
print("Prepare Services Monitoring") print("Prepare Services Monitoring")
print("... Prepare Logfile") print("... Prepare Logfile")
with open('monitor.log', 'w') as monitor_logfile: with open(log_file, 'w') as monitor_logfile:
monitor_logfile.write("Pi.Alert [Prototype]:\n---------------------------------------------------------\n") monitor_logfile.write("Pi.Alert [Prototype]:\n---------------------------------------------------------\n")
monitor_logfile.write("Current User: %s \n\n" % get_username()) monitor_logfile.write("Current User: %s \n\n" % get_username())
monitor_logfile.write("Monitor Web-Services\n") monitor_logfile.write("Monitor Web-Services\n")
@@ -264,19 +107,14 @@ def service_monitoring():
        monitor_logfile.close()
    print("... Get Services List")
-    sites = get_services_list()
-    print("... Flush previous scan results")
-    flush_services_current_scan()
+    sites = urls
    print("Start Services Monitoring")
-    with open('monitor.log', 'a') as monitor_logfile:
+    with open(log_file, 'a') as monitor_logfile:
        monitor_logfile.write("\nStart Services Monitoring\n\n| Timestamp | URL | StatusCode | ResponseTime |\n-----------------------------------------------\n")
        monitor_logfile.close()
-    for site in sites:
-        last_email_time[site] = 0 # Initialize timestamp as 0
    while sites:
        for site in sites:
            status,latency = check_services_health(site)
@@ -291,20 +129,13 @@ def service_monitoring():
            # Write Logfile
            service_monitoring_log(site, status, latency)
-            # Insert Services_Events with moneve_URL, moneve_DateTime, moneve_StatusCode and moneve_Latency
-            set_services_events(site, scantime, status, latency)
-            # Insert Services_CurrentScan with moneve_URL, moneve_DateTime, moneve_StatusCode and moneve_Latency
-            set_services_current_scan(site, scantime, status, latency)
            sys.stdout.flush()
-            # Update Service with lastLatence, lastScan and lastStatus after compare with services_current_scan
-            set_service_update(site, scantime, status, latency)
        break
    else:
-        with open('monitor.log', 'a') as monitor_logfile:
+        with open(log_file, 'a') as monitor_logfile:
            monitor_logfile.write("\n\nNo site(s) to monitor!")
            monitor_logfile.close()
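
For context, check_services_health() is only partially visible in these hunks (the fallback that returns 503 with a latency of "99999"). A minimal sketch of how such a check and the new urls= argument fit together (not part of this commit; the requests call, the timeout value and the rounding are assumptions):

```python
# Illustrative sketch only: a URL health check plus the urls= argument handling.
import argparse
import requests

def check_url(site, timeout=5):
    try:
        response = requests.get(site, timeout=timeout)
        return response.status_code, str(round(response.elapsed.total_seconds(), 6))
    except requests.RequestException:
        # Unreachable: mirror the fallback shown in the diff above.
        return 503, "99999"

if __name__ == '__main__':
    # The script expects a single positional argument of the form
    # urls=http://google.com,http://bing.com and splits it on '=' and ','.
    parser = argparse.ArgumentParser(description='Simple URL monitoring tool')
    parser.add_argument('urls', action="store", help="urls to check separated by ','")
    values = parser.parse_args()
    for site in values.urls.split('=')[1].split(','):
        print(site, *check_url(site))
```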