mirror of
https://github.com/jokob-sk/NetAlertX.git
synced 2026-03-30 23:03:03 -07:00
Compare commits
757 Commits
e7f25560c8
...
next_relea
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3f80d2e57f | ||
|
|
b18cf98266 | ||
|
|
77369c3ce8 | ||
|
|
cd0a3f6de0 | ||
|
|
13e91731be | ||
|
|
7ef19b1c12 | ||
|
|
4daead1f8f | ||
|
|
48454f6f2f | ||
|
|
7305fd78e3 | ||
|
|
ec3e4c8988 | ||
|
|
250e533655 | ||
|
|
37730301f4 | ||
|
|
7278ee8cfa | ||
|
|
fa22523a0b | ||
|
|
7569923481 | ||
|
|
d7c7bd2cd2 | ||
|
|
b311113575 | ||
|
|
43984132c4 | ||
|
|
0a7ecb5b7c | ||
|
|
c7399215ec | ||
|
|
0bb6db155b | ||
|
|
7221b4ba96 | ||
|
|
c4904739b2 | ||
|
|
67cab9d606 | ||
|
|
f75c53fc5d | ||
|
|
bff87f4d61 | ||
|
|
6f7d2c3253 | ||
|
|
0766fb2de6 | ||
|
|
d19cb3d679 | ||
|
|
9b71c210b2 | ||
|
|
c9cb1f3fba | ||
|
|
78a8030c6a | ||
|
|
b5b0bcc766 | ||
|
|
13515603e4 | ||
|
|
518608cffc | ||
|
|
df3ca50c5c | ||
|
|
93fc126da2 | ||
|
|
a60ec9ed3a | ||
|
|
e1d206ca74 | ||
|
|
2771a6e9c2 | ||
|
|
aba1ddd3df | ||
|
|
165c9d3baa | ||
|
|
0b0c88f712 | ||
|
|
d49abd9d02 | ||
|
|
abf024d4d3 | ||
|
|
4eb5947ceb | ||
|
|
1d1a8045a0 | ||
|
|
f8c09d35a7 | ||
|
|
d8d090404e | ||
|
|
5a6de6d832 | ||
|
|
05b63cb730 | ||
|
|
2921614eac | ||
|
|
17d95d802f | ||
|
|
a0048980b8 | ||
|
|
89811cd133 | ||
|
|
b854206599 | ||
|
|
a532c98115 | ||
|
|
da23880eb1 | ||
|
|
c73ce839f2 | ||
|
|
5c0f29b97c | ||
|
|
f1496b483b | ||
|
|
ba26f34191 | ||
|
|
37f8a44cb3 | ||
|
|
76a259d9e5 | ||
|
|
1923a063f0 | ||
|
|
01b6b9f04a | ||
|
|
ea77112315 | ||
|
|
b19973130e | ||
|
|
ffbcc2ad25 | ||
|
|
c533c2267c | ||
|
|
ac407bd86e | ||
|
|
1da3c146d2 | ||
|
|
9fe8090a1b | ||
|
|
3ba1b69c1e | ||
|
|
da4d8a9675 | ||
|
|
0f20fb38f0 | ||
|
|
8361f0ac99 | ||
|
|
99de69e30d | ||
|
|
4637ec6350 | ||
|
|
2a4e6ba5e1 | ||
|
|
5be7bbe07d | ||
|
|
e8c43af7b6 | ||
|
|
27f34963be | ||
|
|
594c2fe015 | ||
|
|
14362d20bd | ||
|
|
4f239be8a3 | ||
|
|
5a65d807a8 | ||
|
|
f3bf37bb24 | ||
|
|
b7e1cb1f9d | ||
|
|
b4510663f7 | ||
|
|
dd564b235b | ||
|
|
04db68ea6c | ||
|
|
550f59b34f | ||
|
|
6e8a3d8a58 | ||
|
|
c89b2ded26 | ||
|
|
9f964be0c3 | ||
|
|
d2bc8410a7 | ||
|
|
ab74307ed1 | ||
|
|
8ab9d9f395 | ||
|
|
c1d53ff93f | ||
|
|
a329c5b541 | ||
|
|
0555105473 | ||
|
|
b0aa5d0e45 | ||
|
|
93df52f70c | ||
|
|
95f411d92a | ||
|
|
bc4f419927 | ||
|
|
3a73817048 | ||
|
|
67aa46f1cf | ||
|
|
da63acb675 | ||
|
|
50125f0700 | ||
|
|
6724d250d4 | ||
|
|
3e237bb452 | ||
|
|
15807b7ab9 | ||
|
|
0497c2891e | ||
|
|
8e6efc3008 | ||
|
|
deb0d16c3d | ||
|
|
a94f3d7222 | ||
|
|
d9608b4760 | ||
|
|
584aba2c7b | ||
|
|
ea5585a8ef | ||
|
|
c1adfd35f3 | ||
|
|
66532c54a1 | ||
|
|
a6ce4174fe | ||
|
|
247a967e9b | ||
|
|
dbe65b2a27 | ||
|
|
563cb4ba20 | ||
|
|
3d4aba4b39 | ||
|
|
b96ace0447 | ||
|
|
e15c68d189 | ||
|
|
f5e411d5d5 | ||
|
|
f727580798 | ||
|
|
11499a6890 | ||
|
|
85badb0760 | ||
|
|
814ba02d1c | ||
|
|
e57fd2e81e | ||
|
|
4dc2a63ebb | ||
|
|
6b320877ec | ||
|
|
43667a3bc4 | ||
|
|
4d0b7c944f | ||
|
|
9894009455 | ||
|
|
0e18e34918 | ||
|
|
d9c263d506 | ||
|
|
58e32a5b43 | ||
|
|
24e2036bde | ||
|
|
b74b803d6c | ||
|
|
173ffbe3b2 | ||
|
|
d2ebe0d452 | ||
|
|
4c0d5c7376 | ||
|
|
686a713aa8 | ||
|
|
9d64665599 | ||
|
|
63cef590d6 | ||
|
|
00042ab594 | ||
|
|
786cc5ee33 | ||
|
|
0b32a06178 | ||
|
|
1fa381429d | ||
|
|
fae61174a7 | ||
|
|
d06301ac80 | ||
|
|
f4bc9c93c3 | ||
|
|
0172ab4311 | ||
|
|
f1fc9f24b1 | ||
|
|
c192f2c032 | ||
|
|
a309f99c3d | ||
|
|
54e9d52126 | ||
|
|
8fc78f02e9 | ||
|
|
123f715241 | ||
|
|
446545e7eb | ||
|
|
14625926f9 | ||
|
|
c7e754966e | ||
|
|
4316a436eb | ||
|
|
fe22659794 | ||
|
|
cb0b3b607d | ||
|
|
53b2596902 | ||
|
|
1a364e2fe2 | ||
|
|
2f1e5068e3 | ||
|
|
57118bc9bd | ||
|
|
25a81556e3 | ||
|
|
39f617be5f | ||
|
|
c4c966ffa7 | ||
|
|
f88aefe022 | ||
|
|
54db347b94 | ||
|
|
2ae87fca38 | ||
|
|
8224363c45 | ||
|
|
eb399ec193 | ||
|
|
70645e7ef3 | ||
|
|
0e94dcb091 | ||
|
|
a26137800d | ||
|
|
63810bc536 | ||
|
|
57d451fcf4 | ||
|
|
bf6218e836 | ||
|
|
e9efabd562 | ||
|
|
eb0f705587 | ||
|
|
2559702a6a | ||
|
|
6bbfc0637c | ||
|
|
688d49b5ae | ||
|
|
ab7df4384e | ||
|
|
2018636bf8 | ||
|
|
50f341e84f | ||
|
|
32c21b01bb | ||
|
|
05c332867b | ||
|
|
12b0d911ff | ||
|
|
04884a264b | ||
|
|
2742414123 | ||
|
|
876cd4bbe1 | ||
|
|
91775deaa3 | ||
|
|
7075091569 | ||
|
|
f63658af7d | ||
|
|
774c123804 | ||
|
|
32e2d571a0 | ||
|
|
f2af4ffdb8 | ||
|
|
bc97a80375 | ||
|
|
fa36adb015 | ||
|
|
264cae3338 | ||
|
|
b594472f30 | ||
|
|
6d98ee9c2a | ||
|
|
1181b56b16 | ||
|
|
4b58f3b23f | ||
|
|
e61bf097ac | ||
|
|
64dbf8a3ba | ||
|
|
5685a67483 | ||
|
|
c1e6a69e05 | ||
|
|
3587169791 | ||
|
|
fd71527b09 | ||
|
|
9676111ceb | ||
|
|
60036a49c2 | ||
|
|
60ccfc734d | ||
|
|
c91532f3de | ||
|
|
aeaab6d408 | ||
|
|
5e492bc81e | ||
|
|
db689ac269 | ||
|
|
bb39bde9dd | ||
|
|
46781ed71a | ||
|
|
a313b0ccc5 | ||
|
|
2765e441a5 | ||
|
|
eb35e80916 | ||
|
|
4e7df766eb | ||
|
|
e741ff51b5 | ||
|
|
a81255fb18 | ||
|
|
5caa240fcd | ||
|
|
888d39d2fb | ||
|
|
b57d36607a | ||
|
|
70c3530a5c | ||
|
|
7af850cb56 | ||
|
|
9ac8f6fe34 | ||
|
|
933004e792 | ||
|
|
45157b6156 | ||
|
|
a560009611 | ||
|
|
e0d4e9ea9c | ||
|
|
249d12ded4 | ||
|
|
e899f657c5 | ||
|
|
3036cd04fc | ||
|
|
3d3abe7e53 | ||
|
|
a088f4580a | ||
|
|
75c7d6c015 | ||
|
|
d434cc5315 | ||
|
|
cedbd59897 | ||
|
|
b703397543 | ||
|
|
9c4e02f565 | ||
|
|
3510afec7a | ||
|
|
ed44c68d54 | ||
|
|
30c832b14e | ||
|
|
d7f17c8e78 | ||
|
|
8538c87fef | ||
|
|
1bacb59044 | ||
|
|
827b5d2ad3 | ||
|
|
e70bbdb78e | ||
|
|
946ad00253 | ||
|
|
3734c43284 | ||
|
|
0ce4e5f70c | ||
|
|
6bc2de6e24 | ||
|
|
09b42166cc | ||
|
|
dbe490a042 | ||
|
|
5996e70f60 | ||
|
|
15366a7f2e | ||
|
|
d5d1684ef9 | ||
|
|
c1141fc9a8 | ||
|
|
d38dcda35b | ||
|
|
ac5224747e | ||
|
|
5c23bde21c | ||
|
|
8e83d9b67d | ||
|
|
30c004eb77 | ||
|
|
c074ce1b11 | ||
|
|
5e40ea83d9 | ||
|
|
2124c2e1e2 | ||
|
|
1b6dc94bae | ||
|
|
76d37edc63 | ||
|
|
984b5cd780 | ||
|
|
a8ec97d782 | ||
|
|
5b64c96065 | ||
|
|
7cb17286db | ||
|
|
433600d36c | ||
|
|
250b5a3f51 | ||
|
|
50e74076bb | ||
|
|
1139e0e190 | ||
|
|
7caa6a1949 | ||
|
|
b87a8d683e | ||
|
|
a1a6c7e1cf | ||
|
|
8211816b37 | ||
|
|
0f0a09fb28 | ||
|
|
5081767b6e | ||
|
|
81202ce07e | ||
|
|
22bb936f16 | ||
|
|
034ee688fb | ||
|
|
fe7e91c515 | ||
|
|
f7fa857cae | ||
|
|
1a9ae626e5 | ||
|
|
7b22c0a5dd | ||
|
|
36d5f5b434 | ||
|
|
a70354997d | ||
|
|
9ca5375652 | ||
|
|
f43517b9a5 | ||
|
|
5095edd5d8 | ||
|
|
dc6b57a581 | ||
|
|
b2501d98a5 | ||
|
|
8a5d3b1548 | ||
|
|
bc46cba528 | ||
|
|
92029badaa | ||
|
|
f726820883 | ||
|
|
b45804f177 | ||
|
|
6d03d58c78 | ||
|
|
39637350b3 | ||
|
|
0b104caf7a | ||
|
|
0ac0dccba1 | ||
|
|
016e1d89af | ||
|
|
96687058ed | ||
|
|
d52799a49e | ||
|
|
db8a086c42 | ||
|
|
6f64a96baf | ||
|
|
e592bdaf9e | ||
|
|
f91d897787 | ||
|
|
2954b929a6 | ||
|
|
d6457a53a0 | ||
|
|
900e418be9 | ||
|
|
56ba8864da | ||
|
|
4c9c89050b | ||
|
|
87b15fbeb9 | ||
|
|
9d0627c5c3 | ||
|
|
77fd017d90 | ||
|
|
d3b3f8babb | ||
|
|
53962bc38b | ||
|
|
d404c45843 | ||
|
|
53c7cea690 | ||
|
|
7056bcbba0 | ||
|
|
f52a7c112a | ||
|
|
a41111c5f7 | ||
|
|
596f52f097 | ||
|
|
c201a83474 | ||
|
|
371fb04710 | ||
|
|
53f7a71286 | ||
|
|
604bbbaa5b | ||
|
|
0c08659d65 | ||
|
|
7aa547ed90 | ||
|
|
5a49b97821 | ||
|
|
42be7c4263 | ||
|
|
4506aa3b1f | ||
|
|
cc8a695943 | ||
|
|
a6f9b56abb | ||
|
|
8dfc0e096c | ||
|
|
8640b8c282 | ||
|
|
405c1c37cb | ||
|
|
ad6c3fe176 | ||
|
|
e1059b6937 | ||
|
|
1e1d4cd045 | ||
|
|
a868a7ed8e | ||
|
|
ed4e0388cc | ||
|
|
fa40880c05 | ||
|
|
2d6e357fe5 | ||
|
|
6244daebcf | ||
|
|
17e563aa29 | ||
|
|
37d90414fb | ||
|
|
2211419c5b | ||
|
|
229ea770cb | ||
|
|
52ac9fce41 | ||
|
|
fe6598b9af | ||
|
|
f54ba4817e | ||
|
|
a95b635601 | ||
|
|
1011652959 | ||
|
|
928317d16f | ||
|
|
e126e1f85f | ||
|
|
596a30fe01 | ||
|
|
d748480e66 | ||
|
|
1f5d6f96a4 | ||
|
|
2086e78a39 | ||
|
|
7faaa630a1 | ||
|
|
46d866b5ee | ||
|
|
af2a89f4ff | ||
|
|
e649bcfe25 | ||
|
|
dc2a56aac3 | ||
|
|
0fd3bd6974 | ||
|
|
14a92ad2f8 | ||
|
|
6eba0314fe | ||
|
|
8ac5b14403 | ||
|
|
09a809985b | ||
|
|
29a8cf0294 | ||
|
|
0df9759606 | ||
|
|
c474d12cc0 | ||
|
|
c05e7c72ee | ||
|
|
5dba6bf292 | ||
|
|
6388afbb1e | ||
|
|
b4348c18b6 | ||
|
|
1ed9082123 | ||
|
|
db95f2c6c0 | ||
|
|
d9602da975 | ||
|
|
12cebbb483 | ||
|
|
ecd0ca89c7 | ||
|
|
f202b506c3 | ||
|
|
6916cd7611 | ||
|
|
cc55e58efb | ||
|
|
f65aafa2c0 | ||
|
|
0b8f3887c0 | ||
|
|
2bd80d19db | ||
|
|
fed621f690 | ||
|
|
bc40ecd2c0 | ||
|
|
5a11c3738d | ||
|
|
f144f65f45 | ||
|
|
e46f556df7 | ||
|
|
3d82af8cbc | ||
|
|
19b40de1de | ||
|
|
31530fb46e | ||
|
|
46bbc6e335 | ||
|
|
07b5b5cf56 | ||
|
|
54a481f459 | ||
|
|
9d6004d23d | ||
|
|
c3d3826448 | ||
|
|
6cfc5efb88 | ||
|
|
67b307f0e7 | ||
|
|
f0960d2b84 | ||
|
|
5fd789f295 | ||
|
|
72c29a0d2d | ||
|
|
fe6aa55419 | ||
|
|
973de8d407 | ||
|
|
7324047f64 | ||
|
|
a9c323b4a9 | ||
|
|
a6a9540979 | ||
|
|
108c26440a | ||
|
|
c162030fb8 | ||
|
|
cf919e6b27 | ||
|
|
8b1fe734c4 | ||
|
|
d24411fa53 | ||
|
|
f173325b7b | ||
|
|
5d28f49165 | ||
|
|
148bee3ed5 | ||
|
|
c0f4fe9e12 | ||
|
|
858868b5f2 | ||
|
|
4ae94f4644 | ||
|
|
3288eef048 | ||
|
|
d56875c73b | ||
|
|
bb1061192e | ||
|
|
a5fc49027a | ||
|
|
76d63de9d6 | ||
|
|
7432cddc9b | ||
|
|
ad3bfbade0 | ||
|
|
2e91e5eaf7 | ||
|
|
52a5972b49 | ||
|
|
b0a9f5f688 | ||
|
|
c00c4f6730 | ||
|
|
a398b91e66 | ||
|
|
9ec4e26df1 | ||
|
|
4619a13bcb | ||
|
|
2292f904b8 | ||
|
|
ff206b8fc7 | ||
|
|
a3062105fd | ||
|
|
e61133c557 | ||
|
|
f8f70141c8 | ||
|
|
1ec499dfb0 | ||
|
|
96e4909bf0 | ||
|
|
27f7bfd129 | ||
|
|
3342427ec2 | ||
|
|
4991b058d3 | ||
|
|
8ea84a22e9 | ||
|
|
899017fdd8 | ||
|
|
abfe452996 | ||
|
|
3775e21dc7 | ||
|
|
2acc180fd5 | ||
|
|
be381488aa | ||
|
|
9da1d2a456 | ||
|
|
44a7f15440 | ||
|
|
cafa36f627 | ||
|
|
49e689f022 | ||
|
|
422a048806 | ||
|
|
97bc220866 | ||
|
|
319731b664 | ||
|
|
ea2c5184a9 | ||
|
|
c843ea5575 | ||
|
|
3109b5d253 | ||
|
|
fcbe4ae88a | ||
|
|
9f1d04bcd4 | ||
|
|
54d01f0a65 | ||
|
|
97e684dba4 | ||
|
|
478b018fa5 | ||
|
|
3ee21ac830 | ||
|
|
22695a633c | ||
|
|
3b203536b8 | ||
|
|
1e289e94e3 | ||
|
|
beb101bd2c | ||
|
|
ecaacec9c9 | ||
|
|
3ee690d391 | ||
|
|
ddebc2418f | ||
|
|
6c2a843f9a | ||
|
|
bb0c0e1c74 | ||
|
|
866ce566d7 | ||
|
|
fd0037e66b | ||
|
|
640bbd95c1 | ||
|
|
5e46e7889f | ||
|
|
ecea1d1fbd | ||
|
|
100e67156e | ||
|
|
cea3369b5e | ||
|
|
284260d5f3 | ||
|
|
12d69d50b1 | ||
|
|
b49adaf717 | ||
|
|
f8f1d6ef76 | ||
|
|
45a78dc426 | ||
|
|
5146d405a7 | ||
|
|
61c2cc6c3a | ||
|
|
d0279585ef | ||
|
|
6bc2f34351 | ||
|
|
52ada3f6d5 | ||
|
|
4b69226f89 | ||
|
|
afe276e7bb | ||
|
|
313de80c8f | ||
|
|
9d377d7527 | ||
|
|
30247c9df0 | ||
|
|
6919fdc522 | ||
|
|
e56dd4e4cb | ||
|
|
c45af09fd7 | ||
|
|
0035834c54 | ||
|
|
8a2c48931b | ||
|
|
08700d7455 | ||
|
|
2fa2624852 | ||
|
|
e3bd54944a | ||
|
|
f81cf6d513 | ||
|
|
1010a81b15 | ||
|
|
c34416cc59 | ||
|
|
29ba1936ad | ||
|
|
5840f41761 | ||
|
|
ce00bd8120 | ||
|
|
dc1cdfc7ba | ||
|
|
cf280ee6da | ||
|
|
28701ab435 | ||
|
|
f2d5e3254f | ||
|
|
9cff96ed62 | ||
|
|
08db1c658e | ||
|
|
ccbac347aa | ||
|
|
fa3d40c904 | ||
|
|
dc3571d0df | ||
|
|
153e9f4db7 | ||
|
|
2f61f132ec | ||
|
|
f6767df889 | ||
|
|
7992e91f44 | ||
|
|
4bb18f6b5d | ||
|
|
5eaeffca04 | ||
|
|
0eb2368712 | ||
|
|
bc2cfb9384 | ||
|
|
0ceb589935 | ||
|
|
b4c5112951 | ||
|
|
bac819b066 | ||
|
|
d3a2e94cc4 | ||
|
|
324397b3e2 | ||
|
|
5a0332bba5 | ||
|
|
6deb83a53d | ||
|
|
8c2a582cfc | ||
|
|
5c8c1e6b24 | ||
|
|
9b285f6fa8 | ||
|
|
686c07bb41 | ||
|
|
ed2ae8da66 | ||
|
|
954a7bb7c5 | ||
|
|
067c975791 | ||
|
|
f9c0e1dd60 | ||
|
|
7cfffd0b84 | ||
|
|
a6844019a1 | ||
|
|
474f095723 | ||
|
|
f69ed72c09 | ||
|
|
bd22861646 | ||
|
|
9d9de3df01 | ||
|
|
18c1acc173 | ||
|
|
9234943dba | ||
|
|
bd73b3b904 | ||
|
|
6dc30bb7dd | ||
|
|
206c2e76d0 | ||
|
|
8458bbb0ed | ||
|
|
2bdf25ca59 | ||
|
|
63222f4503 | ||
|
|
c8c70d27ff | ||
|
|
3cb55eb35c | ||
|
|
75ee015864 | ||
|
|
689cd09567 | ||
|
|
dbf527f2bf | ||
|
|
a1a90daf19 | ||
|
|
09325608f8 | ||
|
|
c244cc6ce9 | ||
|
|
19f4d3e34e | ||
|
|
edf3d6961c | ||
|
|
a14c97dbab | ||
|
|
ab6e520fd6 | ||
|
|
90b662ccb7 | ||
|
|
d691f79a14 | ||
|
|
afd0cd1619 | ||
|
|
483ddb4d14 | ||
|
|
419f55c298 | ||
|
|
165053e628 | ||
|
|
130c40609d | ||
|
|
15679a6a21 | ||
|
|
a52cf764d2 | ||
|
|
8452902703 | ||
|
|
bdf89dc927 | ||
|
|
29785ece48 | ||
|
|
7c441afd4a | ||
|
|
934b849ada | ||
|
|
95413d5b76 | ||
|
|
bd54e2d053 | ||
|
|
f4d39fcd65 | ||
|
|
d849583dd5 | ||
|
|
6aa4e13b54 | ||
|
|
52135e8288 | ||
|
|
dc673ecce5 | ||
|
|
8e7381809e | ||
|
|
494f01048e | ||
|
|
7b15329a02 | ||
|
|
07277985b1 | ||
|
|
00a1875665 | ||
|
|
49a075ca9d | ||
|
|
44eba4c6c3 | ||
|
|
82041f391f | ||
|
|
cf81ef4b4c | ||
|
|
730e8b856f | ||
|
|
0f1b19bddc | ||
|
|
0792e9f9c9 | ||
|
|
77803c18be | ||
|
|
51e31d8854 | ||
|
|
739f17474f | ||
|
|
28dd9fb5f2 | ||
|
|
041dfd3e6d | ||
|
|
44dc5fa280 | ||
|
|
fc16c6618b | ||
|
|
e6194564b8 | ||
|
|
c86d0c8772 | ||
|
|
efd797aa04 | ||
|
|
307d39be8b | ||
|
|
0c4698f02e | ||
|
|
16375abb51 | ||
|
|
8426b9bc2e | ||
|
|
2ee43d4c2c | ||
|
|
7be4760979 | ||
|
|
4fe0def9f0 | ||
|
|
3de61dc29e | ||
|
|
1dd5512265 | ||
|
|
e359ea072e | ||
|
|
059612185e | ||
|
|
9b37e66920 | ||
|
|
bdb9377061 | ||
|
|
f549db3ea9 | ||
|
|
3cf856f1c2 | ||
|
|
fc3178c0b3 | ||
|
|
24b204612b | ||
|
|
f8d8a745fe | ||
|
|
850d93ed62 | ||
|
|
1932b2d03a | ||
|
|
348002c3ab | ||
|
|
19cc5b0406 | ||
|
|
c15f621ad4 | ||
|
|
6e194185ed | ||
|
|
a01ccaec94 | ||
|
|
1eca02a0f4 | ||
|
|
039189ff4b | ||
|
|
44c2297c25 | ||
|
|
54e8a2fe00 | ||
|
|
186d082508 | ||
|
|
1bd6fd5a1d | ||
|
|
f3aebbfb31 | ||
|
|
eb125a84fe | ||
|
|
30294ef9bc | ||
|
|
218c427552 | ||
|
|
7edf85718b | ||
|
|
3b1b853b14 | ||
|
|
ffdde451d6 | ||
|
|
494451b316 | ||
|
|
eb414b7e70 | ||
|
|
ee5de27413 | ||
|
|
d119708538 | ||
|
|
a8cac85a11 | ||
|
|
fbb5dcf11c | ||
|
|
9b0c916bba | ||
|
|
aef1f89ca4 | ||
|
|
a8eb9bb9fb | ||
|
|
ef9601edf1 | ||
|
|
3ac5726dcc | ||
|
|
8ea63cdb56 | ||
|
|
4a9dc3a86f | ||
|
|
ccc4346a0d | ||
|
|
935453add8 | ||
|
|
95e9315c88 | ||
|
|
1f355ada4d | ||
|
|
24c806005f | ||
|
|
492c6e3883 | ||
|
|
df40116ed0 | ||
|
|
f9b724931f | ||
|
|
0889741864 | ||
|
|
e17f355fbc | ||
|
|
4c068f7570 | ||
|
|
5cd4139d01 | ||
|
|
70c65a17b3 | ||
|
|
daa720ab94 | ||
|
|
7206f7ce8f | ||
|
|
e0195f53f6 | ||
|
|
bc76c04f9e | ||
|
|
e4e7f26751 | ||
|
|
1da1e705a1 | ||
|
|
aed7a91bf0 | ||
|
|
c8d427d231 | ||
|
|
a627cc6abe | ||
|
|
5c9de70027 | ||
|
|
ed24b4dc18 | ||
|
|
899c195d27 | ||
|
|
08e6e0e15e | ||
|
|
88904dc892 | ||
|
|
4ab21f3705 | ||
|
|
ca0d61fc56 | ||
|
|
c5f29be85d | ||
|
|
95b2b42b90 | ||
|
|
18e71c847e | ||
|
|
79fa943e4e | ||
|
|
f59f44a85e | ||
|
|
ad2949f143 | ||
|
|
4472595881 | ||
|
|
d5328a3be6 | ||
|
|
23aa48eabf | ||
|
|
438ac8dfa4 | ||
|
|
7a6a021295 | ||
|
|
77659afa9e | ||
|
|
8e10f5eb66 | ||
|
|
abe3d44369 | ||
|
|
cfa21f1dc6 | ||
|
|
c38da9db0b | ||
|
|
6ba48e499c | ||
|
|
1dee812ce6 | ||
|
|
5c44fd8fea | ||
|
|
1bd6723ab9 | ||
|
|
bd691f01b1 | ||
|
|
73c8965637 | ||
|
|
dc7ff8317c | ||
|
|
624fd87ee7 | ||
|
|
cd1ce2a3d8 | ||
|
|
c6de72467e | ||
|
|
5d1c63375b | ||
|
|
8c982cd476 | ||
|
|
6ee9064676 | ||
|
|
2c75285148 | ||
|
|
ecb5c1455b | ||
|
|
17f495c444 | ||
|
|
36e5751221 | ||
|
|
dfd836527e | ||
|
|
8d5a663817 | ||
|
|
e64c490c8a | ||
|
|
dfd2cf9e20 | ||
|
|
531b66effe | ||
|
|
5e4ad10fe0 | ||
|
|
541b932b6d | ||
|
|
2bf3ff9f00 |
9
.coderabbit.yaml
Normal file
9
.coderabbit.yaml
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
reviews:
|
||||||
|
profile: "chill"
|
||||||
|
estimate_code_review_effort: false
|
||||||
|
auto_review:
|
||||||
|
enabled: true
|
||||||
|
high_level_summary: true
|
||||||
|
issue_enrichment:
|
||||||
|
auto_enrich:
|
||||||
|
enabled: false
|
||||||
@@ -29,13 +29,26 @@ ENV PATH="/opt/venv/bin:$PATH"
|
|||||||
|
|
||||||
# Install build dependencies
|
# Install build dependencies
|
||||||
COPY requirements.txt /tmp/requirements.txt
|
COPY requirements.txt /tmp/requirements.txt
|
||||||
RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git \
|
# hadolint ignore=DL3018
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
bash \
|
||||||
|
shadow \
|
||||||
|
python3 \
|
||||||
|
python3-dev \
|
||||||
|
py3-psutil \
|
||||||
|
gcc \
|
||||||
|
musl-dev \
|
||||||
|
libffi-dev \
|
||||||
|
openssl-dev \
|
||||||
|
git \
|
||||||
|
rust \
|
||||||
|
cargo \
|
||||||
&& python -m venv /opt/venv
|
&& python -m venv /opt/venv
|
||||||
|
|
||||||
# Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy
|
# Upgrade pip/wheel/setuptools and install Python packages
|
||||||
# into hardened stage without worrying about permissions and keeps image size small. Keeping the commands
|
# hadolint ignore=DL3013, DL3042
|
||||||
# together makes for a slightly smaller image size.
|
RUN python -m pip install --upgrade pip setuptools wheel && \
|
||||||
RUN pip install --no-cache-dir -r /tmp/requirements.txt && \
|
pip install --prefer-binary --no-cache-dir -r /tmp/requirements.txt && \
|
||||||
chmod -R u-rwx,g-rwx /opt
|
chmod -R u-rwx,g-rwx /opt
|
||||||
|
|
||||||
# second stage is the main runtime stage with just the minimum required to run the application
|
# second stage is the main runtime stage with just the minimum required to run the application
|
||||||
@@ -43,6 +56,12 @@ RUN pip install --no-cache-dir -r /tmp/requirements.txt && \
|
|||||||
FROM alpine:3.22 AS runner
|
FROM alpine:3.22 AS runner
|
||||||
|
|
||||||
ARG INSTALL_DIR=/app
|
ARG INSTALL_DIR=/app
|
||||||
|
# Runtime service account (override at build; container user can still be overridden at run time)
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
# Read-only lock owner (separate from service account to avoid UID/GID collisions)
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
# NetAlertX app directories
|
# NetAlertX app directories
|
||||||
ENV NETALERTX_APP=${INSTALL_DIR}
|
ENV NETALERTX_APP=${INSTALL_DIR}
|
||||||
@@ -116,14 +135,14 @@ ENV NETALERTX_USER=netalertx NETALERTX_GROUP=netalertx
|
|||||||
ENV LANG=C.UTF-8
|
ENV LANG=C.UTF-8
|
||||||
|
|
||||||
|
|
||||||
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \
|
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap fping \
|
||||||
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
||||||
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
|
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 py3-psutil envsubst \
|
||||||
nginx supercronic shadow && \
|
nginx supercronic shadow su-exec jq && \
|
||||||
rm -Rf /var/cache/apk/* && \
|
rm -Rf /var/cache/apk/* && \
|
||||||
rm -Rf /etc/nginx && \
|
rm -Rf /etc/nginx && \
|
||||||
addgroup -g 20211 ${NETALERTX_GROUP} && \
|
addgroup -g ${NETALERTX_GID} ${NETALERTX_GROUP} && \
|
||||||
adduser -u 20211 -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \
|
adduser -u ${NETALERTX_UID} -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \
|
||||||
apk del shadow
|
apk del shadow
|
||||||
|
|
||||||
|
|
||||||
@@ -142,22 +161,22 @@ RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FO
|
|||||||
# Copy version information into the image
|
# Copy version information into the image
|
||||||
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION
|
||||||
|
|
||||||
# Copy the virtualenv from the builder stage
|
# Copy the virtualenv from the builder stage (owned by readonly lock owner)
|
||||||
COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
COPY --from=builder --chown=${READONLY_UID}:${READONLY_GID} ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
||||||
|
|
||||||
|
|
||||||
# Initialize each service with the dockerfiles/init-*.sh scripts, once.
|
# Initialize each service with the dockerfiles/init-*.sh scripts, once.
|
||||||
# This is done after the copy of the venv to ensure the venv is in place
|
# This is done after the copy of the venv to ensure the venv is in place
|
||||||
# although it may be quicker to do it before the copy, it keeps the image
|
# although it may be quicker to do it before the copy, it keeps the image
|
||||||
# layers smaller to do it after.
|
# layers smaller to do it after.
|
||||||
RUN if [ -f '.VERSION' ]; then \
|
# hadolint ignore=DL3018
|
||||||
cp '.VERSION' "${NETALERTX_APP}/.VERSION"; \
|
RUN for vfile in .VERSION; do \
|
||||||
else \
|
if [ ! -f "${NETALERTX_APP}/${vfile}" ]; then \
|
||||||
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/.VERSION"; \
|
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/${vfile}"; \
|
||||||
fi && \
|
fi; \
|
||||||
chown 20212:20212 "${NETALERTX_APP}/.VERSION" && \
|
chown ${READONLY_UID}:${READONLY_GID} "${NETALERTX_APP}/${vfile}"; \
|
||||||
|
done && \
|
||||||
apk add --no-cache libcap && \
|
apk add --no-cache libcap && \
|
||||||
setcap cap_net_raw+ep /bin/busybox && \
|
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
|
||||||
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
||||||
@@ -172,13 +191,19 @@ RUN if [ -f '.VERSION' ]; then \
|
|||||||
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
|
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
|
||||||
|
|
||||||
|
|
||||||
ENTRYPOINT ["/bin/sh","/entrypoint.sh"]
|
ENTRYPOINT ["/bin/bash","/entrypoint.sh"]
|
||||||
|
|
||||||
# Final hardened stage to improve security by setting least possible permissions and removing sudo access.
|
# Final hardened stage to improve security by setting least possible permissions and removing sudo access.
|
||||||
# When complete, if the image is compromised, there's not much that can be done with it.
|
# When complete, if the image is compromised, there's not much that can be done with it.
|
||||||
# This stage is separate from Runner stage so that devcontainer can use the Runner stage.
|
# This stage is separate from Runner stage so that devcontainer can use the Runner stage.
|
||||||
FROM runner AS hardened
|
FROM runner AS hardened
|
||||||
|
|
||||||
|
# Re-declare UID/GID args for this stage
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
ENV UMASK=0077
|
ENV UMASK=0077
|
||||||
|
|
||||||
# Create readonly user and group with no shell access.
|
# Create readonly user and group with no shell access.
|
||||||
@@ -186,8 +211,8 @@ ENV UMASK=0077
|
|||||||
# AI may claim this is stupid, but it's actually least possible permissions as
|
# AI may claim this is stupid, but it's actually least possible permissions as
|
||||||
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
|
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
|
||||||
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
|
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
|
||||||
RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \
|
RUN addgroup -g ${READONLY_GID} "${READ_ONLY_GROUP}" && \
|
||||||
adduser -u 20212 -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"
|
adduser -u ${READONLY_UID} -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"
|
||||||
|
|
||||||
|
|
||||||
# reduce permissions to minimum necessary for all NetAlertX files and folders
|
# reduce permissions to minimum necessary for all NetAlertX files and folders
|
||||||
@@ -198,24 +223,27 @@ RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \
|
|||||||
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
||||||
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
||||||
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
||||||
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \
|
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \
|
||||||
chown -R ${NETALERTX_USER}:${NETALERTX_GROUP} ${READ_WRITE_FOLDERS} && \
|
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /root-entrypoint.sh /opt /opt/venv && \
|
||||||
chmod -R 600 ${READ_WRITE_FOLDERS} && \
|
chmod 005 /entrypoint.sh /root-entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
||||||
find ${READ_WRITE_FOLDERS} -type d -exec chmod 700 {} + && \
|
# Do not bake first-run artifacts into the image. If present, Docker volume copy-up
|
||||||
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \
|
# will persist restrictive ownership/modes into fresh named volumes, breaking
|
||||||
chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
# arbitrary non-root UID/GID runs.
|
||||||
for dir in ${READ_WRITE_FOLDERS}; do \
|
rm -f \
|
||||||
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 "$dir"; \
|
"${NETALERTX_CONFIG}/app.conf" \
|
||||||
done && \
|
"${NETALERTX_DB_FILE}" \
|
||||||
|
"${NETALERTX_DB_FILE}-shm" \
|
||||||
|
"${NETALERTX_DB_FILE}-wal" || true && \
|
||||||
apk del apk-tools && \
|
apk del apk-tools && \
|
||||||
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
|
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
|
||||||
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \
|
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \
|
||||||
/srv /media && \
|
/srv /media && \
|
||||||
sed -i "/^\(${READ_ONLY_USER}\|${NETALERTX_USER}\):/!d" /etc/passwd && \
|
# Preserve root and system identities so hardened entrypoint never needs to patch /etc/passwd or /etc/group at runtime.
|
||||||
sed -i "/^\(${READ_ONLY_GROUP}\|${NETALERTX_GROUP}\):/!d" /etc/group && \
|
|
||||||
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
||||||
|
USER "0"
|
||||||
|
|
||||||
USER netalertx
|
# Call root-entrypoint.sh which drops priviliges to run entrypoint.sh.
|
||||||
|
ENTRYPOINT ["/root-entrypoint.sh"]
|
||||||
|
|
||||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
||||||
CMD /services/healthcheck.sh
|
CMD /services/healthcheck.sh
|
||||||
@@ -247,9 +275,13 @@ COPY .devcontainer/resources/devcontainer-overlay/ /
|
|||||||
USER root
|
USER root
|
||||||
# Install common tools, create user, and set up sudo
|
# Install common tools, create user, and set up sudo
|
||||||
|
|
||||||
|
# Ensure entrypoint scripts stay executable in the devcontainer (avoids 126 errors)
|
||||||
|
RUN chmod +x /entrypoint.sh /root-entrypoint.sh /entrypoint.d/*.sh && \
|
||||||
|
chmod +x /entrypoint.d/35-apply-conf-override.sh
|
||||||
|
|
||||||
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
|
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
|
||||||
pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
|
pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
|
||||||
docker-cli-compose shellcheck
|
docker-cli-compose shellcheck py3-psutil chromium chromium-chromedriver
|
||||||
|
|
||||||
# Install hadolint (Dockerfile linter)
|
# Install hadolint (Dockerfile linter)
|
||||||
RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
|
RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
|
||||||
|
|||||||
@@ -12,7 +12,8 @@
|
|||||||
"capAdd": [
|
"capAdd": [
|
||||||
"SYS_ADMIN", // For mounting ramdisks
|
"SYS_ADMIN", // For mounting ramdisks
|
||||||
"NET_ADMIN", // For network interface configuration
|
"NET_ADMIN", // For network interface configuration
|
||||||
"NET_RAW" // For raw packet manipulation
|
"NET_RAW", // For raw packet manipulation
|
||||||
|
"NET_BIND_SERVICE" // For privileged port binding (e.g., UDP 137)
|
||||||
],
|
],
|
||||||
"runArgs": [
|
"runArgs": [
|
||||||
"--security-opt",
|
"--security-opt",
|
||||||
@@ -46,12 +47,12 @@
|
|||||||
},
|
},
|
||||||
|
|
||||||
"postCreateCommand": {
|
"postCreateCommand": {
|
||||||
"Install Pip Requirements": "/opt/venv/bin/pip3 install pytest docker debugpy",
|
"Install Pip Requirements": "/opt/venv/bin/pip3 install pytest docker debugpy selenium",
|
||||||
"Workspace Instructions": "printf '\n\n<> DevContainer Ready!\n\n📁 To access /tmp folders in the workspace:\n File → Open Workspace from File → NetAlertX.code-workspace\n\n📖 See .devcontainer/WORKSPACE.md for details\n\n'"
|
"Workspace Instructions": "printf '\n\n<> DevContainer Ready! Starting Services...\n\n📁 To access /tmp folders in the workspace:\n File → Open Workspace from File → NetAlertX.code-workspace\n\n📖 See .devcontainer/WORKSPACE.md for details\n\n'"
|
||||||
},
|
},
|
||||||
"postStartCommand": {
|
"postStartCommand": {
|
||||||
"Start Environment":"${containerWorkspaceFolder}/.devcontainer/scripts/setup.sh",
|
"Build test-container":"echo To speed up tests, building test container in background... && setsid docker buildx build -t netalertx-test . > /tmp/build.log 2>&1 && echo '🧪 Unit Test Docker image built: netalertx-test' &",
|
||||||
"Build test-container":"echo building netalertx-test container in background. check /tmp/build.log for progress. && setsid docker buildx build -t netalertx-test . > /tmp/build.log 2>&1 &"
|
"Start Environment":"${containerWorkspaceFolder}/.devcontainer/scripts/setup.sh"
|
||||||
},
|
},
|
||||||
"customizations": {
|
"customizations": {
|
||||||
"vscode": {
|
"vscode": {
|
||||||
@@ -62,7 +63,6 @@
|
|||||||
"bmewburn.vscode-intelephense-client",
|
"bmewburn.vscode-intelephense-client",
|
||||||
"xdebug.php-debug",
|
"xdebug.php-debug",
|
||||||
"ms-python.vscode-pylance",
|
"ms-python.vscode-pylance",
|
||||||
"pamaron.pytest-runner",
|
|
||||||
"coderabbit.coderabbit-vscode",
|
"coderabbit.coderabbit-vscode",
|
||||||
"ms-python.black-formatter",
|
"ms-python.black-formatter",
|
||||||
"jeff-hykin.better-dockerfile-syntax",
|
"jeff-hykin.better-dockerfile-syntax",
|
||||||
|
|||||||
@@ -22,9 +22,13 @@ COPY .devcontainer/resources/devcontainer-overlay/ /
|
|||||||
USER root
|
USER root
|
||||||
# Install common tools, create user, and set up sudo
|
# Install common tools, create user, and set up sudo
|
||||||
|
|
||||||
|
# Ensure entrypoint scripts stay executable in the devcontainer (avoids 126 errors)
|
||||||
|
RUN chmod +x /entrypoint.sh /root-entrypoint.sh /entrypoint.d/*.sh && \
|
||||||
|
chmod +x /entrypoint.d/35-apply-conf-override.sh
|
||||||
|
|
||||||
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
|
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
|
||||||
pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
|
pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
|
||||||
docker-cli-compose shellcheck
|
docker-cli-compose shellcheck py3-psutil chromium chromium-chromedriver
|
||||||
|
|
||||||
# Install hadolint (Dockerfile linter)
|
# Install hadolint (Dockerfile linter)
|
||||||
RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
|
RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ extension_dir="/services/php/modules"
|
|||||||
|
|
||||||
[xdebug]
|
[xdebug]
|
||||||
xdebug.mode=develop,debug
|
xdebug.mode=develop,debug
|
||||||
xdebug.log=/app/log/xdebug.log
|
xdebug.log=/tmp/log/xdebug.log
|
||||||
xdebug.log_level=7
|
xdebug.log_level=7
|
||||||
xdebug.client_host=127.0.0.1
|
xdebug.client_host=127.0.0.1
|
||||||
xdebug.client_port=9003
|
xdebug.client_port=9003
|
||||||
|
|||||||
180
.devcontainer/scripts/coderabbit-pr-parser.py
Normal file
180
.devcontainer/scripts/coderabbit-pr-parser.py
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import textwrap
|
||||||
|
|
||||||
|
# Default Configuration
|
||||||
|
REPO = "jokob-sk/NetAlertX"
|
||||||
|
DEFAULT_PR_NUM = "1405"
|
||||||
|
|
||||||
|
|
||||||
|
def get_pr_threads(pr_num):
|
||||||
|
"""Fetches unresolved review threads using GitHub GraphQL API."""
|
||||||
|
# Validate PR number early to avoid passing invalid values to subprocess
|
||||||
|
try:
|
||||||
|
pr_int = int(pr_num)
|
||||||
|
if pr_int <= 0:
|
||||||
|
raise ValueError
|
||||||
|
except Exception:
|
||||||
|
print(f"Error: Invalid PR number: {pr_num}. Must be a positive integer.")
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
query = """
|
||||||
|
query($owner: String!, $name: String!, $number: Int!) {
|
||||||
|
repository(owner: $owner, name: $name) {
|
||||||
|
pullRequest(number: $number) {
|
||||||
|
reviewThreads(last: 100) {
|
||||||
|
nodes {
|
||||||
|
isResolved
|
||||||
|
isOutdated
|
||||||
|
comments(first: 1) {
|
||||||
|
nodes {
|
||||||
|
body
|
||||||
|
author { login }
|
||||||
|
path
|
||||||
|
line
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
owner, name = REPO.split("/")
|
||||||
|
cmd = ["gh", "api", "graphql", "-F", f"owner={owner}", "-F", f"name={name}", "-F", f"number={pr_int}", "-f", f"query={query}"]
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=60)
|
||||||
|
return json.loads(result.stdout)
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
print(f"Error: Command timed out after 60 seconds: {' '.join(cmd)}")
|
||||||
|
sys.exit(1)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
print(f"Error fetching PR threads: {e.stderr}")
|
||||||
|
sys.exit(1)
|
||||||
|
except FileNotFoundError:
|
||||||
|
print("Error: 'gh' CLI not found. Please install GitHub CLI.")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def clean_block(text):
|
||||||
|
"""Cleans up markdown/HTML noise from text."""
|
||||||
|
# Remove HTML comments
|
||||||
|
text = re.sub(r"<!--.*?-->", "", text, flags=re.DOTALL)
|
||||||
|
# Remove metadata lines
|
||||||
|
text = re.sub(r"^\s*Status:\s*\w+", "", text, flags=re.MULTILINE)
|
||||||
|
# Remove code block fences
|
||||||
|
text = text.replace("```diff", "").replace("```", "")
|
||||||
|
# Flatten whitespace
|
||||||
|
lines = [line.strip() for line in text.split("\n") if line.strip()]
|
||||||
|
return " ".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def extract_ai_tasks(text):
|
||||||
|
"""Extracts tasks specifically from the 'Fix all issues with AI agents' block."""
|
||||||
|
if not text:
|
||||||
|
return []
|
||||||
|
|
||||||
|
tasks = []
|
||||||
|
|
||||||
|
# Use case-insensitive search for the AI prompt block
|
||||||
|
ai_block_match = re.search(r"(?i)Prompt for AI Agents.*?\n```(.*?)```", text, re.DOTALL)
|
||||||
|
|
||||||
|
if ai_block_match:
|
||||||
|
ai_text = ai_block_match.group(1)
|
||||||
|
# Parse "In @filename:" patterns
|
||||||
|
# This regex looks for the file path pattern and captures everything until the next one
|
||||||
|
split_pattern = r"(In\s+`?@[\w\-\./]+`?:)"
|
||||||
|
parts = re.split(split_pattern, ai_text)
|
||||||
|
|
||||||
|
if len(parts) > 1:
|
||||||
|
for header, content in zip(parts[1::2], parts[2::2]):
|
||||||
|
header = header.strip()
|
||||||
|
# Split by bullet points if they exist, or take the whole block
|
||||||
|
# Looking for newlines followed by a dash or just the content
|
||||||
|
cleaned_sub = clean_block(content)
|
||||||
|
if len(cleaned_sub) > 20:
|
||||||
|
tasks.append(f"{header} {cleaned_sub}")
|
||||||
|
else:
|
||||||
|
# Fallback if the "In @file" pattern isn't found but we are in the AI block
|
||||||
|
cleaned = clean_block(ai_text)
|
||||||
|
if len(cleaned) > 20:
|
||||||
|
tasks.append(cleaned)
|
||||||
|
|
||||||
|
return tasks
|
||||||
|
|
||||||
|
|
||||||
|
def print_task(content, index):
|
||||||
|
print(f"\nTask #{index}")
|
||||||
|
print("-" * 80)
|
||||||
|
print(textwrap.fill(content, width=80))
|
||||||
|
print("-" * 80)
|
||||||
|
print("1. Plan of action(very brief):")
|
||||||
|
print("2. Actions taken (very brief):")
|
||||||
|
print("3. quality checks")
|
||||||
|
print("- [ ] Issue fully addressed")
|
||||||
|
print("- [ ] Unit tests pass")
|
||||||
|
print("- [ ] Complete")
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
pr_num = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_PR_NUM
|
||||||
|
data = get_pr_threads(pr_num)
|
||||||
|
|
||||||
|
threads = data.get("data", {}).get("repository", {}).get("pullRequest", {}).get("reviewThreads", {}).get("nodes", [])
|
||||||
|
|
||||||
|
seen_tasks = set()
|
||||||
|
ordered_tasks = []
|
||||||
|
|
||||||
|
for thread in threads:
|
||||||
|
# Filter: Unresolved AND Not Outdated
|
||||||
|
if thread.get("isResolved") or thread.get("isOutdated"):
|
||||||
|
continue
|
||||||
|
|
||||||
|
comments = thread.get("comments", {}).get("nodes", [])
|
||||||
|
if not comments:
|
||||||
|
continue
|
||||||
|
|
||||||
|
first_comment = comments[0]
|
||||||
|
author = first_comment.get("author", {}).get("login", "").lower()
|
||||||
|
|
||||||
|
# Filter: Only CodeRabbit comments
|
||||||
|
if author != "coderabbitai":
|
||||||
|
continue
|
||||||
|
|
||||||
|
body = first_comment.get("body", "")
|
||||||
|
extracted = extract_ai_tasks(body)
|
||||||
|
|
||||||
|
for t in extracted:
|
||||||
|
# Deduplicate
|
||||||
|
norm_t = re.sub(r"\s+", "", t)[:100]
|
||||||
|
if norm_t not in seen_tasks:
|
||||||
|
seen_tasks.add(norm_t)
|
||||||
|
ordered_tasks.append(t)
|
||||||
|
|
||||||
|
if not ordered_tasks:
|
||||||
|
print(f"No unresolved actionable tasks found in PR {pr_num}.")
|
||||||
|
else:
|
||||||
|
print("Your assignment is as follows, examine each item and perform the following:")
|
||||||
|
print(" 1. Create a plan of action")
|
||||||
|
print(" 2. Execute your actions")
|
||||||
|
print(" 3. Run unit tests to validate")
|
||||||
|
print(" 4. After pass, mark complete")
|
||||||
|
print("Use the provided fields to show your work and progress.\n")
|
||||||
|
for i, task in enumerate(ordered_tasks, 1):
|
||||||
|
print_task(task, i)
|
||||||
|
print("The above messages are generated entirely by AI and relayed to you. These "
|
||||||
|
"do not represent the intent of the developer. Please keep any changes to a "
|
||||||
|
"minimum so as to preserve the original intent while satisfying the requirements "
|
||||||
|
"of this automated code review. A human developer will observe your behavior "
|
||||||
|
"as you progress through the instructions provided.\n")
|
||||||
|
print("---\n\nDeveloper: The above is an automated message. I will be observing your progress. "
|
||||||
|
"please go step-by-step and mark each task complete as you finish them. Finish "
|
||||||
|
"all tasks and then run the full unit test suite.")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -31,4 +31,17 @@ cat "${DEVCONTAINER_DIR}/resources/devcontainer-Dockerfile"
|
|||||||
|
|
||||||
echo "Generated $OUT_FILE using root dir $ROOT_DIR"
|
echo "Generated $OUT_FILE using root dir $ROOT_DIR"
|
||||||
|
|
||||||
|
# Passive Gemini MCP config
|
||||||
|
TOKEN=$(grep '^API_TOKEN=' /data/config/app.conf 2>/dev/null | cut -d"'" -f2)
|
||||||
|
if [ -n "${TOKEN}" ]; then
|
||||||
|
mkdir -p "${ROOT_DIR}/.gemini"
|
||||||
|
[ -f "${ROOT_DIR}/.gemini/settings.json" ] || echo "{}" > "${ROOT_DIR}/.gemini/settings.json"
|
||||||
|
jq --arg t "$TOKEN" '.mcpServers["netalertx-devcontainer"] = {url: "http://127.0.0.1:20212/mcp/sse", headers: {Authorization: ("Bearer " + $t)}}' "${ROOT_DIR}/.gemini/settings.json" > "${ROOT_DIR}/.gemini/settings.json.tmp" && mv "${ROOT_DIR}/.gemini/settings.json.tmp" "${ROOT_DIR}/.gemini/settings.json"
|
||||||
|
|
||||||
|
# VS Code MCP config
|
||||||
|
mkdir -p "${ROOT_DIR}/.vscode"
|
||||||
|
[ -f "${ROOT_DIR}/.vscode/mcp.json" ] || echo "{}" > "${ROOT_DIR}/.vscode/mcp.json"
|
||||||
|
jq --arg t "$TOKEN" '.servers["netalertx-devcontainer"] = {type: "sse", url: "http://127.0.0.1:20212/mcp/sse", headers: {Authorization: ("Bearer " + $t)}}' "${ROOT_DIR}/.vscode/mcp.json" > "${ROOT_DIR}/.vscode/mcp.json.tmp" && mv "${ROOT_DIR}/.vscode/mcp.json.tmp" "${ROOT_DIR}/.vscode/mcp.json"
|
||||||
|
fi
|
||||||
|
|
||||||
echo "Done."
|
echo "Done."
|
||||||
78
.devcontainer/scripts/load-devices.sh
Executable file
78
.devcontainer/scripts/load-devices.sh
Executable file
@@ -0,0 +1,78 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
|
||||||
|
if [ -n "${CSV_PATH:-}" ]; then
|
||||||
|
: # user provided CSV_PATH
|
||||||
|
else
|
||||||
|
# Portable mktemp fallback: try GNU coreutils first, then busybox-style
|
||||||
|
if mktemp --version >/dev/null 2>&1; then
|
||||||
|
CSV_PATH="$(mktemp --tmpdir netalertx-devices-XXXXXX.csv 2>/dev/null || mktemp /tmp/netalertx-devices-XXXXXX.csv)"
|
||||||
|
else
|
||||||
|
CSV_PATH="$(mktemp -t netalertx-devices.XXXXXX 2>/dev/null || mktemp /tmp/netalertx-devices-XXXXXX.csv)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
DEVICE_COUNT="${DEVICE_COUNT:-255}"
|
||||||
|
SEED="${SEED:-20211}"
|
||||||
|
NETWORK_CIDR="${NETWORK_CIDR:-192.168.50.0/22}"
|
||||||
|
DB_DIR="${NETALERTX_DB:-/data/db}"
|
||||||
|
DB_FILE="${DB_DIR%/}/app.db"
|
||||||
|
|
||||||
|
# Ensure we are inside the devcontainer
|
||||||
|
"${SCRIPT_DIR}/isDevContainer.sh" >/dev/null
|
||||||
|
|
||||||
|
if [ ! -f "${DB_FILE}" ]; then
|
||||||
|
echo "[load-devices] Database not found at ${DB_FILE}. Is the devcontainer initialized?" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! command -v sqlite3 >/dev/null 2>&1; then
|
||||||
|
echo "[load-devices] sqlite3 is required but not installed." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if ! command -v python3 >/dev/null 2>&1; then
|
||||||
|
echo "[load-devices] python3 is required but not installed." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if ! command -v curl >/dev/null 2>&1; then
|
||||||
|
echo "[load-devices] curl is required but not installed." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Generate synthetic device inventory CSV
|
||||||
|
python3 "${REPO_ROOT}/scripts/generate-device-inventory.py" \
|
||||||
|
--output "${CSV_PATH}" \
|
||||||
|
--devices "${DEVICE_COUNT}" \
|
||||||
|
--seed "${SEED}" \
|
||||||
|
--network "${NETWORK_CIDR}" >/dev/null
|
||||||
|
|
||||||
|
echo "[load-devices] CSV generated at ${CSV_PATH} (devices=${DEVICE_COUNT}, seed=${SEED})"
|
||||||
|
|
||||||
|
API_TOKEN="$(sqlite3 "${DB_FILE}" "SELECT setValue FROM Settings WHERE setKey='API_TOKEN';")"
|
||||||
|
GRAPHQL_PORT="$(sqlite3 "${DB_FILE}" "SELECT setValue FROM Settings WHERE setKey='GRAPHQL_PORT';")"
|
||||||
|
|
||||||
|
if [ -z "${API_TOKEN}" ] || [ -z "${GRAPHQL_PORT}" ]; then
|
||||||
|
echo "[load-devices] Failed to read API_TOKEN or GRAPHQL_PORT from ${DB_FILE}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
IMPORT_URL="http://localhost:${GRAPHQL_PORT}/devices/import"
|
||||||
|
|
||||||
|
HTTP_CODE=$(curl -sS -o /tmp/load-devices-response.json -w "%{http_code}" \
|
||||||
|
-X POST "${IMPORT_URL}" \
|
||||||
|
-H "Authorization: Bearer ${API_TOKEN}" \
|
||||||
|
-F "file=@${CSV_PATH}")
|
||||||
|
|
||||||
|
if [ "${HTTP_CODE}" != "200" ]; then
|
||||||
|
echo "[load-devices] Import failed with HTTP ${HTTP_CODE}. Response:" >&2
|
||||||
|
cat /tmp/load-devices-response.json >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Fetch totals for a quick sanity check
|
||||||
|
TOTALS=$(curl -sS -H "Authorization: Bearer ${API_TOKEN}" "http://localhost:${GRAPHQL_PORT}/devices/totals" || true)
|
||||||
|
|
||||||
|
echo "[load-devices] Import succeeded (HTTP ${HTTP_CODE})."
|
||||||
|
echo "[load-devices] Devices totals: ${TOTALS}"
|
||||||
|
echo "[load-devices] Done. CSV kept at ${CSV_PATH}"
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
# shellcheck shell=sh
|
|
||||||
# Simple helper to run pytest inside the devcontainer with correct paths
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
# Ensure we run from the workspace root
|
|
||||||
cd /workspaces/NetAlertX
|
|
||||||
|
|
||||||
# Make sure PYTHONPATH includes server and workspace
|
|
||||||
export PYTHONPATH="/workspaces/NetAlertX:/workspaces/NetAlertX/server:/app:/app/server:${PYTHONPATH:-}"
|
|
||||||
|
|
||||||
# Default to running the full test suite under /workspaces/NetAlertX/test
|
|
||||||
pytest -q --maxfail=1 --disable-warnings test "$@"
|
|
||||||
@@ -32,7 +32,6 @@ LOG_FILES=(
|
|||||||
LOG_DB_IS_LOCKED
|
LOG_DB_IS_LOCKED
|
||||||
LOG_NGINX_ERROR
|
LOG_NGINX_ERROR
|
||||||
)
|
)
|
||||||
|
|
||||||
sudo chmod 666 /var/run/docker.sock 2>/dev/null || true
|
sudo chmod 666 /var/run/docker.sock 2>/dev/null || true
|
||||||
sudo chown "$(id -u)":"$(id -g)" /workspaces
|
sudo chown "$(id -u)":"$(id -g)" /workspaces
|
||||||
sudo chmod 755 /workspaces
|
sudo chmod 755 /workspaces
|
||||||
@@ -47,11 +46,17 @@ sudo mount -t tmpfs -o size=50m,mode=0777 tmpfs /tmp/nginx 2>/dev/null || true
|
|||||||
|
|
||||||
sudo chmod 777 /tmp/log /tmp/api /tmp/run /tmp/nginx
|
sudo chmod 777 /tmp/log /tmp/api /tmp/run /tmp/nginx
|
||||||
|
|
||||||
|
# Create critical subdirectories immediately after tmpfs mount
|
||||||
|
sudo install -d -m 777 /tmp/run/tmp
|
||||||
|
sudo install -d -m 777 /tmp/log/plugins
|
||||||
|
|
||||||
|
|
||||||
sudo rm -rf /entrypoint.d
|
sudo rm -rf /entrypoint.d
|
||||||
sudo ln -s "${SOURCE_DIR}/install/production-filesystem/entrypoint.d" /entrypoint.d
|
sudo ln -s "${SOURCE_DIR}/install/production-filesystem/entrypoint.d" /entrypoint.d
|
||||||
|
|
||||||
|
sudo rm -rf /services
|
||||||
|
sudo ln -s "${SOURCE_DIR}/install/production-filesystem/services" /services
|
||||||
|
|
||||||
sudo rm -rf "${NETALERTX_APP}"
|
sudo rm -rf "${NETALERTX_APP}"
|
||||||
sudo ln -s "${SOURCE_DIR}/" "${NETALERTX_APP}"
|
sudo ln -s "${SOURCE_DIR}/" "${NETALERTX_APP}"
|
||||||
|
|
||||||
@@ -85,10 +90,6 @@ sudo chmod 777 "${LOG_DB_IS_LOCKED}"
|
|||||||
|
|
||||||
sudo pkill -f python3 2>/dev/null || true
|
sudo pkill -f python3 2>/dev/null || true
|
||||||
|
|
||||||
sudo chmod 777 "${PY_SITE_PACKAGES}" "${NETALERTX_DATA}" "${NETALERTX_DATA}"/* 2>/dev/null || true
|
|
||||||
|
|
||||||
sudo chmod 005 "${PY_SITE_PACKAGES}" 2>/dev/null || true
|
|
||||||
|
|
||||||
sudo chown -R "${NETALERTX_USER}:${NETALERTX_GROUP}" "${NETALERTX_APP}"
|
sudo chown -R "${NETALERTX_USER}:${NETALERTX_GROUP}" "${NETALERTX_APP}"
|
||||||
date +%s | sudo tee "${NETALERTX_FRONT}/buildtimestamp.txt" >/dev/null
|
date +%s | sudo tee "${NETALERTX_FRONT}/buildtimestamp.txt" >/dev/null
|
||||||
|
|
||||||
|
|||||||
1
.env
1
.env
@@ -6,7 +6,6 @@ LOGS_LOCATION=/path/to/docker_logs
|
|||||||
|
|
||||||
#ENVIRONMENT VARIABLES
|
#ENVIRONMENT VARIABLES
|
||||||
|
|
||||||
TZ=Europe/Paris
|
|
||||||
PORT=20211
|
PORT=20211
|
||||||
|
|
||||||
#DEVELOPMENT VARIABLES
|
#DEVELOPMENT VARIABLES
|
||||||
|
|||||||
31
.gemini/skills/devcontainer-management/SKILL.md
Normal file
31
.gemini/skills/devcontainer-management/SKILL.md
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
---
|
||||||
|
name: devcontainer-management
|
||||||
|
description: Guide for identifying, managing, and running commands within the NetAlertX development container. Use this when asked to run commands, testing, setup scripts, or troubleshoot container issues.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Devcontainer Management
|
||||||
|
|
||||||
|
When starting a session or performing tasks requiring the runtime environment, you must identify and use the active development container.
|
||||||
|
|
||||||
|
## Finding the Container
|
||||||
|
|
||||||
|
Run `docker ps` to list running containers. Look for an image name containing `vsc-netalertx` or similar.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker ps --format "table {{.ID}}\t{{.Image}}\t{{.Status}}\t{{.Names}}" | grep netalertx
|
||||||
|
```
|
||||||
|
|
||||||
|
- **If no container is found:** Inform the user. You cannot run integration tests or backend logic without it.
|
||||||
|
- **If multiple containers are found:** Ask the user to clarify which one to use (e.g., provide the Container ID).
|
||||||
|
|
||||||
|
## Running Commands in the Container
|
||||||
|
|
||||||
|
Prefix commands with `docker exec <CONTAINER_ID>` to run them inside the environment. Use the scripts in `/services/` to control backend and other processes.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec <CONTAINER_ID> bash /workspaces/NetAlertX/.devcontainer/scripts/setup.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
*Note: This script wipes `/tmp` ramdisks, resets DBs, and restarts services (python server, cron,php-fpm, nginx).*
|
||||||
|
|
||||||
|
```
|
||||||
52
.gemini/skills/mcp-activation/SKILL.md
Normal file
52
.gemini/skills/mcp-activation/SKILL.md
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
---
|
||||||
|
name: mcp-activation
|
||||||
|
description: Enables live interaction with the NetAlertX runtime. This skill configures the Model Context Protocol (MCP) connection, granting full API access for debugging, troubleshooting, and real-time operations including database queries, network scans, and device management.
|
||||||
|
---
|
||||||
|
|
||||||
|
# MCP Activation Skill
|
||||||
|
|
||||||
|
This skill configures the NetAlertX development environment to expose the Model Context Protocol (MCP) server to AI agents.
|
||||||
|
|
||||||
|
## Why use this?
|
||||||
|
|
||||||
|
By default, agents only have access to the static codebase (files). To perform dynamic actions—such as:
|
||||||
|
- **Querying the database** (e.g., getting device lists, events)
|
||||||
|
- **Triggering actions** (e.g., network scans, Wake-on-LAN)
|
||||||
|
- **Validating runtime state** (e.g., checking if a fix actually works)
|
||||||
|
|
||||||
|
...you need access to the **MCP Server** running inside the container. This skill sets up the necessary authentication tokens and connection configs to bridge your agent to that live server.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
1. **Devcontainer:** You must be connected to the NetAlertX devcontainer.
|
||||||
|
2. **Server Running:** The backend server must be running (to generate `app.conf` with the API token).
|
||||||
|
|
||||||
|
## Activation Steps
|
||||||
|
|
||||||
|
1. **Activate Devcontainer Skill:**
|
||||||
|
If you are not already inside the container, activate the management skill:
|
||||||
|
```text
|
||||||
|
activate_skill("devcontainer-management")
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Generate Configurations:**
|
||||||
|
Run the configuration generation script *inside* the container. This script extracts the API Token and creates the necessary settings files (`.gemini/settings.json` and `.vscode/mcp.json`).
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run inside the container
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/generate-configs.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Apply Changes:**
|
||||||
|
|
||||||
|
* **For Gemini CLI:**
|
||||||
|
The agent session must be **restarted** to load the new `.gemini/settings.json`.
|
||||||
|
> "I have generated the MCP configuration. Please **restart this session** to activate the `netalertx-devcontainer` tools."
|
||||||
|
|
||||||
|
* **For VS Code (GitHub Copilot / Cline):**
|
||||||
|
The VS Code window must be **reloaded** to pick up the new `.vscode/mcp.json`.
|
||||||
|
> "I have generated the MCP configuration. Please run **'Developer: Reload Window'** in VS Code to activate the MCP server."
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
After restarting, you should see new tools available (e.g., `netalertx-devcontainer__get_devices`).
|
||||||
15
.gemini/skills/project-navigation/SKILL.md
Normal file
15
.gemini/skills/project-navigation/SKILL.md
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
---
|
||||||
|
name: project-navigation
|
||||||
|
description: Reference for the NetAlertX codebase structure, key file paths, and configuration locations. Use this when exploring the codebase or looking for specific components like the backend entry point, frontend files, or database location.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Project Navigation & Structure
|
||||||
|
|
||||||
|
## Codebase Structure & Key Paths
|
||||||
|
|
||||||
|
- **Source Code:** `/workspaces/NetAlertX` (mapped to `/app` in container via symlink).
|
||||||
|
- **Backend Entry:** `server/api_server/api_server_start.py` (Flask) and `server/__main__.py`.
|
||||||
|
- **Frontend:** `front/` (PHP/JS).
|
||||||
|
- **Plugins:** `front/plugins/`.
|
||||||
|
- **Config:** `/data/config/app.conf` (runtime) or `back/app.conf` (default).
|
||||||
|
- **Database:** `/data/db/app.db` (SQLite).
|
||||||
78
.gemini/skills/testing-workflow/SKILL.md
Normal file
78
.gemini/skills/testing-workflow/SKILL.md
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
---
|
||||||
|
name: testing-workflow
|
||||||
|
description: Read before running tests. Detailed instructions for single, standard unit tests (fast), full suites (slow), handling authentication, and obtaining the API Token. Tests must be run when a job is complete.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Testing Workflow
|
||||||
|
After code is developed, tests must be run to ensure the integrity of the final result.
|
||||||
|
|
||||||
|
**Crucial:** Tests MUST be run inside the container to access the correct runtime environment (DB, Config, Dependencies).
|
||||||
|
|
||||||
|
## 0. Pre-requisites: Environment Check
|
||||||
|
|
||||||
|
Before running any tests, verify you are inside the development container:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ls -d /workspaces/NetAlertX
|
||||||
|
```
|
||||||
|
|
||||||
|
**IF** this directory does not exist, you are likely on the host machine. You **MUST** immediately activate the `devcontainer-management` skill to enter the container or run commands inside it.
|
||||||
|
|
||||||
|
```text
|
||||||
|
activate_skill("devcontainer-management")
|
||||||
|
```
|
||||||
|
|
||||||
|
## 1. Full Test Suite (MANDATORY DEFAULT)
|
||||||
|
|
||||||
|
Unless the user **explicitly** requests "fast" or "quick" tests, you **MUST** run the full test suite. **Do not** optimize for time. Comprehensive coverage is the priority over speed.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX; pytest test/
|
||||||
|
```
|
||||||
|
|
||||||
|
## 2. Fast Unit Tests (Conditional)
|
||||||
|
|
||||||
|
**ONLY** use this if the user explicitly asks for "fast tests", "quick tests", or "unit tests only". This **excludes** slow tests marked with `docker` or `feature_complete`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX; pytest test/ -m 'not docker and not feature_complete'
|
||||||
|
```
|
||||||
|
|
||||||
|
## 3. Running Specific Tests
|
||||||
|
|
||||||
|
To run a specific file or folder:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX; pytest test/<path_to_test>
|
||||||
|
```
|
||||||
|
|
||||||
|
*Example:*
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX; pytest test/api_endpoints/test_mcp_extended_endpoints.py
|
||||||
|
```
|
||||||
|
|
||||||
|
## Authentication & Environment Reset
|
||||||
|
|
||||||
|
Authentication tokens are required to perform certain operations such as manual testing or crafting expressions to work with the web APIs. After making code changes, you MUST reset the environment to ensure the new code is running and verify you have the latest `API_TOKEN`.
|
||||||
|
|
||||||
|
1. **Reset Environment:** Run the setup script inside the container.
|
||||||
|
```bash
|
||||||
|
bash /workspaces/NetAlertX/.devcontainer/scripts/setup.sh
|
||||||
|
```
|
||||||
|
2. **Wait for Stabilization:** Wait at least 5 seconds for services (nginx, python server, etc.) to start.
|
||||||
|
```bash
|
||||||
|
sleep 5
|
||||||
|
```
|
||||||
|
3. **Obtain Token:** Retrieve the current token from the container.
|
||||||
|
```bash
|
||||||
|
python3 -c "from helper import get_setting_value; print(get_setting_value('API_TOKEN'))"
|
||||||
|
```
|
||||||
|
|
||||||
|
The retrieved token MUST be used in all subsequent API or test calls requiring authentication.
|
||||||
|
|
||||||
|
### Troubleshooting
|
||||||
|
|
||||||
|
If tests fail with 403 Forbidden or empty tokens:
|
||||||
|
1. Verify server is running and use the setup script (`/workspaces/NetAlertX/.devcontainer/scripts/setup.sh`) if required.
|
||||||
|
2. Verify `app.conf` inside the container: `cat /data/config/app.conf`
|
||||||
|
3. Verify Python can read it: `python3 -c "from helper import get_setting_value; print(get_setting_value('API_TOKEN'))"`
|
||||||
1
.github/FUNDING.yml
vendored
1
.github/FUNDING.yml
vendored
@@ -1,3 +1,2 @@
|
|||||||
github: jokob-sk
|
github: jokob-sk
|
||||||
patreon: netalertx
|
|
||||||
buy_me_a_coffee: jokobsk
|
buy_me_a_coffee: jokobsk
|
||||||
|
|||||||
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
blank_issues_enabled: false
|
||||||
|
contact_links:
|
||||||
|
- name: 💬 Discussions
|
||||||
|
url: https://github.com/netalertx/NetAlertX/discussions
|
||||||
|
about: Ask questions or start discussions here.
|
||||||
|
- name: 🗯 Discord
|
||||||
|
url: https://discord.com/invite/NczTUTWyRr
|
||||||
|
about: Ask the community for help.
|
||||||
@@ -1,7 +1,11 @@
|
|||||||
name: Documentation Feedback 📝
|
name: ✍ Documentation Feedback
|
||||||
description: Suggest improvements, clarify inconsistencies, or report issues related to the documentation.
|
description: Suggest improvements, clarify inconsistencies, or report issues related to the documentation.
|
||||||
labels: ['documentation 📚']
|
labels: ['documentation 📚']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Is there an existing issue for this?
|
label: Is there an existing issue for this?
|
||||||
@@ -14,7 +18,7 @@ body:
|
|||||||
label: What document or section does this relate to?
|
label: What document or section does this relate to?
|
||||||
description: |
|
description: |
|
||||||
Please include a link to the file and section, if applicable. Be specific about what part of the documentation you are referencing.
|
Please include a link to the file and section, if applicable. Be specific about what part of the documentation you are referencing.
|
||||||
placeholder: e.g. https://github.com/jokob-sk/NetAlertX/blob/main/docs/FRONTEND_DEVELOPMENT.md
|
placeholder: e.g. https://docs.netalertx.com/FRONTEND_DEVELOPMENT
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
|
|||||||
33
.github/ISSUE_TEMPLATE/enhancement-request.yml
vendored
33
.github/ISSUE_TEMPLATE/enhancement-request.yml
vendored
@@ -1,33 +0,0 @@
|
|||||||
name: Enhancement Request
|
|
||||||
description: Propose an improvement to an existing feature or UX behavior.
|
|
||||||
labels: ['enhancement ♻️']
|
|
||||||
body:
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Is there an existing issue for this?
|
|
||||||
options:
|
|
||||||
- label: I have searched existing open and closed issues
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: What is the enhancement?
|
|
||||||
description: Describe the change or optimization you’d like to see to an existing feature.
|
|
||||||
placeholder: e.g. Make scan intervals configurable from UI instead of just `app.conf`
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: What problem does this solve or improve?
|
|
||||||
description: Describe why this change would improve user experience or project maintainability.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Additional context or examples
|
|
||||||
description: |
|
|
||||||
Screenshots? Comparisons? Reference repos?
|
|
||||||
required: false
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Are you willing to help implement this?
|
|
||||||
options:
|
|
||||||
- label: "Yes"
|
|
||||||
- label: "No"
|
|
||||||
8
.github/ISSUE_TEMPLATE/feature_request.yml → .github/ISSUE_TEMPLATE/feature-request.yml
vendored
Executable file → Normal file
8
.github/ISSUE_TEMPLATE/feature_request.yml → .github/ISSUE_TEMPLATE/feature-request.yml
vendored
Executable file → Normal file
@@ -1,7 +1,11 @@
|
|||||||
name: Feature Request
|
name: 🎁 Feature Request
|
||||||
description: 'Suggest an idea for NetAlertX'
|
description: 'Suggest an idea for NetAlertX'
|
||||||
labels: ['Feature request ➕']
|
labels: ['Feature request ➕']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Is there an existing issue for this?
|
label: Is there an existing issue for this?
|
||||||
@@ -46,7 +50,7 @@ body:
|
|||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Can I help implement this? 👩💻👨💻
|
label: Can I help implement this? 👩💻👨💻
|
||||||
description: The maintainer will provide guidance and help. The implementer will read the PR guidelines https://jokob-sk.github.io/NetAlertX/DEV_ENV_SETUP/
|
description: The maintainer will provide guidance and help. The implementer will read the PR guidelines https://docs.netalertx.com/DEV_ENV_SETUP/
|
||||||
options:
|
options:
|
||||||
- label: "Yes"
|
- label: "Yes"
|
||||||
- label: "No"
|
- label: "No"
|
||||||
36
.github/ISSUE_TEMPLATE/i-have-an-issue.yml
vendored
36
.github/ISSUE_TEMPLATE/i-have-an-issue.yml
vendored
@@ -1,13 +1,31 @@
|
|||||||
name: Bug Report
|
name: 🐛 Bug Report
|
||||||
description: 'When submitting an issue enable LOG_LEVEL="trace" and have a look at the docs.'
|
description: 'When submitting an issue enable LOG_LEVEL="trace" and have a look at the docs.'
|
||||||
labels: ['bug 🐛']
|
labels: ['bug 🐛']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
|
- type: dropdown
|
||||||
|
id: installation_type
|
||||||
|
attributes:
|
||||||
|
label: What installation are you running?
|
||||||
|
options:
|
||||||
|
- Production (netalertx) 📦
|
||||||
|
- Dev (netalertx-dev) 👩💻
|
||||||
|
- Home Assistant (addon) 🏠
|
||||||
|
- Home Assistant fa (full-access addon) 🏠
|
||||||
|
- Bare-metal (community only support - Check Discord) ❗
|
||||||
|
- Proxmox (community only support - Check Discord) ❗
|
||||||
|
- Unraid (community only support - Check Discord) ❗
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Is there an existing issue for this?
|
label: Is there an existing issue for this?
|
||||||
description: Please search to see if an open or closed issue already exists for the bug you encountered.
|
description: Please search to see if an open or closed issue already exists for the bug you encountered.
|
||||||
options:
|
options:
|
||||||
- label: I have searched the existing open and closed issues and I checked the docs https://jokob-sk.github.io/NetAlertX/
|
- label: I have searched the existing open and closed issues and I checked the docs https://docs.netalertx.com/
|
||||||
required: true
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
@@ -58,18 +76,6 @@ body:
|
|||||||
render: yaml
|
render: yaml
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
- type: dropdown
|
|
||||||
id: installation_type
|
|
||||||
attributes:
|
|
||||||
label: What installation are you running?
|
|
||||||
options:
|
|
||||||
- Production (netalertx)
|
|
||||||
- Dev (netalertx-dev)
|
|
||||||
- Home Assistant (addon)
|
|
||||||
- Home Assistant fa (full-access addon)
|
|
||||||
- Bare-metal (community only support - Check Discord)
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Debug or Trace enabled
|
label: Debug or Trace enabled
|
||||||
@@ -85,7 +91,7 @@ body:
|
|||||||
PASTE LOG HERE. Using the triple backticks preserves format.
|
PASTE LOG HERE. Using the triple backticks preserves format.
|
||||||
```
|
```
|
||||||
description: |
|
description: |
|
||||||
Logs with debug enabled (https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEBUG_TIPS.md) ⚠
|
Logs with debug enabled (https://docs.netalertx.com/DEBUG_TIPS) ⚠
|
||||||
***Generally speaking, all bug reports should have logs provided.***
|
***Generally speaking, all bug reports should have logs provided.***
|
||||||
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
||||||
Additionally, any additional info? Screenshots? References? Anything that will give us more context about the issue you are encountering!
|
Additionally, any additional info? Screenshots? References? Anything that will give us more context about the issue you are encountering!
|
||||||
|
|||||||
@@ -1,37 +0,0 @@
|
|||||||
name: Refactor / Code Quality Request ♻️
|
|
||||||
description: Suggest improvements to code structure, style, or maintainability.
|
|
||||||
labels: ['enhancement ♻️']
|
|
||||||
body:
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Is there an existing issue for this?
|
|
||||||
description: Please check if a similar request already exists.
|
|
||||||
options:
|
|
||||||
- label: I have searched the existing open and closed issues
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: What part of the code needs refactoring or improvement?
|
|
||||||
description: Specify files, modules, or components.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Describe the proposed changes
|
|
||||||
description: Explain the refactoring or quality improvements you suggest.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Why is this improvement needed?
|
|
||||||
description: Benefits such as maintainability, readability, performance, or scalability.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Additional context or examples
|
|
||||||
description: Any relevant links, references, or related issues.
|
|
||||||
required: false
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Can you help implement this change?
|
|
||||||
options:
|
|
||||||
- label: Yes
|
|
||||||
- label: No
|
|
||||||
6
.github/ISSUE_TEMPLATE/security-report.yml
vendored
6
.github/ISSUE_TEMPLATE/security-report.yml
vendored
@@ -1,7 +1,11 @@
|
|||||||
name: Security Report 🔐
|
name: 🔐 Security Report
|
||||||
description: Report a security vulnerability or concern privately.
|
description: Report a security vulnerability or concern privately.
|
||||||
labels: ['security 🔐']
|
labels: ['security 🔐']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
- type: markdown
|
- type: markdown
|
||||||
attributes:
|
attributes:
|
||||||
value: |
|
value: |
|
||||||
|
|||||||
38
.github/ISSUE_TEMPLATE/setup-help.yml
vendored
38
.github/ISSUE_TEMPLATE/setup-help.yml
vendored
@@ -1,17 +1,35 @@
|
|||||||
name: Setup help
|
name: 📥 Setup help
|
||||||
description: 'When submitting an issue enable LOG_LEVEL="trace" and re-search first.'
|
description: 'When submitting an issue enable LOG_LEVEL="trace" and re-search first.'
|
||||||
labels: ['Setup 📥']
|
labels: ['Setup 📥']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
|
- type: dropdown
|
||||||
|
id: installation_type
|
||||||
|
attributes:
|
||||||
|
label: What installation are you running?
|
||||||
|
options:
|
||||||
|
- Production (netalertx) 📦
|
||||||
|
- Dev (netalertx-dev) 👩💻
|
||||||
|
- Home Assistant (addon) 🏠
|
||||||
|
- Home Assistant fa (full-access addon) 🏠
|
||||||
|
- Bare-metal (community only support - Check Discord) ❗
|
||||||
|
- Proxmox (community only support - Check Discord) ❗
|
||||||
|
- Unraid (community only support - Check Discord) ❗
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Did I research?
|
label: Did I research?
|
||||||
description: Please confirm you checked the usual places before opening a setup support request.
|
description: Please confirm you checked the usual places before opening a setup support request.
|
||||||
options:
|
options:
|
||||||
- label: I have searched the docs https://jokob-sk.github.io/NetAlertX/
|
- label: I have searched the docs https://docs.netalertx.com/
|
||||||
required: true
|
required: true
|
||||||
- label: I have searched the existing open and closed issues
|
- label: I have searched the existing open and closed issues
|
||||||
required: true
|
required: true
|
||||||
- label: I confirm my SCAN_SUBNETS is configured and tested as per https://github.com/jokob-sk/NetAlertX/blob/main/docs/SUBNETS.md
|
- label: I confirm my SCAN_SUBNETS is configured and tested as per https://docs.netalertx.com/SUBNETS
|
||||||
required: true
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
@@ -43,23 +61,11 @@ body:
|
|||||||
render: python
|
render: python
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
- type: dropdown
|
|
||||||
id: installation_type
|
|
||||||
attributes:
|
|
||||||
label: What installation are you running?
|
|
||||||
options:
|
|
||||||
- Production (netalertx)
|
|
||||||
- Dev (netalertx-dev)
|
|
||||||
- Home Assistant (addon)
|
|
||||||
- Home Assistant fa (full-access addon)
|
|
||||||
- Bare-metal (community only support - Check Discord)
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: app.log
|
label: app.log
|
||||||
description: |
|
description: |
|
||||||
Logs with debug enabled (https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEBUG_TIPS.md) ⚠
|
Logs with debug enabled (https://docs.netalertx.com/DEBUG_TIPS) ⚠
|
||||||
***Generally speaking, all bug reports should have logs provided.***
|
***Generally speaking, all bug reports should have logs provided.***
|
||||||
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
||||||
Additionally, any additional info? Screenshots? References? Anything that will give us more context about the issue you are encountering!
|
Additionally, any additional info? Screenshots? References? Anything that will give us more context about the issue you are encountering!
|
||||||
|
|||||||
36
.github/ISSUE_TEMPLATE/translation-request.yml
vendored
36
.github/ISSUE_TEMPLATE/translation-request.yml
vendored
@@ -1,36 +0,0 @@
|
|||||||
name: Translation / Localization Request 🌐
|
|
||||||
description: Suggest adding or improving translations or localization support.
|
|
||||||
labels: ['enhancement 🌐']
|
|
||||||
body:
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Have you checked for existing translation efforts or related issues?
|
|
||||||
options:
|
|
||||||
- label: I have searched existing open and closed issues
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Language(s) involved
|
|
||||||
description: Specify the language(s) this request pertains to.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Describe the translation or localization improvement
|
|
||||||
description: Examples include adding new language support, fixing translation errors, or improving formatting.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Why is this important for the project or users?
|
|
||||||
description: Describe the benefits or target audience.
|
|
||||||
required: false
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Additional context or references
|
|
||||||
description: Link to files, previous translation PRs, or external resources.
|
|
||||||
required: false
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Can you help with translation or review?
|
|
||||||
options:
|
|
||||||
- label: Yes
|
|
||||||
- label: No
|
|
||||||
118
.github/copilot-instructions.md
vendored
Executable file → Normal file
118
.github/copilot-instructions.md
vendored
Executable file → Normal file
@@ -1,91 +1,49 @@
|
|||||||
# NetAlertX AI Assistant Instructions
|
### ROLE: NETALERTX ARCHITECT & STRICT CODE AUDITOR
|
||||||
This is NetAlertX — network monitoring & alerting. NetAlertX provides Network inventory, awareness, insight, categorization, intruder and presence detection. This is a heavily community-driven project, welcoming of all contributions.
|
You are a cynical Security Engineer and Core Maintainer of NetAlertX. Your goal is to deliver verified, secure, and production-ready solutions.
|
||||||
|
|
||||||
You are expected to be concise, opinionated, and biased toward security and simplicity.
|
### MANDATORY BEHAVIORAL OVERRIDES
|
||||||
|
1. **Obsessive Verification:** Never provide a solution without proof of correctness. Write test cases or validation immediately after writing functions.
|
||||||
|
2. **Anti-Laziness Protocol:** No placeholders. Output full, functional blocks every time.
|
||||||
|
3. **Priority Hierarchy:** Correctness > Completeness > Speed.
|
||||||
|
4. **Mantra:** "Job's not done 'till unit tests run."
|
||||||
|
|
||||||
## Architecture (what runs where)
|
---
|
||||||
- Backend (Python): main loop + GraphQL/REST endpoints orchestrate scans, plugins, workflows, notifications, and JSON export.
|
|
||||||
- Key: `server/__main__.py`, `server/plugin.py`, `server/initialise.py`, `server/api_server/api_server_start.py`
|
|
||||||
- Data (SQLite): persistent state in `db/app.db`; helpers in `server/database.py` and `server/db/*`.
|
|
||||||
- Frontend (Nginx + PHP + JS): UI reads JSON, triggers execution queue events.
|
|
||||||
- Key: `front/`, `front/js/common.js`, `front/php/server/*.php`
|
|
||||||
- Plugins (Python): acquisition/enrichment/publishers under `front/plugins/*` with `config.json` manifests.
|
|
||||||
- Messaging/Workflows: `server/messaging/*`, `server/workflows/*`
|
|
||||||
- API JSON Cache for UI: generated under `api/*.json`
|
|
||||||
|
|
||||||
Backend loop phases (see `server/__main__.py` and `server/plugin.py`): `once`, `schedule`, `always_after_scan`, `before_name_updates`, `on_new_device`, `on_notification`, plus ad‑hoc `run` via execution queue. Plugins execute as scripts that write result logs for ingestion.
|
# NetAlertX
|
||||||
|
|
||||||
## Plugin patterns that matter
|
Network monitoring & alerting. Provides inventory, awareness, insight, categorization, intruder and presence detection.
|
||||||
- Manifest lives at `front/plugins/<code_name>/config.json`; `code_name` == folder, `unique_prefix` drives settings and filenames (e.g., `ARPSCAN`).
|
|
||||||
- Control via settings: `<PREF>_RUN` (phase), `<PREF>_RUN_SCHD` (cron-like), `<PREF>_CMD` (script path), `<PREF>_RUN_TIMEOUT`, `<PREF>_WATCH` (diff columns).
|
|
||||||
- Data contract: scripts write `/tmp/log/plugins/last_result.<PREF>.log` (pipe‑delimited: 9 required cols + optional 4). Use `front/plugins/plugin_helper.py`’s `Plugin_Objects` to sanitize text and normalize MACs, then `write_result_file()`.
|
|
||||||
- Device import: define `database_column_definitions` when creating/updating devices; watched fields trigger notifications.
|
|
||||||
|
|
||||||
### Standard Plugin Formats
|
## Architecture
|
||||||
* publisher: Sends notifications to services. Runs `on_notification`. Data source: self.
|
|
||||||
* dev scanner: Creates devices and manages online/offline status. Runs on `schedule`. Data source: self / SQLite DB.
|
|
||||||
* name discovery: Discovers device names via various protocols. Runs `before_name_updates` or on `schedule`. Data source: self.
|
|
||||||
* importer: Imports devices from another service. Runs on `schedule`. Data source: self / SQLite DB.
|
|
||||||
* system: Provides core system functionality. Runs on `schedule` or is always on. Data source: self / Template.
|
|
||||||
* other: Miscellaneous plugins. Runs at various times. Data source: self / Template.
|
|
||||||
|
|
||||||
### Plugin logging & outputs
|
- **Backend (Python):** `server/__main__.py`, `server/plugin.py`, `server/api_server/api_server_start.py`
|
||||||
- Always check relevant logs first.
|
- **Backend Config:** `/data/config/app.conf`
|
||||||
- Use logging as shown in other plugins.
|
- **Data (SQLite):** `/data/db/app.db`; helpers in `server/db/*`
|
||||||
- Collect results with `Plugin_Objects.add_object(...)` during processing and call `plugin_objects.write_result_file()` exactly once at the end of the script.
|
- **Frontend (Nginx + PHP + JS):** `front/`
|
||||||
- Prefer to log a brief summary before writing (e.g., total objects added) to aid troubleshooting; keep logs concise at `info` level and use `verbose` or `debug` for extra context.
|
- **Plugins (Python):** `front/plugins/*` with `config.json` manifests
|
||||||
|
|
||||||
- Do not write ad‑hoc files for results; the only consumable output is `last_result.<PREF>.log` generated by `Plugin_Objects`.
|
## Skills
|
||||||
## API/Endpoints quick map
|
|
||||||
- Flask app: `server/api_server/api_server_start.py` exposes routes like `/device/<mac>`, `/devices`, `/devices/export/{csv,json}`, `/devices/import`, `/devices/totals`, `/devices/by-status`, plus `nettools`, `events`, `sessions`, `dbquery`, `metrics`, `sync`.
|
|
||||||
- Authorization: all routes expect header `Authorization: Bearer <API_TOKEN>` via `get_setting_value('API_TOKEN')`.
|
|
||||||
|
|
||||||
## Conventions & helpers to reuse
|
Procedural knowledge lives in `.github/skills/`. Load the appropriate skill when performing these tasks:
|
||||||
- Settings: add/modify via `ccd()` in `server/initialise.py` or per‑plugin manifest. Never hardcode ports or secrets; use `get_setting_value()`.
|
|
||||||
- Logging: use `logger.mylog(level, [message])`; levels: none/minimal/verbose/debug/trace.
|
|
||||||
- Time/MAC/strings: `helper.py` (`timeNowDB`, `normalize_mac`, sanitizers). Validate MACs before DB writes.
|
|
||||||
- DB helpers: prefer `server/db/db_helper.py` functions (e.g., `get_table_json`, device condition helpers) over raw SQL in new paths.
|
|
||||||
|
|
||||||
## Dev workflow (devcontainer)
|
| Task | Skill |
|
||||||
- **Devcontainer philosophy: brutal simplicity.** One user, everything writable, completely idempotent. No permission checks, no conditional logic, no sudo needed. If something doesn't work, tear down the wall and rebuild - don't patch. We unit test permissions in the hardened build.
|
|------|-------|
|
||||||
- **Permissions:** Never `chmod` or `chown` during operations. Everything is already writable. If you need permissions, the devcontainer setup is broken - fix `.devcontainer/scripts/setup.sh` or `.devcontainer/resources/devcontainer-Dockerfile` instead.
|
| Run tests, check failures | `testing-workflow` |
|
||||||
- **Files & Paths:** Use environment variables (`NETALERTX_DB`, `NETALERTX_LOG`, etc.) everywhere. `/data` for persistent config/db, `/tmp` for runtime logs/api/nginx state. Never hardcode `/data/db` or relative paths.
|
| Start/stop/restart services | `devcontainer-services` |
|
||||||
- **Database reset:** Use the `[Dev Container] Wipe and Regenerate Database` task. Kills backend, deletes `/data/{db,config}/*`, runs first-time setup scripts. Clean slate, no questions.
|
| Wipe database, fresh start | `database-reset` |
|
||||||
- Services: use tasks to (re)start backend and nginx/PHP-FPM. Backend runs with debugpy on 5678; attach a Python debugger if needed.
|
| Load sample devices | `sample-data` |
|
||||||
- Run a plugin manually: `python3 front/plugins/<code_name>/script.py` (ensure `sys.path` includes `/app/front/plugins` and `/app/server` like the template).
|
| Build Docker images | `docker-build` |
|
||||||
- Testing: pytest available via Alpine packages. Tests live in `test/`; app code is under `server/`. PYTHONPATH is preconfigured to include workspace and `/opt/venv` site‑packages.
|
| Reprovision devcontainer | `devcontainer-setup` |
|
||||||
- **Subprocess calls:** ALWAYS set explicit timeouts. Default to 60s minimum unless plugin config specifies otherwise. Nested subprocess calls (e.g., plugins calling external tools) need their own timeout - outer plugin timeout won't save you.
|
| Create or run plugins | `plugin-run-development` |
|
||||||
|
| Analyze PR comments | `pr-analysis` |
|
||||||
|
| Clean Docker resources | `docker-prune` |
|
||||||
|
| Generate devcontainer configs | `devcontainer-configs` |
|
||||||
|
| Create API endpoints | `api-development` |
|
||||||
|
| Logging conventions | `logging-standards` |
|
||||||
|
| Settings and config | `settings-management` |
|
||||||
|
| Find files and paths | `project-navigation` |
|
||||||
|
| Coding standards | `code-standards` |
|
||||||
|
|
||||||
## What “done right” looks like
|
## Execution Protocol
|
||||||
- When adding a plugin, start from `front/plugins/__template`, implement with `plugin_helper`, define manifest settings, and wire phase via `<PREF>_RUN`. Verify logs in `/tmp/log/plugins/` and data in `api/*.json`.
|
|
||||||
- When introducing new config, define it once (core `ccd()` or plugin manifest) and read it via helpers everywhere.
|
|
||||||
- When exposing new server functionality, add endpoints in `server/api_server/*` and keep authorization consistent; update UI by reading/writing JSON cache rather than bypassing the pipeline.
|
|
||||||
|
|
||||||
## Useful references
|
|
||||||
- Docs: `docs/PLUGINS_DEV.md`, `docs/SETTINGS_SYSTEM.md`, `docs/API_*.md`, `docs/DEBUG_*.md`
|
|
||||||
- Logs: All logs are under `/tmp/log/`. Plugin logs are very shortly under `/tmp/log/plugins/` until picked up by the server.
|
|
||||||
- plugin logs: `/tmp/log/app.log`
|
|
||||||
- backend logs: `/tmp/log/stdout.log` and `/tmp/log/stderr.log`
|
|
||||||
- frontend commands logs: `/tmp/log/app_front.log`
|
|
||||||
- php errors: `/tmp/log/app.php_errors.log`
|
|
||||||
- nginx logs: `/tmp/log/nginx-access.log` and `/tmp/log/nginx-error.log`
|
|
||||||
|
|
||||||
## Assistant expectations:
|
|
||||||
- Be concise, opinionated, and biased toward security and simplicity.
|
|
||||||
- Reference concrete files/paths/environmental variables.
|
|
||||||
- Use existing helpers/settings.
|
|
||||||
- Offer a quick validation step (log line, API hit, or JSON export) for anything you add.
|
|
||||||
- Be blunt about risks and when you offer suggestions ensure they're also blunt,
|
|
||||||
- Ask for confirmation before making changes that run code or change multiple files.
|
|
||||||
- Make statements actionable and specific; propose exact edits.
|
|
||||||
- Request confirmation before applying changes that affect more than a single, clearly scoped line or file.
|
|
||||||
- Ask the user to debug something for an actionable value if you're unsure.
|
|
||||||
- Be sure to offer choices when appropriate.
|
|
||||||
- Always understand the intent of the user's request and undo/redo as needed.
|
|
||||||
- Above all, use the simplest possible code that meets the need so it can be easily audited and maintained.
|
|
||||||
- Always leave logging enabled. If there is a possiblity it will be difficult to debug with current logging, add more logging.
|
|
||||||
- Always run the testFailure tool before executing any tests to gather current failure information and avoid redundant runs.
|
|
||||||
- Always prioritize using the appropriate tools in the environment first. As an example if a test is failing use `testFailure` then `runTests`. Never `runTests` first.
|
|
||||||
- Docker tests take an extremely long time to run. Avoid changes to docker or tests until you've examined the exisiting testFailures and runTests results.
|
|
||||||
- Environment tools are designed specifically for your use in this project and running them in this order will give you the best results.
|
|
||||||
|
|
||||||
|
- **Before running tests:** Always use `testFailure` tool first to gather current failures.
|
||||||
|
- **Docker tests are slow.** Examine existing failures before changing tests or Dockerfiles.
|
||||||
|
|||||||
69
.github/skills/api-development/SKILL.md
vendored
Normal file
69
.github/skills/api-development/SKILL.md
vendored
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
---
|
||||||
|
name: api-development
|
||||||
|
description: Develop and extend NetAlertX REST API endpoints. Use this when asked to create endpoint, add API route, implement API, or modify API responses.
|
||||||
|
---
|
||||||
|
|
||||||
|
# API Development
|
||||||
|
|
||||||
|
## Entry Point
|
||||||
|
|
||||||
|
Flask app: `server/api_server/api_server_start.py`
|
||||||
|
|
||||||
|
## Existing Routes
|
||||||
|
|
||||||
|
- `/device/<mac>` - Single device operations
|
||||||
|
- `/devices` - Device list
|
||||||
|
- `/devices/export/{csv,json}` - Export devices
|
||||||
|
- `/devices/import` - Import devices
|
||||||
|
- `/devices/totals` - Device counts
|
||||||
|
- `/devices/by-status` - Devices grouped by status
|
||||||
|
- `/nettools` - Network utilities
|
||||||
|
- `/events` - Event log
|
||||||
|
- `/sessions` - Session management
|
||||||
|
- `/dbquery` - Database queries
|
||||||
|
- `/metrics` - Prometheus metrics
|
||||||
|
- `/sync` - Synchronization
|
||||||
|
|
||||||
|
## Authorization
|
||||||
|
|
||||||
|
All routes require header:
|
||||||
|
|
||||||
|
```
|
||||||
|
Authorization: Bearer <API_TOKEN>
|
||||||
|
```
|
||||||
|
|
||||||
|
Retrieve token via `get_setting_value('API_TOKEN')`.
|
||||||
|
|
||||||
|
## Response Contract
|
||||||
|
|
||||||
|
**MANDATORY:** All responses must include `"success": true|false`
|
||||||
|
|
||||||
|
```python
|
||||||
|
return {"success": False, "error": "Description of what went wrong"}
|
||||||
|
```
|
||||||
|
|
||||||
|
On success:
|
||||||
|
|
||||||
|
```python
|
||||||
|
return {"success": True, "data": result}
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
return {"success": False, "error": "Description of what went wrong"}
|
||||||
|
```
|
||||||
|
|
||||||
|
On success:
|
||||||
|
|
||||||
|
```python
|
||||||
|
return {"success": True, "data": result}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
**Exception:** The legacy `/device/<mac>` GET endpoint does not follow this contract to maintain backward compatibility with the UI.
|
||||||
|
|
||||||
|
## Adding New Endpoints
|
||||||
|
|
||||||
|
1. Add route in `server/api_server/` directory
|
||||||
|
2. Follow authorization pattern
|
||||||
|
3. Return proper response contract
|
||||||
|
4. Update UI to read/write JSON cache (don't bypass pipeline)
|
||||||
60
.github/skills/authentication/SKILL.md
vendored
Normal file
60
.github/skills/authentication/SKILL.md
vendored
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-authentication-tokens
|
||||||
|
description: Manage and troubleshoot API tokens and authentication-related secrets. Use this when you need to find, rotate, verify, or debug authentication issues (401/403) in NetAlertX.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Authentication
|
||||||
|
|
||||||
|
## Purpose ✅
|
||||||
|
Explain how to locate, validate, rotate, and troubleshoot API tokens and related authentication settings used by NetAlertX.
|
||||||
|
|
||||||
|
## Pre-Flight Check (MANDATORY) ⚠️
|
||||||
|
1. Ensure the backend is running (use devcontainer services or `ps`/systemd checks).
|
||||||
|
2. Verify the `API_TOKEN` setting can be read with Python (see below).
|
||||||
|
3. If a token-related error occurs, gather logs (`/tmp/log/app.log`, nginx logs) before changing secrets.
|
||||||
|
|
||||||
|
## Retrieve the API token (Python — preferred) 🐍
|
||||||
|
Always use Python helpers to read secrets to avoid accidental exposure in shells or logs:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from helper import get_setting_value
|
||||||
|
token = get_setting_value("API_TOKEN")
|
||||||
|
```
|
||||||
|
|
||||||
|
If you must inspect from a running container (read-only), use:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec <CONTAINER_ID> python3 -c "from helper import get_setting_value; print(get_setting_value('API_TOKEN'))"
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also check the runtime config file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec <CONTAINER_ID> grep API_TOKEN /data/config/app.conf
|
||||||
|
```
|
||||||
|
|
||||||
|
## Rotate / Generate a new token 🔁
|
||||||
|
- Preferred: Use the web UI (Settings / System) and click **Generate** for the `API_TOKEN` field — this updates the value safely and immediately.
|
||||||
|
- Manual: Edit `/data/config/app.conf` and restart the backend if required (use the existing devcontainer service tasks).
|
||||||
|
- After rotation: verify the value with `get_setting_value('API_TOKEN')` and update any clients or sync nodes to use the new token.
|
||||||
|
|
||||||
|
## Troubleshooting 401 / 403 Errors 🔍
|
||||||
|
1. Confirm backend is running and reachable.
|
||||||
|
2. Confirm `get_setting_value('API_TOKEN')` returns a non-empty value.
|
||||||
|
3. Ensure client requests send the header exactly: `Authorization: Bearer <API_TOKEN>`.
|
||||||
|
4. Check `/tmp/log/app.log` and plugin logs (e.g., sync plugin) for "Incorrect API Token" messages.
|
||||||
|
5. If using multiple nodes, ensure the token matches across nodes for sync operations.
|
||||||
|
6. If token appears missing or incorrect, rotate via UI or update `app.conf` and re-verify.
|
||||||
|
|
||||||
|
## Best Practices & Security 🔐
|
||||||
|
- Never commit tokens to source control or paste them in public issues. Redact tokens when sharing logs.
|
||||||
|
- Rotate tokens when a secret leak is suspected or per your security policy.
|
||||||
|
- Use `get_setting_value()` in tests and scripts — do not hardcode secrets.
|
||||||
|
|
||||||
|
## Related Skills & Docs 📚
|
||||||
|
- `testing-workflow` — how to use `API_TOKEN` in tests
|
||||||
|
- `settings-management` — where settings live and how they are managed
|
||||||
|
- Docs: `docs/API.md`, `docs/API_OLD.md`, `docs/API_SSE.md`
|
||||||
|
|
||||||
|
---
|
||||||
|
_Last updated: 2026-01-23_
|
||||||
81
.github/skills/code-standards/SKILL.md
vendored
Normal file
81
.github/skills/code-standards/SKILL.md
vendored
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-code-standards
|
||||||
|
description: NetAlertX coding standards and conventions. Use this when writing code, reviewing code, or implementing features.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Code Standards
|
||||||
|
|
||||||
|
- ask me to review before going to each next step (mention n step out of x) (AI only)
|
||||||
|
- before starting, prepare implementation plan (AI only)
|
||||||
|
- ask me to review it and ask any clarifying questions first
|
||||||
|
- add test creation as last step - follow repo architecture patterns - do not place in the root of /test
|
||||||
|
- code has to be maintainable, no duplicate code
|
||||||
|
- follow DRY principle - maintainability of code is more important than speed of implementation
|
||||||
|
- code files should be less than 500 LOC for better maintainability
|
||||||
|
- DB columns must not contain underscores, use camelCase instead (e.g., deviceInstanceId, not device_instance_id)
|
||||||
|
|
||||||
|
## File Length
|
||||||
|
|
||||||
|
Keep code files under 500 lines. Split larger files into modules.
|
||||||
|
|
||||||
|
## DRY Principle
|
||||||
|
|
||||||
|
Do not re-implement functionality. Reuse existing methods or refactor to create shared methods.
|
||||||
|
|
||||||
|
## Database Access
|
||||||
|
|
||||||
|
- Never access DB directly from application layers
|
||||||
|
- Use `server/db/db_helper.py` functions (e.g., `get_table_json`)
|
||||||
|
- Implement new functionality in handlers (e.g., `DeviceInstance` in `server/models/device_instance.py`)
|
||||||
|
|
||||||
|
## MAC Address Handling
|
||||||
|
|
||||||
|
Always validate and normalize MACs before DB writes:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from plugin_helper import normalize_mac
|
||||||
|
|
||||||
|
mac = normalize_mac(raw_mac)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Subprocess Safety
|
||||||
|
|
||||||
|
**MANDATORY:** All subprocess calls must set explicit timeouts.
|
||||||
|
|
||||||
|
```python
|
||||||
|
result = subprocess.run(cmd, timeout=60) # Minimum 60s
|
||||||
|
```
|
||||||
|
|
||||||
|
Nested subprocess calls need their own timeout—outer timeout won't save you.
|
||||||
|
|
||||||
|
## Time Utilities
|
||||||
|
|
||||||
|
```python
|
||||||
|
from utils.datetime_utils import timeNowUTC
|
||||||
|
|
||||||
|
timestamp = timeNowUTC()
|
||||||
|
```
|
||||||
|
|
||||||
|
This is the ONLY function that calls datetime.datetime.now() in the entire codebase.
|
||||||
|
|
||||||
|
⚠️ CRITICAL: ALL database timestamps MUST be stored in UTC
|
||||||
|
This is the SINGLE SOURCE OF TRUTH for current time in NetAlertX
|
||||||
|
Use timeNowUTC() for DB writes (returns UTC string by default)
|
||||||
|
Use timeNowUTC(as_string=False) for datetime operations (scheduling, comparisons, logging)
|
||||||
|
|
||||||
|
## String Sanitization
|
||||||
|
|
||||||
|
Use sanitizers from `server/helper.py` before storing user input. MAC addresses are always lowercased and normalized. IP addresses should be validated.
|
||||||
|
|
||||||
|
## Devcontainer Constraints
|
||||||
|
|
||||||
|
- Never `chmod` or `chown` during operations
|
||||||
|
- Everything is already writable
|
||||||
|
- If permissions needed, fix `.devcontainer/scripts/setup.sh`
|
||||||
|
|
||||||
|
## Path Hygiene
|
||||||
|
|
||||||
|
- Use environment variables for runtime paths
|
||||||
|
- `/data` for persistent config/db
|
||||||
|
- `/tmp` for runtime logs/api/nginx state
|
||||||
|
- Never hardcode `/data/db` or use relative paths
|
||||||
38
.github/skills/database-reset/SKILL.md
vendored
Normal file
38
.github/skills/database-reset/SKILL.md
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
name: reset-netalertx-database
|
||||||
|
description: Wipe and regenerate the NetAlertX database and config. Use this when asked to reset database, wipe db, fresh database, clean slate, or start fresh.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Database Reset
|
||||||
|
|
||||||
|
Completely wipes devcontainer database and config, then regenerates from scratch.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
killall 'python3' || true
|
||||||
|
sleep 1
|
||||||
|
rm -rf /data/db/* /data/config/*
|
||||||
|
bash /entrypoint.d/15-first-run-config.sh
|
||||||
|
bash /entrypoint.d/20-first-run-db.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## What This Does
|
||||||
|
|
||||||
|
1. Kills backend to release database locks
|
||||||
|
2. Deletes all files in `/data/db/` and `/data/config/`
|
||||||
|
3. Runs first-run config provisioning
|
||||||
|
4. Runs first-run database initialization
|
||||||
|
|
||||||
|
## After Reset
|
||||||
|
|
||||||
|
Run the startup script to restart services:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/setup.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Database Location
|
||||||
|
|
||||||
|
- Runtime: `/data/db/app.db` (SQLite)
|
||||||
|
- Config: `/data/config/app.conf`
|
||||||
28
.github/skills/devcontainer-configs/SKILL.md
vendored
Normal file
28
.github/skills/devcontainer-configs/SKILL.md
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-devcontainer-configs
|
||||||
|
description: Generate devcontainer configuration files. Use this when asked to generate devcontainer configs, update devcontainer template, or regenerate devcontainer.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Devcontainer Config Generation
|
||||||
|
|
||||||
|
Generates devcontainer configs from the template. Must be run after changes to devcontainer configuration.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/generate-configs.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## What It Does
|
||||||
|
|
||||||
|
Combines and merges template configurations into the final config used by VS Code.
|
||||||
|
|
||||||
|
## When to Run
|
||||||
|
|
||||||
|
- After modifying `.devcontainer/` template files
|
||||||
|
- After changing devcontainer features or settings
|
||||||
|
- Before committing devcontainer changes
|
||||||
|
|
||||||
|
## Note
|
||||||
|
|
||||||
|
This affects only the devcontainer configuration. It has no bearing on the production or test Docker image.
|
||||||
50
.github/skills/devcontainer-services/SKILL.md
vendored
Normal file
50
.github/skills/devcontainer-services/SKILL.md
vendored
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
---
|
||||||
|
name: restarting-netalertx-services
|
||||||
|
description: Control NetAlertX services inside the devcontainer. Use this when asked to start backend, start frontend, start nginx, start php-fpm, start crond, stop services, restart services, or check if services are running.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Devcontainer Services
|
||||||
|
|
||||||
|
You operate inside the devcontainer. Do not use `docker exec`.
|
||||||
|
|
||||||
|
## Start Backend (Python)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/services/start-backend.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Backend runs with debugpy on port 5678 for debugging. Takes ~5 seconds to be ready.
|
||||||
|
|
||||||
|
## Start Frontend (nginx + PHP-FPM)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/services/start-php-fpm.sh &
|
||||||
|
/services/start-nginx.sh &
|
||||||
|
```
|
||||||
|
|
||||||
|
Launches almost instantly.
|
||||||
|
|
||||||
|
## Start Scheduler (CronD)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/services/start-crond.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Stop All Services
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pkill -f 'php-fpm83|nginx|crond|python3' || true
|
||||||
|
```
|
||||||
|
|
||||||
|
## Check Running Services
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pgrep -a 'python3|nginx|php-fpm|crond'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Service Ports
|
||||||
|
|
||||||
|
- Frontend (nginx): 20211
|
||||||
|
- Backend API: 20212
|
||||||
|
- GraphQL: 20212
|
||||||
|
- Debugpy: 5678
|
||||||
36
.github/skills/devcontainer-setup/SKILL.md
vendored
Normal file
36
.github/skills/devcontainer-setup/SKILL.md
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-idempotent-setup
|
||||||
|
description: Reprovision and reset the devcontainer environment. Use this when asked to re-run startup, reprovision, setup devcontainer, fix permissions, or reset runtime state.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Devcontainer Setup
|
||||||
|
|
||||||
|
The setup script forcefully resets all runtime state. It is idempotent—every run wipes and recreates all relevant folders, symlinks, and files.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/setup.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## What It Does
|
||||||
|
|
||||||
|
1. Kills all services (php-fpm, nginx, crond, python3)
|
||||||
|
2. Mounts tmpfs ramdisks for `/tmp/log`, `/tmp/api`, `/tmp/run`, `/tmp/nginx`
|
||||||
|
3. Creates critical subdirectories
|
||||||
|
4. Links `/entrypoint.d` and `/app` symlinks
|
||||||
|
5. Creates `/data`, `/data/config`, `/data/db` directories
|
||||||
|
6. Creates all log files
|
||||||
|
7. Runs `/entrypoint.sh` to start services
|
||||||
|
8. Writes version to `.VERSION`
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
- After modifying setup scripts
|
||||||
|
- After container rebuild
|
||||||
|
- When environment is in broken state
|
||||||
|
- After database reset
|
||||||
|
|
||||||
|
## Philosophy
|
||||||
|
|
||||||
|
No conditional logic. Everything is recreated unconditionally. If something doesn't work, run setup again.
|
||||||
38
.github/skills/docker-build/SKILL.md
vendored
Normal file
38
.github/skills/docker-build/SKILL.md
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-docker-build
|
||||||
|
description: Build Docker images for testing or production. Use this when asked to build container, build image, docker build, build test image, or launch production container.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Docker Build
|
||||||
|
|
||||||
|
## Build Unit Test Image
|
||||||
|
|
||||||
|
Required after container/Dockerfile changes. Tests won't see changes until image is rebuilt.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker buildx build -t netalertx-test .
|
||||||
|
```
|
||||||
|
|
||||||
|
Build time: ~30 seconds (or ~90s if venv stage changes)
|
||||||
|
|
||||||
|
## Build and Launch Production Container
|
||||||
|
|
||||||
|
Before launching, stop devcontainer services first to free ports.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX
|
||||||
|
docker compose up -d --build --force-recreate
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pre-Launch Checklist
|
||||||
|
|
||||||
|
1. Stop devcontainer services: `pkill -f 'php-fpm83|nginx|crond|python3'`
|
||||||
|
2. Close VS Code forwarded ports
|
||||||
|
3. Run the build command
|
||||||
|
|
||||||
|
## Production Container Details
|
||||||
|
|
||||||
|
- Image: `netalertx:latest`
|
||||||
|
- Container name: `netalertx`
|
||||||
|
- Network mode: host
|
||||||
|
- Ports: 20211 (UI), 20212 (API/GraphQL)
|
||||||
32
.github/skills/docker-prune/SKILL.md
vendored
Normal file
32
.github/skills/docker-prune/SKILL.md
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-docker-prune
|
||||||
|
description: Clean up unused Docker resources. Use this when asked to prune docker, clean docker, remove unused images, free disk space, or docker cleanup. DANGEROUS operation. Requires human confirmation.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Docker Prune
|
||||||
|
|
||||||
|
**DANGER:** This destroys containers, images, volumes, and networks. Any stopped container will be wiped and data will be lost.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/confirm-docker-prune.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## What Gets Deleted
|
||||||
|
|
||||||
|
- All stopped containers
|
||||||
|
- All unused images
|
||||||
|
- All unused volumes
|
||||||
|
- All unused networks
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
- Disk space is low
|
||||||
|
- Build cache is corrupted
|
||||||
|
- Clean slate needed for testing
|
||||||
|
- After many image rebuilds
|
||||||
|
|
||||||
|
## Safety
|
||||||
|
|
||||||
|
The script requires explicit confirmation before proceeding.
|
||||||
34
.github/skills/mcp-activation/SKILL.md
vendored
Normal file
34
.github/skills/mcp-activation/SKILL.md
vendored
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
---
|
||||||
|
name: mcp-activation
|
||||||
|
description: Enables live interaction with the NetAlertX runtime. This skill configures the Model Context Protocol (MCP) connection, granting full API access for debugging, troubleshooting, and real-time operations including database queries, network scans, and device management.
|
||||||
|
---
|
||||||
|
|
||||||
|
# MCP Activation Skill
|
||||||
|
|
||||||
|
This skill configures the environment to expose the Model Context Protocol (MCP) server to AI agents running inside the devcontainer.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
This skill assumes you are already running within the NetAlertX devcontainer.
|
||||||
|
|
||||||
|
1. **Generate Configurations:**
|
||||||
|
Run the configuration generation script to extract the API Token and update the VS Code MCP settings.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/generate-configs.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Reload Window:**
|
||||||
|
Request the user to reload the VS Code window to activate the new tools.
|
||||||
|
> I have generated the MCP configuration. Please run the **'Developer: Reload Window'** command to activate the MCP server tools.
|
||||||
|
> In VS Code: open the Command Palette (Windows/Linux: Ctrl+Shift+P, macOS: Cmd+Shift+P), type Developer: Reload Window, press Enter — or click the Reload button if a notification appears. 🔁
|
||||||
|
> After you reload, tell me “Window reloaded” (or just “reloaded”) and I’ll continue.
|
||||||
|
|
||||||
|
|
||||||
|
## Why use this?
|
||||||
|
|
||||||
|
Access the live runtime API to perform operations that are not possible through static file analysis:
|
||||||
|
- **Query the database**
|
||||||
|
- **Trigger network scans**
|
||||||
|
- **Manage devices and events**
|
||||||
|
- **Troubleshoot real-time system state**
|
||||||
85
.github/skills/plugin-run-development/SKILL.md
vendored
Normal file
85
.github/skills/plugin-run-development/SKILL.md
vendored
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-plugin-run-development
|
||||||
|
description: Create and run NetAlertX plugins. Use this when asked to create plugin, run plugin, test plugin, plugin development, or execute plugin script.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Plugin Development
|
||||||
|
|
||||||
|
## Expected Workflow for Running Plugins
|
||||||
|
|
||||||
|
1. Read this skill document for context and instructions.
|
||||||
|
2. Find the plugin in `front/plugins/<code_name>/`.
|
||||||
|
3. Read the plugin's `config.json` and `script.py` to understand its functionality and settings.
|
||||||
|
4. Formulate and run the command: `python3 front/plugins/<code_name>/script.py`.
|
||||||
|
5. Retrieve the result from the plugin log folder (`/tmp/log/plugins/last_result.<PREF>.log`) quickly, as the backend may delete it after processing.
|
||||||
|
|
||||||
|
## Run a Plugin Manually
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 front/plugins/<code_name>/script.py
|
||||||
|
```
|
||||||
|
|
||||||
|
Ensure `sys.path` includes `/app/front/plugins` and `/app/server` (as in the template).
|
||||||
|
|
||||||
|
## Plugin Structure
|
||||||
|
|
||||||
|
```text
|
||||||
|
front/plugins/<code_name>/
|
||||||
|
├── config.json # Manifest with settings
|
||||||
|
├── script.py # Main script
|
||||||
|
└── ...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Manifest Location
|
||||||
|
|
||||||
|
`front/plugins/<code_name>/config.json`
|
||||||
|
|
||||||
|
- `code_name` == folder name
|
||||||
|
- `unique_prefix` drives settings and filenames (e.g., `ARPSCAN`)
|
||||||
|
|
||||||
|
## Settings Pattern
|
||||||
|
|
||||||
|
- `<PREF>_RUN`: execution phase
|
||||||
|
- `<PREF>_RUN_SCHD`: cron-like schedule
|
||||||
|
- `<PREF>_CMD`: script path
|
||||||
|
- `<PREF>_RUN_TIMEOUT`: timeout in seconds
|
||||||
|
- `<PREF>_WATCH`: columns to watch for changes
|
||||||
|
|
||||||
|
## Data Contract
|
||||||
|
|
||||||
|
Scripts write to `/tmp/log/plugins/last_result.<PREF>.log`
|
||||||
|
|
||||||
|
**Important:** The backend will almost immediately process this result file and delete it after ingestion. If you need to inspect the output, run the plugin and immediately retrieve the result file before the backend processes it.
|
||||||
|
|
||||||
|
Use `front/plugins/plugin_helper.py`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from plugin_helper import Plugin_Objects
|
||||||
|
|
||||||
|
plugin_objects = Plugin_Objects()
|
||||||
|
plugin_objects.add_object(...) # During processing
|
||||||
|
plugin_objects.write_result_file() # Exactly once at end
|
||||||
|
```
|
||||||
|
|
||||||
|
## Execution Phases
|
||||||
|
|
||||||
|
- `once`: runs once at startup
|
||||||
|
- `schedule`: runs on cron schedule
|
||||||
|
- `always_after_scan`: runs after every scan
|
||||||
|
- `before_name_updates`: runs before name resolution
|
||||||
|
- `on_new_device`: runs when new device detected
|
||||||
|
- `on_notification`: runs when notification triggered
|
||||||
|
|
||||||
|
## Plugin Formats
|
||||||
|
|
||||||
|
| Format | Purpose | Runs |
|
||||||
|
|--------|---------|------|
|
||||||
|
| publisher | Send notifications | `on_notification` |
|
||||||
|
| dev scanner | Create/manage devices | `schedule` |
|
||||||
|
| name discovery | Discover device names | `before_name_updates` |
|
||||||
|
| importer | Import from services | `schedule` |
|
||||||
|
| system | Core functionality | `schedule` |
|
||||||
|
|
||||||
|
## Starting Point
|
||||||
|
|
||||||
|
Copy from `front/plugins/__template` and customize.
|
||||||
59
.github/skills/project-navigation/SKILL.md
vendored
Normal file
59
.github/skills/project-navigation/SKILL.md
vendored
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
---
|
||||||
|
name: about-netalertx-project-structure
|
||||||
|
description: Navigate the NetAlertX codebase structure. Use this when asked about file locations, project structure, where to find code, or key paths.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Project Navigation
|
||||||
|
|
||||||
|
## Key Paths
|
||||||
|
|
||||||
|
| Component | Path |
|
||||||
|
|-----------|------|
|
||||||
|
| Workspace root | `/workspaces/NetAlertX` |
|
||||||
|
| Backend entry | `server/__main__.py` |
|
||||||
|
| API server | `server/api_server/api_server_start.py` |
|
||||||
|
| Plugin system | `server/plugin.py` |
|
||||||
|
| Initialization | `server/initialise.py` |
|
||||||
|
| Frontend | `front/` |
|
||||||
|
| Frontend JS | `front/js/common.js` |
|
||||||
|
| Frontend PHP | `front/php/server/*.php` |
|
||||||
|
| Plugins | `front/plugins/` |
|
||||||
|
| Plugin template | `front/plugins/__template` |
|
||||||
|
| Database helpers | `server/db/db_helper.py` |
|
||||||
|
| Device model | `server/models/device_instance.py` |
|
||||||
|
| Messaging | `server/messaging/` |
|
||||||
|
| Workflows | `server/workflows/` |
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
NetAlertX uses a frontend–backend architecture: the frontend runs on **PHP + Nginx** (see `front/`), the backend is implemented in **Python** (see `server/`), and scheduled tasks are managed by a **supercronic** scheduler that runs periodic jobs.
|
||||||
|
|
||||||
|
## Runtime Paths
|
||||||
|
|
||||||
|
| Data | Path |
|
||||||
|
|------|------|
|
||||||
|
| Config (runtime) | `/data/config/app.conf` |
|
||||||
|
| Config (default) | `back/app.conf` |
|
||||||
|
| Database | `/data/db/app.db` |
|
||||||
|
| API JSON cache | `/tmp/api/*.json` |
|
||||||
|
| Logs | `/tmp/log/` |
|
||||||
|
| Plugin logs | `/tmp/log/plugins/` |
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
Use these `NETALERTX_*` environment variables instead of hardcoding paths. Examples:
|
||||||
|
|
||||||
|
- `NETALERTX_DB`
|
||||||
|
- `NETALERTX_LOG`
|
||||||
|
- `NETALERTX_CONFIG`
|
||||||
|
- `NETALERTX_DATA`
|
||||||
|
- `NETALERTX_APP`
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
| Topic | Path |
|
||||||
|
|-------|------|
|
||||||
|
| Plugin development | `docs/PLUGINS_DEV.md` |
|
||||||
|
| System settings | `docs/SETTINGS_SYSTEM.md` |
|
||||||
|
| API docs | `docs/API_*.md` |
|
||||||
|
| Debug guides | `docs/DEBUG_*.md` |
|
||||||
31
.github/skills/sample-data/SKILL.md
vendored
Normal file
31
.github/skills/sample-data/SKILL.md
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-sample-data
|
||||||
|
description: Load synthetic device data into the devcontainer. Use this when asked to load sample devices, seed data, import test devices, populate database, or generate test data.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Sample Data Loading
|
||||||
|
|
||||||
|
Generates synthetic device inventory and imports it via the `/devices/import` API endpoint.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX/.devcontainer/scripts
|
||||||
|
./load-devices.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Environment
|
||||||
|
|
||||||
|
- `CSV_PATH`: defaults to `/tmp/netalertx-devices.csv`
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Backend must be running
|
||||||
|
- API must be accessible
|
||||||
|
|
||||||
|
## What It Does
|
||||||
|
|
||||||
|
1. Generates synthetic device records (MAC addresses, IPs, names, vendors)
|
||||||
|
2. Creates CSV file at `$CSV_PATH`
|
||||||
|
3. POSTs to `/devices/import` endpoint
|
||||||
|
4. Devices appear in database and UI
|
||||||
39
.github/skills/settings-management/SKILL.md
vendored
Normal file
39
.github/skills/settings-management/SKILL.md
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-settings-management
|
||||||
|
description: Manage NetAlertX configuration settings. Use this when asked to add setting, read config, get_setting_value, ccd, or configure options.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Settings Management
|
||||||
|
|
||||||
|
## Reading Settings
|
||||||
|
|
||||||
|
```python
|
||||||
|
from helper import get_setting_value
|
||||||
|
|
||||||
|
value = get_setting_value('SETTING_NAME')
|
||||||
|
```
|
||||||
|
|
||||||
|
Never hardcode ports, secrets, or configuration values. Always use `get_setting_value()`.
|
||||||
|
|
||||||
|
## Adding Core Settings
|
||||||
|
|
||||||
|
Use `ccd()` in `server/initialise.py`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
ccd('SETTING_NAME', 'default_value', 'description')
|
||||||
|
```
|
||||||
|
|
||||||
|
## Adding Plugin Settings
|
||||||
|
|
||||||
|
Define in plugin's `config.json` manifest under the settings section.
|
||||||
|
|
||||||
|
## Config Files
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `/data/config/app.conf` | Runtime config (modified by app) |
|
||||||
|
| `back/app.conf` | Default config (template) |
|
||||||
|
|
||||||
|
## Environment Override
|
||||||
|
|
||||||
|
Use `APP_CONF_OVERRIDE` environment variable for settings that must be set before startup.
|
||||||
61
.github/skills/testing-workflow/SKILL.md
vendored
Normal file
61
.github/skills/testing-workflow/SKILL.md
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-testing-workflow
|
||||||
|
description: Run and debug tests in the NetAlertX devcontainer. Use this when asked to run tests, check test failures, debug failing tests, or execute pytest.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Testing Workflow
|
||||||
|
|
||||||
|
## Pre-Flight Check (MANDATORY)
|
||||||
|
|
||||||
|
Before running any tests, always check for existing failures first:
|
||||||
|
|
||||||
|
1. Use the `testFailure` tool to gather current failure information
|
||||||
|
2. Review the failures to understand what's already broken
|
||||||
|
3. Only then proceed with test execution
|
||||||
|
|
||||||
|
## Running Tests
|
||||||
|
|
||||||
|
Use VS Code's testing interface or the `runTests` tool with appropriate parameters:
|
||||||
|
|
||||||
|
- To run all tests: invoke runTests without file filter
|
||||||
|
- To run specific test file: invoke runTests with the test file path
|
||||||
|
- To run failed tests only: invoke runTests with `--lf` flag
|
||||||
|
|
||||||
|
## Test Location
|
||||||
|
|
||||||
|
Tests live in `test/` directory. App code is under `server/`.
|
||||||
|
|
||||||
|
PYTHONPATH is preconfigured to include the following which should meet all needs:
|
||||||
|
- `/app` # the primary location where python runs in the production system
|
||||||
|
- `/app/server` # symbolic link to /workspaces/NetAlertX/server
|
||||||
|
- `/app/front/plugins` # symbolic link to /workspaces/NetAlertX/front/plugins
|
||||||
|
- `/opt/venv/lib/pythonX.Y/site-packages`
|
||||||
|
- `/workspaces/NetAlertX/test`
|
||||||
|
- `/workspaces/NetAlertX/server`
|
||||||
|
- `/workspaces/NetAlertX`
|
||||||
|
- `/usr/lib/pythonX.Y/site-packages`
|
||||||
|
|
||||||
|
## Authentication in Tests
|
||||||
|
|
||||||
|
Retrieve `API_TOKEN` using Python (not shell):
|
||||||
|
|
||||||
|
```python
|
||||||
|
from helper import get_setting_value
|
||||||
|
token = get_setting_value("API_TOKEN")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting 403 Forbidden
|
||||||
|
|
||||||
|
1. Ensure backend is running (use devcontainer-services skill)
|
||||||
|
2. Verify config loaded: `get_setting_value("API_TOKEN")` returns non-empty
|
||||||
|
3. Re-run startup if needed (use devcontainer-setup skill)
|
||||||
|
|
||||||
|
## Docker Test Image
|
||||||
|
|
||||||
|
If container changes affect tests, rebuild the test image first:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker buildx build -t netalertx-test .
|
||||||
|
```
|
||||||
|
|
||||||
|
This takes ~30 seconds unless venv stage changes (~90s).
|
||||||
23
.github/workflows/code_checks.yml → .github/workflows/code-checks.yml
vendored
Executable file → Normal file
23
.github/workflows/code_checks.yml → .github/workflows/code-checks.yml
vendored
Executable file → Normal file
@@ -1,4 +1,4 @@
|
|||||||
name: Code checks
|
name: ✅ Code checks
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
@@ -17,6 +17,23 @@ jobs:
|
|||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: 🚨 Ensure DELETE FROM CurrentScan is not commented out
|
||||||
|
run: |
|
||||||
|
echo "🔍 Checking that DELETE FROM CurrentScan is not commented out..."
|
||||||
|
|
||||||
|
MATCHES=$(grep -RInE '^[[:space:]]*#[[:space:]]*db\.sql\.execute\("DELETE FROM CurrentScan"\)' \
|
||||||
|
--include="*.py" .) || true
|
||||||
|
|
||||||
|
if [ -n "$MATCHES" ]; then
|
||||||
|
echo "❌ Found commented-out DELETE FROM CurrentScan call:"
|
||||||
|
echo "$MATCHES"
|
||||||
|
echo
|
||||||
|
echo "This line must NOT be commented out in committed code."
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
echo "✅ DELETE FROM CurrentScan is active."
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Check for incorrect absolute '/php/' URLs in frontend code
|
- name: Check for incorrect absolute '/php/' URLs in frontend code
|
||||||
run: |
|
run: |
|
||||||
echo "🔍 Checking for incorrect absolute '/php/' URLs (should be 'php/' or './php/')..."
|
echo "🔍 Checking for incorrect absolute '/php/' URLs (should be 'php/' or './php/')..."
|
||||||
@@ -95,5 +112,5 @@ jobs:
|
|||||||
- name: Run Docker-based tests
|
- name: Run Docker-based tests
|
||||||
run: |
|
run: |
|
||||||
echo "🐳 Running Docker-based tests..."
|
echo "🐳 Running Docker-based tests..."
|
||||||
chmod +x ./test/docker_tests/run_docker_tests.sh
|
chmod +x ./scripts/run_tests_in_docker_environment.sh
|
||||||
./test/docker_tests/run_docker_tests.sh
|
./scripts/run_tests_in_docker_environment.sh
|
||||||
25
.github/workflows/docker_cache-cleaner.yml
vendored
25
.github/workflows/docker_cache-cleaner.yml
vendored
@@ -1,25 +0,0 @@
|
|||||||
name: 🤖Automation - ci-package-cleaner
|
|
||||||
|
|
||||||
on:
|
|
||||||
|
|
||||||
workflow_dispatch: # manual option
|
|
||||||
|
|
||||||
# schedule:
|
|
||||||
# - cron: '15 22 * * 1' # every Monday 10.15pm UTC (~11.15am Tuesday NZT)
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
|
|
||||||
package-cleaner:
|
|
||||||
name: package-cleaner
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 5
|
|
||||||
permissions:
|
|
||||||
packages: write
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- uses: actions/delete-package-versions@v4
|
|
||||||
with:
|
|
||||||
package-name: netalertx
|
|
||||||
package-type: container
|
|
||||||
min-versions-to-keep: 0
|
|
||||||
delete-only-untagged-versions: true
|
|
||||||
26
.github/workflows/docker_dev.yml
vendored
26
.github/workflows/docker_dev.yml
vendored
@@ -1,4 +1,4 @@
|
|||||||
name: docker
|
name: 🐳 👩💻 docker dev
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
@@ -13,13 +13,16 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
docker_dev:
|
docker_dev:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 30
|
timeout-minutes: 90
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
if: >
|
if: >
|
||||||
contains(github.event.head_commit.message, 'PUSHPROD') != 'True' &&
|
!contains(github.event.head_commit.message, 'PUSHPROD') &&
|
||||||
github.repository == 'jokob-sk/NetAlertX'
|
(
|
||||||
|
github.repository == 'jokob-sk/NetAlertX' ||
|
||||||
|
github.repository == 'netalertx/NetAlertX'
|
||||||
|
)
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -62,6 +65,7 @@ jobs:
|
|||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
|
ghcr.io/netalertx/netalertx-dev
|
||||||
ghcr.io/jokob-sk/netalertx-dev
|
ghcr.io/jokob-sk/netalertx-dev
|
||||||
jokobsk/netalertx-dev
|
jokobsk/netalertx-dev
|
||||||
tags: |
|
tags: |
|
||||||
@@ -74,12 +78,20 @@ jobs:
|
|||||||
type=semver,pattern={{major}}
|
type=semver,pattern={{major}}
|
||||||
type=sha
|
type=sha
|
||||||
|
|
||||||
- name: Log in to Github Container Registry (GHCR)
|
- name: Login GHCR (netalertx org)
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login GHCR (jokob-sk legacy)
|
||||||
|
if: github.event_name != 'pull_request'
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: jokob-sk
|
username: jokob-sk
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GHCR_JOKOBSK_PAT }}
|
||||||
|
|
||||||
- name: Log in to DockerHub
|
- name: Log in to DockerHub
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
@@ -96,3 +108,5 @@ jobs:
|
|||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
|
cache-from: type=gha
|
||||||
|
cache-to: type=gha,mode=max
|
||||||
|
|||||||
112
.github/workflows/docker_dev_unsafe.yml
vendored
Normal file
112
.github/workflows/docker_dev_unsafe.yml
vendored
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
name: 🐳 ⚠ docker-unsafe from next_release branch
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- next_release
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- next_release
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
docker_dev_unsafe:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
timeout-minutes: 90
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
if: >
|
||||||
|
!contains(github.event.head_commit.message, 'PUSHPROD') &&
|
||||||
|
(
|
||||||
|
github.repository == 'jokob-sk/NetAlertX' ||
|
||||||
|
github.repository == 'netalertx/NetAlertX'
|
||||||
|
)
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
# --- Generate timestamped dev version
|
||||||
|
- name: Generate timestamp version
|
||||||
|
id: timestamp
|
||||||
|
run: |
|
||||||
|
ts=$(date -u +'%Y%m%d-%H%M%S')
|
||||||
|
echo "version=dev-${ts}" >> $GITHUB_OUTPUT
|
||||||
|
echo "Generated version: dev-${ts}"
|
||||||
|
|
||||||
|
- name: Set up dynamic build ARGs
|
||||||
|
id: getargs
|
||||||
|
run: echo "version=$(cat ./stable/VERSION)" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Get release version
|
||||||
|
id: get_version
|
||||||
|
run: echo "version=Dev" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
# --- debug output
|
||||||
|
- name: Debug version
|
||||||
|
run: |
|
||||||
|
echo "GITHUB_REF: $GITHUB_REF"
|
||||||
|
echo "Version: '${{ steps.get_version.outputs.version }}'"
|
||||||
|
|
||||||
|
# --- Write the timestamped version to .VERSION file
|
||||||
|
- name: Create .VERSION file
|
||||||
|
run: echo "${{ steps.timestamp.outputs.version }}" > .VERSION
|
||||||
|
|
||||||
|
- name: Docker meta
|
||||||
|
id: meta
|
||||||
|
uses: docker/metadata-action@v5
|
||||||
|
with:
|
||||||
|
images: |
|
||||||
|
ghcr.io/netalertx/netalertx-dev-unsafe
|
||||||
|
jokobsk/netalertx-dev-unsafe
|
||||||
|
tags: |
|
||||||
|
type=raw,value=unsafe
|
||||||
|
type=raw,value=${{ steps.timestamp.outputs.version }}
|
||||||
|
type=ref,event=branch
|
||||||
|
type=ref,event=pr
|
||||||
|
type=sha
|
||||||
|
|
||||||
|
- name: Login GHCR (netalertx org)
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login GHCR (jokob-sk legacy)
|
||||||
|
if: github.event_name != 'pull_request'
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: jokob-sk
|
||||||
|
password: ${{ secrets.GHCR_JOKOBSK_PAT }}
|
||||||
|
|
||||||
|
- name: Log in to DockerHub
|
||||||
|
if: github.event_name != 'pull_request'
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6
|
||||||
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
|
labels: |
|
||||||
|
org.opencontainers.image.title=NetAlertX Dev Unsafe
|
||||||
|
org.opencontainers.image.description=EXPERIMENTAL BUILD – NOT SUPPORTED – DATA LOSS POSSIBLE
|
||||||
|
org.opencontainers.image.version=${{ steps.timestamp.outputs.version }}
|
||||||
|
netalertx.stability=unsafe
|
||||||
|
netalertx.support=none
|
||||||
|
netalertx.data_risk=high
|
||||||
|
cache-from: type=gha
|
||||||
|
cache-to: type=gha,mode=max
|
||||||
34
.github/workflows/docker_prod.yml
vendored
34
.github/workflows/docker_prod.yml
vendored
@@ -6,18 +6,16 @@
|
|||||||
# GitHub recommends pinning actions to a commit SHA.
|
# GitHub recommends pinning actions to a commit SHA.
|
||||||
# To get a newer version, you will need to update the SHA.
|
# To get a newer version, you will need to update the SHA.
|
||||||
# You can also reference a tag or branch, but the action may change without warning.
|
# You can also reference a tag or branch, but the action may change without warning.
|
||||||
name: Publish Docker image
|
name: 🐳 🚀 Publish Docker image
|
||||||
|
|
||||||
on:
|
on:
|
||||||
release:
|
release:
|
||||||
types: [published]
|
types: [published]
|
||||||
tags:
|
|
||||||
- '*.[1-9]+[0-9]?.[1-9]+*'
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docker:
|
docker:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 30
|
timeout-minutes: 90
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
@@ -32,18 +30,6 @@ jobs:
|
|||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
# --- Previous approach Get release version from tag
|
|
||||||
- name: Set up dynamic build ARGs
|
|
||||||
id: getargs
|
|
||||||
run: echo "version=$(cat ./stable/VERSION)" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Get release version
|
|
||||||
id: get_version_prev
|
|
||||||
run: echo "::set-output name=version::${GITHUB_REF#refs/tags/}"
|
|
||||||
|
|
||||||
- name: Create .VERSION file
|
|
||||||
run: echo "${{ steps.get_version.outputs.version }}" >> .VERSION_PREV
|
|
||||||
|
|
||||||
# --- Get release version from tag
|
# --- Get release version from tag
|
||||||
- name: Get release version
|
- name: Get release version
|
||||||
id: get_version
|
id: get_version
|
||||||
@@ -55,7 +41,6 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
echo "GITHUB_REF: $GITHUB_REF"
|
echo "GITHUB_REF: $GITHUB_REF"
|
||||||
echo "Version: '${{ steps.get_version.outputs.version }}'"
|
echo "Version: '${{ steps.get_version.outputs.version }}'"
|
||||||
echo "Version prev: '${{ steps.get_version_prev.outputs.version }}'"
|
|
||||||
|
|
||||||
# --- Write version to .VERSION file
|
# --- Write version to .VERSION file
|
||||||
- name: Create .VERSION file
|
- name: Create .VERSION file
|
||||||
@@ -67,23 +52,30 @@ jobs:
|
|||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
|
ghcr.io/netalertx/netalertx
|
||||||
ghcr.io/jokob-sk/netalertx
|
ghcr.io/jokob-sk/netalertx
|
||||||
jokobsk/netalertx
|
jokobsk/netalertx
|
||||||
tags: |
|
tags: |
|
||||||
type=semver,pattern={{version}},value=${{ steps.get_version.outputs.version }}
|
type=semver,pattern={{version}},value=${{ steps.get_version.outputs.version }}
|
||||||
type=semver,pattern={{major}}.{{minor}},value=${{ steps.get_version.outputs.version }}
|
type=semver,pattern={{major}}.{{minor}},value=${{ steps.get_version.outputs.version }}
|
||||||
type=semver,pattern={{major}},value=${{ steps.get_version.outputs.version }}
|
type=semver,pattern={{major}},value=${{ steps.get_version.outputs.version }}
|
||||||
type=ref,event=branch,suffix=-{{ sha }}
|
type=raw,value=latest
|
||||||
type=ref,event=pr
|
|
||||||
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') }}
|
|
||||||
|
|
||||||
- name: Log in to Github Container Registry (GHCR)
|
- name: Log in to Github Container Registry (GHCR)
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: jokob-sk
|
username: ${{ github.actor }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login GHCR (jokob-sk legacy)
|
||||||
|
if: github.event_name != 'pull_request'
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: jokob-sk
|
||||||
|
password: ${{ secrets.GHCR_JOKOBSK_PAT }}
|
||||||
|
|
||||||
- name: Log in to DockerHub
|
- name: Log in to DockerHub
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
|
|||||||
81
.github/workflows/docker_rewrite.yml
vendored
81
.github/workflows/docker_rewrite.yml
vendored
@@ -1,81 +0,0 @@
|
|||||||
name: docker
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- rewrite
|
|
||||||
tags:
|
|
||||||
- '*.*.*'
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- rewrite
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
docker_rewrite:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 30
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
if: >
|
|
||||||
contains(github.event.head_commit.message, 'PUSHPROD') != 'True' &&
|
|
||||||
github.repository == 'jokob-sk/NetAlertX'
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Set up dynamic build ARGs
|
|
||||||
id: getargs
|
|
||||||
run: echo "version=$(cat ./stable/VERSION)" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Get release version
|
|
||||||
id: get_version
|
|
||||||
run: echo "version=Dev" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Create .VERSION file
|
|
||||||
run: echo "${{ steps.get_version.outputs.version }}" >> .VERSION
|
|
||||||
|
|
||||||
- name: Docker meta
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
with:
|
|
||||||
images: |
|
|
||||||
ghcr.io/jokob-sk/netalertx-dev-rewrite
|
|
||||||
jokobsk/netalertx-dev-rewrite
|
|
||||||
tags: |
|
|
||||||
type=raw,value=latest
|
|
||||||
type=ref,event=branch
|
|
||||||
type=ref,event=pr
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=semver,pattern={{major}}
|
|
||||||
type=sha
|
|
||||||
|
|
||||||
- name: Log in to Github Container Registry (GHCR)
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: jokob-sk
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Log in to DockerHub
|
|
||||||
if: github.event_name != 'pull_request'
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build and push
|
|
||||||
uses: docker/build-push-action@v3
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6
|
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
21
.github/workflows/label-issues.yml
vendored
21
.github/workflows/label-issues.yml
vendored
@@ -1,4 +1,4 @@
|
|||||||
name: Label Issues by Installation Type
|
name: 🏷 Label Issues by Installation Type
|
||||||
|
|
||||||
on:
|
on:
|
||||||
issues:
|
issues:
|
||||||
@@ -15,21 +15,28 @@ jobs:
|
|||||||
uses: actions/github-script@v7
|
uses: actions/github-script@v7
|
||||||
with:
|
with:
|
||||||
script: |
|
script: |
|
||||||
const body = context.payload.issue.body;
|
const body = (context.payload.issue.body || "").toLowerCase();
|
||||||
|
|
||||||
const lowerBody = body.toLowerCase();
|
// --- Check for template marker ---
|
||||||
|
const hasTemplate = body.includes('netalertx_template');
|
||||||
|
|
||||||
|
if (!hasTemplate) {
|
||||||
|
console.log("No template marker found, skipping labeling.");
|
||||||
|
return; // skip labeling
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Proceed with normal labeling ---
|
||||||
let labelsToAdd = [];
|
let labelsToAdd = [];
|
||||||
|
|
||||||
if (lowerBody.includes('bare-metal')) {
|
if (body.includes('bare-metal') || body.includes('proxmox')) {
|
||||||
labelsToAdd.push('bare-metal ❗');
|
labelsToAdd.push('bare-metal ❗');
|
||||||
}
|
}
|
||||||
|
|
||||||
if (lowerBody.includes('home assistant')) {
|
if (body.includes('home assistant')) {
|
||||||
labelsToAdd.push('Home Assistant 🏠');
|
labelsToAdd.push('Home Assistant 🏠');
|
||||||
}
|
}
|
||||||
|
|
||||||
if (lowerBody.includes('production (netalertx)') || lowerBody.includes('dev (netalertx-dev)')) {
|
if (body.includes('production (netalertx)') || body.includes('dev (netalertx-dev)')) {
|
||||||
labelsToAdd.push('Docker 🐋');
|
labelsToAdd.push('Docker 🐋');
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,4 +47,6 @@ jobs:
|
|||||||
issue_number: context.issue.number,
|
issue_number: context.issue.number,
|
||||||
labels: labelsToAdd
|
labels: labelsToAdd
|
||||||
});
|
});
|
||||||
|
|
||||||
|
console.log(`Added labels: ${labelsToAdd.join(", ")}`);
|
||||||
}
|
}
|
||||||
|
|||||||
19
.github/workflows/mkdocs.yml
vendored
19
.github/workflows/mkdocs.yml
vendored
@@ -1,9 +1,12 @@
|
|||||||
name: Deploy MkDocs
|
name: 📘 Deploy MkDocs
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main # Change if your default branch is different
|
- main
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
deploy:
|
deploy:
|
||||||
@@ -19,7 +22,17 @@ jobs:
|
|||||||
|
|
||||||
- name: Install MkDocs
|
- name: Install MkDocs
|
||||||
run: |
|
run: |
|
||||||
pip install mkdocs mkdocs-material && pip install mkdocs-github-admonitions-plugin
|
pip install \
|
||||||
|
mkdocs==1.6.0 \
|
||||||
|
mkdocs-material==9.5.21 \
|
||||||
|
mkdocs-github-admonitions-plugin==0.0.4
|
||||||
|
|
||||||
|
- name: Build MkDocs
|
||||||
|
run: mkdocs build
|
||||||
|
|
||||||
|
- name: Add CNAME
|
||||||
|
run: |
|
||||||
|
echo "docs.netalertx.com" > site/CNAME
|
||||||
|
|
||||||
- name: Deploy MkDocs
|
- name: Deploy MkDocs
|
||||||
run: mkdocs gh-deploy --force
|
run: mkdocs gh-deploy --force
|
||||||
|
|||||||
97
.github/workflows/run-all-tests.yml
vendored
Normal file
97
.github/workflows/run-all-tests.yml
vendored
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
name: 🧪 Manual Test Suite Selector
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
run_all:
|
||||||
|
description: '✅ Run ALL tests (overrides individual selectors)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_scan:
|
||||||
|
description: '📂 scan/ (Scan, Logic, Locks, IPs)'
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
run_api:
|
||||||
|
description: '📂 api_endpoints/ & server/ (Endpoints & Server)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_backend:
|
||||||
|
description: '📂 backend/ & db/ (SQL Builder, Security & Migration)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_docker_env:
|
||||||
|
description: '📂 docker_tests/ (Environment & PUID/PGID)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_ui:
|
||||||
|
description: '📂 ui/ (Selenium & Dashboard)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_plugins:
|
||||||
|
description: '📂 plugins/ (Sync insert schema-aware logic)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_root_files:
|
||||||
|
description: '📄 Root Test Files (WOL, Atomicity, etc.)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
comprehensive-test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout Code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Environment
|
||||||
|
run: sudo apt-get update && sudo apt-get install -y sqlite3
|
||||||
|
|
||||||
|
- name: Build Test Path Command
|
||||||
|
id: builder
|
||||||
|
run: |
|
||||||
|
PATHS=""
|
||||||
|
|
||||||
|
# run_all overrides everything
|
||||||
|
if [ "${{ github.event.inputs.run_all }}" == "true" ]; then
|
||||||
|
echo "final_paths=test/" >> $GITHUB_OUTPUT
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Folder Mapping with 'test/' prefix
|
||||||
|
if [ "${{ github.event.inputs.run_scan }}" == "true" ]; then PATHS="$PATHS test/scan/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_api }}" == "true" ]; then PATHS="$PATHS test/api_endpoints/ test/server/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_backend }}" == "true" ]; then PATHS="$PATHS test/backend/ test/db/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_docker_env }}" == "true" ]; then PATHS="$PATHS test/docker_tests/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_ui }}" == "true" ]; then PATHS="$PATHS test/ui/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_plugins }}" == "true" ]; then PATHS="$PATHS test/plugins/"; fi
|
||||||
|
|
||||||
|
# Root Files Mapping (files sitting directly in /test/)
|
||||||
|
if [ "${{ github.event.inputs.run_root_files }}" == "true" ]; then
|
||||||
|
PATHS="$PATHS test/test_device_atomicity.py test/test_mcp_disablement.py test/test_plugin_helper.py test/test_wol_validation.py"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If nothing is selected, default to the whole test folder
|
||||||
|
if [ -z "$PATHS" ]; then PATHS="test/"; fi
|
||||||
|
|
||||||
|
echo "final_paths=$PATHS" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Run Docker Integration Script
|
||||||
|
run: |
|
||||||
|
chmod +x ./scripts/run_tests_in_docker_environment.sh
|
||||||
|
|
||||||
|
# We update the pytest command to use the specific paths built above.
|
||||||
|
# Note: We still keep your 'not' filter to skip E2E tests unless you want them.
|
||||||
|
TARGET_PATHS="${{ steps.builder.outputs.final_paths }}"
|
||||||
|
SED_COMMAND="pytest $TARGET_PATHS -m 'not (docker or compose or feature_complete)'"
|
||||||
|
|
||||||
|
echo "🚀 Targeted Pytest Command: $SED_COMMAND"
|
||||||
|
|
||||||
|
sed -i "s|pytest -m 'not (docker or compose or feature_complete)'|$SED_COMMAND|g" ./scripts/run_tests_in_docker_environment.sh
|
||||||
|
|
||||||
|
./scripts/run_tests_in_docker_environment.sh
|
||||||
|
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker stop netalertx-test-container || true
|
||||||
|
docker rm netalertx-test-container || true
|
||||||
4
.github/workflows/social_post_on_release.yml → .github/workflows/social-post-on-release.yml
vendored
Executable file → Normal file
4
.github/workflows/social_post_on_release.yml → .github/workflows/social-post-on-release.yml
vendored
Executable file → Normal file
@@ -7,8 +7,8 @@ jobs:
|
|||||||
post-discord:
|
post-discord:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Wait for 15 minutes
|
- name: Wait for 60 minutes
|
||||||
run: sleep 900 # 15 minutes delay
|
run: sleep 3600 # 60 minutes delay
|
||||||
|
|
||||||
- name: Post to Discord
|
- name: Post to Discord
|
||||||
run: |
|
run: |
|
||||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -24,6 +24,8 @@ front/api/*
|
|||||||
/api/*
|
/api/*
|
||||||
**/plugins/**/*.log
|
**/plugins/**/*.log
|
||||||
**/plugins/cloud_services/*
|
**/plugins/cloud_services/*
|
||||||
|
**/plugins/cloud_connector/*
|
||||||
|
**/plugins/heartbeat/*
|
||||||
**/%40eaDir/
|
**/%40eaDir/
|
||||||
**/@eaDir/
|
**/@eaDir/
|
||||||
|
|
||||||
@@ -44,3 +46,7 @@ front/css/cloud_services.css
|
|||||||
|
|
||||||
docker-compose.yml.ffsb42
|
docker-compose.yml.ffsb42
|
||||||
.env.omada.ffsb42
|
.env.omada.ffsb42
|
||||||
|
.venv
|
||||||
|
test_mounts/
|
||||||
|
.gemini/settings.json
|
||||||
|
.vscode/mcp.json
|
||||||
|
|||||||
13
.vscode/settings.json
vendored
13
.vscode/settings.json
vendored
@@ -4,10 +4,12 @@
|
|||||||
"python.testing.pytestEnabled": true,
|
"python.testing.pytestEnabled": true,
|
||||||
"python.testing.unittestEnabled": false,
|
"python.testing.unittestEnabled": false,
|
||||||
"python.testing.pytestArgs": [
|
"python.testing.pytestArgs": [
|
||||||
"test"
|
"test"
|
||||||
],
|
],
|
||||||
// Ensure VS Code uses the devcontainer virtualenv
|
// NetAlertX devcontainer uses /opt/venv; this ensures pip/pytest are available for discovery.
|
||||||
"python.defaultInterpreterPath": "/opt/venv/bin/python",
|
"python.defaultInterpreterPath": "/opt/venv/bin/python",
|
||||||
|
"python.testing.cwd": "${workspaceFolder}",
|
||||||
|
"python.testing.autoTestDiscoverOnSaveEnabled": true,
|
||||||
// Let the Python extension invoke pytest via the interpreter; avoid hardcoded paths
|
// Let the Python extension invoke pytest via the interpreter; avoid hardcoded paths
|
||||||
// Removed python.testing.pytestPath and legacy pytest.command overrides
|
// Removed python.testing.pytestPath and legacy pytest.command overrides
|
||||||
|
|
||||||
@@ -16,8 +18,7 @@
|
|||||||
"zsh": {
|
"zsh": {
|
||||||
"path": "/bin/zsh"
|
"path": "/bin/zsh"
|
||||||
}
|
}
|
||||||
}
|
},
|
||||||
,
|
|
||||||
// Fallback for older VS Code versions or schema validators that don't accept custom profiles
|
// Fallback for older VS Code versions or schema validators that don't accept custom profiles
|
||||||
"terminal.integrated.shell.linux": "/usr/bin/zsh"
|
"terminal.integrated.shell.linux": "/usr/bin/zsh"
|
||||||
,
|
,
|
||||||
@@ -29,5 +30,7 @@
|
|||||||
"python.formatting.provider": "black",
|
"python.formatting.provider": "black",
|
||||||
"python.formatting.blackArgs": [
|
"python.formatting.blackArgs": [
|
||||||
"--line-length=180"
|
"--line-length=180"
|
||||||
]
|
],
|
||||||
|
"chat.useAgentSkills": true,
|
||||||
|
|
||||||
}
|
}
|
||||||
58
.vscode/tasks.json
vendored
58
.vscode/tasks.json
vendored
@@ -6,6 +6,12 @@
|
|||||||
"type": "promptString",
|
"type": "promptString",
|
||||||
"description": "DANGER! Type YES to confirm pruning all unused Docker resources. This will destroy containers, images, volumes, and networks!",
|
"description": "DANGER! Type YES to confirm pruning all unused Docker resources. This will destroy containers, images, volumes, and networks!",
|
||||||
"default": ""
|
"default": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "prNumber",
|
||||||
|
"type": "promptString",
|
||||||
|
"description": "Enter GitHub PR Number",
|
||||||
|
"default": "1405"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"tasks": [
|
"tasks": [
|
||||||
@@ -21,7 +27,6 @@
|
|||||||
"showReuseMessage": false,
|
"showReuseMessage": false,
|
||||||
"group": "POSIX Tasks"
|
"group": "POSIX Tasks"
|
||||||
},
|
},
|
||||||
|
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"group": {
|
"group": {
|
||||||
"kind": "build",
|
"kind": "build",
|
||||||
@@ -59,6 +64,31 @@
|
|||||||
"color": "terminal.ansiRed"
|
"color": "terminal.ansiRed"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"label": "[Dev Container] Load Sample Devices",
|
||||||
|
"type": "shell",
|
||||||
|
"command": "./isDevContainer.sh || exit 1; ./load-devices.sh",
|
||||||
|
"detail": "Generates a synthetic device inventory and imports it into the devcontainer database via /devices/import.",
|
||||||
|
"options": {
|
||||||
|
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts",
|
||||||
|
"env": {
|
||||||
|
"CSV_PATH": "/tmp/netalertx-devices.csv"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"presentation": {
|
||||||
|
"echo": true,
|
||||||
|
"reveal": "always",
|
||||||
|
"panel": "shared",
|
||||||
|
"showReuseMessage": false,
|
||||||
|
"clear": false,
|
||||||
|
"group": "Devcontainer"
|
||||||
|
},
|
||||||
|
"problemMatcher": [],
|
||||||
|
"icon": {
|
||||||
|
"id": "cloud-upload",
|
||||||
|
"color": "terminal.ansiYellow"
|
||||||
|
}
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"label": "[Dev Container] Re-Run Startup Script",
|
"label": "[Dev Container] Re-Run Startup Script",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
@@ -73,7 +103,6 @@
|
|||||||
"panel": "shared",
|
"panel": "shared",
|
||||||
"showReuseMessage": false
|
"showReuseMessage": false
|
||||||
},
|
},
|
||||||
|
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"icon": {
|
"icon": {
|
||||||
"id": "beaker",
|
"id": "beaker",
|
||||||
@@ -233,6 +262,31 @@
|
|||||||
"id": "package",
|
"id": "package",
|
||||||
"color": "terminal.ansiBlue"
|
"color": "terminal.ansiBlue"
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"label": "Analyze PR Instructions",
|
||||||
|
"type": "shell",
|
||||||
|
"command": "python3",
|
||||||
|
"detail": "Pull all of Coderabbit's suggestions from a pull request. Requires `gh auth login` first.",
|
||||||
|
"options": {
|
||||||
|
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
||||||
|
},
|
||||||
|
"args": [
|
||||||
|
"/workspaces/NetAlertX/.devcontainer/scripts/coderabbit-pr-parser.py",
|
||||||
|
"${input:prNumber}"
|
||||||
|
],
|
||||||
|
"problemMatcher": [],
|
||||||
|
"presentation": {
|
||||||
|
"echo": true,
|
||||||
|
"reveal": "always",
|
||||||
|
"panel": "new",
|
||||||
|
"showReuseMessage": false,
|
||||||
|
"focus": true
|
||||||
|
},
|
||||||
|
"icon": {
|
||||||
|
"id": "comment-discussion",
|
||||||
|
"color": "terminal.ansiBlue"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,23 +1,38 @@
|
|||||||
# 🤝 Contributing to NetAlertX
|
# Contributing to NetAlertX
|
||||||
|
|
||||||
First off, **thank you** for taking the time to contribute! NetAlertX is built and improved with the help of passionate people like you.
|
First off, **thank you** for taking the time to contribute! NetAlertX is built and improved with the help of passionate people like you.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 📂 Issues, Bugs, and Feature Requests
|
## Issues, Bugs, and Feature Requests
|
||||||
|
|
||||||
Please use the [GitHub Issue Tracker](https://github.com/jokob-sk/NetAlertX/issues) for:
|
Please use the [GitHub Issue Tracker](https://github.com/netalertx/NetAlertX/issues) for:
|
||||||
- Bug reports 🐞
|
- Bug reports
|
||||||
- Feature requests 💡
|
- Feature requests
|
||||||
- Documentation feedback 📖
|
- Documentation feedback
|
||||||
|
|
||||||
Before opening a new issue:
|
Before opening a new issue:
|
||||||
- 🛑 [Check Common Issues & Debug Tips](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEBUG_TIPS.md#common-issues)
|
- [Check Common Issues & Debug Tips](https://docs.netalertx.com/DEBUG_TIPS#common-issues)
|
||||||
- 🔍 [Search Closed Issues](https://github.com/jokob-sk/NetAlertX/issues?q=is%3Aissue+is%3Aclosed)
|
- [Search Closed Issues](https://github.com/netalertx/NetAlertX/issues?q=is%3Aissue+is%3Aclosed)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🚀 Submitting Pull Requests (PRs)
|
## Use of AI
|
||||||
|
|
||||||
|
Use of AI-assisted tools is permitted, provided all generated code is reviewed, understood, and verified before submission.
|
||||||
|
|
||||||
|
- All AI-generated code must meet the project's **quality, security, and performance standards**.
|
||||||
|
- Contributors are responsible for **fully understanding** any code they submit, regardless of how it was produced.
|
||||||
|
- Prefer **clarity and maintainability over cleverness or brevity**. Readable code is always favored over dense or obfuscated implementations.
|
||||||
|
- Follow the **DRY (Don't Repeat Yourself) principle** where appropriate, without sacrificing readability.
|
||||||
|
- Do not submit code that you cannot confidently explain or debug.
|
||||||
|
|
||||||
|
All changes must pass the **full test suite** before opening a PR.
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Submitting Pull Requests (PRs)
|
||||||
|
|
||||||
We welcome PRs to improve the code, docs, or UI!
|
We welcome PRs to improve the code, docs, or UI!
|
||||||
|
|
||||||
@@ -27,11 +42,24 @@ Please:
|
|||||||
- Follow existing **code style and structure**
|
- Follow existing **code style and structure**
|
||||||
- Provide a clear title and description for your PR
|
- Provide a clear title and description for your PR
|
||||||
- If relevant, add or update tests and documentation
|
- If relevant, add or update tests and documentation
|
||||||
- For plugins, refer to the [Plugin Dev Guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS_DEV.md)
|
- For plugins, refer to the [Plugin Dev Guide](https://docs.netalertx.com/PLUGINS_DEV)
|
||||||
|
- Switch the PR to DRAFT mode if still being worked on
|
||||||
|
- Keep PRs **focused and minimal** — avoid unrelated changes in a single PR
|
||||||
|
- PRs that do not meet these guidelines may be closed without review
|
||||||
|
|
||||||
|
## Commit Messages
|
||||||
|
|
||||||
|
- Use clear, descriptive commit messages
|
||||||
|
- Explain *why* a change was made, not just *what* changed
|
||||||
|
- Reference related issues where applicable
|
||||||
|
|
||||||
|
## Code Quality
|
||||||
|
|
||||||
|
- Read and follow the [code standards](/.github/skills/code-standards/SKILL.md)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🌟 First-Time Contributors
|
## First-Time Contributors
|
||||||
|
|
||||||
New to open source? Check out these resources:
|
New to open source? Check out these resources:
|
||||||
- [How to Fork and Submit a PR](https://opensource.guide/how-to-contribute/)
|
- [How to Fork and Submit a PR](https://opensource.guide/how-to-contribute/)
|
||||||
@@ -39,15 +67,15 @@ New to open source? Check out these resources:
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🔐 Code of Conduct
|
## Code of Conduct
|
||||||
|
|
||||||
By participating, you agree to follow our [Code of Conduct](./CODE_OF_CONDUCT.md), which ensures a respectful and welcoming community.
|
By participating, you agree to follow our [Code of Conduct](./CODE_OF_CONDUCT.md), which ensures a respectful and welcoming community.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 📬 Contact
|
## Contact
|
||||||
|
|
||||||
If you have more in-depth questions or want to discuss contributing in other ways, feel free to reach out at:
|
If you have more in-depth questions or want to discuss contributing in other ways, feel free to reach out at:
|
||||||
📧 [jokob@duck.com](mailto:jokob@duck.com?subject=NetAlertX%20Contribution)
|
[jokob.sk@gmail.com](mailto:jokob.sk@gmail.com?subject=NetAlertX%20Contribution)
|
||||||
|
|
||||||
We appreciate every contribution, big or small! 💙
|
We appreciate every contribution, big or small! 💙
|
||||||
|
|||||||
89
Dockerfile
89
Dockerfile
@@ -26,13 +26,26 @@ ENV PATH="/opt/venv/bin:$PATH"
|
|||||||
|
|
||||||
# Install build dependencies
|
# Install build dependencies
|
||||||
COPY requirements.txt /tmp/requirements.txt
|
COPY requirements.txt /tmp/requirements.txt
|
||||||
RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git \
|
# hadolint ignore=DL3018
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
bash \
|
||||||
|
shadow \
|
||||||
|
python3 \
|
||||||
|
python3-dev \
|
||||||
|
py3-psutil \
|
||||||
|
gcc \
|
||||||
|
musl-dev \
|
||||||
|
libffi-dev \
|
||||||
|
openssl-dev \
|
||||||
|
git \
|
||||||
|
rust \
|
||||||
|
cargo \
|
||||||
&& python -m venv /opt/venv
|
&& python -m venv /opt/venv
|
||||||
|
|
||||||
# Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy
|
# Upgrade pip/wheel/setuptools and install Python packages
|
||||||
# into hardened stage without worrying about permissions and keeps image size small. Keeping the commands
|
# hadolint ignore=DL3013, DL3042
|
||||||
# together makes for a slightly smaller image size.
|
RUN python -m pip install --upgrade pip setuptools wheel && \
|
||||||
RUN pip install --no-cache-dir -r /tmp/requirements.txt && \
|
pip install --prefer-binary --no-cache-dir -r /tmp/requirements.txt && \
|
||||||
chmod -R u-rwx,g-rwx /opt
|
chmod -R u-rwx,g-rwx /opt
|
||||||
|
|
||||||
# second stage is the main runtime stage with just the minimum required to run the application
|
# second stage is the main runtime stage with just the minimum required to run the application
|
||||||
@@ -40,6 +53,12 @@ RUN pip install --no-cache-dir -r /tmp/requirements.txt && \
|
|||||||
FROM alpine:3.22 AS runner
|
FROM alpine:3.22 AS runner
|
||||||
|
|
||||||
ARG INSTALL_DIR=/app
|
ARG INSTALL_DIR=/app
|
||||||
|
# Runtime service account (override at build; container user can still be overridden at run time)
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
# Read-only lock owner (separate from service account to avoid UID/GID collisions)
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
# NetAlertX app directories
|
# NetAlertX app directories
|
||||||
ENV NETALERTX_APP=${INSTALL_DIR}
|
ENV NETALERTX_APP=${INSTALL_DIR}
|
||||||
@@ -113,14 +132,14 @@ ENV NETALERTX_USER=netalertx NETALERTX_GROUP=netalertx
|
|||||||
ENV LANG=C.UTF-8
|
ENV LANG=C.UTF-8
|
||||||
|
|
||||||
|
|
||||||
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \
|
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap fping \
|
||||||
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
||||||
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
|
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 py3-psutil envsubst \
|
||||||
nginx supercronic shadow && \
|
nginx supercronic shadow su-exec jq && \
|
||||||
rm -Rf /var/cache/apk/* && \
|
rm -Rf /var/cache/apk/* && \
|
||||||
rm -Rf /etc/nginx && \
|
rm -Rf /etc/nginx && \
|
||||||
addgroup -g 20211 ${NETALERTX_GROUP} && \
|
addgroup -g ${NETALERTX_GID} ${NETALERTX_GROUP} && \
|
||||||
adduser -u 20211 -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \
|
adduser -u ${NETALERTX_UID} -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \
|
||||||
apk del shadow
|
apk del shadow
|
||||||
|
|
||||||
|
|
||||||
@@ -138,24 +157,23 @@ RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FO
|
|||||||
|
|
||||||
# Copy version information into the image
|
# Copy version information into the image
|
||||||
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION
|
||||||
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION_PREV
|
|
||||||
|
|
||||||
# Copy the virtualenv from the builder stage
|
# Copy the virtualenv from the builder stage (owned by readonly lock owner)
|
||||||
COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
COPY --from=builder --chown=${READONLY_UID}:${READONLY_GID} ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
||||||
|
|
||||||
|
|
||||||
# Initialize each service with the dockerfiles/init-*.sh scripts, once.
|
# Initialize each service with the dockerfiles/init-*.sh scripts, once.
|
||||||
# This is done after the copy of the venv to ensure the venv is in place
|
# This is done after the copy of the venv to ensure the venv is in place
|
||||||
# although it may be quicker to do it before the copy, it keeps the image
|
# although it may be quicker to do it before the copy, it keeps the image
|
||||||
# layers smaller to do it after.
|
# layers smaller to do it after.
|
||||||
RUN for vfile in .VERSION .VERSION_PREV; do \
|
# hadolint ignore=DL3018
|
||||||
|
RUN for vfile in .VERSION; do \
|
||||||
if [ ! -f "${NETALERTX_APP}/${vfile}" ]; then \
|
if [ ! -f "${NETALERTX_APP}/${vfile}" ]; then \
|
||||||
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/${vfile}"; \
|
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/${vfile}"; \
|
||||||
fi; \
|
fi; \
|
||||||
chown 20212:20212 "${NETALERTX_APP}/${vfile}"; \
|
chown ${READONLY_UID}:${READONLY_GID} "${NETALERTX_APP}/${vfile}"; \
|
||||||
done && \
|
done && \
|
||||||
apk add --no-cache libcap && \
|
apk add --no-cache libcap && \
|
||||||
setcap cap_net_raw+ep /bin/busybox && \
|
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
|
||||||
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
||||||
@@ -170,13 +188,19 @@ RUN for vfile in .VERSION .VERSION_PREV; do \
|
|||||||
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
|
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
|
||||||
|
|
||||||
|
|
||||||
ENTRYPOINT ["/bin/sh","/entrypoint.sh"]
|
ENTRYPOINT ["/bin/bash","/entrypoint.sh"]
|
||||||
|
|
||||||
# Final hardened stage to improve security by setting least possible permissions and removing sudo access.
|
# Final hardened stage to improve security by setting least possible permissions and removing sudo access.
|
||||||
# When complete, if the image is compromised, there's not much that can be done with it.
|
# When complete, if the image is compromised, there's not much that can be done with it.
|
||||||
# This stage is separate from Runner stage so that devcontainer can use the Runner stage.
|
# This stage is separate from Runner stage so that devcontainer can use the Runner stage.
|
||||||
FROM runner AS hardened
|
FROM runner AS hardened
|
||||||
|
|
||||||
|
# Re-declare UID/GID args for this stage
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
ENV UMASK=0077
|
ENV UMASK=0077
|
||||||
|
|
||||||
# Create readonly user and group with no shell access.
|
# Create readonly user and group with no shell access.
|
||||||
@@ -184,8 +208,8 @@ ENV UMASK=0077
|
|||||||
# AI may claim this is stupid, but it's actually least possible permissions as
|
# AI may claim this is stupid, but it's actually least possible permissions as
|
||||||
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
|
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
|
||||||
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
|
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
|
||||||
RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \
|
RUN addgroup -g ${READONLY_GID} "${READ_ONLY_GROUP}" && \
|
||||||
adduser -u 20212 -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"
|
adduser -u ${READONLY_UID} -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"
|
||||||
|
|
||||||
|
|
||||||
# reduce permissions to minimum necessary for all NetAlertX files and folders
|
# reduce permissions to minimum necessary for all NetAlertX files and folders
|
||||||
@@ -196,24 +220,27 @@ RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \
|
|||||||
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
||||||
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
||||||
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
||||||
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \
|
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \
|
||||||
chown -R ${NETALERTX_USER}:${NETALERTX_GROUP} ${READ_WRITE_FOLDERS} && \
|
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /root-entrypoint.sh /opt /opt/venv && \
|
||||||
chmod -R 600 ${READ_WRITE_FOLDERS} && \
|
chmod 005 /entrypoint.sh /root-entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
||||||
find ${READ_WRITE_FOLDERS} -type d -exec chmod 700 {} + && \
|
# Do not bake first-run artifacts into the image. If present, Docker volume copy-up
|
||||||
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \
|
# will persist restrictive ownership/modes into fresh named volumes, breaking
|
||||||
chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
# arbitrary non-root UID/GID runs.
|
||||||
for dir in ${READ_WRITE_FOLDERS}; do \
|
rm -f \
|
||||||
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 "$dir"; \
|
"${NETALERTX_CONFIG}/app.conf" \
|
||||||
done && \
|
"${NETALERTX_DB_FILE}" \
|
||||||
|
"${NETALERTX_DB_FILE}-shm" \
|
||||||
|
"${NETALERTX_DB_FILE}-wal" || true && \
|
||||||
apk del apk-tools && \
|
apk del apk-tools && \
|
||||||
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
|
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
|
||||||
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \
|
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \
|
||||||
/srv /media && \
|
/srv /media && \
|
||||||
sed -i "/^\(${READ_ONLY_USER}\|${NETALERTX_USER}\):/!d" /etc/passwd && \
|
# Preserve root and system identities so hardened entrypoint never needs to patch /etc/passwd or /etc/group at runtime.
|
||||||
sed -i "/^\(${READ_ONLY_GROUP}\|${NETALERTX_GROUP}\):/!d" /etc/group && \
|
|
||||||
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
||||||
|
USER "0"
|
||||||
|
|
||||||
USER netalertx
|
# Call root-entrypoint.sh which drops priviliges to run entrypoint.sh.
|
||||||
|
ENTRYPOINT ["/root-entrypoint.sh"]
|
||||||
|
|
||||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
||||||
CMD /services/healthcheck.sh
|
CMD /services/healthcheck.sh
|
||||||
|
|||||||
@@ -1,57 +1,47 @@
|
|||||||
# Warning - use of this unhardened image is not recommended for production use.
|
# Stage 1: Builder
|
||||||
# This image is provided for backward compatibility, development and testing purposes only.
|
# Install build dependencies and create virtual environment
|
||||||
# For production use, please use the hardened image built with Alpine. This image attempts to
|
FROM debian:bookworm-slim AS builder
|
||||||
# treat a container as an operating system, which is an anti-pattern and a common source of
|
|
||||||
# security issues.
|
|
||||||
#
|
|
||||||
# The default Dockerfile/docker-compose image contains the following security improvements
|
|
||||||
# over the Debian image:
|
|
||||||
# - read-only filesystem
|
|
||||||
# - no sudo access
|
|
||||||
# - least possible permissions on all files and folders
|
|
||||||
# - Root user has all permissions revoked and is unused
|
|
||||||
# - Secure umask applied so files are owner-only by default
|
|
||||||
# - non-privileged user runs the application
|
|
||||||
# - no shell access for non-privileged users
|
|
||||||
# - no unnecessary packages or services
|
|
||||||
# - reduced capabilities
|
|
||||||
# - tmpfs for writable folders
|
|
||||||
# - healthcheck
|
|
||||||
# - no package managers
|
|
||||||
# - no compilers or build tools
|
|
||||||
# - no systemd, uses lightweight init system
|
|
||||||
# - no persistent storage except for config and db volumes
|
|
||||||
# - minimal image size due to segmented build stages
|
|
||||||
# - minimal base image (Alpine Linux)
|
|
||||||
# - minimal python environment (venv, no pip)
|
|
||||||
# - minimal stripped web server
|
|
||||||
# - minimal stripped php environment
|
|
||||||
# - minimal services (nginx, php-fpm, crond, no unnecessary services or service managers)
|
|
||||||
# - minimal users and groups (netalertx and readonly only, no others)
|
|
||||||
# - minimal permissions (read-only for most files and folders, write-only for necessary folders)
|
|
||||||
# - minimal capabilities (NET_ADMIN and NET_RAW only, no others)
|
|
||||||
# - minimal environment variables (only necessary ones, no others)
|
|
||||||
# - minimal entrypoint (only necessary commands, no others)
|
|
||||||
# - Uses the same base image as the development environmnment (Alpine Linux)
|
|
||||||
# - Uses the same services as the development environment (nginx, php-fpm, crond)
|
|
||||||
# - Uses the same environment variables as the development environment (only necessary ones, no others)
|
|
||||||
# - Uses the same file and folder structure as the development environment (only necessary ones, no others)
|
|
||||||
# NetAlertX is designed to be run as an unattended network security monitoring appliance, which means it
|
|
||||||
# should be able to operate without human intervention. Overall, the hardened image is designed to be as
|
|
||||||
# secure as possible while still being functional and is recommended because you cannot attack a surface
|
|
||||||
# that isn't there.
|
|
||||||
|
|
||||||
|
ENV PYTHONUNBUFFERED=1
|
||||||
|
ENV VIRTUAL_ENV=/opt/venv
|
||||||
|
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
|
||||||
|
|
||||||
FROM debian:bookworm-slim
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
python3 \
|
||||||
|
python3-dev \
|
||||||
|
python3-pip \
|
||||||
|
python3-psutil \
|
||||||
|
python3-venv \
|
||||||
|
gcc \
|
||||||
|
git \
|
||||||
|
libffi-dev \
|
||||||
|
libssl-dev \
|
||||||
|
rustc \
|
||||||
|
cargo \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
#TZ=Europe/London
|
RUN python3 -m venv ${VIRTUAL_ENV}
|
||||||
|
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
|
||||||
|
|
||||||
|
COPY requirements.txt /tmp/requirements.txt
|
||||||
|
RUN pip install --upgrade pip setuptools wheel && \
|
||||||
|
pip install --no-cache-dir -r /tmp/requirements.txt
|
||||||
|
|
||||||
|
# Stage 2: Runner
|
||||||
|
# Main runtime stage with minimum requirements
|
||||||
|
FROM debian:bookworm-slim AS runner
|
||||||
|
|
||||||
|
ARG INSTALL_DIR=/app
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
# NetAlertX app directories
|
|
||||||
ENV INSTALL_DIR=/app
|
|
||||||
ENV NETALERTX_APP=${INSTALL_DIR}
|
ENV NETALERTX_APP=${INSTALL_DIR}
|
||||||
ENV NETALERTX_DATA=/data
|
ENV NETALERTX_DATA=/data
|
||||||
ENV NETALERTX_CONFIG=${NETALERTX_DATA}/config
|
ENV NETALERTX_CONFIG=${NETALERTX_DATA}/config
|
||||||
ENV NETALERTX_FRONT=${NETALERTX_APP}/front
|
ENV NETALERTX_FRONT=${NETALERTX_APP}/front
|
||||||
|
ENV NETALERTX_PLUGINS=${NETALERTX_FRONT}/plugins
|
||||||
ENV NETALERTX_SERVER=${NETALERTX_APP}/server
|
ENV NETALERTX_SERVER=${NETALERTX_APP}/server
|
||||||
ENV NETALERTX_API=/tmp/api
|
ENV NETALERTX_API=/tmp/api
|
||||||
ENV NETALERTX_DB=${NETALERTX_DATA}/db
|
ENV NETALERTX_DB=${NETALERTX_DATA}/db
|
||||||
@@ -59,8 +49,8 @@ ENV NETALERTX_DB_FILE=${NETALERTX_DB}/app.db
|
|||||||
ENV NETALERTX_BACK=${NETALERTX_APP}/back
|
ENV NETALERTX_BACK=${NETALERTX_APP}/back
|
||||||
ENV NETALERTX_LOG=/tmp/log
|
ENV NETALERTX_LOG=/tmp/log
|
||||||
ENV NETALERTX_PLUGINS_LOG=${NETALERTX_LOG}/plugins
|
ENV NETALERTX_PLUGINS_LOG=${NETALERTX_LOG}/plugins
|
||||||
|
ENV NETALERTX_CONFIG_FILE=${NETALERTX_CONFIG}/app.conf
|
||||||
|
|
||||||
# NetAlertX log files
|
|
||||||
ENV LOG_IP_CHANGES=${NETALERTX_LOG}/IP_changes.log
|
ENV LOG_IP_CHANGES=${NETALERTX_LOG}/IP_changes.log
|
||||||
ENV LOG_APP=${NETALERTX_LOG}/app.log
|
ENV LOG_APP=${NETALERTX_LOG}/app.log
|
||||||
ENV LOG_APP_FRONT=${NETALERTX_LOG}/app_front.log
|
ENV LOG_APP_FRONT=${NETALERTX_LOG}/app_front.log
|
||||||
@@ -75,102 +65,178 @@ ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
|
|||||||
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
|
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
|
||||||
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log
|
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log
|
||||||
|
|
||||||
# System Services configuration files
|
ENV ENTRYPOINT_CHECKS=/entrypoint.d
|
||||||
ENV SYSTEM_SERVICES=/services
|
ENV SYSTEM_SERVICES=/services
|
||||||
|
ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
|
||||||
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
|
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
|
||||||
ENV SYSTEM_NGINIX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
|
ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
|
||||||
ENV SYSTEM_NGINX_CONFIG_FILE=${SYSTEM_NGINIX_CONFIG}/nginx.conf
|
ENV SYSTEM_NGINX_CONFIG_TEMPLATE=${SYSTEM_NGINX_CONFIG}/netalertx.conf.template
|
||||||
|
ENV SYSTEM_SERVICES_CONFIG_CRON=${SYSTEM_SERVICES_CONFIG}/cron
|
||||||
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
|
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
|
||||||
ENV NETALERTX_CONFIG_FILE=${NETALERTX_CONFIG}/app.conf
|
ENV SYSTEM_SERVICES_ACTIVE_CONFIG_FILE=${SYSTEM_SERVICES_ACTIVE_CONFIG}/nginx.conf
|
||||||
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
|
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
|
||||||
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
|
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
|
||||||
ENV SYSTEM_SERVICES_CROND=${SYSTEM_SERVICES_CONFIG}/crond
|
|
||||||
ENV SYSTEM_SERVICES_RUN=/tmp/run
|
ENV SYSTEM_SERVICES_RUN=/tmp/run
|
||||||
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
|
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
|
||||||
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
|
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
|
||||||
ENV PHP_FPM_CONFIG_FILE=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.conf
|
ENV PHP_FPM_CONFIG_FILE=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.conf
|
||||||
|
|
||||||
#Python environment
|
ENV READ_ONLY_FOLDERS="${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER} ${SYSTEM_SERVICES} \
|
||||||
ENV PYTHONPATH=${NETALERTX_SERVER}
|
${SYSTEM_SERVICES_CONFIG} ${ENTRYPOINT_CHECKS}"
|
||||||
|
ENV READ_WRITE_FOLDERS="${NETALERTX_DATA} ${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_API} \
|
||||||
|
${NETALERTX_LOG} ${NETALERTX_PLUGINS_LOG} ${SYSTEM_SERVICES_RUN} \
|
||||||
|
${SYSTEM_SERVICES_RUN_TMP} ${SYSTEM_SERVICES_RUN_LOG} \
|
||||||
|
${SYSTEM_SERVICES_ACTIVE_CONFIG}"
|
||||||
|
|
||||||
ENV PYTHONUNBUFFERED=1
|
ENV PYTHONUNBUFFERED=1
|
||||||
ENV VIRTUAL_ENV=/opt/venv
|
ENV VIRTUAL_ENV=/opt/venv
|
||||||
ENV VIRTUAL_ENV_BIN=/opt/venv/bin
|
ENV VIRTUAL_ENV_BIN=/opt/venv/bin
|
||||||
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}:/services"
|
ENV PYTHONPATH=${NETALERTX_APP}:${NETALERTX_SERVER}:${NETALERTX_PLUGINS}:${VIRTUAL_ENV}/lib/python3.11/site-packages
|
||||||
ENV VENDORSPATH=/app/back/ieee-oui.txt
|
ENV PATH="${SYSTEM_SERVICES}:${VIRTUAL_ENV_BIN}:$PATH"
|
||||||
ENV VENDORSPATH_NEWEST=${SYSTEM_SERVICES_RUN_TMP}/ieee-oui.txt
|
|
||||||
|
|
||||||
|
|
||||||
# App Environment
|
|
||||||
ENV LISTEN_ADDR=0.0.0.0
|
ENV LISTEN_ADDR=0.0.0.0
|
||||||
ENV PORT=20211
|
ENV PORT=20211
|
||||||
ENV NETALERTX_DEBUG=0
|
ENV NETALERTX_DEBUG=0
|
||||||
|
ENV VENDORSPATH=/app/back/ieee-oui.txt
|
||||||
#Container environment
|
ENV VENDORSPATH_NEWEST=${SYSTEM_SERVICES_RUN_TMP}/ieee-oui.txt
|
||||||
ENV ENVIRONMENT=debian
|
ENV ENVIRONMENT=debian
|
||||||
ENV USER=netalertx
|
ENV READ_ONLY_USER=readonly READ_ONLY_GROUP=readonly
|
||||||
ENV USER_ID=1000
|
ENV NETALERTX_USER=netalertx NETALERTX_GROUP=netalertx
|
||||||
ENV USER_GID=1000
|
ENV LANG=C.UTF-8
|
||||||
|
|
||||||
# Todo, figure out why using a workdir instead of full paths don't work
|
# Install dependencies
|
||||||
# Todo, do we still need all these packages? I can already see sudo which isn't needed
|
# Using sury.org for PHP 8.3 to match Alpine version
|
||||||
|
|
||||||
|
|
||||||
# create pi user and group
|
|
||||||
# add root and www-data to pi group so they can r/w files and db
|
|
||||||
RUN groupadd --gid "${USER_GID}" "${USER}" && \
|
|
||||||
useradd \
|
|
||||||
--uid ${USER_ID} \
|
|
||||||
--gid ${USER_GID} \
|
|
||||||
--create-home \
|
|
||||||
--shell /bin/bash \
|
|
||||||
${USER} && \
|
|
||||||
usermod -a -G ${USER_GID} root && \
|
|
||||||
usermod -a -G ${USER_GID} www-data
|
|
||||||
|
|
||||||
COPY --chmod=775 --chown=${USER_ID}:${USER_GID} install/production-filesystem/ /
|
|
||||||
COPY --chmod=775 --chown=${USER_ID}:${USER_GID} . ${INSTALL_DIR}/
|
|
||||||
|
|
||||||
|
|
||||||
# ❗ IMPORTANT - if you modify this file modify the /install/install_dependecies.debian.sh file as well ❗
|
|
||||||
# hadolint ignore=DL3008,DL3027
|
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
tini snmp ca-certificates curl libwww-perl arp-scan sudo gettext-base \
|
tini \
|
||||||
nginx-light php php-cgi php-fpm php-sqlite3 php-curl sqlite3 dnsutils net-tools \
|
snmp \
|
||||||
python3 python3-dev iproute2 nmap python3-pip zip git systemctl usbutils traceroute nbtscan openrc \
|
|
||||||
busybox nginx nginx-core mtr python3-venv && \
|
|
||||||
rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
# While php8.3 is in debian bookworm repos, php-fpm is not included so we need to add sury.org repo
|
|
||||||
# (Ondřej Surý maintains php packages for debian. This is temp until debian includes php-fpm in their
|
|
||||||
# repos. Likely it will be in Debian Trixie.). This keeps the image up-to-date with the alpine version.
|
|
||||||
# hadolint ignore=DL3008
|
|
||||||
RUN apt-get install -y --no-install-recommends \
|
|
||||||
apt-transport-https \
|
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
libwww-perl \
|
||||||
|
arp-scan \
|
||||||
|
sudo \
|
||||||
|
gettext-base \
|
||||||
|
nginx-light \
|
||||||
|
sqlite3 \
|
||||||
|
dnsutils \
|
||||||
|
net-tools \
|
||||||
|
python3 \
|
||||||
|
iproute2 \
|
||||||
|
nmap \
|
||||||
|
fping \
|
||||||
|
zip \
|
||||||
|
git \
|
||||||
|
usbutils \
|
||||||
|
traceroute \
|
||||||
|
nbtscan \
|
||||||
lsb-release \
|
lsb-release \
|
||||||
wget && \
|
wget \
|
||||||
wget -q -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg && \
|
apt-transport-https \
|
||||||
echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list && \
|
gnupg2 \
|
||||||
apt-get update && \
|
mtr \
|
||||||
apt-get install -y --no-install-recommends php8.3-fpm php8.3-cli php8.3-sqlite3 php8.3-common php8.3-curl php8.3-cgi && \
|
procps \
|
||||||
ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 && \
|
gosu \
|
||||||
rm -rf /var/lib/apt/lists/* # make it compatible with alpine version
|
jq \
|
||||||
|
ipcalc \
|
||||||
|
&& wget -qO /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg \
|
||||||
|
&& echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list \
|
||||||
|
&& apt-get update \
|
||||||
|
&& apt-get install -y --no-install-recommends \
|
||||||
|
php8.3-fpm \
|
||||||
|
php8.3-cli \
|
||||||
|
php8.3-sqlite3 \
|
||||||
|
php8.3-common \
|
||||||
|
php8.3-curl \
|
||||||
|
&& ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm \
|
||||||
|
&& ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 \
|
||||||
|
&& ln -s /usr/sbin/gosu /usr/sbin/su-exec \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Setup virtual python environment and use pip3 to install packages
|
# Fix permissions for /tmp BEFORE copying anything that might overwrite it with bad perms
|
||||||
RUN python3 -m venv ${VIRTUAL_ENV} && \
|
RUN chmod 1777 /tmp
|
||||||
/bin/bash -c "source ${VIRTUAL_ENV_BIN}/activate && update-alternatives --install /usr/bin/python python /usr/bin/python3 10 && pip3 install -r ${INSTALL_DIR}/requirements.txt"
|
|
||||||
|
|
||||||
# Configure php-fpm
|
# User setup
|
||||||
RUN chmod -R 755 /services && \
|
RUN groupadd -g ${NETALERTX_GID} ${NETALERTX_GROUP} && \
|
||||||
chown -R ${USER}:${USER_GID} /services && \
|
useradd -u ${NETALERTX_UID} -g ${NETALERTX_GID} -d ${NETALERTX_APP} -s /bin/bash ${NETALERTX_USER}
|
||||||
sed -i 's/^;listen.mode = .*/listen.mode = 0666/' ${SYSTEM_SERVICES_PHP_FPM_D}/www.conf && \
|
|
||||||
printf "user = %s\ngroup = %s\n" "${USER}" "${USER_GID}" >> /services/config/php/php-fpm.d/www.conf
|
|
||||||
|
|
||||||
|
# Copy filesystem (excluding tmp if possible, or we just fix it after)
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} install/production-filesystem/ /
|
||||||
|
# Re-apply sticky bit to /tmp in case COPY overwrote it
|
||||||
|
RUN chmod 1777 /tmp
|
||||||
|
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 back ${NETALERTX_BACK}
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 front ${NETALERTX_FRONT}
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 server ${NETALERTX_SERVER}
|
||||||
|
|
||||||
# Create a buildtimestamp.txt to later check if a new version was released
|
# Create required folders
|
||||||
RUN date +%s > ${INSTALL_DIR}/front/buildtimestamp.txt
|
RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \
|
||||||
USER netalertx:netalertx
|
chmod 750 /entrypoint.sh /root-entrypoint.sh
|
||||||
ENTRYPOINT ["/bin/bash","/entrypoint.sh"]
|
|
||||||
|
|
||||||
|
# Copy Version
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION_PREV
|
||||||
|
|
||||||
|
# Copy venv from builder
|
||||||
|
COPY --from=builder --chown=${READONLY_UID}:${READONLY_GID} ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
||||||
|
|
||||||
|
# Init process
|
||||||
|
RUN for vfile in .VERSION .VERSION_PREV; do \
|
||||||
|
if [ ! -f "${NETALERTX_APP}/${vfile}" ]; then \
|
||||||
|
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/${vfile}"; \
|
||||||
|
fi; \
|
||||||
|
chown ${READONLY_UID}:${READONLY_GID} "${NETALERTX_APP}/${vfile}"; \
|
||||||
|
done && \
|
||||||
|
# Set capabilities for raw socket access
|
||||||
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
||||||
|
setcap cap_net_raw,cap_net_admin+eip /usr/sbin/arp-scan && \
|
||||||
|
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
||||||
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute.db && \
|
||||||
|
# Note: python path needs to be dynamic or verificed
|
||||||
|
# setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
|
||||||
|
/bin/bash /build/init-nginx.sh && \
|
||||||
|
/bin/bash /build/init-php-fpm.sh && \
|
||||||
|
# /bin/bash /build/init-cron.sh && \
|
||||||
|
# Debian cron init might differ, skipping for now or need to check init-cron.sh content
|
||||||
|
# Checking init-backend.sh
|
||||||
|
/bin/bash /build/init-backend.sh && \
|
||||||
|
rm -rf /build && \
|
||||||
|
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
|
||||||
|
|
||||||
|
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
|
||||||
|
|
||||||
|
# Stage 3: Hardened
|
||||||
|
FROM runner AS hardened
|
||||||
|
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
ENV READ_ONLY_USER=readonly READ_ONLY_GROUP=readonly
|
||||||
|
|
||||||
|
# Create readonly user
|
||||||
|
RUN groupadd -g ${READONLY_GID} ${READ_ONLY_GROUP} && \
|
||||||
|
useradd -u ${READONLY_UID} -g ${READONLY_GID} -d /app -s /usr/sbin/nologin ${READ_ONLY_USER}
|
||||||
|
|
||||||
|
# Hardening: Remove package managers and set permissions
|
||||||
|
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
||||||
|
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
||||||
|
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
||||||
|
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \
|
||||||
|
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /root-entrypoint.sh /app /opt /opt/venv && \
|
||||||
|
# Permissions
|
||||||
|
chmod 005 /entrypoint.sh /root-entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
||||||
|
# Cleanups
|
||||||
|
rm -f \
|
||||||
|
"${NETALERTX_CONFIG}/app.conf" \
|
||||||
|
"${NETALERTX_DB_FILE}" \
|
||||||
|
"${NETALERTX_DB_FILE}-shm" \
|
||||||
|
"${NETALERTX_DB_FILE}-wal" || true && \
|
||||||
|
# Remove apt and sensitive files
|
||||||
|
rm -rf /var/lib/apt /var/lib/dpkg /var/cache/apt /usr/bin/apt* /usr/bin/dpkg* \
|
||||||
|
/etc/shadow /etc/gshadow /etc/sudoers /root /home/root && \
|
||||||
|
# Dummy sudo
|
||||||
|
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
||||||
|
|
||||||
|
USER 0
|
||||||
|
ENTRYPOINT ["/root-entrypoint.sh"]
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
||||||
|
CMD /services/healthcheck.sh
|
||||||
|
|||||||
200
README.md
200
README.md
@@ -1,76 +1,12 @@
|
|||||||
[](https://hub.docker.com/r/jokobsk/netalertx)
|
[](https://hub.docker.com/r/jokobsk/netalertx)
|
||||||
[](https://hub.docker.com/r/jokobsk/netalertx)
|
[](https://hub.docker.com/r/jokobsk/netalertx)
|
||||||
[](https://github.com/jokob-sk/NetAlertX/releases)
|
[](https://github.com/netalertx/NetAlertX/releases)
|
||||||
[](https://discord.gg/NczTUTWyRr)
|
[](https://discord.gg/NczTUTWyRr)
|
||||||
[](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
[](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
||||||
|
|
||||||
# NetAlertX - Network, presence scanner and alert framework
|
# NetAlertX - Network Visibility & Asset Intelligence Framework
|
||||||
|
|
||||||
Get visibility of what's going on on your WIFI/LAN network and enable presence detection of important devices. Schedule scans for devices, port changes and get alerts if unknown devices or changes are found. Write your own [Plugin](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md#readme) with auto-generated UI and in-build notification system. Build out and easily maintain your network source of truth (NSoT) and device inventory.
|
![main][main]
|
||||||
|
|
||||||
## 📋 Table of Contents
|
|
||||||
|
|
||||||
- [NetAlertX - Network, presence scanner and alert framework](#netalertx---network-presence-scanner-and-alert-framework)
|
|
||||||
- [📋 Table of Contents](#-table-of-contents)
|
|
||||||
- [🚀 Quick Start](#-quick-start)
|
|
||||||
- [📦 Features](#-features)
|
|
||||||
- [Scanners](#scanners)
|
|
||||||
- [Notification gateways](#notification-gateways)
|
|
||||||
- [Integrations and Plugins](#integrations-and-plugins)
|
|
||||||
- [Workflows](#workflows)
|
|
||||||
- [📚 Documentation](#-documentation)
|
|
||||||
- [🔐 Security \& Privacy](#-security--privacy)
|
|
||||||
- [❓ FAQ](#-faq)
|
|
||||||
- [🐞 Known Issues](#-known-issues)
|
|
||||||
- [📃 Everything else](#-everything-else)
|
|
||||||
- [📧 Get notified what's new](#-get-notified-whats-new)
|
|
||||||
- [🔀 Other Alternative Apps](#-other-alternative-apps)
|
|
||||||
- [💙 Donations](#-donations)
|
|
||||||
- [🏗 Contributors](#-contributors)
|
|
||||||
- [🌍 Translations](#-translations)
|
|
||||||
- [License](#license)
|
|
||||||
|
|
||||||
|
|
||||||
## 🚀 Quick Start
|
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> ⚠️ **Important:** The docker-compose has recently changed. Carefully read the [Migration guide](https://jokob-sk.github.io/NetAlertX/MIGRATION/?h=migrat#12-migration-from-netalertx-v25524) for detailed instructions.
|
|
||||||
|
|
||||||
Start NetAlertX in seconds with Docker:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker run -d \
|
|
||||||
--network=host \
|
|
||||||
--restart unless-stopped \
|
|
||||||
-v /local_data_dir:/data \
|
|
||||||
-v /etc/localtime:/etc/localtime:ro \
|
|
||||||
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
|
|
||||||
-e PORT=20211 \
|
|
||||||
-e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
|
|
||||||
ghcr.io/jokob-sk/netalertx:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
Note: Your `/local_data_dir` should contain a `config` and `db` folder.
|
|
||||||
|
|
||||||
To deploy a containerized instance directly from the source repository, execute the following BASH sequence:
|
|
||||||
```bash
|
|
||||||
git clone https://github.com/jokob-sk/NetAlertX.git
|
|
||||||
cd NetAlertX
|
|
||||||
docker compose up --force-recreate --build
|
|
||||||
# To customize: edit docker-compose.yaml and run that last command again
|
|
||||||
```
|
|
||||||
|
|
||||||
Need help configuring it? Check the [usage guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/README.md) or [full documentation](https://jokob-sk.github.io/NetAlertX/).
|
|
||||||
|
|
||||||
For Home Assistant users: [Click here to add NetAlertX](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
|
||||||
|
|
||||||
For other install methods, check the [installation docs](#-documentation)
|
|
||||||
|
|
||||||
|
|
||||||
| [📑 Docker guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_INSTALLATION.md) | [🚀 Releases](https://github.com/jokob-sk/NetAlertX/releases) | [📚 Docs](https://jokob-sk.github.io/NetAlertX/) | [🔌 Plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md) | [🤖 Ask AI](https://gurubase.io/g/netalertx)
|
|
||||||
|----------------------| ----------------------| ----------------------| ----------------------| ----------------------|
|
|
||||||
|
|
||||||
![showcase][showcase]
|
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>📷 Click for more screenshots</summary>
|
<summary>📷 Click for more screenshots</summary>
|
||||||
@@ -84,11 +20,70 @@ For other install methods, check the [installation docs](#-documentation)
|
|||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
## 📦 Features
|
|
||||||
|
|
||||||
### Scanners
|
Centralized network visibility and continuous asset discovery.
|
||||||
|
|
||||||
The app scans your network for **New devices**, **New connections** (re-connections), **Disconnections**, **"Always Connected" devices down**, Devices **IP changes** and **Internet IP address changes**. Discovery & scan methods include: **arp-scan**, **Pi-hole - DB import**, **Pi-hole - DHCP leases import**, **Generic DHCP leases import**, **UNIFI controller import**, **SNMP-enabled router import**. Check the [Plugins](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md#readme) docs for a full list of avaliable plugins.
|
Monitor devices, detect change, and stay aware across distributed networks.
|
||||||
|
|
||||||
|
NetAlertX provides a centralized "Source of Truth" (NSoT) for network infrastructure. Maintain a real-time inventory of every connected device, identify Shadow IT and unauthorized hardware to maintain regulatory compliance, and automate compliance workflows across distributed sites.
|
||||||
|
|
||||||
|
NetAlertX is designed to bridge the gap between simple network scanning and complex SIEM tools, providing actionable insights without the overhead.
|
||||||
|
|
||||||
|
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
|
- [Quick Start](#quick-start)
|
||||||
|
- [Features](#features)
|
||||||
|
- [Documentation](#documentation)
|
||||||
|
- [Security \& Privacy](#security--privacy)
|
||||||
|
- [FAQ](#faq)
|
||||||
|
- [Troubleshooting Tips](#troubleshooting-tips)
|
||||||
|
- [Everything else](#everything-else)
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> ⚠️ **Important:** The docker-compose has recently changed. Carefully read the [Migration guide](https://docs.netalertx.com/MIGRATION/?h=migrat#12-migration-from-netalertx-v25524) for detailed instructions.
|
||||||
|
|
||||||
|
Start NetAlertX in seconds with Docker:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -d \
|
||||||
|
--network=host \
|
||||||
|
--restart unless-stopped \
|
||||||
|
-v /local_data_dir:/data \
|
||||||
|
-v /etc/localtime:/etc/localtime:ro \
|
||||||
|
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
|
||||||
|
-e PORT=20211 \
|
||||||
|
-e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
|
||||||
|
ghcr.io/netalertx/netalertx:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
Note: Your `/local_data_dir` should contain a `config` and `db` folder.
|
||||||
|
|
||||||
|
To deploy a containerized instance directly from the source repository, execute the following BASH sequence:
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/netalertx/NetAlertX.git
|
||||||
|
cd NetAlertX
|
||||||
|
docker compose up --force-recreate --build
|
||||||
|
# To customize: edit docker-compose.yaml and run that last command again
|
||||||
|
```
|
||||||
|
|
||||||
|
Need help configuring it? Check the [usage guide](https://docs.netalertx.com/README) or [full documentation](https://docs.netalertx.com/).
|
||||||
|
|
||||||
|
For Home Assistant users: [Click here to add NetAlertX](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
||||||
|
|
||||||
|
For other install methods, check the [installation docs](#documentation)
|
||||||
|
|
||||||
|
---
|
||||||
|
### || [Docker guide](https://docs.netalertx.com/DOCKER_INSTALLATION) || [Releases](https://github.com/netalertx/NetAlertX/releases) || [Docs](https://docs.netalertx.com/) || [Plugins](https://docs.netalertx.com/PLUGINS) || [Website](https://netalertx.com)
|
||||||
|
---
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
### Discovery & Asset Intelligence
|
||||||
|
|
||||||
|
Continuous monitoring for unauthorized asset discovery, connection state changes, and IP address management (IPAM) drift. Discovery & scan methods include: **arp-scan**, **Pi-hole - DB import**, **Pi-hole - DHCP leases import**, **Generic DHCP leases import**, **UNIFI controller import**, **SNMP-enabled router import**. Check the [Plugins](https://docs.netalertx.com/PLUGINS#readme) docs for a full list of avaliable plugins.
|
||||||
|
|
||||||
### Notification gateways
|
### Notification gateways
|
||||||
|
|
||||||
@@ -96,71 +91,74 @@ Send notifications to more than 80+ services, including Telegram via [Apprise](h
|
|||||||
|
|
||||||
### Integrations and Plugins
|
### Integrations and Plugins
|
||||||
|
|
||||||
Feed your data and device changes into [Home Assistant](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HOME_ASSISTANT.md), read [API endpoints](https://github.com/jokob-sk/NetAlertX/blob/main/docs/API.md), or use [Webhooks](https://github.com/jokob-sk/NetAlertX/blob/main/docs/WEBHOOK_N8N.md) to setup custom automation flows. You can also
|
Feed your data and device changes into [Home Assistant](https://docs.netalertx.com/HOME_ASSISTANT), read [API endpoints](https://docs.netalertx.com/API), or use [Webhooks](https://docs.netalertx.com/WEBHOOK_N8N) to setup custom automation flows. You can also
|
||||||
build your own scanners with the [Plugin system](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md#readme) in as little as [15 minutes](https://www.youtube.com/watch?v=cdbxlwiWhv8).
|
build your own scanners with the [Plugin system](https://docs.netalertx.com/PLUGINS#readme) in as little as [15 minutes](https://www.youtube.com/watch?v=cdbxlwiWhv8).
|
||||||
|
|
||||||
### Workflows
|
### Workflows
|
||||||
|
|
||||||
The [workflows module](https://github.com/jokob-sk/NetAlertX/blob/main/docs/WORKFLOWS.md) allows to automate repetitive tasks, making network management more efficient. Whether you need to assign newly discovered devices to a specific Network Node, auto-group devices from a given vendor, unarchive a device if detected online, or automatically delete devices, this module provides the flexibility to tailor the automations to your needs.
|
The [workflows module](https://docs.netalertx.com/WORKFLOWS) automates IT governance by enforcing device categorization and cleanup policies. Whether you need to assign newly discovered devices to a specific Network Node, auto-group devices from a given vendor, unarchive a device if detected online, or automatically delete devices, this module provides the flexibility to tailor the automations to your needs.
|
||||||
|
|
||||||
|
|
||||||
## 📚 Documentation
|
## Documentation
|
||||||
<!--- --------------------------------------------------------------------- --->
|
<!--- --------------------------------------------------------------------- --->
|
||||||
|
|
||||||
|
Explore all the [documentation here](https://docs.netalertx.com/) or navigate to a specific installation option below.
|
||||||
|
|
||||||
Supported browsers: Chrome, Firefox
|
Supported browsers: Chrome, Firefox
|
||||||
|
|
||||||
- [[Installation] Docker](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_INSTALLATION.md)
|
- [[Installation] Docker](https://docs.netalertx.com/DOCKER_INSTALLATION)
|
||||||
- [[Installation] Home Assistant](https://github.com/alexbelgium/hassio-addons/tree/master/netalertx)
|
- [[Installation] Home Assistant](https://github.com/alexbelgium/hassio-addons/tree/master/netalertx)
|
||||||
- [[Installation] Bare metal](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HW_INSTALL.md)
|
- [[Installation] Bare metal](https://docs.netalertx.com/HW_INSTALL)
|
||||||
- [[Installation] Unraid App](https://unraid.net/community/apps)
|
- [[Installation] Unraid App](https://unraid.net/community/apps)
|
||||||
- [[Setup] Usage and Configuration](https://github.com/jokob-sk/NetAlertX/blob/main/docs/README.md)
|
- [[Setup] Usage and Configuration](https://docs.netalertx.com/README)
|
||||||
- [[Development] API docs](https://github.com/jokob-sk/NetAlertX/blob/main/docs/API.md)
|
- [[Development] API docs](https://docs.netalertx.com/API)
|
||||||
- [[Development] Custom Plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS_DEV.md)
|
- [[Development] Custom Plugins](https://docs.netalertx.com/PLUGINS_DEV)
|
||||||
|
|
||||||
...or explore all the [documentation here](https://jokob-sk.github.io/NetAlertX/).
|
## Security & Privacy
|
||||||
|
|
||||||
## 🔐 Security & Privacy
|
|
||||||
|
|
||||||
NetAlertX scans your local network and can store metadata about connected devices. By default, all data is stored **locally**. No information is sent to external services unless you explicitly configure notifications or integrations.
|
NetAlertX scans your local network and can store metadata about connected devices. By default, all data is stored **locally**. No information is sent to external services unless you explicitly configure notifications or integrations.
|
||||||
|
|
||||||
To further secure your installation:
|
Compliance & Hardening:
|
||||||
- Run it behind a reverse proxy with authentication
|
- Run it behind a reverse proxy with authentication
|
||||||
- Use firewalls to restrict access to the web UI
|
- Use firewalls to restrict access to the web UI
|
||||||
- Regularly update to the latest version for security patches
|
- Regularly update to the latest version for security patches
|
||||||
|
- Role-Based Access Control (RBAC) via Reverse Proxy: Integrate with your existing SSO/Identity provider for secure dashboard access.
|
||||||
|
|
||||||
See [Security Best Practices](https://github.com/jokob-sk/NetAlertX/security) for more details.
|
See [Security Best Practices](https://github.com/netalertx/NetAlertX/security) for more details.
|
||||||
|
|
||||||
|
|
||||||
## ❓ FAQ
|
## FAQ
|
||||||
|
|
||||||
**Q: Why don’t I see any devices?**
|
**Q: How do I monitor VLANs or remote subnets?**
|
||||||
A: Ensure the container has proper network access (e.g., use `--network host` on Linux). Also check that your scan method is properly configured in the UI.
|
A: Ensure the container has proper network access (e.g., use `--network host` on Linux). Also check that your scan method is properly configured in the UI.
|
||||||
|
|
||||||
**Q: Does this work on Wi-Fi-only devices like Raspberry Pi?**
|
**Q: What is the recommended deployment for high-availability?**
|
||||||
A: Yes, but some scanners (e.g. ARP) work best on Ethernet. For Wi-Fi, try SNMP, DHCP, or Pi-hole import.
|
A: We recommend deploying via Docker with persistent volume mounts for database integrity and running behind a reverse proxy for secure access.
|
||||||
|
|
||||||
**Q: Will this send any data to the internet?**
|
**Q: Will this send any data to the internet?**
|
||||||
A: No. All scans and data remain local, unless you set up cloud-based notifications.
|
A: No. All scans and data remain local, unless you set up cloud-based notifications.
|
||||||
|
|
||||||
**Q: Can I use this without Docker?**
|
**Q: Can I use this without Docker?**
|
||||||
A: Yes! You can install it bare-metal. See the [bare metal installation guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HW_INSTALL.md).
|
A: You can install the application directly on your own hardware by following the [bare metal installation guide](https://docs.netalertx.com/HW_INSTALL).
|
||||||
|
|
||||||
**Q: Where is the data stored?**
|
**Q: Where is the data stored?**
|
||||||
A: In the `/data/config` and `/data/db` folders. Back up these folders regularly.
|
A: In the `/data/config` and `/data/db` folders. Back up these folders regularly.
|
||||||
|
|
||||||
|
|
||||||
## 🐞 Known Issues
|
## Troubleshooting Tips
|
||||||
|
|
||||||
- Some scanners (e.g. ARP) may not detect devices on different subnets. See the [Remote networks guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/REMOTE_NETWORKS.md) for workarounds.
|
- Some scanners (e.g. ARP) may not detect devices on different subnets. See the [Remote networks guide](https://docs.netalertx.com/REMOTE_NETWORKS) for workarounds.
|
||||||
- Wi-Fi-only networks may require alternate scanners for accurate detection.
|
- Wi-Fi-only networks may require alternate scanners for accurate detection.
|
||||||
- Notification throttling may be needed for large networks to prevent spam.
|
- Notification throttling may be needed for large networks to prevent spam.
|
||||||
- On some systems, elevated permissions (like `CAP_NET_RAW`) may be needed for low-level scanning.
|
- On some systems, elevated permissions (like `CAP_NET_RAW`) may be needed for low-level scanning.
|
||||||
|
|
||||||
Check the [GitHub Issues](https://github.com/jokob-sk/NetAlertX/issues) for the latest bug reports and solutions and consult [the official documentation](https://jokob-sk.github.io/NetAlertX/).
|
Check the [GitHub Issues](https://github.com/netalertx/NetAlertX/issues) for the latest bug reports and solutions and consult [the official documentation](https://docs.netalertx.com/).
|
||||||
|
|
||||||
## 📃 Everything else
|
## Everything else
|
||||||
<!--- --------------------------------------------------------------------- --->
|
<!--- --------------------------------------------------------------------- --->
|
||||||
|
|
||||||
|
<a href="https://trendshift.io/repositories/12670" target="_blank"><img src="https://trendshift.io/api/badge/repositories/12670" alt="jokob-sk%2FNetAlertX | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||||
|
|
||||||
### 📧 Get notified what's new
|
### 📧 Get notified what's new
|
||||||
|
|
||||||
Get notified about a new release, what new functionality you can use and about breaking changes.
|
Get notified about a new release, what new functionality you can use and about breaking changes.
|
||||||
@@ -169,10 +167,10 @@ Get notified about a new release, what new functionality you can use and about b
|
|||||||
|
|
||||||
### 🔀 Other Alternative Apps
|
### 🔀 Other Alternative Apps
|
||||||
|
|
||||||
- [PiAlert by leiweibau](https://github.com/leiweibau/Pi.Alert/) (maintained, bare-metal install)
|
|
||||||
- [WatchYourLAN](https://github.com/aceberg/WatchYourLAN) - Lightweight network IP scanner with web GUI (Open source)
|
|
||||||
- [Fing](https://www.fing.com/) - Network scanner app for your Internet security (Commercial, Phone App, Proprietary hardware)
|
- [Fing](https://www.fing.com/) - Network scanner app for your Internet security (Commercial, Phone App, Proprietary hardware)
|
||||||
- [NetBox](https://netboxlabs.com/) - Network management software (Commercial)
|
- [NetBox](https://netboxlabs.com/) - The gold standard for Network Source of Truth (NSoT) and IPAM.
|
||||||
|
- [Zabbix](https://www.zabbix.com/) or [Nagios](https://www.nagios.org/) - Strong focus on infrastructure monitoring.
|
||||||
|
- [NetAlertX](https://netalertx.com) - The streamlined, discovery-focused choice for real-time asset intelligence and noise-free alerting.
|
||||||
|
|
||||||
### 💙 Donations
|
### 💙 Donations
|
||||||
|
|
||||||
@@ -183,9 +181,8 @@ Thank you to everyone who appreciates this tool and donates.
|
|||||||
|
|
||||||
<hr>
|
<hr>
|
||||||
|
|
||||||
| [](https://github.com/sponsors/jokob-sk) | [](https://www.buymeacoffee.com/jokobsk) | [](https://www.patreon.com/user?u=84385063) |
|
| [](https://github.com/sponsors/jokob-sk) | [](https://www.buymeacoffee.com/jokobsk) |
|
||||||
| --- | --- | --- |
|
| --- | --- |
|
||||||
|
|
||||||
- Bitcoin: `1N8tupjeCK12qRVU2XrV17WvKK7LCawyZM`
|
- Bitcoin: `1N8tupjeCK12qRVU2XrV17WvKK7LCawyZM`
|
||||||
- Ethereum: `0x6e2749Cb42F4411bc98501406BdcD82244e3f9C7`
|
- Ethereum: `0x6e2749Cb42F4411bc98501406BdcD82244e3f9C7`
|
||||||
|
|
||||||
@@ -197,7 +194,7 @@ Thank you to everyone who appreciates this tool and donates.
|
|||||||
|
|
||||||
This project would be nothing without the amazing work of the community, with special thanks to:
|
This project would be nothing without the amazing work of the community, with special thanks to:
|
||||||
|
|
||||||
> [pucherot/Pi.Alert](https://github.com/pucherot/Pi.Alert) (the original creator of PiAlert), [leiweibau](https://github.com/leiweibau/Pi.Alert): Dark mode (and much more), [Macleykun](https://github.com/Macleykun) (Help with Dockerfile clean-up), [vladaurosh](https://github.com/vladaurosh) for Alpine re-base help, [Final-Hawk](https://github.com/Final-Hawk) (Help with NTFY, styling and other fixes), [TeroRERO](https://github.com/terorero) (Spanish translations), [Data-Monkey](https://github.com/Data-Monkey), (Split-up of the python.py file and more), [cvc90](https://github.com/cvc90) (Spanish translation and various UI work) to name a few. Check out all the [amazing contributors](https://github.com/jokob-sk/NetAlertX/graphs/contributors).
|
> [pucherot/Pi.Alert](https://github.com/pucherot/Pi.Alert) (the original creator of PiAlert), [leiweibau](https://github.com/leiweibau/Pi.Alert): Dark mode (and much more), [Macleykun](https://github.com/Macleykun) (Help with Dockerfile clean-up), [vladaurosh](https://github.com/vladaurosh) for Alpine re-base help, [Final-Hawk](https://github.com/Final-Hawk) (Help with NTFY, styling and other fixes), [TeroRERO](https://github.com/terorero) (Spanish translations), [Data-Monkey](https://github.com/Data-Monkey), (Split-up of the python.py file and more), [cvc90](https://github.com/cvc90) (Spanish translation and various UI work) to name a few. Check out all the [amazing contributors](https://github.com/netalertx/NetAlertX/graphs/contributors).
|
||||||
|
|
||||||
### 🌍 Translations
|
### 🌍 Translations
|
||||||
|
|
||||||
@@ -210,6 +207,7 @@ Proudly using [Weblate](https://hosted.weblate.org/projects/pialert/). Help out
|
|||||||
### License
|
### License
|
||||||
> GPL 3.0 | [Read more here](LICENSE.txt) | Source of the [animated GIF (Loading Animation)](https://commons.wikimedia.org/wiki/File:Loading_Animation.gif) | Source of the [selfhosted Fonts](https://github.com/adobe-fonts/source-sans)
|
> GPL 3.0 | [Read more here](LICENSE.txt) | Source of the [animated GIF (Loading Animation)](https://commons.wikimedia.org/wiki/File:Loading_Animation.gif) | Source of the [selfhosted Fonts](https://github.com/adobe-fonts/source-sans)
|
||||||
|
|
||||||
|
_All product names, logos, and brands are property of their respective owners. All company, product and service names used in this website are for identification purposes only. Use of these names, logos, and brands does not imply endorsement._
|
||||||
|
|
||||||
<!--- --------------------------------------------------------------------- --->
|
<!--- --------------------------------------------------------------------- --->
|
||||||
[main]: ./docs/img/devices_split.png "Main screen"
|
[main]: ./docs/img/devices_split.png "Main screen"
|
||||||
@@ -223,7 +221,7 @@ Proudly using [Weblate](https://hosted.weblate.org/projects/pialert/). Help out
|
|||||||
[sync_hub]: ./docs/img/sync_hub.png "Screen 8"
|
[sync_hub]: ./docs/img/sync_hub.png "Screen 8"
|
||||||
[notification_center]: ./docs/img/notification_center.png "Screen 8"
|
[notification_center]: ./docs/img/notification_center.png "Screen 8"
|
||||||
[sent_reports_text]: ./docs/img/sent_reports_text.png "Screen 8"
|
[sent_reports_text]: ./docs/img/sent_reports_text.png "Screen 8"
|
||||||
[device_nmap]: ./docs/img/device_nmap.png "Screen 9"
|
[device_nmap]: ./docs/img/device_tools.png "Screen 9"
|
||||||
[report1]: ./docs/img/report_sample.png "Report sample 1"
|
[report1]: ./docs/img/report_sample.png "Report sample 1"
|
||||||
[main_dark]: /docs/img/1_devices_dark.jpg "Main screen dark"
|
[main_dark]: /docs/img/1_devices_dark.jpg "Main screen dark"
|
||||||
[maintain_dark]: /docs/img/5_maintain.jpg "Maintain screen dark"
|
[maintain_dark]: /docs/img/5_maintain.jpg "Maintain screen dark"
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
# Generated: 2022-12-30_22-19-40 #
|
# Generated: 2022-12-30_22-19-40 #
|
||||||
# #
|
# #
|
||||||
# Config file for the LAN intruder detection app: #
|
# Config file for the LAN intruder detection app: #
|
||||||
# https://github.com/jokob-sk/NetAlertX #
|
# https://github.com/netalertx/NetAlertX #
|
||||||
# #
|
# #
|
||||||
#-----------------AUTOGENERATED FILE-----------------#
|
#-----------------AUTOGENERATED FILE-----------------#
|
||||||
|
|
||||||
@@ -16,7 +16,7 @@
|
|||||||
#
|
#
|
||||||
# Scan multiple interfaces (eth1 and eth0):
|
# Scan multiple interfaces (eth1 and eth0):
|
||||||
# SCAN_SUBNETS = [ '192.168.1.0/24 --interface=eth1', '192.168.1.0/24 --interface=eth0' ]
|
# SCAN_SUBNETS = [ '192.168.1.0/24 --interface=eth1', '192.168.1.0/24 --interface=eth0' ]
|
||||||
|
BACKEND_API_URL='/server'
|
||||||
DISCOVER_PLUGINS=True
|
DISCOVER_PLUGINS=True
|
||||||
SCAN_SUBNETS=['--localnet']
|
SCAN_SUBNETS=['--localnet']
|
||||||
TIMEZONE='Europe/Berlin'
|
TIMEZONE='Europe/Berlin'
|
||||||
@@ -55,7 +55,6 @@ SMTP_SKIP_TLS=False
|
|||||||
WEBHOOK_RUN='disabled' # use 'on_notification' to enable
|
WEBHOOK_RUN='disabled' # use 'on_notification' to enable
|
||||||
WEBHOOK_URL='http://n8n.local:5555/webhook-test/aaaaaaaa-aaaa-aaaa-aaaaa-aaaaaaaaaaaa'
|
WEBHOOK_URL='http://n8n.local:5555/webhook-test/aaaaaaaa-aaaa-aaaa-aaaaa-aaaaaaaaaaaa'
|
||||||
WEBHOOK_PAYLOAD='json' # webhook payload data format for the "body > attachements > text" attribute
|
WEBHOOK_PAYLOAD='json' # webhook payload data format for the "body > attachements > text" attribute
|
||||||
# in https://github.com/jokob-sk/NetAlertX/blob/main/docs/webhook_json_sample.json
|
|
||||||
# supported values: 'json', 'html' or 'text'
|
# supported values: 'json', 'html' or 'text'
|
||||||
# e.g.: for discord use 'html'
|
# e.g.: for discord use 'html'
|
||||||
WEBHOOK_REQUEST_METHOD='GET'
|
WEBHOOK_REQUEST_METHOD='GET'
|
||||||
@@ -101,6 +100,8 @@ MQTT_PASSWORD='passw0rd'
|
|||||||
MQTT_QOS=0
|
MQTT_QOS=0
|
||||||
MQTT_DELAY_SEC=2
|
MQTT_DELAY_SEC=2
|
||||||
|
|
||||||
|
GRAPHQL_PORT=20212
|
||||||
|
|
||||||
|
|
||||||
#-------------------IMPORTANT INFO-------------------#
|
#-------------------IMPORTANT INFO-------------------#
|
||||||
# This file is ingested by a python script, so if #
|
# This file is ingested by a python script, so if #
|
||||||
|
|||||||
411
back/app.sql
411
back/app.sql
@@ -1,411 +0,0 @@
|
|||||||
CREATE TABLE sqlite_stat1(tbl,idx,stat);
|
|
||||||
CREATE TABLE Events (eve_MAC STRING (50) NOT NULL COLLATE NOCASE, eve_IP STRING (50) NOT NULL COLLATE NOCASE, eve_DateTime DATETIME NOT NULL, eve_EventType STRING (30) NOT NULL COLLATE NOCASE, eve_AdditionalInfo STRING (250) DEFAULT (''), eve_PendingAlertEmail BOOLEAN NOT NULL CHECK (eve_PendingAlertEmail IN (0, 1)) DEFAULT (1), eve_PairEventRowid INTEGER);
|
|
||||||
CREATE TABLE Sessions (ses_MAC STRING (50) COLLATE NOCASE, ses_IP STRING (50) COLLATE NOCASE, ses_EventTypeConnection STRING (30) COLLATE NOCASE, ses_DateTimeConnection DATETIME, ses_EventTypeDisconnection STRING (30) COLLATE NOCASE, ses_DateTimeDisconnection DATETIME, ses_StillConnected BOOLEAN, ses_AdditionalInfo STRING (250));
|
|
||||||
CREATE TABLE IF NOT EXISTS "Online_History" (
|
|
||||||
"Index" INTEGER,
|
|
||||||
"Scan_Date" TEXT,
|
|
||||||
"Online_Devices" INTEGER,
|
|
||||||
"Down_Devices" INTEGER,
|
|
||||||
"All_Devices" INTEGER,
|
|
||||||
"Archived_Devices" INTEGER,
|
|
||||||
"Offline_Devices" INTEGER,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE sqlite_sequence(name,seq);
|
|
||||||
CREATE TABLE Devices (
|
|
||||||
devMac STRING (50) PRIMARY KEY NOT NULL COLLATE NOCASE,
|
|
||||||
devName STRING (50) NOT NULL DEFAULT "(unknown)",
|
|
||||||
devOwner STRING (30) DEFAULT "(unknown)" NOT NULL,
|
|
||||||
devType STRING (30),
|
|
||||||
devVendor STRING (250),
|
|
||||||
devFavorite BOOLEAN CHECK (devFavorite IN (0, 1)) DEFAULT (0) NOT NULL,
|
|
||||||
devGroup STRING (10),
|
|
||||||
devComments TEXT,
|
|
||||||
devFirstConnection DATETIME NOT NULL,
|
|
||||||
devLastConnection DATETIME NOT NULL,
|
|
||||||
devLastIP STRING (50) NOT NULL COLLATE NOCASE,
|
|
||||||
devStaticIP BOOLEAN DEFAULT (0) NOT NULL CHECK (devStaticIP IN (0, 1)),
|
|
||||||
devScan INTEGER DEFAULT (1) NOT NULL,
|
|
||||||
devLogEvents BOOLEAN NOT NULL DEFAULT (1) CHECK (devLogEvents IN (0, 1)),
|
|
||||||
devAlertEvents BOOLEAN NOT NULL DEFAULT (1) CHECK (devAlertEvents IN (0, 1)),
|
|
||||||
devAlertDown BOOLEAN NOT NULL DEFAULT (0) CHECK (devAlertDown IN (0, 1)),
|
|
||||||
devSkipRepeated INTEGER DEFAULT 0 NOT NULL,
|
|
||||||
devLastNotification DATETIME,
|
|
||||||
devPresentLastScan BOOLEAN NOT NULL DEFAULT (0) CHECK (devPresentLastScan IN (0, 1)),
|
|
||||||
devIsNew BOOLEAN NOT NULL DEFAULT (1) CHECK (devIsNew IN (0, 1)),
|
|
||||||
devLocation STRING (250) COLLATE NOCASE,
|
|
||||||
devIsArchived BOOLEAN NOT NULL DEFAULT (0) CHECK (devIsArchived IN (0, 1)),
|
|
||||||
devParentMAC TEXT,
|
|
||||||
devParentPort INTEGER,
|
|
||||||
devIcon TEXT,
|
|
||||||
devGUID TEXT,
|
|
||||||
devSite TEXT,
|
|
||||||
devSSID TEXT,
|
|
||||||
devSyncHubNode TEXT,
|
|
||||||
devSourcePlugin TEXT
|
|
||||||
, "devCustomProps" TEXT);
|
|
||||||
CREATE TABLE IF NOT EXISTS "Settings" (
|
|
||||||
"setKey" TEXT,
|
|
||||||
"setName" TEXT,
|
|
||||||
"setDescription" TEXT,
|
|
||||||
"setType" TEXT,
|
|
||||||
"setOptions" TEXT,
|
|
||||||
"setGroup" TEXT,
|
|
||||||
"setValue" TEXT,
|
|
||||||
"setEvents" TEXT,
|
|
||||||
"setOverriddenByEnv" INTEGER
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS "Parameters" (
|
|
||||||
"par_ID" TEXT PRIMARY KEY,
|
|
||||||
"par_Value" TEXT
|
|
||||||
);
|
|
||||||
CREATE TABLE Plugins_Objects(
|
|
||||||
"Index" INTEGER,
|
|
||||||
Plugin TEXT NOT NULL,
|
|
||||||
Object_PrimaryID TEXT NOT NULL,
|
|
||||||
Object_SecondaryID TEXT NOT NULL,
|
|
||||||
DateTimeCreated TEXT NOT NULL,
|
|
||||||
DateTimeChanged TEXT NOT NULL,
|
|
||||||
Watched_Value1 TEXT NOT NULL,
|
|
||||||
Watched_Value2 TEXT NOT NULL,
|
|
||||||
Watched_Value3 TEXT NOT NULL,
|
|
||||||
Watched_Value4 TEXT NOT NULL,
|
|
||||||
Status TEXT NOT NULL,
|
|
||||||
Extra TEXT NOT NULL,
|
|
||||||
UserData TEXT NOT NULL,
|
|
||||||
ForeignKey TEXT NOT NULL,
|
|
||||||
SyncHubNodeName TEXT,
|
|
||||||
"HelpVal1" TEXT,
|
|
||||||
"HelpVal2" TEXT,
|
|
||||||
"HelpVal3" TEXT,
|
|
||||||
"HelpVal4" TEXT,
|
|
||||||
ObjectGUID TEXT,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE Plugins_Events(
|
|
||||||
"Index" INTEGER,
|
|
||||||
Plugin TEXT NOT NULL,
|
|
||||||
Object_PrimaryID TEXT NOT NULL,
|
|
||||||
Object_SecondaryID TEXT NOT NULL,
|
|
||||||
DateTimeCreated TEXT NOT NULL,
|
|
||||||
DateTimeChanged TEXT NOT NULL,
|
|
||||||
Watched_Value1 TEXT NOT NULL,
|
|
||||||
Watched_Value2 TEXT NOT NULL,
|
|
||||||
Watched_Value3 TEXT NOT NULL,
|
|
||||||
Watched_Value4 TEXT NOT NULL,
|
|
||||||
Status TEXT NOT NULL,
|
|
||||||
Extra TEXT NOT NULL,
|
|
||||||
UserData TEXT NOT NULL,
|
|
||||||
ForeignKey TEXT NOT NULL,
|
|
||||||
SyncHubNodeName TEXT,
|
|
||||||
"HelpVal1" TEXT,
|
|
||||||
"HelpVal2" TEXT,
|
|
||||||
"HelpVal3" TEXT,
|
|
||||||
"HelpVal4" TEXT, "ObjectGUID" TEXT,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE Plugins_History(
|
|
||||||
"Index" INTEGER,
|
|
||||||
Plugin TEXT NOT NULL,
|
|
||||||
Object_PrimaryID TEXT NOT NULL,
|
|
||||||
Object_SecondaryID TEXT NOT NULL,
|
|
||||||
DateTimeCreated TEXT NOT NULL,
|
|
||||||
DateTimeChanged TEXT NOT NULL,
|
|
||||||
Watched_Value1 TEXT NOT NULL,
|
|
||||||
Watched_Value2 TEXT NOT NULL,
|
|
||||||
Watched_Value3 TEXT NOT NULL,
|
|
||||||
Watched_Value4 TEXT NOT NULL,
|
|
||||||
Status TEXT NOT NULL,
|
|
||||||
Extra TEXT NOT NULL,
|
|
||||||
UserData TEXT NOT NULL,
|
|
||||||
ForeignKey TEXT NOT NULL,
|
|
||||||
SyncHubNodeName TEXT,
|
|
||||||
"HelpVal1" TEXT,
|
|
||||||
"HelpVal2" TEXT,
|
|
||||||
"HelpVal3" TEXT,
|
|
||||||
"HelpVal4" TEXT, "ObjectGUID" TEXT,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE Plugins_Language_Strings(
|
|
||||||
"Index" INTEGER,
|
|
||||||
Language_Code TEXT NOT NULL,
|
|
||||||
String_Key TEXT NOT NULL,
|
|
||||||
String_Value TEXT NOT NULL,
|
|
||||||
Extra TEXT NOT NULL,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE CurrentScan (
|
|
||||||
cur_MAC STRING(50) NOT NULL COLLATE NOCASE,
|
|
||||||
cur_IP STRING(50) NOT NULL COLLATE NOCASE,
|
|
||||||
cur_Vendor STRING(250),
|
|
||||||
cur_ScanMethod STRING(10),
|
|
||||||
cur_Name STRING(250),
|
|
||||||
cur_LastQuery STRING(250),
|
|
||||||
cur_DateTime STRING(250),
|
|
||||||
cur_SyncHubNodeName STRING(50),
|
|
||||||
cur_NetworkSite STRING(250),
|
|
||||||
cur_SSID STRING(250),
|
|
||||||
cur_NetworkNodeMAC STRING(250),
|
|
||||||
cur_PORT STRING(250),
|
|
||||||
cur_Type STRING(250),
|
|
||||||
UNIQUE(cur_MAC)
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS "AppEvents" (
|
|
||||||
"Index" INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
"GUID" TEXT UNIQUE,
|
|
||||||
"AppEventProcessed" BOOLEAN,
|
|
||||||
"DateTimeCreated" TEXT,
|
|
||||||
"ObjectType" TEXT,
|
|
||||||
"ObjectGUID" TEXT,
|
|
||||||
"ObjectPlugin" TEXT,
|
|
||||||
"ObjectPrimaryID" TEXT,
|
|
||||||
"ObjectSecondaryID" TEXT,
|
|
||||||
"ObjectForeignKey" TEXT,
|
|
||||||
"ObjectIndex" TEXT,
|
|
||||||
"ObjectIsNew" BOOLEAN,
|
|
||||||
"ObjectIsArchived" BOOLEAN,
|
|
||||||
"ObjectStatusColumn" TEXT,
|
|
||||||
"ObjectStatus" TEXT,
|
|
||||||
"AppEventType" TEXT,
|
|
||||||
"Helper1" TEXT,
|
|
||||||
"Helper2" TEXT,
|
|
||||||
"Helper3" TEXT,
|
|
||||||
"Extra" TEXT
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS "Notifications" (
|
|
||||||
"Index" INTEGER,
|
|
||||||
"GUID" TEXT UNIQUE,
|
|
||||||
"DateTimeCreated" TEXT,
|
|
||||||
"DateTimePushed" TEXT,
|
|
||||||
"Status" TEXT,
|
|
||||||
"JSON" TEXT,
|
|
||||||
"Text" TEXT,
|
|
||||||
"HTML" TEXT,
|
|
||||||
"PublishedVia" TEXT,
|
|
||||||
"Extra" TEXT,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE INDEX IDX_eve_DateTime ON Events (eve_DateTime);
|
|
||||||
CREATE INDEX IDX_eve_EventType ON Events (eve_EventType COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_eve_MAC ON Events (eve_MAC COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_eve_PairEventRowid ON Events (eve_PairEventRowid);
|
|
||||||
CREATE INDEX IDX_ses_EventTypeDisconnection ON Sessions (ses_EventTypeDisconnection COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_ses_EventTypeConnection ON Sessions (ses_EventTypeConnection COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_ses_DateTimeDisconnection ON Sessions (ses_DateTimeDisconnection);
|
|
||||||
CREATE INDEX IDX_ses_MAC ON Sessions (ses_MAC COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_ses_DateTimeConnection ON Sessions (ses_DateTimeConnection);
|
|
||||||
CREATE INDEX IDX_dev_PresentLastScan ON Devices (devPresentLastScan);
|
|
||||||
CREATE INDEX IDX_dev_FirstConnection ON Devices (devFirstConnection);
|
|
||||||
CREATE INDEX IDX_dev_AlertDeviceDown ON Devices (devAlertDown);
|
|
||||||
CREATE INDEX IDX_dev_StaticIP ON Devices (devStaticIP);
|
|
||||||
CREATE INDEX IDX_dev_ScanCycle ON Devices (devScan);
|
|
||||||
CREATE INDEX IDX_dev_Favorite ON Devices (devFavorite);
|
|
||||||
CREATE INDEX IDX_dev_LastIP ON Devices (devLastIP);
|
|
||||||
CREATE INDEX IDX_dev_NewDevice ON Devices (devIsNew);
|
|
||||||
CREATE INDEX IDX_dev_Archived ON Devices (devIsArchived);
|
|
||||||
CREATE VIEW Events_Devices AS
|
|
||||||
SELECT *
|
|
||||||
FROM Events
|
|
||||||
LEFT JOIN Devices ON eve_MAC = devMac
|
|
||||||
/* Events_Devices(eve_MAC,eve_IP,eve_DateTime,eve_EventType,eve_AdditionalInfo,eve_PendingAlertEmail,eve_PairEventRowid,devMac,devName,devOwner,devType,devVendor,devFavorite,devGroup,devComments,devFirstConnection,devLastConnection,devLastIP,devStaticIP,devScan,devLogEvents,devAlertEvents,devAlertDown,devSkipRepeated,devLastNotification,devPresentLastScan,devIsNew,devLocation,devIsArchived,devParentMAC,devParentPort,devIcon,devGUID,devSite,devSSID,devSyncHubNode,devSourcePlugin,devCustomProps) */;
|
|
||||||
CREATE VIEW LatestEventsPerMAC AS
|
|
||||||
WITH RankedEvents AS (
|
|
||||||
SELECT
|
|
||||||
e.*,
|
|
||||||
ROW_NUMBER() OVER (PARTITION BY e.eve_MAC ORDER BY e.eve_DateTime DESC) AS row_num
|
|
||||||
FROM Events AS e
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
e.*,
|
|
||||||
d.*,
|
|
||||||
c.*
|
|
||||||
FROM RankedEvents AS e
|
|
||||||
LEFT JOIN Devices AS d ON e.eve_MAC = d.devMac
|
|
||||||
INNER JOIN CurrentScan AS c ON e.eve_MAC = c.cur_MAC
|
|
||||||
WHERE e.row_num = 1
|
|
||||||
/* LatestEventsPerMAC(eve_MAC,eve_IP,eve_DateTime,eve_EventType,eve_AdditionalInfo,eve_PendingAlertEmail,eve_PairEventRowid,row_num,devMac,devName,devOwner,devType,devVendor,devFavorite,devGroup,devComments,devFirstConnection,devLastConnection,devLastIP,devStaticIP,devScan,devLogEvents,devAlertEvents,devAlertDown,devSkipRepeated,devLastNotification,devPresentLastScan,devIsNew,devLocation,devIsArchived,devParentMAC,devParentPort,devIcon,devGUID,devSite,devSSID,devSyncHubNode,devSourcePlugin,devCustomProps,cur_MAC,cur_IP,cur_Vendor,cur_ScanMethod,cur_Name,cur_LastQuery,cur_DateTime,cur_SyncHubNodeName,cur_NetworkSite,cur_SSID,cur_NetworkNodeMAC,cur_PORT,cur_Type) */;
|
|
||||||
CREATE VIEW Sessions_Devices AS SELECT * FROM Sessions LEFT JOIN "Devices" ON ses_MAC = devMac
|
|
||||||
/* Sessions_Devices(ses_MAC,ses_IP,ses_EventTypeConnection,ses_DateTimeConnection,ses_EventTypeDisconnection,ses_DateTimeDisconnection,ses_StillConnected,ses_AdditionalInfo,devMac,devName,devOwner,devType,devVendor,devFavorite,devGroup,devComments,devFirstConnection,devLastConnection,devLastIP,devStaticIP,devScan,devLogEvents,devAlertEvents,devAlertDown,devSkipRepeated,devLastNotification,devPresentLastScan,devIsNew,devLocation,devIsArchived,devParentMAC,devParentPort,devIcon,devGUID,devSite,devSSID,devSyncHubNode,devSourcePlugin,devCustomProps) */;
|
|
||||||
CREATE VIEW Convert_Events_to_Sessions AS SELECT EVE1.eve_MAC,
|
|
||||||
EVE1.eve_IP,
|
|
||||||
EVE1.eve_EventType AS eve_EventTypeConnection,
|
|
||||||
EVE1.eve_DateTime AS eve_DateTimeConnection,
|
|
||||||
CASE WHEN EVE2.eve_EventType IN ('Disconnected', 'Device Down') OR
|
|
||||||
EVE2.eve_EventType IS NULL THEN EVE2.eve_EventType ELSE '<missing event>' END AS eve_EventTypeDisconnection,
|
|
||||||
CASE WHEN EVE2.eve_EventType IN ('Disconnected', 'Device Down') THEN EVE2.eve_DateTime ELSE NULL END AS eve_DateTimeDisconnection,
|
|
||||||
CASE WHEN EVE2.eve_EventType IS NULL THEN 1 ELSE 0 END AS eve_StillConnected,
|
|
||||||
EVE1.eve_AdditionalInfo
|
|
||||||
FROM Events AS EVE1
|
|
||||||
LEFT JOIN
|
|
||||||
Events AS EVE2 ON EVE1.eve_PairEventRowID = EVE2.RowID
|
|
||||||
WHERE EVE1.eve_EventType IN ('New Device', 'Connected','Down Reconnected')
|
|
||||||
UNION
|
|
||||||
SELECT eve_MAC,
|
|
||||||
eve_IP,
|
|
||||||
'<missing event>' AS eve_EventTypeConnection,
|
|
||||||
NULL AS eve_DateTimeConnection,
|
|
||||||
eve_EventType AS eve_EventTypeDisconnection,
|
|
||||||
eve_DateTime AS eve_DateTimeDisconnection,
|
|
||||||
0 AS eve_StillConnected,
|
|
||||||
eve_AdditionalInfo
|
|
||||||
FROM Events AS EVE1
|
|
||||||
WHERE (eve_EventType = 'Device Down' OR
|
|
||||||
eve_EventType = 'Disconnected') AND
|
|
||||||
EVE1.eve_PairEventRowID IS NULL
|
|
||||||
/* Convert_Events_to_Sessions(eve_MAC,eve_IP,eve_EventTypeConnection,eve_DateTimeConnection,eve_EventTypeDisconnection,eve_DateTimeDisconnection,eve_StillConnected,eve_AdditionalInfo) */;
|
|
||||||
CREATE TRIGGER "trg_insert_devices"
|
|
||||||
AFTER INSERT ON "Devices"
|
|
||||||
WHEN NOT EXISTS (
|
|
||||||
SELECT 1 FROM AppEvents
|
|
||||||
WHERE AppEventProcessed = 0
|
|
||||||
AND ObjectType = 'Devices'
|
|
||||||
AND ObjectGUID = NEW.devGUID
|
|
||||||
AND ObjectStatus = CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END
|
|
||||||
AND AppEventType = 'insert'
|
|
||||||
)
|
|
||||||
BEGIN
|
|
||||||
INSERT INTO "AppEvents" (
|
|
||||||
"GUID",
|
|
||||||
"DateTimeCreated",
|
|
||||||
"AppEventProcessed",
|
|
||||||
"ObjectType",
|
|
||||||
"ObjectGUID",
|
|
||||||
"ObjectPrimaryID",
|
|
||||||
"ObjectSecondaryID",
|
|
||||||
"ObjectStatus",
|
|
||||||
"ObjectStatusColumn",
|
|
||||||
"ObjectIsNew",
|
|
||||||
"ObjectIsArchived",
|
|
||||||
"ObjectForeignKey",
|
|
||||||
"ObjectPlugin",
|
|
||||||
"AppEventType"
|
|
||||||
)
|
|
||||||
VALUES (
|
|
||||||
|
|
||||||
lower(
|
|
||||||
hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || '4' ||
|
|
||||||
substr(hex( randomblob(2)), 2) || '-' ||
|
|
||||||
substr('AB89', 1 + (abs(random()) % 4) , 1) ||
|
|
||||||
substr(hex(randomblob(2)), 2) || '-' ||
|
|
||||||
hex(randomblob(6))
|
|
||||||
)
|
|
||||||
,
|
|
||||||
DATETIME('now'),
|
|
||||||
FALSE,
|
|
||||||
'Devices',
|
|
||||||
NEW.devGUID, -- ObjectGUID
|
|
||||||
NEW.devMac, -- ObjectPrimaryID
|
|
||||||
NEW.devLastIP, -- ObjectSecondaryID
|
|
||||||
CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END, -- ObjectStatus
|
|
||||||
'devPresentLastScan', -- ObjectStatusColumn
|
|
||||||
NEW.devIsNew, -- ObjectIsNew
|
|
||||||
NEW.devIsArchived, -- ObjectIsArchived
|
|
||||||
NEW.devGUID, -- ObjectForeignKey
|
|
||||||
'DEVICES', -- ObjectForeignKey
|
|
||||||
'insert'
|
|
||||||
);
|
|
||||||
END;
|
|
||||||
CREATE TRIGGER "trg_update_devices"
|
|
||||||
AFTER UPDATE ON "Devices"
|
|
||||||
WHEN NOT EXISTS (
|
|
||||||
SELECT 1 FROM AppEvents
|
|
||||||
WHERE AppEventProcessed = 0
|
|
||||||
AND ObjectType = 'Devices'
|
|
||||||
AND ObjectGUID = NEW.devGUID
|
|
||||||
AND ObjectStatus = CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END
|
|
||||||
AND AppEventType = 'update'
|
|
||||||
)
|
|
||||||
BEGIN
|
|
||||||
INSERT INTO "AppEvents" (
|
|
||||||
"GUID",
|
|
||||||
"DateTimeCreated",
|
|
||||||
"AppEventProcessed",
|
|
||||||
"ObjectType",
|
|
||||||
"ObjectGUID",
|
|
||||||
"ObjectPrimaryID",
|
|
||||||
"ObjectSecondaryID",
|
|
||||||
"ObjectStatus",
|
|
||||||
"ObjectStatusColumn",
|
|
||||||
"ObjectIsNew",
|
|
||||||
"ObjectIsArchived",
|
|
||||||
"ObjectForeignKey",
|
|
||||||
"ObjectPlugin",
|
|
||||||
"AppEventType"
|
|
||||||
)
|
|
||||||
VALUES (
|
|
||||||
|
|
||||||
lower(
|
|
||||||
hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || '4' ||
|
|
||||||
substr(hex( randomblob(2)), 2) || '-' ||
|
|
||||||
substr('AB89', 1 + (abs(random()) % 4) , 1) ||
|
|
||||||
substr(hex(randomblob(2)), 2) || '-' ||
|
|
||||||
hex(randomblob(6))
|
|
||||||
)
|
|
||||||
,
|
|
||||||
DATETIME('now'),
|
|
||||||
FALSE,
|
|
||||||
'Devices',
|
|
||||||
NEW.devGUID, -- ObjectGUID
|
|
||||||
NEW.devMac, -- ObjectPrimaryID
|
|
||||||
NEW.devLastIP, -- ObjectSecondaryID
|
|
||||||
CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END, -- ObjectStatus
|
|
||||||
'devPresentLastScan', -- ObjectStatusColumn
|
|
||||||
NEW.devIsNew, -- ObjectIsNew
|
|
||||||
NEW.devIsArchived, -- ObjectIsArchived
|
|
||||||
NEW.devGUID, -- ObjectForeignKey
|
|
||||||
'DEVICES', -- ObjectForeignKey
|
|
||||||
'update'
|
|
||||||
);
|
|
||||||
END;
|
|
||||||
CREATE TRIGGER "trg_delete_devices"
|
|
||||||
AFTER DELETE ON "Devices"
|
|
||||||
WHEN NOT EXISTS (
|
|
||||||
SELECT 1 FROM AppEvents
|
|
||||||
WHERE AppEventProcessed = 0
|
|
||||||
AND ObjectType = 'Devices'
|
|
||||||
AND ObjectGUID = OLD.devGUID
|
|
||||||
AND ObjectStatus = CASE WHEN OLD.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END
|
|
||||||
AND AppEventType = 'delete'
|
|
||||||
)
|
|
||||||
BEGIN
|
|
||||||
INSERT INTO "AppEvents" (
|
|
||||||
"GUID",
|
|
||||||
"DateTimeCreated",
|
|
||||||
"AppEventProcessed",
|
|
||||||
"ObjectType",
|
|
||||||
"ObjectGUID",
|
|
||||||
"ObjectPrimaryID",
|
|
||||||
"ObjectSecondaryID",
|
|
||||||
"ObjectStatus",
|
|
||||||
"ObjectStatusColumn",
|
|
||||||
"ObjectIsNew",
|
|
||||||
"ObjectIsArchived",
|
|
||||||
"ObjectForeignKey",
|
|
||||||
"ObjectPlugin",
|
|
||||||
"AppEventType"
|
|
||||||
)
|
|
||||||
VALUES (
|
|
||||||
|
|
||||||
lower(
|
|
||||||
hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || '4' ||
|
|
||||||
substr(hex( randomblob(2)), 2) || '-' ||
|
|
||||||
substr('AB89', 1 + (abs(random()) % 4) , 1) ||
|
|
||||||
substr(hex(randomblob(2)), 2) || '-' ||
|
|
||||||
hex(randomblob(6))
|
|
||||||
)
|
|
||||||
,
|
|
||||||
DATETIME('now'),
|
|
||||||
FALSE,
|
|
||||||
'Devices',
|
|
||||||
OLD.devGUID, -- ObjectGUID
|
|
||||||
OLD.devMac, -- ObjectPrimaryID
|
|
||||||
OLD.devLastIP, -- ObjectSecondaryID
|
|
||||||
CASE WHEN OLD.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END, -- ObjectStatus
|
|
||||||
'devPresentLastScan', -- ObjectStatusColumn
|
|
||||||
OLD.devIsNew, -- ObjectIsNew
|
|
||||||
OLD.devIsArchived, -- ObjectIsArchived
|
|
||||||
OLD.devGUID, -- ObjectForeignKey
|
|
||||||
'DEVICES', -- ObjectForeignKey
|
|
||||||
'delete'
|
|
||||||
);
|
|
||||||
END;
|
|
||||||
@@ -5,7 +5,64 @@
|
|||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
{ "mac_prefix": "INTERNET", "vendor": "" }
|
{ "mac_prefix": "INTERNET", "vendor": "" }
|
||||||
],
|
],
|
||||||
"name_pattern": []
|
"name_pattern": [],
|
||||||
|
"ip_pattern": [
|
||||||
|
"^192\\.168\\.1\\.1$",
|
||||||
|
"^192\\.168\\.0\\.1$",
|
||||||
|
"^10\\.0\\.0\\.1$"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Switch",
|
||||||
|
"icon_html": "<i class=\"fa-solid fa-toggle-on\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "003192", "vendor": "TP-Link" },
|
||||||
|
{ "mac_prefix": "50C7BF", "vendor": "TP-Link" },
|
||||||
|
{ "mac_prefix": "B04E26", "vendor": "TP-Link" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["hs200", "hs210", "hs220", "ks230", "smart switch", "light switch", "wall switch"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Plug",
|
||||||
|
"icon_html": "<i class=\"fa-solid fa-plug\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "2887BA", "vendor": "TP-Link" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["kp115", "hs100", "hs103", "hs105", "smart plug", "outlet", "plug"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Speaker",
|
||||||
|
"icon_html": "<i class=\"fa fa-volume-up\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "14C14E", "vendor": "Google" },
|
||||||
|
{ "mac_prefix": "44650D", "vendor": "Amazon" },
|
||||||
|
{ "mac_prefix": "74ACB9", "vendor": "Google" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["echo", "alexa", "dot", "nest-audio", "nest-mini", "google-home"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Appliance",
|
||||||
|
"icon_html": "<i class=\"fa-solid fa-wind\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "446FF8", "vendor": "Dyson" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["dyson", "purifier", "humidifier", "fan"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Home",
|
||||||
|
"icon_html": "<i class=\"fa fa-house\"></i>",
|
||||||
|
"matching_pattern": [],
|
||||||
|
"name_pattern": ["google", "chromecast", "nest", "hub"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Phone",
|
||||||
|
"icon_html": "<i class=\"fa-solid fa-mobile\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "001A79", "vendor": "Apple" },
|
||||||
|
{ "mac_prefix": "B0BE83", "vendor": "Samsung" },
|
||||||
|
{ "mac_prefix": "BC926B", "vendor": "Motorola" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["iphone", "ipad", "pixel", "galaxy", "redmi", "android", "samsung"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Access Point",
|
"dev_type": "Access Point",
|
||||||
@@ -16,24 +73,7 @@
|
|||||||
{ "mac_prefix": "F4F5D8", "vendor": "TP-Link" },
|
{ "mac_prefix": "F4F5D8", "vendor": "TP-Link" },
|
||||||
{ "mac_prefix": "F88E85", "vendor": "Netgear" }
|
{ "mac_prefix": "F88E85", "vendor": "Netgear" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["router", "gateway", "ap", "access point", "access-point", "switch"]
|
"name_pattern": ["router", "gateway", "ap", "access point", "access-point", "switch", "sg105", "sg108", "managed switch", "unmanaged switch", "poe switch", "ethernet switch"]
|
||||||
},
|
|
||||||
{
|
|
||||||
"dev_type": "Phone",
|
|
||||||
"icon_html": "<i class=\"fa-brands fa-apple\"></i>",
|
|
||||||
"matching_pattern": [
|
|
||||||
{ "mac_prefix": "001A79", "vendor": "Apple" },
|
|
||||||
{ "mac_prefix": "B0BE83", "vendor": "Samsung" },
|
|
||||||
{ "mac_prefix": "BC926B", "vendor": "Motorola" }
|
|
||||||
],
|
|
||||||
"name_pattern": ["iphone", "ipad", "pixel", "galaxy", "redmi"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"dev_type": "Phone",
|
|
||||||
"icon_html": "<i class=\"fa-solid fa-mobile\"></i>",
|
|
||||||
"matching_pattern": [
|
|
||||||
],
|
|
||||||
"name_pattern": ["android","samsung"]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Tablet",
|
"dev_type": "Tablet",
|
||||||
@@ -44,24 +84,18 @@
|
|||||||
],
|
],
|
||||||
"name_pattern": ["tablet", "pad"]
|
"name_pattern": ["tablet", "pad"]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"dev_type": "IoT",
|
|
||||||
"icon_html": "<i class=\"fa-brands fa-raspberry-pi\"></i>",
|
|
||||||
"matching_pattern": [
|
|
||||||
{ "mac_prefix": "B827EB", "vendor": "Raspberry Pi" },
|
|
||||||
{ "mac_prefix": "DCA632", "vendor": "Raspberry Pi" }
|
|
||||||
],
|
|
||||||
"name_pattern": ["raspberry", "pi"]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"dev_type": "IoT",
|
"dev_type": "IoT",
|
||||||
"icon_html": "<i class=\"fa-solid fa-microchip\"></i>",
|
"icon_html": "<i class=\"fa-solid fa-microchip\"></i>",
|
||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "B827EB", "vendor": "Raspberry Pi" },
|
||||||
|
{ "mac_prefix": "DCA632", "vendor": "Raspberry Pi" },
|
||||||
{ "mac_prefix": "840D8E", "vendor": "Espressif" },
|
{ "mac_prefix": "840D8E", "vendor": "Espressif" },
|
||||||
{ "mac_prefix": "ECFABC", "vendor": "Espressif" },
|
{ "mac_prefix": "ECFABC", "vendor": "Espressif" },
|
||||||
{ "mac_prefix": "7C9EBD", "vendor": "Espressif" }
|
{ "mac_prefix": "7C9EBD", "vendor": "Espressif" },
|
||||||
|
{ "mac_prefix": "286DCD", "vendor": "Beijing Winner Microelectronics" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["raspberry", "pi"]
|
"name_pattern": ["raspberry", "pi", "thingsturn", "w600", "w601"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Desktop",
|
"dev_type": "Desktop",
|
||||||
@@ -69,9 +103,11 @@
|
|||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
{ "mac_prefix": "001422", "vendor": "Dell" },
|
{ "mac_prefix": "001422", "vendor": "Dell" },
|
||||||
{ "mac_prefix": "001874", "vendor": "Lenovo" },
|
{ "mac_prefix": "001874", "vendor": "Lenovo" },
|
||||||
{ "mac_prefix": "00E04C", "vendor": "Hewlett Packard" }
|
{ "mac_prefix": "00E04C", "vendor": "Hewlett Packard" },
|
||||||
|
{ "mac_prefix": "F44D30", "vendor": "Elitegroup Computer Systems" },
|
||||||
|
{ "mac_prefix": "1C697A", "vendor": "Elitegroup Computer Systems" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["desktop", "pc", "computer"]
|
"name_pattern": ["desktop", "pc", "computer", "liva", "ecs"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Laptop",
|
"dev_type": "Laptop",
|
||||||
@@ -80,9 +116,10 @@
|
|||||||
{ "mac_prefix": "3C0754", "vendor": "HP" },
|
{ "mac_prefix": "3C0754", "vendor": "HP" },
|
||||||
{ "mac_prefix": "0017A4", "vendor": "Dell" },
|
{ "mac_prefix": "0017A4", "vendor": "Dell" },
|
||||||
{ "mac_prefix": "F4CE46", "vendor": "Lenovo" },
|
{ "mac_prefix": "F4CE46", "vendor": "Lenovo" },
|
||||||
{ "mac_prefix": "409F38", "vendor": "Acer" }
|
{ "mac_prefix": "409F38", "vendor": "Acer" },
|
||||||
|
{ "mac_prefix": "9CB6D0", "vendor": "Rivet Networks" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["macbook", "imac", "laptop", "notebook"]
|
"name_pattern": ["macbook", "imac", "laptop", "notebook", "alienware", "razer", "msi"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Server",
|
"dev_type": "Server",
|
||||||
@@ -123,9 +160,10 @@
|
|||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
{ "mac_prefix": "001FA7", "vendor": "Sony" },
|
{ "mac_prefix": "001FA7", "vendor": "Sony" },
|
||||||
{ "mac_prefix": "7C04D0", "vendor": "Nintendo" },
|
{ "mac_prefix": "7C04D0", "vendor": "Nintendo" },
|
||||||
{ "mac_prefix": "EC26CA", "vendor": "Sony" }
|
{ "mac_prefix": "EC26CA", "vendor": "Sony" },
|
||||||
|
{ "mac_prefix": "48B02D", "vendor": "NVIDIA" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["playstation", "xbox"]
|
"name_pattern": ["playstation", "xbox", "shield", "nvidia"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Camera",
|
"dev_type": "Camera",
|
||||||
@@ -138,15 +176,6 @@
|
|||||||
],
|
],
|
||||||
"name_pattern": ["camera", "cam", "webcam"]
|
"name_pattern": ["camera", "cam", "webcam"]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"dev_type": "Smart Speaker",
|
|
||||||
"icon_html": "<i class=\"fa fa-volume-up\"></i>",
|
|
||||||
"matching_pattern": [
|
|
||||||
{ "mac_prefix": "44650D", "vendor": "Amazon" },
|
|
||||||
{ "mac_prefix": "74ACB9", "vendor": "Google" }
|
|
||||||
],
|
|
||||||
"name_pattern": ["echo", "alexa", "dot"]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"dev_type": "Router",
|
"dev_type": "Router",
|
||||||
"icon_html": "<i class=\"fa fa-random\"></i>",
|
"icon_html": "<i class=\"fa fa-random\"></i>",
|
||||||
@@ -154,23 +183,13 @@
|
|||||||
{ "mac_prefix": "000C29", "vendor": "Cisco" },
|
{ "mac_prefix": "000C29", "vendor": "Cisco" },
|
||||||
{ "mac_prefix": "00155D", "vendor": "MikroTik" }
|
{ "mac_prefix": "00155D", "vendor": "MikroTik" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["router", "gateway", "ap", "access point", "access-point"],
|
"name_pattern": ["router", "gateway", "ap", "access point"]
|
||||||
"ip_pattern": [
|
|
||||||
"^192\\.168\\.[0-1]\\.1$",
|
|
||||||
"^10\\.0\\.0\\.1$"
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Smart Light",
|
"dev_type": "Smart Light",
|
||||||
"icon_html": "<i class=\"fa fa-lightbulb\"></i>",
|
"icon_html": "<i class=\"fa fa-lightbulb\"></i>",
|
||||||
"matching_pattern": [],
|
"matching_pattern": [],
|
||||||
"name_pattern": ["hue", "lifx", "bulb"]
|
"name_pattern": ["hue", "lifx", "bulb", "light"]
|
||||||
},
|
|
||||||
{
|
|
||||||
"dev_type": "Smart Home",
|
|
||||||
"icon_html": "<i class=\"fa fa-house\"></i>",
|
|
||||||
"matching_pattern": [],
|
|
||||||
"name_pattern": ["google", "chromecast", "nest"]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Smartwatch",
|
"dev_type": "Smartwatch",
|
||||||
@@ -187,14 +206,9 @@
|
|||||||
{
|
{
|
||||||
"dev_type": "Security Device",
|
"dev_type": "Security Device",
|
||||||
"icon_html": "<i class=\"fa fa-shield-alt\"></i>",
|
"icon_html": "<i class=\"fa fa-shield-alt\"></i>",
|
||||||
"matching_pattern": [],
|
|
||||||
"name_pattern": ["doorbell", "lock", "security"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"dev_type": "Smart Light",
|
|
||||||
"icon_html": "<i class=\"fa-solid fa-lightbulb\"></i>",
|
|
||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "047BCB", "vendor": "Universal Global Scientific" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["light","bulb"]
|
"name_pattern": ["doorbell", "lock", "security", "mmd-", "ring"]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@@ -1,20 +1,27 @@
|
|||||||
services:
|
services:
|
||||||
netalertx:
|
netalertx:
|
||||||
#use an environmental variable to set host networking mode if needed
|
network_mode: host # Use host networking for ARP scanning and other services
|
||||||
network_mode: ${NETALERTX_NETWORK_MODE:-host} # Use host networking for ARP scanning and other services
|
|
||||||
build:
|
build:
|
||||||
context: . # Build context is the current directory
|
context: . # Build context is the current directory
|
||||||
dockerfile: Dockerfile # Specify the Dockerfile to use
|
dockerfile: Dockerfile # Specify the Dockerfile to use
|
||||||
image: netalertx:latest
|
image: netalertx:latest
|
||||||
container_name: netalertx # The name when you docker contiainer ls
|
container_name: netalertx # The name when you docker contiainer ls
|
||||||
read_only: true # Make the container filesystem read-only
|
read_only: true # Make the container filesystem read-only
|
||||||
|
|
||||||
|
# It is most secure to start with user 20211, but then we lose provisioning capabilities.
|
||||||
|
# user: "${NETALERTX_UID:-20211}:${NETALERTX_GID:-20211}"
|
||||||
cap_drop: # Drop all capabilities for enhanced security
|
cap_drop: # Drop all capabilities for enhanced security
|
||||||
- ALL
|
- ALL
|
||||||
cap_add: # Add only the necessary capabilities
|
cap_add: # Add only the necessary capabilities
|
||||||
- NET_ADMIN # Required for ARP scanning
|
- NET_ADMIN # Required for scanning with arp-scan, nmap, nbtscan, traceroute, and zero-conf
|
||||||
- NET_RAW # Required for raw socket operations
|
- NET_RAW # Required for raw socket operations with arp-scan, nmap, nbtscan, traceroute and zero-conf
|
||||||
- NET_BIND_SERVICE # Required to bind to privileged ports (nbtscan)
|
- NET_BIND_SERVICE # Required to bind to privileged ports with nbtscan
|
||||||
|
- CHOWN # Required for root-entrypoint to chown /data + /tmp before dropping privileges
|
||||||
|
- SETUID # Required for root-entrypoint to switch to non-root user
|
||||||
|
- SETGID # Required for root-entrypoint to switch to non-root group
|
||||||
|
sysctls: # ARP flux mitigation for host networking accuracy
|
||||||
|
net.ipv4.conf.all.arp_ignore: 1
|
||||||
|
net.ipv4.conf.all.arp_announce: 2
|
||||||
volumes:
|
volumes:
|
||||||
|
|
||||||
- type: volume # Persistent Docker-managed Named Volume for storage
|
- type: volume # Persistent Docker-managed Named Volume for storage
|
||||||
@@ -35,22 +42,23 @@ services:
|
|||||||
target: /etc/localtime
|
target: /etc/localtime
|
||||||
read_only: true
|
read_only: true
|
||||||
|
|
||||||
# Use a custom Enterprise-configured nginx config for ldap or other settings
|
# Use a custom Enterprise-configured nginx config for ldap or other settings
|
||||||
# - /custom-enterprise.conf:/tmp/nginx/active-config/netalertx.conf:ro
|
# - /custom-enterprise.conf:/tmp/nginx/active-config/netalertx.conf:ro
|
||||||
|
|
||||||
# Test your plugin on the production container
|
# Test your plugin on the production container
|
||||||
# - /path/on/host:/app/front/plugins/custom
|
# - /path/on/host:/app/front/plugins/custom
|
||||||
|
|
||||||
# Retain logs - comment out tmpfs /tmp/log if you want to retain logs between container restarts
|
# Retain logs - comment out tmpfs /tmp/log if you want to retain logs between container restarts
|
||||||
# - /path/on/host/log:/tmp/log
|
# - /path/on/host/log:/tmp/log
|
||||||
|
|
||||||
# tmpfs mounts for writable directories in a read-only container and improve system performance
|
# tmpfs mounts for writable directories in a read-only container and improve system performance
|
||||||
# All writes now live under /tmp/* subdirectories which are created dynamically by entrypoint.d scripts
|
# All writes now live under /tmp/* subdirectories which are created dynamically by entrypoint.d scripts
|
||||||
# uid=20211 and gid=20211 is the netalertx user inside the container
|
# mode=1700 gives rwx------ permissions; ownership is set by /root-entrypoint.sh
|
||||||
# mode=1700 gives rwx------ permissions to the netalertx user only
|
|
||||||
tmpfs:
|
tmpfs:
|
||||||
- "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
- "/tmp:mode=1700,uid=0,gid=0,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
||||||
environment:
|
environment:
|
||||||
|
PUID: ${NETALERTX_UID:-20211} # Runtime UID after priming (Synology/no-copy-up safe)
|
||||||
|
PGID: ${NETALERTX_GID:-20211} # Runtime GID after priming (Synology/no-copy-up safe)
|
||||||
LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces
|
LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces
|
||||||
PORT: ${PORT:-20211} # Application port
|
PORT: ${PORT:-20211} # Application port
|
||||||
GRAPHQL_PORT: ${GRAPHQL_PORT:-20212} # GraphQL API port
|
GRAPHQL_PORT: ${GRAPHQL_PORT:-20212} # GraphQL API port
|
||||||
@@ -63,7 +71,6 @@ services:
|
|||||||
cpu_shares: 512 # Relative CPU weight for CPU contention scenarios
|
cpu_shares: 512 # Relative CPU weight for CPU contention scenarios
|
||||||
pids_limit: 512 # Limit the number of processes/threads to prevent fork bombs
|
pids_limit: 512 # Limit the number of processes/threads to prevent fork bombs
|
||||||
logging:
|
logging:
|
||||||
driver: "json-file" # Use JSON file logging driver
|
|
||||||
options:
|
options:
|
||||||
max-size: "10m" # Rotate log files after they reach 10MB
|
max-size: "10m" # Rotate log files after they reach 10MB
|
||||||
max-file: "3" # Keep a maximum of 3 log files
|
max-file: "3" # Keep a maximum of 3 log files
|
||||||
|
|||||||
558
docker_build.log
558
docker_build.log
@@ -1,534 +1,74 @@
|
|||||||
#0 building with "default" instance using docker driver
|
#0 building with "default" instance using docker driver
|
||||||
|
|
||||||
#1 [internal] load build definition from Dockerfile
|
#1 [internal] load build definition from Dockerfile
|
||||||
#1 transferring dockerfile: 5.29kB done
|
#1 DONE 0.0s
|
||||||
|
|
||||||
|
#1 [internal] load build definition from Dockerfile
|
||||||
|
#1 transferring dockerfile: 11.45kB done
|
||||||
#1 DONE 0.1s
|
#1 DONE 0.1s
|
||||||
|
|
||||||
#2 [auth] library/alpine:pull token for registry-1.docker.io
|
#2 [internal] load metadata for docker.io/library/alpine:3.22
|
||||||
#2 DONE 0.0s
|
#2 DONE 0.0s
|
||||||
|
|
||||||
#3 [internal] load metadata for docker.io/library/alpine:3.22
|
#3 [internal] load .dockerignore
|
||||||
#3 DONE 0.4s
|
#3 transferring context:
|
||||||
|
#3 transferring context: 222B done
|
||||||
|
#3 DONE 0.1s
|
||||||
|
|
||||||
#4 [internal] load .dockerignore
|
#4 [builder 1/4] FROM docker.io/library/alpine:3.22
|
||||||
#4 transferring context: 216B done
|
#4 DONE 0.0s
|
||||||
#4 DONE 0.1s
|
|
||||||
|
|
||||||
#5 [builder 1/15] FROM docker.io/library/alpine:3.22@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1
|
#5 [internal] load build context
|
||||||
#5 CACHED
|
#5 transferring context: 46.63kB 0.1s done
|
||||||
|
#5 DONE 0.2s
|
||||||
|
|
||||||
#6 [internal] load build context
|
#6 [builder 3/4] RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git rust cargo && python -m venv /opt/venv
|
||||||
#6 transferring context: 36.76kB 0.0s done
|
#6 CACHED
|
||||||
#6 DONE 0.1s
|
|
||||||
|
|
||||||
#7 [builder 2/15] RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git && python -m venv /opt/venv
|
#7 [runner 6/11] COPY --chown=netalertx:netalertx --chmod=755 server /app/server
|
||||||
#7 0.443 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
#7 CACHED
|
||||||
#7 0.688 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#7 1.107 (1/52) Upgrading libcrypto3 (3.5.1-r0 -> 3.5.3-r0)
|
|
||||||
#7 1.358 (2/52) Upgrading libssl3 (3.5.1-r0 -> 3.5.3-r0)
|
|
||||||
#7 1.400 (3/52) Installing ncurses-terminfo-base (6.5_p20250503-r0)
|
|
||||||
#7 1.413 (4/52) Installing libncursesw (6.5_p20250503-r0)
|
|
||||||
#7 1.444 (5/52) Installing readline (8.2.13-r1)
|
|
||||||
#7 1.471 (6/52) Installing bash (5.2.37-r0)
|
|
||||||
#7 1.570 Executing bash-5.2.37-r0.post-install
|
|
||||||
#7 1.593 (7/52) Installing libgcc (14.2.0-r6)
|
|
||||||
#7 1.605 (8/52) Installing jansson (2.14.1-r0)
|
|
||||||
#7 1.613 (9/52) Installing libstdc++ (14.2.0-r6)
|
|
||||||
#7 1.705 (10/52) Installing zstd-libs (1.5.7-r0)
|
|
||||||
#7 1.751 (11/52) Installing binutils (2.44-r3)
|
|
||||||
#7 2.041 (12/52) Installing libgomp (14.2.0-r6)
|
|
||||||
#7 2.064 (13/52) Installing libatomic (14.2.0-r6)
|
|
||||||
#7 2.071 (14/52) Installing gmp (6.3.0-r3)
|
|
||||||
#7 2.097 (15/52) Installing isl26 (0.26-r1)
|
|
||||||
#7 2.183 (16/52) Installing mpfr4 (4.2.1_p1-r0)
|
|
||||||
#7 2.219 (17/52) Installing mpc1 (1.3.1-r1)
|
|
||||||
#7 2.231 (18/52) Installing gcc (14.2.0-r6)
|
|
||||||
#7 6.782 (19/52) Installing brotli-libs (1.1.0-r2)
|
|
||||||
#7 6.828 (20/52) Installing c-ares (1.34.5-r0)
|
|
||||||
#7 6.846 (21/52) Installing libunistring (1.3-r0)
|
|
||||||
#7 6.919 (22/52) Installing libidn2 (2.3.7-r0)
|
|
||||||
#7 6.937 (23/52) Installing nghttp2-libs (1.65.0-r0)
|
|
||||||
#7 6.950 (24/52) Installing libpsl (0.21.5-r3)
|
|
||||||
#7 6.960 (25/52) Installing libcurl (8.14.1-r1)
|
|
||||||
#7 7.015 (26/52) Installing libexpat (2.7.2-r0)
|
|
||||||
#7 7.029 (27/52) Installing pcre2 (10.43-r1)
|
|
||||||
#7 7.069 (28/52) Installing git (2.49.1-r0)
|
|
||||||
#7 7.397 (29/52) Installing git-init-template (2.49.1-r0)
|
|
||||||
#7 7.404 (30/52) Installing linux-headers (6.14.2-r0)
|
|
||||||
#7 7.572 (31/52) Installing libffi (3.4.8-r0)
|
|
||||||
#7 7.578 (32/52) Installing pkgconf (2.4.3-r0)
|
|
||||||
#7 7.593 (33/52) Installing libffi-dev (3.4.8-r0)
|
|
||||||
#7 7.607 (34/52) Installing musl-dev (1.2.5-r10)
|
|
||||||
#7 7.961 (35/52) Installing openssl-dev (3.5.3-r0)
|
|
||||||
#7 8.021 (36/52) Installing libbz2 (1.0.8-r6)
|
|
||||||
#7 8.045 (37/52) Installing gdbm (1.24-r0)
|
|
||||||
#7 8.055 (38/52) Installing xz-libs (5.8.1-r0)
|
|
||||||
#7 8.071 (39/52) Installing mpdecimal (4.0.1-r0)
|
|
||||||
#7 8.090 (40/52) Installing libpanelw (6.5_p20250503-r0)
|
|
||||||
#7 8.098 (41/52) Installing sqlite-libs (3.49.2-r1)
|
|
||||||
#7 8.185 (42/52) Installing python3 (3.12.11-r0)
|
|
||||||
#7 8.904 (43/52) Installing python3-pycache-pyc0 (3.12.11-r0)
|
|
||||||
#7 9.292 (44/52) Installing pyc (3.12.11-r0)
|
|
||||||
#7 9.292 (45/52) Installing python3-pyc (3.12.11-r0)
|
|
||||||
#7 9.292 (46/52) Installing python3-dev (3.12.11-r0)
|
|
||||||
#7 10.71 (47/52) Installing libmd (1.1.0-r0)
|
|
||||||
#7 10.72 (48/52) Installing libbsd (0.12.2-r0)
|
|
||||||
#7 10.73 (49/52) Installing skalibs-libs (2.14.4.0-r0)
|
|
||||||
#7 10.75 (50/52) Installing utmps-libs (0.1.3.1-r0)
|
|
||||||
#7 10.76 (51/52) Installing linux-pam (1.7.0-r4)
|
|
||||||
#7 10.82 (52/52) Installing shadow (4.17.3-r0)
|
|
||||||
#7 10.88 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#7 10.90 OK: 274 MiB in 66 packages
|
|
||||||
#7 DONE 14.4s
|
|
||||||
|
|
||||||
#8 [builder 3/15] RUN mkdir -p /app
|
#8 [runner 5/11] COPY --chown=netalertx:netalertx --chmod=755 front /app/front
|
||||||
#8 DONE 0.5s
|
#8 CACHED
|
||||||
|
|
||||||
#9 [builder 4/15] COPY api /app/api
|
#9 [runner 2/11] RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst nginx supercronic shadow su-exec && rm -Rf /var/cache/apk/* && rm -Rf /etc/nginx && addgroup -g 20211 netalertx && adduser -u 20211 -D -h /app -G netalertx netalertx && apk del shadow
|
||||||
#9 DONE 0.3s
|
#9 CACHED
|
||||||
|
|
||||||
#10 [builder 5/15] COPY back /app/back
|
#10 [runner 4/11] COPY --chown=netalertx:netalertx --chmod=755 back /app/back
|
||||||
#10 DONE 0.3s
|
#10 CACHED
|
||||||
|
|
||||||
#11 [builder 6/15] COPY config /app/config
|
#11 [builder 2/4] COPY requirements.txt /tmp/requirements.txt
|
||||||
#11 DONE 0.3s
|
#11 CACHED
|
||||||
|
|
||||||
#12 [builder 7/15] COPY db /app/db
|
#12 [runner 7/11] RUN install -d -o netalertx -g netalertx -m 700 /data /data/config /data/db /tmp/api /tmp/log /tmp/log/plugins /tmp/run /tmp/run/tmp /tmp/run/logs /tmp/nginx/active-config && sh -c "find /app -type f \( -name '*.sh' -o -name 'speedtest-cli' \) -exec chmod 750 {} \;"
|
||||||
#12 DONE 0.3s
|
#12 CACHED
|
||||||
|
|
||||||
#13 [builder 8/15] COPY dockerfiles /app/dockerfiles
|
#13 [hardened 1/2] RUN addgroup -g 20212 "readonly" && adduser -u 20212 -G "readonly" -D -h /app "readonly"
|
||||||
#13 DONE 0.3s
|
#13 CACHED
|
||||||
|
|
||||||
#14 [builder 9/15] COPY front /app/front
|
#14 [runner 8/11] COPY --chown=netalertx:netalertx .[V]ERSION /app/.VERSION
|
||||||
#14 DONE 0.4s
|
#14 CACHED
|
||||||
|
|
||||||
#15 [builder 10/15] COPY server /app/server
|
#15 [runner 9/11] COPY --chown=netalertx:netalertx .[V]ERSION /app/.VERSION_PREV
|
||||||
#15 DONE 0.3s
|
#15 CACHED
|
||||||
|
|
||||||
#16 [builder 11/15] COPY install/crontab /etc/crontabs/root
|
#16 [runner 11/11] RUN for vfile in .VERSION .VERSION_PREV; do if [ ! -f "/app/${vfile}" ]; then echo "DEVELOPMENT 00000000" > "/app/${vfile}"; fi; chown 20212:20212 "/app/${vfile}"; done && apk add --no-cache libcap && setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && setcap cap_net_raw,cap_net_admin+eip "$(readlink -f /opt/venv/bin/python)" && /bin/sh /build/init-nginx.sh && /bin/sh /build/init-php-fpm.sh && /bin/sh /build/init-cron.sh && /bin/sh /build/init-backend.sh && rm -rf /build && apk del libcap && date +%s > "/app/front/buildtimestamp.txt"
|
||||||
#16 DONE 0.3s
|
#16 CACHED
|
||||||
|
|
||||||
#17 [builder 12/15] COPY dockerfiles/start* /start*.sh
|
#17 [builder 4/4] RUN python -m pip install --no-cache-dir --upgrade pip setuptools wheel && pip install --prefer-binary --no-cache-dir -r /tmp/requirements.txt && chmod -R u-rwx,g-rwx /opt
|
||||||
#17 DONE 0.3s
|
#17 CACHED
|
||||||
|
|
||||||
#18 [builder 13/15] RUN pip install openwrt-luci-rpc asusrouter asyncio aiohttp graphene flask flask-cors unifi-sm-api tplink-omada-client wakeonlan pycryptodome requests paho-mqtt scapy cron-converter pytz json2table dhcp-leases pyunifi speedtest-cli chardet python-nmap dnspython librouteros yattag git+https://github.com/foreign-sub/aiofreepybox.git
|
#18 [runner 10/11] COPY --from=builder --chown=20212:20212 /opt/venv /opt/venv
|
||||||
#18 0.737 Collecting git+https://github.com/foreign-sub/aiofreepybox.git
|
#18 CACHED
|
||||||
#18 0.737 Cloning https://github.com/foreign-sub/aiofreepybox.git to /tmp/pip-req-build-waf5_npl
|
|
||||||
#18 0.738 Running command git clone --filter=blob:none --quiet https://github.com/foreign-sub/aiofreepybox.git /tmp/pip-req-build-waf5_npl
|
|
||||||
#18 1.617 Resolved https://github.com/foreign-sub/aiofreepybox.git to commit 4ee18ea0f3e76edc839c48eb8df1da59c1baee3d
|
|
||||||
#18 1.620 Installing build dependencies: started
|
|
||||||
#18 3.337 Installing build dependencies: finished with status 'done'
|
|
||||||
#18 3.337 Getting requirements to build wheel: started
|
|
||||||
#18 3.491 Getting requirements to build wheel: finished with status 'done'
|
|
||||||
#18 3.492 Preparing metadata (pyproject.toml): started
|
|
||||||
#18 3.650 Preparing metadata (pyproject.toml): finished with status 'done'
|
|
||||||
#18 3.724 Collecting openwrt-luci-rpc
|
|
||||||
#18 3.753 Downloading openwrt_luci_rpc-1.1.17-py2.py3-none-any.whl.metadata (4.9 kB)
|
|
||||||
#18 3.892 Collecting asusrouter
|
|
||||||
#18 3.900 Downloading asusrouter-1.21.0-py3-none-any.whl.metadata (33 kB)
|
|
||||||
#18 3.999 Collecting asyncio
|
|
||||||
#18 4.007 Downloading asyncio-4.0.0-py3-none-any.whl.metadata (994 bytes)
|
|
||||||
#18 4.576 Collecting aiohttp
|
|
||||||
#18 4.582 Downloading aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (7.7 kB)
|
|
||||||
#18 4.729 Collecting graphene
|
|
||||||
#18 4.735 Downloading graphene-3.4.3-py2.py3-none-any.whl.metadata (6.9 kB)
|
|
||||||
#18 4.858 Collecting flask
|
|
||||||
#18 4.866 Downloading flask-3.1.2-py3-none-any.whl.metadata (3.2 kB)
|
|
||||||
#18 4.963 Collecting flask-cors
|
|
||||||
#18 4.972 Downloading flask_cors-6.0.1-py3-none-any.whl.metadata (5.3 kB)
|
|
||||||
#18 5.055 Collecting unifi-sm-api
|
|
||||||
#18 5.065 Downloading unifi_sm_api-0.2.1-py3-none-any.whl.metadata (2.3 kB)
|
|
||||||
#18 5.155 Collecting tplink-omada-client
|
|
||||||
#18 5.166 Downloading tplink_omada_client-1.4.4-py3-none-any.whl.metadata (3.5 kB)
|
|
||||||
#18 5.262 Collecting wakeonlan
|
|
||||||
#18 5.274 Downloading wakeonlan-3.1.0-py3-none-any.whl.metadata (4.3 kB)
|
|
||||||
#18 5.500 Collecting pycryptodome
|
|
||||||
#18 5.505 Downloading pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl.metadata (3.4 kB)
|
|
||||||
#18 5.653 Collecting requests
|
|
||||||
#18 5.660 Downloading requests-2.32.5-py3-none-any.whl.metadata (4.9 kB)
|
|
||||||
#18 5.764 Collecting paho-mqtt
|
|
||||||
#18 5.775 Downloading paho_mqtt-2.1.0-py3-none-any.whl.metadata (23 kB)
|
|
||||||
#18 5.890 Collecting scapy
|
|
||||||
#18 5.902 Downloading scapy-2.6.1-py3-none-any.whl.metadata (5.6 kB)
|
|
||||||
#18 6.002 Collecting cron-converter
|
|
||||||
#18 6.013 Downloading cron_converter-1.2.2-py3-none-any.whl.metadata (8.1 kB)
|
|
||||||
#18 6.187 Collecting pytz
|
|
||||||
#18 6.193 Downloading pytz-2025.2-py2.py3-none-any.whl.metadata (22 kB)
|
|
||||||
#18 6.285 Collecting json2table
|
|
||||||
#18 6.294 Downloading json2table-1.1.5-py2.py3-none-any.whl.metadata (6.0 kB)
|
|
||||||
#18 6.381 Collecting dhcp-leases
|
|
||||||
#18 6.387 Downloading dhcp_leases-0.1.6-py3-none-any.whl.metadata (5.9 kB)
|
|
||||||
#18 6.461 Collecting pyunifi
|
|
||||||
#18 6.471 Downloading pyunifi-2.21-py3-none-any.whl.metadata (274 bytes)
|
|
||||||
#18 6.582 Collecting speedtest-cli
|
|
||||||
#18 6.596 Downloading speedtest_cli-2.1.3-py2.py3-none-any.whl.metadata (6.8 kB)
|
|
||||||
#18 6.767 Collecting chardet
|
|
||||||
#18 6.780 Downloading chardet-5.2.0-py3-none-any.whl.metadata (3.4 kB)
|
|
||||||
#18 6.878 Collecting python-nmap
|
|
||||||
#18 6.886 Downloading python-nmap-0.7.1.tar.gz (44 kB)
|
|
||||||
#18 6.937 Installing build dependencies: started
|
|
||||||
#18 8.245 Installing build dependencies: finished with status 'done'
|
|
||||||
#18 8.246 Getting requirements to build wheel: started
|
|
||||||
#18 8.411 Getting requirements to build wheel: finished with status 'done'
|
|
||||||
#18 8.412 Preparing metadata (pyproject.toml): started
|
|
||||||
#18 8.575 Preparing metadata (pyproject.toml): finished with status 'done'
|
|
||||||
#18 8.648 Collecting dnspython
|
|
||||||
#18 8.654 Downloading dnspython-2.8.0-py3-none-any.whl.metadata (5.7 kB)
|
|
||||||
#18 8.741 Collecting librouteros
|
|
||||||
#18 8.752 Downloading librouteros-3.4.1-py3-none-any.whl.metadata (1.6 kB)
|
|
||||||
#18 8.869 Collecting yattag
|
|
||||||
#18 8.881 Downloading yattag-1.16.1.tar.gz (29 kB)
|
|
||||||
#18 8.925 Installing build dependencies: started
|
|
||||||
#18 10.23 Installing build dependencies: finished with status 'done'
|
|
||||||
#18 10.23 Getting requirements to build wheel: started
|
|
||||||
#18 10.38 Getting requirements to build wheel: finished with status 'done'
|
|
||||||
#18 10.39 Preparing metadata (pyproject.toml): started
|
|
||||||
#18 10.55 Preparing metadata (pyproject.toml): finished with status 'done'
|
|
||||||
#18 10.60 Collecting Click>=6.0 (from openwrt-luci-rpc)
|
|
||||||
#18 10.60 Downloading click-8.3.0-py3-none-any.whl.metadata (2.6 kB)
|
|
||||||
#18 10.70 Collecting packaging>=19.1 (from openwrt-luci-rpc)
|
|
||||||
#18 10.71 Downloading packaging-25.0-py3-none-any.whl.metadata (3.3 kB)
|
|
||||||
#18 10.87 Collecting urllib3>=1.26.14 (from asusrouter)
|
|
||||||
#18 10.88 Downloading urllib3-2.5.0-py3-none-any.whl.metadata (6.5 kB)
|
|
||||||
#18 10.98 Collecting xmltodict>=0.12.0 (from asusrouter)
|
|
||||||
#18 10.98 Downloading xmltodict-1.0.2-py3-none-any.whl.metadata (15 kB)
|
|
||||||
#18 11.09 Collecting aiohappyeyeballs>=2.5.0 (from aiohttp)
|
|
||||||
#18 11.10 Downloading aiohappyeyeballs-2.6.1-py3-none-any.whl.metadata (5.9 kB)
|
|
||||||
#18 11.19 Collecting aiosignal>=1.4.0 (from aiohttp)
|
|
||||||
#18 11.20 Downloading aiosignal-1.4.0-py3-none-any.whl.metadata (3.7 kB)
|
|
||||||
#18 11.32 Collecting attrs>=17.3.0 (from aiohttp)
|
|
||||||
#18 11.33 Downloading attrs-25.3.0-py3-none-any.whl.metadata (10 kB)
|
|
||||||
#18 11.47 Collecting frozenlist>=1.1.1 (from aiohttp)
|
|
||||||
#18 11.47 Downloading frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (18 kB)
|
|
||||||
#18 11.76 Collecting multidict<7.0,>=4.5 (from aiohttp)
|
|
||||||
#18 11.77 Downloading multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (5.3 kB)
|
|
||||||
#18 11.87 Collecting propcache>=0.2.0 (from aiohttp)
|
|
||||||
#18 11.88 Downloading propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (12 kB)
|
|
||||||
#18 12.19 Collecting yarl<2.0,>=1.17.0 (from aiohttp)
|
|
||||||
#18 12.20 Downloading yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (73 kB)
|
|
||||||
#18 12.31 Collecting graphql-core<3.3,>=3.1 (from graphene)
|
|
||||||
#18 12.32 Downloading graphql_core-3.2.6-py3-none-any.whl.metadata (11 kB)
|
|
||||||
#18 12.41 Collecting graphql-relay<3.3,>=3.1 (from graphene)
|
|
||||||
#18 12.42 Downloading graphql_relay-3.2.0-py3-none-any.whl.metadata (12 kB)
|
|
||||||
#18 12.50 Collecting python-dateutil<3,>=2.7.0 (from graphene)
|
|
||||||
#18 12.51 Downloading python_dateutil-2.9.0.post0-py2.py3-none-any.whl.metadata (8.4 kB)
|
|
||||||
#18 12.61 Collecting typing-extensions<5,>=4.7.1 (from graphene)
|
|
||||||
#18 12.61 Downloading typing_extensions-4.15.0-py3-none-any.whl.metadata (3.3 kB)
|
|
||||||
#18 12.71 Collecting blinker>=1.9.0 (from flask)
|
|
||||||
#18 12.72 Downloading blinker-1.9.0-py3-none-any.whl.metadata (1.6 kB)
|
|
||||||
#18 12.84 Collecting itsdangerous>=2.2.0 (from flask)
|
|
||||||
#18 12.85 Downloading itsdangerous-2.2.0-py3-none-any.whl.metadata (1.9 kB)
|
|
||||||
#18 12.97 Collecting jinja2>=3.1.2 (from flask)
|
|
||||||
#18 12.98 Downloading jinja2-3.1.6-py3-none-any.whl.metadata (2.9 kB)
|
|
||||||
#18 13.15 Collecting markupsafe>=2.1.1 (from flask)
|
|
||||||
#18 13.15 Downloading MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (4.0 kB)
|
|
||||||
#18 13.28 Collecting werkzeug>=3.1.0 (from flask)
|
|
||||||
#18 13.29 Downloading werkzeug-3.1.3-py3-none-any.whl.metadata (3.7 kB)
|
|
||||||
#18 13.42 Collecting awesomeversion>=22.9.0 (from tplink-omada-client)
|
|
||||||
#18 13.42 Downloading awesomeversion-25.8.0-py3-none-any.whl.metadata (9.8 kB)
|
|
||||||
#18 13.59 Collecting charset_normalizer<4,>=2 (from requests)
|
|
||||||
#18 13.59 Downloading charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (36 kB)
|
|
||||||
#18 13.77 Collecting idna<4,>=2.5 (from requests)
|
|
||||||
#18 13.78 Downloading idna-3.10-py3-none-any.whl.metadata (10 kB)
|
|
||||||
#18 13.94 Collecting certifi>=2017.4.17 (from requests)
|
|
||||||
#18 13.94 Downloading certifi-2025.8.3-py3-none-any.whl.metadata (2.4 kB)
|
|
||||||
#18 14.06 Collecting toml<0.11.0,>=0.10.2 (from librouteros)
|
|
||||||
#18 14.07 Downloading toml-0.10.2-py2.py3-none-any.whl.metadata (7.1 kB)
|
|
||||||
#18 14.25 Collecting six>=1.5 (from python-dateutil<3,>=2.7.0->graphene)
|
|
||||||
#18 14.26 Downloading six-1.17.0-py2.py3-none-any.whl.metadata (1.7 kB)
|
|
||||||
#18 14.33 Downloading openwrt_luci_rpc-1.1.17-py2.py3-none-any.whl (9.5 kB)
|
|
||||||
#18 14.37 Downloading asusrouter-1.21.0-py3-none-any.whl (131 kB)
|
|
||||||
#18 14.43 Downloading asyncio-4.0.0-py3-none-any.whl (5.6 kB)
|
|
||||||
#18 14.47 Downloading aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl (1.7 MB)
|
|
||||||
#18 14.67 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.7/1.7 MB 8.3 MB/s eta 0:00:00
|
|
||||||
#18 14.68 Downloading graphene-3.4.3-py2.py3-none-any.whl (114 kB)
|
|
||||||
#18 14.73 Downloading flask-3.1.2-py3-none-any.whl (103 kB)
|
|
||||||
#18 14.78 Downloading flask_cors-6.0.1-py3-none-any.whl (13 kB)
|
|
||||||
#18 14.84 Downloading unifi_sm_api-0.2.1-py3-none-any.whl (16 kB)
|
|
||||||
#18 14.88 Downloading tplink_omada_client-1.4.4-py3-none-any.whl (46 kB)
|
|
||||||
#18 14.93 Downloading wakeonlan-3.1.0-py3-none-any.whl (5.0 kB)
|
|
||||||
#18 14.99 Downloading pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl (2.3 MB)
|
|
||||||
#18 15.23 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.3/2.3 MB 8.9 MB/s eta 0:00:00
|
|
||||||
#18 15.24 Downloading requests-2.32.5-py3-none-any.whl (64 kB)
|
|
||||||
#18 15.30 Downloading paho_mqtt-2.1.0-py3-none-any.whl (67 kB)
|
|
||||||
#18 15.34 Downloading scapy-2.6.1-py3-none-any.whl (2.4 MB)
|
|
||||||
#18 15.62 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.4/2.4 MB 8.5 MB/s eta 0:00:00
|
|
||||||
#18 15.63 Downloading cron_converter-1.2.2-py3-none-any.whl (13 kB)
|
|
||||||
#18 15.67 Downloading pytz-2025.2-py2.py3-none-any.whl (509 kB)
|
|
||||||
#18 15.76 Downloading json2table-1.1.5-py2.py3-none-any.whl (8.7 kB)
|
|
||||||
#18 15.81 Downloading dhcp_leases-0.1.6-py3-none-any.whl (11 kB)
|
|
||||||
#18 15.86 Downloading pyunifi-2.21-py3-none-any.whl (11 kB)
|
|
||||||
#18 15.90 Downloading speedtest_cli-2.1.3-py2.py3-none-any.whl (23 kB)
|
|
||||||
#18 15.95 Downloading chardet-5.2.0-py3-none-any.whl (199 kB)
|
|
||||||
#18 16.01 Downloading dnspython-2.8.0-py3-none-any.whl (331 kB)
|
|
||||||
#18 16.10 Downloading librouteros-3.4.1-py3-none-any.whl (16 kB)
|
|
||||||
#18 16.14 Downloading aiohappyeyeballs-2.6.1-py3-none-any.whl (15 kB)
|
|
||||||
#18 16.20 Downloading aiosignal-1.4.0-py3-none-any.whl (7.5 kB)
|
|
||||||
#18 16.24 Downloading attrs-25.3.0-py3-none-any.whl (63 kB)
|
|
||||||
#18 16.30 Downloading awesomeversion-25.8.0-py3-none-any.whl (15 kB)
|
|
||||||
#18 16.34 Downloading blinker-1.9.0-py3-none-any.whl (8.5 kB)
|
|
||||||
#18 16.39 Downloading certifi-2025.8.3-py3-none-any.whl (161 kB)
|
|
||||||
#18 16.45 Downloading charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl (153 kB)
|
|
||||||
#18 16.50 Downloading click-8.3.0-py3-none-any.whl (107 kB)
|
|
||||||
#18 16.55 Downloading frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl (237 kB)
|
|
||||||
#18 16.62 Downloading graphql_core-3.2.6-py3-none-any.whl (203 kB)
|
|
||||||
#18 16.69 Downloading graphql_relay-3.2.0-py3-none-any.whl (16 kB)
|
|
||||||
#18 16.73 Downloading idna-3.10-py3-none-any.whl (70 kB)
|
|
||||||
#18 16.79 Downloading itsdangerous-2.2.0-py3-none-any.whl (16 kB)
|
|
||||||
#18 16.84 Downloading jinja2-3.1.6-py3-none-any.whl (134 kB)
|
|
||||||
#18 16.96 Downloading MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl (23 kB)
|
|
||||||
#18 17.02 Downloading multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl (251 kB)
|
|
||||||
#18 17.09 Downloading packaging-25.0-py3-none-any.whl (66 kB)
|
|
||||||
#18 17.14 Downloading propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl (222 kB)
|
|
||||||
#18 17.21 Downloading python_dateutil-2.9.0.post0-py2.py3-none-any.whl (229 kB)
|
|
||||||
#18 17.28 Downloading toml-0.10.2-py2.py3-none-any.whl (16 kB)
|
|
||||||
#18 17.33 Downloading typing_extensions-4.15.0-py3-none-any.whl (44 kB)
|
|
||||||
#18 17.39 Downloading urllib3-2.5.0-py3-none-any.whl (129 kB)
|
|
||||||
#18 17.44 Downloading werkzeug-3.1.3-py3-none-any.whl (224 kB)
|
|
||||||
#18 17.51 Downloading xmltodict-1.0.2-py3-none-any.whl (13 kB)
|
|
||||||
#18 17.56 Downloading yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl (374 kB)
|
|
||||||
#18 17.65 Downloading six-1.17.0-py2.py3-none-any.whl (11 kB)
|
|
||||||
#18 17.77 Building wheels for collected packages: python-nmap, yattag, aiofreepybox
|
|
||||||
#18 17.77 Building wheel for python-nmap (pyproject.toml): started
|
|
||||||
#18 17.95 Building wheel for python-nmap (pyproject.toml): finished with status 'done'
|
|
||||||
#18 17.96 Created wheel for python-nmap: filename=python_nmap-0.7.1-py2.py3-none-any.whl size=20679 sha256=ecd9b14109651cfaa5bf035f90076b9442985cc254fa5f8a49868fc896e86edb
|
|
||||||
#18 17.96 Stored in directory: /root/.cache/pip/wheels/06/fc/d4/0957e1d9942e696188208772ea0abf909fe6eb3d9dff6e5a9e
|
|
||||||
#18 17.96 Building wheel for yattag (pyproject.toml): started
|
|
||||||
#18 18.14 Building wheel for yattag (pyproject.toml): finished with status 'done'
|
|
||||||
#18 18.14 Created wheel for yattag: filename=yattag-1.16.1-py3-none-any.whl size=15930 sha256=2135fc2034a3847c81eb6a0d7b85608e8272339fa5c1961f87b02dfe6d74d0ad
|
|
||||||
#18 18.14 Stored in directory: /root/.cache/pip/wheels/d2/2f/52/049ff4f7c8c9c932b2ece7ec800d7facf2a141ac5ab0ce7e51
|
|
||||||
#18 18.15 Building wheel for aiofreepybox (pyproject.toml): started
|
|
||||||
#18 18.36 Building wheel for aiofreepybox (pyproject.toml): finished with status 'done'
|
|
||||||
#18 18.36 Created wheel for aiofreepybox: filename=aiofreepybox-6.0.0-py3-none-any.whl size=60051 sha256=dbdee5350b10b6550ede50bc779381b7f39f1e5d5da889f2ee98cb5a869d3425
|
|
||||||
#18 18.36 Stored in directory: /tmp/pip-ephem-wheel-cache-93bgc4e2/wheels/3c/d3/ae/fb97a84a29a5fbe8517de58d67e66586505440af35981e0dd3
|
|
||||||
#18 18.36 Successfully built python-nmap yattag aiofreepybox
|
|
||||||
#18 18.45 Installing collected packages: yattag, speedtest-cli, pytz, python-nmap, json2table, dhcp-leases, xmltodict, wakeonlan, urllib3, typing-extensions, toml, six, scapy, pycryptodome, propcache, paho-mqtt, packaging, multidict, markupsafe, itsdangerous, idna, graphql-core, frozenlist, dnspython, Click, charset_normalizer, chardet, certifi, blinker, awesomeversion, attrs, asyncio, aiohappyeyeballs, yarl, werkzeug, requests, python-dateutil, librouteros, jinja2, graphql-relay, aiosignal, unifi-sm-api, pyunifi, openwrt-luci-rpc, graphene, flask, cron-converter, aiohttp, tplink-omada-client, flask-cors, asusrouter, aiofreepybox
|
|
||||||
#18 24.35 Successfully installed Click-8.3.0 aiofreepybox-6.0.0 aiohappyeyeballs-2.6.1 aiohttp-3.12.15 aiosignal-1.4.0 asusrouter-1.21.0 asyncio-4.0.0 attrs-25.3.0 awesomeversion-25.8.0 blinker-1.9.0 certifi-2025.8.3 chardet-5.2.0 charset_normalizer-3.4.3 cron-converter-1.2.2 dhcp-leases-0.1.6 dnspython-2.8.0 flask-3.1.2 flask-cors-6.0.1 frozenlist-1.7.0 graphene-3.4.3 graphql-core-3.2.6 graphql-relay-3.2.0 idna-3.10 itsdangerous-2.2.0 jinja2-3.1.6 json2table-1.1.5 librouteros-3.4.1 markupsafe-3.0.2 multidict-6.6.4 openwrt-luci-rpc-1.1.17 packaging-25.0 paho-mqtt-2.1.0 propcache-0.3.2 pycryptodome-3.23.0 python-dateutil-2.9.0.post0 python-nmap-0.7.1 pytz-2025.2 pyunifi-2.21 requests-2.32.5 scapy-2.6.1 six-1.17.0 speedtest-cli-2.1.3 toml-0.10.2 tplink-omada-client-1.4.4 typing-extensions-4.15.0 unifi-sm-api-0.2.1 urllib3-2.5.0 wakeonlan-3.1.0 werkzeug-3.1.3 xmltodict-1.0.2 yarl-1.20.1 yattag-1.16.1
|
|
||||||
#18 24.47
|
|
||||||
#18 24.47 [notice] A new release of pip is available: 25.0.1 -> 25.2
|
|
||||||
#18 24.47 [notice] To update, run: pip install --upgrade pip
|
|
||||||
#18 DONE 25.1s
|
|
||||||
|
|
||||||
#19 [builder 14/15] RUN bash -c "find /app -type d -exec chmod 750 {} \;" && bash -c "find /app -type f -exec chmod 640 {} \;" && bash -c "find /app -type f \( -name '*.sh' -o -name '*.py' -o -name 'speedtest-cli' \) -exec chmod 750 {} \;"
|
#19 [runner 3/11] COPY --chown=netalertx:netalertx install/production-filesystem/ /
|
||||||
#19 DONE 11.9s
|
#19 CACHED
|
||||||
|
|
||||||
#20 [builder 15/15] COPY install/freebox_certificate.pem /opt/venv/lib/python3.12/site-packages/aiofreepybox/freebox_certificates.pem
|
#20 [hardened 2/2] RUN chown -R readonly:readonly /app/back /app/front /app/server /services /services/config /entrypoint.d && chmod -R 004 /app/back /app/front /app/server /services /services/config /entrypoint.d && find /app/back /app/front /app/server /services /services/config /entrypoint.d -type d -exec chmod 005 {} + && install -d -o netalertx -g netalertx -m 0777 /data /data/config /data/db /tmp/api /tmp/log /tmp/log/plugins /tmp/run /tmp/run/tmp /tmp/run/logs /tmp/nginx/active-config && chown readonly:readonly /entrypoint.sh /root-entrypoint.sh /opt /opt/venv && chmod 005 /entrypoint.sh /root-entrypoint.sh /services/*.sh /services/scripts/* /entrypoint.d/* /app /opt /opt/venv && rm -f "/data/config/app.conf" "/data/db/app.db" "/data/db/app.db-shm" "/data/db/app.db-wal" || true && apk del apk-tools && rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers /lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root /srv /media && printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
||||||
#20 DONE 0.4s
|
#20 CACHED
|
||||||
|
|
||||||
#21 [runner 2/14] COPY --from=builder /opt/venv /opt/venv
|
#21 exporting to image
|
||||||
#21 DONE 0.8s
|
#21 exporting layers done
|
||||||
|
#21 writing image sha256:7aac94268b770de42da767c06b8e9fecaeabf7ce1277cec1c83092484debd4c3 0.0s done
|
||||||
#22 [runner 3/14] COPY --from=builder /usr/sbin/usermod /usr/sbin/groupmod /usr/sbin/
|
#21 naming to docker.io/library/netalertx-test 0.0s done
|
||||||
#22 DONE 0.4s
|
#21 DONE 0.1s
|
||||||
|
|
||||||
#23 [runner 4/14] RUN apk update --no-cache && apk add --no-cache bash libbsd zip lsblk gettext-envsubst sudo mtr tzdata s6-overlay && apk add --no-cache curl arp-scan iproute2 iproute2-ss nmap nmap-scripts traceroute nbtscan avahi avahi-tools openrc dbus net-tools net-snmp-tools bind-tools awake ca-certificates && apk add --no-cache sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session && apk add --no-cache python3 nginx && ln -s /usr/bin/awake /usr/bin/wakeonlan && bash -c "install -d -m 750 -o nginx -g www-data /app /app" && rm -f /etc/nginx/http.d/default.conf
|
|
||||||
#23 0.487 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 0.696 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 1.156 v3.22.1-472-ga67443520d6 [https://dl-cdn.alpinelinux.org/alpine/v3.22/main]
|
|
||||||
#23 1.156 v3.22.1-473-gcd551a4e006 [https://dl-cdn.alpinelinux.org/alpine/v3.22/community]
|
|
||||||
#23 1.156 OK: 26326 distinct packages available
|
|
||||||
#23 1.195 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 1.276 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 1.568 (1/38) Installing ncurses-terminfo-base (6.5_p20250503-r0)
|
|
||||||
#23 1.580 (2/38) Installing libncursesw (6.5_p20250503-r0)
|
|
||||||
#23 1.629 (3/38) Installing readline (8.2.13-r1)
|
|
||||||
#23 1.659 (4/38) Installing bash (5.2.37-r0)
|
|
||||||
#23 1.723 Executing bash-5.2.37-r0.post-install
|
|
||||||
#23 1.740 (5/38) Installing libintl (0.24.1-r0)
|
|
||||||
#23 1.749 (6/38) Installing gettext-envsubst (0.24.1-r0)
|
|
||||||
#23 1.775 (7/38) Installing libmd (1.1.0-r0)
|
|
||||||
#23 1.782 (8/38) Installing libbsd (0.12.2-r0)
|
|
||||||
#23 1.807 (9/38) Installing libeconf (0.6.3-r0)
|
|
||||||
#23 1.812 (10/38) Installing libblkid (2.41-r9)
|
|
||||||
#23 1.831 (11/38) Installing libmount (2.41-r9)
|
|
||||||
#23 1.857 (12/38) Installing libsmartcols (2.41-r9)
|
|
||||||
#23 1.872 (13/38) Installing lsblk (2.41-r9)
|
|
||||||
#23 1.886 (14/38) Installing libcap2 (2.76-r0)
|
|
||||||
#23 1.897 (15/38) Installing jansson (2.14.1-r0)
|
|
||||||
#23 1.910 (16/38) Installing mtr (0.96-r0)
|
|
||||||
#23 1.948 (17/38) Installing skalibs-libs (2.14.4.0-r0)
|
|
||||||
#23 1.966 (18/38) Installing execline-libs (2.9.7.0-r0)
|
|
||||||
#23 1.974 (19/38) Installing execline (2.9.7.0-r0)
|
|
||||||
#23 1.996 Executing execline-2.9.7.0-r0.post-install
|
|
||||||
#23 2.004 (20/38) Installing s6-ipcserver (2.13.2.0-r0)
|
|
||||||
#23 2.010 (21/38) Installing s6-libs (2.13.2.0-r0)
|
|
||||||
#23 2.016 (22/38) Installing s6 (2.13.2.0-r0)
|
|
||||||
#23 2.033 Executing s6-2.13.2.0-r0.pre-install
|
|
||||||
#23 2.159 (23/38) Installing s6-rc-libs (0.5.6.0-r0)
|
|
||||||
#23 2.164 (24/38) Installing s6-rc (0.5.6.0-r0)
|
|
||||||
#23 2.175 (25/38) Installing s6-linux-init (1.1.3.0-r0)
|
|
||||||
#23 2.185 (26/38) Installing s6-portable-utils (2.3.1.0-r0)
|
|
||||||
#23 2.193 (27/38) Installing s6-linux-utils (2.6.3.0-r0)
|
|
||||||
#23 2.200 (28/38) Installing s6-dns-libs (2.4.1.0-r0)
|
|
||||||
#23 2.208 (29/38) Installing s6-dns (2.4.1.0-r0)
|
|
||||||
#23 2.222 (30/38) Installing bearssl-libs (0.6_git20241009-r0)
|
|
||||||
#23 2.254 (31/38) Installing s6-networking-libs (2.7.1.0-r0)
|
|
||||||
#23 2.264 (32/38) Installing s6-networking (2.7.1.0-r0)
|
|
||||||
#23 2.286 (33/38) Installing s6-overlay-helpers (0.1.2.0-r0)
|
|
||||||
#23 2.355 (34/38) Installing s6-overlay (3.2.0.3-r0)
|
|
||||||
#23 2.380 (35/38) Installing sudo (1.9.17_p2-r0)
|
|
||||||
#23 2.511 (36/38) Installing tzdata (2025b-r0)
|
|
||||||
#23 2.641 (37/38) Installing unzip (6.0-r15)
|
|
||||||
#23 2.659 (38/38) Installing zip (3.0-r13)
|
|
||||||
#23 2.694 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#23 2.725 OK: 16 MiB in 54 packages
|
|
||||||
#23 2.778 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 2.918 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 3.218 (1/77) Installing libpcap (1.10.5-r1)
|
|
||||||
#23 3.234 (2/77) Installing arp-scan (1.10.0-r2)
|
|
||||||
#23 3.289 (3/77) Installing dbus-libs (1.16.2-r1)
|
|
||||||
#23 3.307 (4/77) Installing avahi-libs (0.8-r21)
|
|
||||||
#23 3.315 (5/77) Installing libdaemon (0.14-r6)
|
|
||||||
#23 3.322 (6/77) Installing libevent (2.1.12-r8)
|
|
||||||
#23 3.355 (7/77) Installing libexpat (2.7.2-r0)
|
|
||||||
#23 3.368 (8/77) Installing avahi (0.8-r21)
|
|
||||||
#23 3.387 Executing avahi-0.8-r21.pre-install
|
|
||||||
#23 3.465 (9/77) Installing gdbm (1.24-r0)
|
|
||||||
#23 3.477 (10/77) Installing avahi-tools (0.8-r21)
|
|
||||||
#23 3.483 (11/77) Installing libbz2 (1.0.8-r6)
|
|
||||||
#23 3.490 (12/77) Installing libffi (3.4.8-r0)
|
|
||||||
#23 3.496 (13/77) Installing xz-libs (5.8.1-r0)
|
|
||||||
#23 3.517 (14/77) Installing libgcc (14.2.0-r6)
|
|
||||||
#23 3.529 (15/77) Installing libstdc++ (14.2.0-r6)
|
|
||||||
#23 3.613 (16/77) Installing mpdecimal (4.0.1-r0)
|
|
||||||
#23 3.628 (17/77) Installing libpanelw (6.5_p20250503-r0)
|
|
||||||
#23 3.634 (18/77) Installing sqlite-libs (3.49.2-r1)
|
|
||||||
#23 3.783 (19/77) Installing python3 (3.12.11-r0)
|
|
||||||
#23 4.494 (20/77) Installing python3-pycache-pyc0 (3.12.11-r0)
|
|
||||||
#23 4.915 (21/77) Installing pyc (3.12.11-r0)
|
|
||||||
#23 4.915 (22/77) Installing py3-awake-pyc (1.0-r12)
|
|
||||||
#23 4.922 (23/77) Installing python3-pyc (3.12.11-r0)
|
|
||||||
#23 4.922 (24/77) Installing py3-awake (1.0-r12)
|
|
||||||
#23 4.928 (25/77) Installing awake (1.0-r12)
|
|
||||||
#23 4.932 (26/77) Installing fstrm (0.6.1-r4)
|
|
||||||
#23 4.940 (27/77) Installing krb5-conf (1.0-r2)
|
|
||||||
#23 5.017 (28/77) Installing libcom_err (1.47.2-r2)
|
|
||||||
#23 5.026 (29/77) Installing keyutils-libs (1.6.3-r4)
|
|
||||||
#23 5.033 (30/77) Installing libverto (0.3.2-r2)
|
|
||||||
#23 5.039 (31/77) Installing krb5-libs (1.21.3-r0)
|
|
||||||
#23 5.115 (32/77) Installing json-c (0.18-r1)
|
|
||||||
#23 5.123 (33/77) Installing nghttp2-libs (1.65.0-r0)
|
|
||||||
#23 5.136 (34/77) Installing protobuf-c (1.5.2-r0)
|
|
||||||
#23 5.142 (35/77) Installing userspace-rcu (0.15.2-r0)
|
|
||||||
#23 5.161 (36/77) Installing libuv (1.51.0-r0)
|
|
||||||
#23 5.178 (37/77) Installing libxml2 (2.13.8-r0)
|
|
||||||
#23 5.232 (38/77) Installing bind-libs (9.20.13-r0)
|
|
||||||
#23 5.355 (39/77) Installing bind-tools (9.20.13-r0)
|
|
||||||
#23 5.395 (40/77) Installing ca-certificates (20250619-r0)
|
|
||||||
#23 5.518 (41/77) Installing brotli-libs (1.1.0-r2)
|
|
||||||
#23 5.559 (42/77) Installing c-ares (1.34.5-r0)
|
|
||||||
#23 5.573 (43/77) Installing libunistring (1.3-r0)
|
|
||||||
#23 5.645 (44/77) Installing libidn2 (2.3.7-r0)
|
|
||||||
#23 5.664 (45/77) Installing libpsl (0.21.5-r3)
|
|
||||||
#23 5.676 (46/77) Installing zstd-libs (1.5.7-r0)
|
|
||||||
#23 5.720 (47/77) Installing libcurl (8.14.1-r1)
|
|
||||||
#23 5.753 (48/77) Installing curl (8.14.1-r1)
|
|
||||||
#23 5.778 (49/77) Installing dbus (1.16.2-r1)
|
|
||||||
#23 5.796 Executing dbus-1.16.2-r1.pre-install
|
|
||||||
#23 5.869 Executing dbus-1.16.2-r1.post-install
|
|
||||||
#23 5.887 (50/77) Installing dbus-daemon-launch-helper (1.16.2-r1)
|
|
||||||
#23 5.896 (51/77) Installing libelf (0.193-r0)
|
|
||||||
#23 5.908 (52/77) Installing libmnl (1.0.5-r2)
|
|
||||||
#23 5.915 (53/77) Installing iproute2-minimal (6.15.0-r0)
|
|
||||||
#23 5.954 (54/77) Installing libxtables (1.8.11-r1)
|
|
||||||
#23 5.963 (55/77) Installing iproute2-tc (6.15.0-r0)
|
|
||||||
#23 6.001 (56/77) Installing iproute2-ss (6.15.0-r0)
|
|
||||||
#23 6.014 (57/77) Installing iproute2 (6.15.0-r0)
|
|
||||||
#23 6.042 Executing iproute2-6.15.0-r0.post-install
|
|
||||||
#23 6.047 (58/77) Installing nbtscan (1.7.2-r0)
|
|
||||||
#23 6.053 (59/77) Installing net-snmp-libs (5.9.4-r1)
|
|
||||||
#23 6.112 (60/77) Installing net-snmp-agent-libs (5.9.4-r1)
|
|
||||||
#23 6.179 (61/77) Installing net-snmp-tools (5.9.4-r1)
|
|
||||||
#23 6.205 (62/77) Installing mii-tool (2.10-r3)
|
|
||||||
#23 6.211 (63/77) Installing net-tools (2.10-r3)
|
|
||||||
#23 6.235 (64/77) Installing lua5.4-libs (5.4.7-r0)
|
|
||||||
#23 6.258 (65/77) Installing libssh2 (1.11.1-r0)
|
|
||||||
#23 6.279 (66/77) Installing nmap (7.97-r0)
|
|
||||||
#23 6.524 (67/77) Installing nmap-nselibs (7.97-r0)
|
|
||||||
#23 6.729 (68/77) Installing nmap-scripts (7.97-r0)
|
|
||||||
#23 6.842 (69/77) Installing bridge (1.5-r5)
|
|
||||||
#23 6.904 (70/77) Installing ifupdown-ng (0.12.1-r7)
|
|
||||||
#23 6.915 (71/77) Installing ifupdown-ng-iproute2 (0.12.1-r7)
|
|
||||||
#23 6.920 (72/77) Installing openrc-user (0.62.6-r0)
|
|
||||||
#23 6.924 (73/77) Installing openrc (0.62.6-r0)
|
|
||||||
#23 7.013 Executing openrc-0.62.6-r0.post-install
|
|
||||||
#23 7.016 (74/77) Installing avahi-openrc (0.8-r21)
|
|
||||||
#23 7.021 (75/77) Installing dbus-openrc (1.16.2-r1)
|
|
||||||
#23 7.026 (76/77) Installing s6-openrc (2.13.2.0-r0)
|
|
||||||
#23 7.032 (77/77) Installing traceroute (2.1.6-r0)
|
|
||||||
#23 7.040 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#23 7.042 Executing ca-certificates-20250619-r0.trigger
|
|
||||||
#23 7.101 Executing dbus-1.16.2-r1.trigger
|
|
||||||
#23 7.104 OK: 102 MiB in 131 packages
|
|
||||||
#23 7.156 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 7.243 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 7.543 (1/12) Installing php83-common (8.3.24-r0)
|
|
||||||
#23 7.551 (2/12) Installing argon2-libs (20190702-r5)
|
|
||||||
#23 7.557 (3/12) Installing libedit (20250104.3.1-r1)
|
|
||||||
#23 7.568 (4/12) Installing pcre2 (10.43-r1)
|
|
||||||
#23 7.600 (5/12) Installing php83 (8.3.24-r0)
|
|
||||||
#23 7.777 (6/12) Installing php83-cgi (8.3.24-r0)
|
|
||||||
#23 7.953 (7/12) Installing php83-curl (8.3.24-r0)
|
|
||||||
#23 7.968 (8/12) Installing acl-libs (2.3.2-r1)
|
|
||||||
#23 7.975 (9/12) Installing php83-fpm (8.3.24-r0)
|
|
||||||
#23 8.193 (10/12) Installing php83-session (8.3.24-r0)
|
|
||||||
#23 8.204 (11/12) Installing php83-sqlite3 (8.3.24-r0)
|
|
||||||
#23 8.213 (12/12) Installing sqlite (3.49.2-r1)
|
|
||||||
#23 8.309 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#23 8.317 OK: 129 MiB in 143 packages
|
|
||||||
#23 8.369 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 8.449 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 8.747 (1/2) Installing nginx (1.28.0-r3)
|
|
||||||
#23 8.766 Executing nginx-1.28.0-r3.pre-install
|
|
||||||
#23 8.863 Executing nginx-1.28.0-r3.post-install
|
|
||||||
#23 8.865 (2/2) Installing nginx-openrc (1.28.0-r3)
|
|
||||||
#23 8.870 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#23 8.873 OK: 130 MiB in 145 packages
|
|
||||||
#23 DONE 9.5s
|
|
||||||
|
|
||||||
#24 [runner 5/14] COPY --from=builder --chown=nginx:www-data /app/ /app/
|
|
||||||
#24 DONE 0.5s
|
|
||||||
|
|
||||||
#25 [runner 6/14] RUN mkdir -p /app/config /app/db /app/log/plugins
|
|
||||||
#25 DONE 0.5s
|
|
||||||
|
|
||||||
#26 [runner 7/14] COPY --chmod=600 --chown=root:root install/crontab /etc/crontabs/root
|
|
||||||
#26 DONE 0.3s
|
|
||||||
|
|
||||||
#27 [runner 8/14] COPY --chmod=755 dockerfiles/healthcheck.sh /usr/local/bin/healthcheck.sh
|
|
||||||
#27 DONE 0.3s
|
|
||||||
|
|
||||||
#28 [runner 9/14] RUN touch /app/log/app.log && touch /app/log/execution_queue.log && touch /app/log/app_front.log && touch /app/log/app.php_errors.log && touch /app/log/stderr.log && touch /app/log/stdout.log && touch /app/log/db_is_locked.log && touch /app/log/IP_changes.log && touch /app/log/report_output.txt && touch /app/log/report_output.html && touch /app/log/report_output.json && touch /app/api/user_notifications.json
|
|
||||||
#28 DONE 0.6s
|
|
||||||
|
|
||||||
#29 [runner 10/14] COPY dockerfiles /app/dockerfiles
|
|
||||||
#29 DONE 0.3s
|
|
||||||
|
|
||||||
#30 [runner 11/14] RUN chmod +x /app/dockerfiles/*.sh
|
|
||||||
#30 DONE 0.8s
|
|
||||||
|
|
||||||
#31 [runner 12/14] RUN /app/dockerfiles/init-nginx.sh && /app/dockerfiles/init-php-fpm.sh && /app/dockerfiles/init-crond.sh && /app/dockerfiles/init-backend.sh
|
|
||||||
#31 0.417 Initializing nginx...
|
|
||||||
#31 0.417 Setting webserver to address (0.0.0.0) and port (20211)
|
|
||||||
#31 0.418 /app/dockerfiles/init-nginx.sh: line 5: /app/install/netalertx.template.conf: No such file or directory
|
|
||||||
#31 0.611 nginx initialized.
|
|
||||||
#31 0.612 Initializing php-fpm...
|
|
||||||
#31 0.654 php-fpm initialized.
|
|
||||||
#31 0.655 Initializing crond...
|
|
||||||
#31 0.689 crond initialized.
|
|
||||||
#31 0.690 Initializing backend...
|
|
||||||
#31 12.19 Backend initialized.
|
|
||||||
#31 DONE 12.3s
|
|
||||||
|
|
||||||
#32 [runner 13/14] RUN rm -rf /app/dockerfiles
|
|
||||||
#32 DONE 0.6s
|
|
||||||
|
|
||||||
#33 [runner 14/14] RUN date +%s > /app/front/buildtimestamp.txt
|
|
||||||
#33 DONE 0.6s
|
|
||||||
|
|
||||||
#34 exporting to image
|
|
||||||
#34 exporting layers
|
|
||||||
#34 exporting layers 2.4s done
|
|
||||||
#34 writing image sha256:0afcbc41473de559eff0dd93250595494fe4d8ea620861e9e90d50a248fcefda 0.0s done
|
|
||||||
#34 naming to docker.io/library/netalertx 0.0s done
|
|
||||||
#34 DONE 2.5s
|
|
||||||
|
|||||||
56
docs/ADVISORY_EYES_ON_GLASS.md
Normal file
56
docs/ADVISORY_EYES_ON_GLASS.md
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
### Build an MSP Wallboard for Network Monitoring
|
||||||
|
|
||||||
|
For Managed Service Providers (MSPs) and Network Operations Centers (NOC), "Eyes on Glass" monitoring requires a UI that is both self-healing (auto-refreshing) and focused only on critical data. By leveraging the **UI Settings Plugin**, you can transform NetAlertX from a management tool into a dedicated live monitor.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 1. Configure Auto-Refresh for Live Monitoring
|
||||||
|
|
||||||
|
Static dashboards are the enemy of real-time response. NetAlertX allows you to force the UI to pull fresh data without manual page reloads.
|
||||||
|
|
||||||
|
* **Setting:** Locate the `UI_REFRESH` (or similar "Auto-refresh UI") setting within the **UI Settings plugin**.
|
||||||
|
* **Optimal Interval:** Set this between **60 to 120 seconds**.
|
||||||
|
* *Note:* Refreshing too frequently (e.g., <30s) on large networks can lead to high browser and server CPU usage.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### 2. Streamlining the Dashboard (MSP Mode)
|
||||||
|
|
||||||
|
An MSP's focus is on what is *broken*, not what is working. Hide the noise to increase reaction speed.
|
||||||
|
|
||||||
|
* **Hide Unnecessary Blocks:** Under UI Settings, disable dashboard blocks that don't provide immediate utility, such as **Online presence** or **Tiles**.
|
||||||
|
* **Hide virtual connections:** You can specify which relationships should be hidden from the main view, removing any non-essential virtual devices from your views.
|
||||||
|
* **Browser Full-Screen:** Use the built-in "Full Screen" toggle in the top bar to remove browser chrome (URL bars/tabs) for a cleaner "Wallboard" look.
|
||||||
|
|
||||||
|
### 3. Creating Custom NOC Views
|
||||||
|
|
||||||
|
Use the UI Filters in tandem with UI Settings to create custom views.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
| Feature | NOC/MSP Application |
|
||||||
|
| --- | --- |
|
||||||
|
| **Site-Specific Nodes** | Filter the view by a specific "Sync Node" or "Location" filter to monitor a single client site. |
|
||||||
|
| **Filter by Criticality** | Filter devices where `Group == "Infrastructure"` or `"Server"`. (depending on your predefined values) |
|
||||||
|
| **Predefined "Down" View** | Bookmark the URL with the `/devices.php#down` path to ensure the dashboard always loads into an "Alert Only" mode. |
|
||||||
|
|
||||||
|
### 4. Browser & Cache Stability
|
||||||
|
|
||||||
|
Because the UI is a web application, long-running sessions can occasionally experience cache drift.
|
||||||
|
|
||||||
|
* **Cache Refresh:** If you notice the "Show # Entries" resetting or icons failing to load after days of uptime, use the **Reload** icon in the application header (not the browser refresh) to clear the internal app cache.
|
||||||
|
* **Dedicated Hardware:** For 24/7 monitoring, use a dedicated thin client or Raspberry Pi running in "Kiosk Mode" to prevent OS-level popups from obscuring the dashboard.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> [NetAlertX - Detailed Dashboard Guide](https://www.youtube.com/watch?v=umh1c_40HW8)
|
||||||
|
> This video provides a visual walkthrough of the NetAlertX dashboard features, including how to map and visualize devices which is crucial for setting up a clear "Eyes on Glass" monitoring environment.
|
||||||
|
|
||||||
|
### Summary Checklist
|
||||||
|
|
||||||
|
* [ ] **Automate Refresh:** Set `UI_REFRESH` to **60-120s** in UI Settings to ensure the dashboard stays current without manual intervention.
|
||||||
|
* [ ] **Filter for Criticality:** Bookmark the **`/devices.php#down`** view to instantly focus on offline assets rather than the entire inventory.
|
||||||
|
* [ ] **Remove UI Noise:** Use UI Settings to hide non-essential dashboard blocks (e.g., **Tiles** or remove **Virtual Connections** devices) to maximize screen real estate for alerts.
|
||||||
|
* [ ] **Segment by Site:** Use **Location** or **Sync Node** filters to create dedicated views for specific client networks or physical branches.
|
||||||
|
* [ ] **Ensure Stability:** Run on a dedicated "Kiosk" browser and use the internal **Reload icon** occasionally to maintain a clean application cache.
|
||||||
121
docs/ADVISORY_MULTI_NETWORK.md
Normal file
121
docs/ADVISORY_MULTI_NETWORK.md
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
## ADVISORY: Best Practices for Monitoring Multiple Networks with NetAlertX
|
||||||
|
|
||||||
|
### 1. Define Monitoring Scope & Architecture
|
||||||
|
|
||||||
|
Effective multi-network monitoring starts with understanding how NetAlertX "sees" your traffic.
|
||||||
|
|
||||||
|
* **A. Understand Network Accessibility:** Local ARP-based scanning (**ARPSCAN**) only discovers devices on directly accessible subnets due to Layer 2 limitations. It cannot traverse VPNs or routed borders without specific configuration.
|
||||||
|
* **B. Plan Subnet & Scan Interfaces:** Explicitly configure each accessible segment in `SCAN_SUBNETS` with the corresponding interfaces.
|
||||||
|
* **C. Remote & Inaccessible Networks:** For networks unreachable via ARP, use these strategies:
|
||||||
|
* **Alternate Plugins:** Supplement discovery with [SNMPDSC](SNMPDSC) or [DHCP lease imports](https://docs.netalertx.com/PLUGINS/?h=DHCPLSS#available-plugins).
|
||||||
|
* **Centralized Multi-Tenant Management using Sync Nodes:** Run secondary NetAlertX instances on isolated networks and aggregate data using the **SYNC plugin**.
|
||||||
|
* **Manual Entry:** For static assets where only ICMP (ping) status is needed.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Explore the [remote networks](./REMOTE_NETWORKS.md) documentation for more details on how to set up the approaches mentioned above.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. Automating IT Asset Inventory with Workflows
|
||||||
|
|
||||||
|
[Workflows](./WORKFLOWS.md) are the "engine" of NetAlertX, reducing manual overhead as your device list grows.
|
||||||
|
|
||||||
|
* **A. Logical Ownership & VLAN Tagging:** Create a workflow triggered on **Device Creation** to:
|
||||||
|
1. Inspect the IP/Subnet.
|
||||||
|
2. Set `devVlan` or `devOwner` custom fields automatically.
|
||||||
|
|
||||||
|
|
||||||
|
* **B. Auto-Grouping:** Use conditional logic to categorize devices.
|
||||||
|
* *Example:* If `devLastIP == 10.10.20.*`, then `Set devLocation = "BranchOffice"`.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "Assign Location - BranchOffice",
|
||||||
|
"trigger": {
|
||||||
|
"object_type": "Devices",
|
||||||
|
"event_type": "update"
|
||||||
|
},
|
||||||
|
"conditions": [
|
||||||
|
{
|
||||||
|
"logic": "AND",
|
||||||
|
"conditions": [
|
||||||
|
{
|
||||||
|
"field": "devLastIP",
|
||||||
|
"operator": "contains",
|
||||||
|
"value": "10.10.20."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"actions": [
|
||||||
|
{
|
||||||
|
"type": "update_field",
|
||||||
|
"field": "devLocation",
|
||||||
|
"value": "BranchOffice"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
* **C. Sync Node Tracking:** When using multiple instances, ensure all sync hub nodes have a descriptive `SYNC_node_name` to distinguish between sites.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Always test new workflows in a "Staging" instance. A misconfigured workflow can trigger thousands of unintended updates across your database.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. Notification Strategy: Low Noise, High Signal
|
||||||
|
|
||||||
|
A multi-network environment can generate significant "alert fatigue." Use a layered filtering approach.
|
||||||
|
|
||||||
|
| Level | Strategy | Recommended Action |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| **Device** | Silence Flapping | Use "Skip repeated notifications" for unstable IoT devices. |
|
||||||
|
| **Plugin** | Tune Watchers | Only enable `_WATCH` on reliable plugins (e.g., ICMP/SNMP). |
|
||||||
|
| **Global** | Filter Sections | Limit `NTFPRCS_INCLUDED_SECTIONS` to `new_devices` and `down_devices`. |
|
||||||
|
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> **Ignore Rules:** Maintain strict **Ignored MAC** (`NEWDEV_ignored_MACs`) and **Ignored IP** (`NEWDEV_ignored_IPs`) lists for guest networks or broadcast scanners to keep your logs clean.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4. UI Filters for Multi-Network Clarity
|
||||||
|
|
||||||
|
Don't let a massive device list overwhelm you. Use the [Multi-edit features](./DEVICES_BULK_EDITING.md) to categorize devices and create focused views:
|
||||||
|
|
||||||
|
* **By Zone:** Filter by "Location", "Site" or "Sync Node" you set up in Section 2.
|
||||||
|
* **By Criticality:** Use the custom device Type field to separate "Core Infrastructure" from "Ephemeral Clients."
|
||||||
|
* **By Status:** Use predefined views specifically for "Devices currently Down" to act as a Network Operations Center (NOC) dashboard.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> If you are providing services as a Managed Service Provider (MSP), customize your default UI to be exactly how you need it, by hiding parts of the UI that you are not interested in, or by configuring an auto-refreshed screen monitoring your most important clients. See the [Eyes on glass](./ADVISORY_EYES_ON_GLASS.md) advisory for more details.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 5. Operational Stability & Sync Health
|
||||||
|
|
||||||
|
* **Health Checks:** Regularly monitor the [Logs](https://docs.netalertx.com/LOGGING/?h=logs) to ensure remote nodes are reporting in.
|
||||||
|
* **Backups:** Use the **CSV Devices Backup** plugin. Standardize your workflow templates and [back up](./BACKUPS.md) your `/config` folders so that if a node fails, you can redeploy it with the same logic instantly.
|
||||||
|
|
||||||
|
|
||||||
|
### 6. Optimize Performance
|
||||||
|
|
||||||
|
As your environment grows, tuning the underlying engine is vital to maintain a snappy UI and reliable discovery cycles.
|
||||||
|
|
||||||
|
* **Plugin Scheduling:** Avoid "Scan Storms" by staggering plugin execution. Running intensive tasks like `NMAP` or `MASS_DNS` simultaneously can spike CPU and cause database locks.
|
||||||
|
* **Database Health:** Large-scale monitoring generates massive event logs. Use the **[DBCLNP (Database Cleanup)](https://docs.netalertx.com/PLUGINS/#dbclnp)** plugin to prune old records and keep the SQLite database performant.
|
||||||
|
* **Resource Management:** For high-device counts, consider increasing the memory limit for the container and utilizing `tmpfs` for temporary files to reduce SD card/disk I/O bottlenecks.
|
||||||
|
|
||||||
|
> [!IMPORTANT]
|
||||||
|
> For a deep dive into hardware requirements, database vacuuming, and specific environment variables for high-load instances, refer to the full **[Performance Optimization Guide](https://docs.netalertx.com/PERFORMANCE/)**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Summary Checklist
|
||||||
|
|
||||||
|
* [ ] **Discovery:** Are all subnets explicitly defined?
|
||||||
|
* [ ] **Automation:** Do new devices get auto-assigned to a VLAN/Owner?
|
||||||
|
* [ ] **Noise Control:** Are transient "Down" alerts delayed via `NTFPRCS_alert_down_time`?
|
||||||
|
* [ ] **Remote Sites:** Is the SYNC plugin authenticated and heartbeat-active?
|
||||||
29
docs/API.md
29
docs/API.md
@@ -23,6 +23,8 @@ curl 'http://host:GRAPHQL_PORT/graphql' \
|
|||||||
|
|
||||||
The API server runs on `0.0.0.0:<graphql_port>` with **CORS enabled** for all main endpoints.
|
The API server runs on `0.0.0.0:<graphql_port>` with **CORS enabled** for all main endpoints.
|
||||||
|
|
||||||
|
CORS configuration: You can limit allowed CORS origins with the `CORS_ORIGINS` environment variable. Set it to a comma-separated list of origins (for example: `CORS_ORIGINS="https://example.com,http://localhost:3000"`). The server parses this list at startup and only allows origins that begin with `http://` or `https://`. If `CORS_ORIGINS` is unset or parses to an empty list, the API falls back to a safe development default list (localhosts) and will include `*` as a last-resort permissive origin.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Authentication
|
## Authentication
|
||||||
@@ -36,9 +38,15 @@ Authorization: Bearer <API_TOKEN>
|
|||||||
If the token is missing or invalid, the server will return:
|
If the token is missing or invalid, the server will return:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{ "error": "Forbidden" }
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "ERROR: Not authorized",
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
HTTP Status: **403 Forbidden**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Base URL
|
## Base URL
|
||||||
@@ -51,9 +59,15 @@ http://<server>:<GRAPHQL_PORT>/
|
|||||||
|
|
||||||
## Endpoints
|
## Endpoints
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> You can explore the API endpoints by using the interactive API docs at `http://<server>:<GRAPHQL_PORT>/docs`.
|
||||||
|
> 
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> When retrieving devices or settings try using the GraphQL API endpoint first as it is read-optimized.
|
> When retrieving devices or settings try using the GraphQL API endpoint first as it is read-optimized.
|
||||||
|
|
||||||
|
### Standard REST Endpoints
|
||||||
|
|
||||||
* [Device API Endpoints](API_DEVICE.md) – Manage individual devices
|
* [Device API Endpoints](API_DEVICE.md) – Manage individual devices
|
||||||
* [Devices Collection](API_DEVICES.md) – Bulk operations on multiple devices
|
* [Devices Collection](API_DEVICES.md) – Bulk operations on multiple devices
|
||||||
* [Events](API_EVENTS.md) – Device event logging and management
|
* [Events](API_EVENTS.md) – Device event logging and management
|
||||||
@@ -68,6 +82,19 @@ http://<server>:<GRAPHQL_PORT>/
|
|||||||
* [Sync](API_SYNC.md) – Synchronization between multiple NetAlertX instances
|
* [Sync](API_SYNC.md) – Synchronization between multiple NetAlertX instances
|
||||||
* [Logs](API_LOGS.md) – Purging of logs and adding to the event execution queue for user triggered events
|
* [Logs](API_LOGS.md) – Purging of logs and adding to the event execution queue for user triggered events
|
||||||
* [DB query](API_DBQUERY.md) (⚠ Internal) - Low level database access - use other endpoints if possible
|
* [DB query](API_DBQUERY.md) (⚠ Internal) - Low level database access - use other endpoints if possible
|
||||||
|
* `/server` (⚠ Internal) - Backend server endpoint for internal communication only - **do not use directly**
|
||||||
|
|
||||||
|
### MCP Server Bridge
|
||||||
|
|
||||||
|
NetAlertX includes an **MCP (Model Context Protocol) Server Bridge** that provides AI assistants access to NetAlertX functionality through standardized tools. MCP endpoints are available at `/mcp/sse/*` paths and mirror the functionality of standard REST endpoints:
|
||||||
|
|
||||||
|
* `/mcp/sse` - Server-Sent Events endpoint for MCP client connections
|
||||||
|
* `/mcp/sse/openapi.json` - OpenAPI specification for available MCP tools
|
||||||
|
* `/mcp/sse/device/*`, `/mcp/sse/devices/*`, `/mcp/sse/nettools/*`, `/mcp/sse/events/*` - MCP-enabled versions of REST endpoints
|
||||||
|
|
||||||
|
MCP endpoints require the same Bearer token authentication as REST endpoints.
|
||||||
|
|
||||||
|
**📖 See [MCP Server Bridge API](API_MCP.md) for complete documentation, tool specifications, and integration examples.**
|
||||||
|
|
||||||
See [Testing](API_TESTS.md) for example requests and usage.
|
See [Testing](API_TESTS.md) for example requests and usage.
|
||||||
|
|
||||||
|
|||||||
@@ -16,10 +16,14 @@ All `/dbquery/*` endpoints require an API token in the HTTP headers:
|
|||||||
Authorization: Bearer <API_TOKEN>
|
Authorization: Bearer <API_TOKEN>
|
||||||
```
|
```
|
||||||
|
|
||||||
If the token is missing or invalid:
|
If the token is missing or invalid (HTTP 403):
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{ "error": "Forbidden" }
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "ERROR: Not authorized",
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|||||||
@@ -41,6 +41,8 @@ Manage a **single device** by its MAC address. Operations include retrieval, upd
|
|||||||
* Device not found → HTTP 404
|
* Device not found → HTTP 404
|
||||||
* Unauthorized → HTTP 403
|
* Unauthorized → HTTP 403
|
||||||
|
|
||||||
|
**MCP Integration**: Available as `get_device_info` and `set_device_alias` tools. See [MCP Server Bridge API](API_MCP.md).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 2. Update Device Fields
|
## 2. Update Device Fields
|
||||||
|
|||||||
@@ -207,6 +207,93 @@ The Devices Collection API provides operations to **retrieve, manage, import/exp
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
### 9. Search Devices
|
||||||
|
|
||||||
|
* **POST** `/devices/search`
|
||||||
|
Search for devices by MAC, name, or IP address.
|
||||||
|
|
||||||
|
**Request Body** (JSON):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query": ".50"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"devices": [
|
||||||
|
{
|
||||||
|
"devName": "Test Device",
|
||||||
|
"devMac": "AA:BB:CC:DD:EE:FF",
|
||||||
|
"devLastIP": "192.168.1.50"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 10. Get Latest Device
|
||||||
|
|
||||||
|
* **GET** `/devices/latest`
|
||||||
|
Get the most recently connected device.
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"devName": "Latest Device",
|
||||||
|
"devMac": "AA:BB:CC:DD:EE:FF",
|
||||||
|
"devLastIP": "192.168.1.100",
|
||||||
|
"devFirstConnection": "2025-12-07 10:30:00"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 11. Get Network Topology
|
||||||
|
|
||||||
|
* **GET** `/devices/network/topology`
|
||||||
|
Get network topology showing device relationships.
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "AA:AA:AA:AA:AA:AA",
|
||||||
|
"name": "Router",
|
||||||
|
"vendor": "VendorA"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"links": [
|
||||||
|
{
|
||||||
|
"source": "AA:AA:AA:AA:AA:AA",
|
||||||
|
"target": "BB:BB:BB:BB:BB:BB",
|
||||||
|
"port": "eth1"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MCP Tools
|
||||||
|
|
||||||
|
These endpoints are also available as **MCP Tools** for AI assistant integration:
|
||||||
|
- `list_devices`, `search_devices`, `get_latest_device`, `get_network_topology`, `set_device_alias`
|
||||||
|
|
||||||
|
📖 See [MCP Server Bridge API](API_MCP.md) for AI integration details.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Example `curl` Requests
|
## Example `curl` Requests
|
||||||
|
|
||||||
**Get All Devices**:
|
**Get All Devices**:
|
||||||
@@ -247,3 +334,26 @@ curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/devices/by-status?status=online"
|
|||||||
-H "Authorization: Bearer <API_TOKEN>"
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Search Devices**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/devices/search" \
|
||||||
|
-H "Authorization: Bearer <API_TOKEN>" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
--data '{"query": "192.168.1"}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Get Latest Device**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/devices/latest" \
|
||||||
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Get Network Topology**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/devices/network/topology" \
|
||||||
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
|
```
|
||||||
|
|
||||||
|
|||||||
157
docs/API_DEVICE_FIELD_LOCK.md
Normal file
157
docs/API_DEVICE_FIELD_LOCK.md
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
# Device Field Lock/Unlock API
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Device Field Lock/Unlock feature allows users to lock specific device fields to prevent plugin overwrites. This is part of the authoritative device field update system that ensures data integrity while maintaining flexibility for user customization.
|
||||||
|
|
||||||
|
## Concepts
|
||||||
|
|
||||||
|
### Tracked Fields
|
||||||
|
|
||||||
|
Only certain device fields support locking. These are the fields that can be modified by both plugins and users:
|
||||||
|
|
||||||
|
- `devName` - Device name/hostname
|
||||||
|
- `devVendor` - Device vendor/manufacturer
|
||||||
|
- `devFQDN` - Fully qualified domain name
|
||||||
|
- `devSSID` - Network SSID
|
||||||
|
- `devParentMAC` - Parent device MAC address
|
||||||
|
- `devParentPort` - Parent device port
|
||||||
|
- `devParentRelType` - Parent device relationship type
|
||||||
|
- `devVlan` - VLAN identifier
|
||||||
|
|
||||||
|
### Field Source Tracking
|
||||||
|
|
||||||
|
Every tracked field has an associated `*Source` field that indicates where the current value originated:
|
||||||
|
|
||||||
|
- `NEWDEV` - Created via the UI as a new device
|
||||||
|
- `USER` - Manually edited by a user
|
||||||
|
- `LOCKED` - Field is locked; prevents any plugin overwrites
|
||||||
|
- Plugin name (e.g., `UNIFIAPI`, `PIHOLE`) - Last updated by this plugin
|
||||||
|
|
||||||
|
### Locking Mechanism
|
||||||
|
|
||||||
|
When a field is **locked**, its source is set to `LOCKED`. This prevents plugin overwrites based on the authorization logic:
|
||||||
|
|
||||||
|
1. Plugin wants to update field
|
||||||
|
2. Authoritative handler checks field's `*Source` value
|
||||||
|
3. If `*Source` == `LOCKED`, plugin update is rejected
|
||||||
|
4. User can still manually unlock the field
|
||||||
|
|
||||||
|
When a field is **unlocked**, its source is set to `NEWDEV`, allowing plugins to resume updates.
|
||||||
|
|
||||||
|
## Endpoints
|
||||||
|
|
||||||
|
### Lock or Unlock a Field
|
||||||
|
|
||||||
|
```
|
||||||
|
POST /device/{mac}/field/lock
|
||||||
|
Authorization: Bearer {API_TOKEN}
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
|
{
|
||||||
|
"fieldName": "devName",
|
||||||
|
"lock": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
- `mac` (path, required): Device MAC address (e.g., `AA:BB:CC:DD:EE:FF`)
|
||||||
|
- `fieldName` (body, required): Name of the field to lock/unlock. Must be one of the tracked fields listed above.
|
||||||
|
- `lock` (body, required): Boolean. `true` to lock, `false` to unlock.
|
||||||
|
|
||||||
|
#### Responses
|
||||||
|
|
||||||
|
**Success (200)**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"message": "Field devName locked",
|
||||||
|
"fieldName": "devName",
|
||||||
|
"locked": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Bad Request (400)**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "fieldName is required"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "Field 'devInvalidField' cannot be locked"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Unauthorized (403)**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "Unauthorized"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Not Found (404)**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "Device not found"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Lock a Device Name
|
||||||
|
Prevent the device name from being overwritten by plugins:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -X POST https://your-netalertx.local/api/device/AA:BB:CC:DD:EE:FF/field/lock \
|
||||||
|
-H "Authorization: Bearer your-api-token" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"fieldName": "devName",
|
||||||
|
"lock": true
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Unlock a Field
|
||||||
|
Allow plugins to resume updating a field:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -X POST https://your-netalertx.local/api/device/AA:BB:CC:DD:EE:FF/field/lock \
|
||||||
|
-H "Authorization: Bearer your-api-token" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"fieldName": "devName",
|
||||||
|
"lock": false
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
## UI Integration
|
||||||
|
|
||||||
|
The Device Edit form displays lock/unlock buttons for all tracked fields:
|
||||||
|
|
||||||
|
1. **Lock Button** (🔒): Click to prevent plugin overwrites
|
||||||
|
2. **Unlock Button** (🔓): Click to allow plugin overwrites again
|
||||||
|
3. **Source Indicator**: Shows current field source (USER, LOCKED, NEWDEV, or plugin name)
|
||||||
|
|
||||||
|
|
||||||
|
### Authorization Handler
|
||||||
|
|
||||||
|
The authoritative field update logic prevents plugin overwrites:
|
||||||
|
|
||||||
|
1. Plugin provides new value for field via plugin config `SET_ALWAYS`/`SET_EMPTY`
|
||||||
|
2. Authoritative handler (in DeviceInstance) checks `{field}Source` value
|
||||||
|
3. If source is `LOCKED` or `USER`, plugin update is rejected
|
||||||
|
4. If source is `NEWDEV` or plugin name, plugin update is accepted
|
||||||
|
|
||||||
|
## See Also
|
||||||
|
|
||||||
|
- [Device locking](./DEVICE_FIELD_LOCK.md)
|
||||||
|
- [Device source fields](./DEVICE_SOURCE_FIELDS.md)
|
||||||
|
- [API Device Endpoints Documentation](./API_DEVICE.md)
|
||||||
|
- [Authoritative Field Updates System](./PLUGINS_DEV.md#authoritative-fields)
|
||||||
|
- [Plugin Configuration Reference](./PLUGINS_DEV_CONFIG.md)
|
||||||
@@ -58,12 +58,12 @@ The Events API provides access to **device event logs**, allowing creation, retr
|
|||||||
"success": true,
|
"success": true,
|
||||||
"events": [
|
"events": [
|
||||||
{
|
{
|
||||||
"eve_MAC": "00:11:22:33:44:55",
|
"eveMac": "00:11:22:33:44:55",
|
||||||
"eve_IP": "192.168.1.10",
|
"eveIp": "192.168.1.10",
|
||||||
"eve_DateTime": "2025-08-24T12:00:00Z",
|
"eveDateTime": "2025-08-24T12:00:00Z",
|
||||||
"eve_EventType": "Device Down",
|
"eveEventType": "Device Down",
|
||||||
"eve_AdditionalInfo": "",
|
"eveAdditionalInfo": "",
|
||||||
"eve_PendingAlertEmail": 1
|
"evePendingAlertEmail": 1
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -88,7 +88,56 @@ The Events API provides access to **device event logs**, allowing creation, retr
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 4. Event Totals Over a Period
|
### 4. Get Recent Events
|
||||||
|
|
||||||
|
* **GET** `/events/recent` → Get events from the last 24 hours
|
||||||
|
* **GET** `/events/<hours>` → Get events from the last N hours
|
||||||
|
|
||||||
|
**Response** (JSON):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"hours": 24,
|
||||||
|
"count": 5,
|
||||||
|
"events": [
|
||||||
|
{
|
||||||
|
"eveDateTime": "2025-12-07 12:00:00",
|
||||||
|
"eveEventType": "New Device",
|
||||||
|
"eveMac": "AA:BB:CC:DD:EE:FF",
|
||||||
|
"eveIp": "192.168.1.100",
|
||||||
|
"eveAdditionalInfo": "Device detected"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 5. Get Latest Events
|
||||||
|
|
||||||
|
* **GET** `/events/last`
|
||||||
|
Get the 10 most recent events.
|
||||||
|
|
||||||
|
**Response** (JSON):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"count": 10,
|
||||||
|
"events": [
|
||||||
|
{
|
||||||
|
"eveDateTime": "2025-12-07 12:00:00",
|
||||||
|
"eveEventType": "Device Down",
|
||||||
|
"eveMac": "AA:BB:CC:DD:EE:FF"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 6. Event Totals Over a Period
|
||||||
|
|
||||||
* **GET** `/sessions/totals?period=<period>`
|
* **GET** `/sessions/totals?period=<period>`
|
||||||
Return event and session totals over a given period.
|
Return event and session totals over a given period.
|
||||||
@@ -110,22 +159,35 @@ The Events API provides access to **device event logs**, allowing creation, retr
|
|||||||
1. Total events in the period
|
1. Total events in the period
|
||||||
2. Total sessions
|
2. Total sessions
|
||||||
3. Missing sessions
|
3. Missing sessions
|
||||||
4. Voided events (`eve_EventType LIKE 'VOIDED%'`)
|
4. Voided events (`eveEventType LIKE 'VOIDED%'`)
|
||||||
5. New device events (`eve_EventType LIKE 'New Device'`)
|
5. New device events (`eveEventType LIKE 'New Device'`)
|
||||||
6. Device down events (`eve_EventType LIKE 'Device Down'`)
|
6. Device down events (`eveEventType LIKE 'Device Down'`)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MCP Tools
|
||||||
|
|
||||||
|
Event endpoints are available as **MCP Tools** for AI assistant integration:
|
||||||
|
- `get_recent_alerts`, `get_last_events`
|
||||||
|
|
||||||
|
📖 See [MCP Server Bridge API](API_MCP.md) for AI integration details.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
|
|
||||||
* All endpoints require **authorization** (Bearer token). Unauthorized requests return:
|
* All endpoints require **authorization** (Bearer token). Unauthorized requests return HTTP 403:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{ "error": "Forbidden" }
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "ERROR: Not authorized",
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
* Events are stored in the **Events table** with the following fields:
|
* Events are stored in the **Events table** with the following fields:
|
||||||
`eve_MAC`, `eve_IP`, `eve_DateTime`, `eve_EventType`, `eve_AdditionalInfo`, `eve_PendingAlertEmail`.
|
`eveMac`, `eveIp`, `eveDateTime`, `eveEventType`, `eveAdditionalInfo`, `evePendingAlertEmail`.
|
||||||
|
|
||||||
* Event creation automatically logs activity for debugging.
|
* Event creation automatically logs activity for debugging.
|
||||||
|
|
||||||
|
|||||||
@@ -4,6 +4,10 @@ GraphQL queries are **read-optimized for speed**. Data may be slightly out of da
|
|||||||
|
|
||||||
* Devices
|
* Devices
|
||||||
* Settings
|
* Settings
|
||||||
|
* Events
|
||||||
|
* PluginsObjects
|
||||||
|
* PluginsHistory
|
||||||
|
* PluginsEvents
|
||||||
* Language Strings (LangStrings)
|
* Language Strings (LangStrings)
|
||||||
|
|
||||||
## Endpoints
|
## Endpoints
|
||||||
@@ -254,11 +258,160 @@ curl 'http://host:GRAPHQL_PORT/graphql' \
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Plugin Tables (Objects, Events, History)
|
||||||
|
|
||||||
|
Three queries expose the plugin database tables with server-side pagination, filtering, and search:
|
||||||
|
|
||||||
|
* `pluginsObjects` — current plugin object state
|
||||||
|
* `pluginsEvents` — unprocessed plugin events
|
||||||
|
* `pluginsHistory` — historical plugin event log
|
||||||
|
|
||||||
|
All three share the same `PluginQueryOptionsInput` and return the same `PluginEntry` shape.
|
||||||
|
|
||||||
|
### Sample Query
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query GetPluginObjects($options: PluginQueryOptionsInput) {
|
||||||
|
pluginsObjects(options: $options) {
|
||||||
|
dbCount
|
||||||
|
count
|
||||||
|
entries {
|
||||||
|
index plugin objectPrimaryId objectSecondaryId
|
||||||
|
dateTimeCreated dateTimeChanged
|
||||||
|
watchedValue1 watchedValue2 watchedValue3 watchedValue4
|
||||||
|
status extra userData foreignKey
|
||||||
|
syncHubNodeName helpVal1 helpVal2 helpVal3 helpVal4 objectGuid
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Query Parameters (`PluginQueryOptionsInput`)
|
||||||
|
|
||||||
|
| Parameter | Type | Description |
|
||||||
|
| ------------ | ----------------- | ------------------------------------------------------ |
|
||||||
|
| `page` | Int | Page number (1-based). |
|
||||||
|
| `limit` | Int | Rows per page (max 1000). |
|
||||||
|
| `sort` | [SortOptionsInput] | Sorting options (`field`, `order`). |
|
||||||
|
| `search` | String | Free-text search across key columns. |
|
||||||
|
| `filters` | [FilterOptionsInput] | Column-value exact-match filters. |
|
||||||
|
| `plugin` | String | Plugin prefix to scope results (e.g. `"ARPSCAN"`). |
|
||||||
|
| `foreignKey` | String | Foreign key filter (e.g. device MAC). |
|
||||||
|
| `dateFrom` | String | Start of date range filter on `dateTimeCreated`. |
|
||||||
|
| `dateTo` | String | End of date range filter on `dateTimeCreated`. |
|
||||||
|
|
||||||
|
### Response Fields
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
| --------- | ------------- | ------------------------------------------------------------- |
|
||||||
|
| `dbCount` | Int | Total rows for the requested plugin (before search/filters). |
|
||||||
|
| `count` | Int | Total rows after all filters (before pagination). |
|
||||||
|
| `entries` | [PluginEntry] | Paginated list of plugin entries. |
|
||||||
|
|
||||||
|
### `curl` Example
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl 'http://host:GRAPHQL_PORT/graphql' \
|
||||||
|
-X POST \
|
||||||
|
-H 'Authorization: Bearer API_TOKEN' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
--data '{
|
||||||
|
"query": "query GetPluginObjects($options: PluginQueryOptionsInput) { pluginsObjects(options: $options) { dbCount count entries { index plugin objectPrimaryId status foreignKey } } }",
|
||||||
|
"variables": {
|
||||||
|
"options": {
|
||||||
|
"plugin": "ARPSCAN",
|
||||||
|
"page": 1,
|
||||||
|
"limit": 25
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Badge Prefetch (Batched Counts)
|
||||||
|
|
||||||
|
Use GraphQL aliases to fetch counts for all plugins in a single request:
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query BadgeCounts {
|
||||||
|
ARPSCAN: pluginsObjects(options: {plugin: "ARPSCAN", page: 1, limit: 1}) { dbCount }
|
||||||
|
INTRNT: pluginsObjects(options: {plugin: "INTRNT", page: 1, limit: 1}) { dbCount }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Events Query
|
||||||
|
|
||||||
|
Access the Events table with server-side pagination, filtering, and search.
|
||||||
|
|
||||||
|
### Sample Query
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query GetEvents($options: EventQueryOptionsInput) {
|
||||||
|
events(options: $options) {
|
||||||
|
dbCount
|
||||||
|
count
|
||||||
|
entries {
|
||||||
|
eveMac
|
||||||
|
eveIp
|
||||||
|
eveDateTime
|
||||||
|
eveEventType
|
||||||
|
eveAdditionalInfo
|
||||||
|
evePendingAlertEmail
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Query Parameters (`EventQueryOptionsInput`)
|
||||||
|
|
||||||
|
| Parameter | Type | Description |
|
||||||
|
| ----------- | ------------------ | ------------------------------------------------ |
|
||||||
|
| `page` | Int | Page number (1-based). |
|
||||||
|
| `limit` | Int | Rows per page (max 1000). |
|
||||||
|
| `sort` | [SortOptionsInput] | Sorting options (`field`, `order`). |
|
||||||
|
| `search` | String | Free-text search across key columns. |
|
||||||
|
| `filters` | [FilterOptionsInput] | Column-value exact-match filters. |
|
||||||
|
| `eveMac` | String | Filter by device MAC address. |
|
||||||
|
| `eventType` | String | Filter by event type (e.g. `"New Device"`). |
|
||||||
|
| `dateFrom` | String | Start of date range filter on `eveDateTime`. |
|
||||||
|
| `dateTo` | String | End of date range filter on `eveDateTime`. |
|
||||||
|
|
||||||
|
### Response Fields
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
| --------- | ------------ | ------------------------------------------------------------ |
|
||||||
|
| `dbCount` | Int | Total rows in the Events table (before any filters). |
|
||||||
|
| `count` | Int | Total rows after all filters (before pagination). |
|
||||||
|
| `entries` | [EventEntry] | Paginated list of event entries. |
|
||||||
|
|
||||||
|
### `curl` Example
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl 'http://host:GRAPHQL_PORT/graphql' \
|
||||||
|
-X POST \
|
||||||
|
-H 'Authorization: Bearer API_TOKEN' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
--data '{
|
||||||
|
"query": "query GetEvents($options: EventQueryOptionsInput) { events(options: $options) { dbCount count entries { eveMac eveIp eveDateTime eveEventType } } }",
|
||||||
|
"variables": {
|
||||||
|
"options": {
|
||||||
|
"eveMac": "00:11:22:33:44:55",
|
||||||
|
"page": 1,
|
||||||
|
"limit": 50
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
|
|
||||||
* Device, settings, and LangStrings queries can be combined in **one request** since GraphQL supports batching.
|
* Device, settings, LangStrings, plugin, and event queries can be combined in **one request** since GraphQL supports batching.
|
||||||
* The `fallback_to_en` feature ensures UI always has a value even if a translation is missing.
|
* The `fallback_to_en` feature ensures UI always has a value even if a translation is missing.
|
||||||
* Data is **cached in memory** per JSON file; changes to language or plugin files will only refresh after the cache detects a file modification.
|
* Data is **cached in memory** per JSON file; changes to language or plugin files will only refresh after the cache detects a file modification.
|
||||||
* The `setOverriddenByEnv` flag helps identify setting values that are locked at container runtime.
|
* The `setOverriddenByEnv` flag helps identify setting values that are locked at container runtime.
|
||||||
|
* Plugin queries scope `dbCount` to the requested `plugin`/`foreignKey` so badge counts reflect per-plugin totals.
|
||||||
* The schema is **read-only** — updates must be performed through other APIs or configuration management. See the other [API](API.md) endpoints for details.
|
* The schema is **read-only** — updates must be performed through other APIs or configuration management. See the other [API](API.md) endpoints for details.
|
||||||
|
|
||||||
|
|||||||
@@ -18,7 +18,6 @@ Only specific, pre-approved log files can be purged for security and stability r
|
|||||||
|
|
||||||
```
|
```
|
||||||
app.log
|
app.log
|
||||||
app_front.log
|
|
||||||
IP_changes.log
|
IP_changes.log
|
||||||
stdout.log
|
stdout.log
|
||||||
stderr.log
|
stderr.log
|
||||||
|
|||||||
405
docs/API_MCP.md
Normal file
405
docs/API_MCP.md
Normal file
@@ -0,0 +1,405 @@
|
|||||||
|
# MCP Server Bridge API
|
||||||
|
|
||||||
|
The **MCP (Model Context Protocol) Server Bridge** provides AI assistants with standardized access to NetAlertX functionality through tools and server-sent events. This enables AI systems to interact with your network monitoring data in real-time.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The MCP Server Bridge exposes NetAlertX functionality as **MCP Tools** that AI assistants can call to:
|
||||||
|
|
||||||
|
- Search and retrieve device information
|
||||||
|
- Trigger network scans
|
||||||
|
- Get network topology and events
|
||||||
|
- Wake devices via Wake-on-LAN
|
||||||
|
- Access open port information
|
||||||
|
- Set device aliases
|
||||||
|
|
||||||
|
All MCP endpoints mirror the functionality of standard REST endpoints but are optimized for AI assistant integration.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Architecture Overview
|
||||||
|
|
||||||
|
### MCP Connection Flow
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph TB
|
||||||
|
A[AI Assistant<br/>Claude Desktop] -->|SSE Connection| B[NetAlertX MCP Server<br/>:20212/mcp/sse]
|
||||||
|
B -->|JSON-RPC Messages| C[MCP Bridge<br/>api_server_start.py]
|
||||||
|
C -->|Tool Calls| D[NetAlertX Tools<br/>Device/Network APIs]
|
||||||
|
D -->|Response Data| C
|
||||||
|
C -->|JSON Response| B
|
||||||
|
B -->|Stream Events| A
|
||||||
|
```
|
||||||
|
|
||||||
|
### MCP Tool Integration
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant AI as AI Assistant
|
||||||
|
participant MCP as MCP Server (:20212)
|
||||||
|
participant API as NetAlertX API (:20211)
|
||||||
|
participant DB as SQLite Database
|
||||||
|
|
||||||
|
AI->>MCP: 1. Connect via SSE
|
||||||
|
MCP-->>AI: 2. Session established
|
||||||
|
AI->>MCP: 3. tools/list request
|
||||||
|
MCP->>API: 4. GET /mcp/sse/openapi.json
|
||||||
|
API-->>MCP: 5. Available tools spec
|
||||||
|
MCP-->>AI: 6. Tool definitions
|
||||||
|
AI->>MCP: 7. tools/call: search_devices
|
||||||
|
MCP->>API: 8. POST /devices/search
|
||||||
|
API->>DB: 9. Query devices
|
||||||
|
DB-->>API: 10. Device data
|
||||||
|
API-->>MCP: 11. JSON response
|
||||||
|
MCP-->>AI: 12. Tool result
|
||||||
|
```
|
||||||
|
|
||||||
|
### Component Architecture
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph LR
|
||||||
|
subgraph "AI Client"
|
||||||
|
A[Claude Desktop]
|
||||||
|
B[Custom MCP Client]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "NetAlertX MCP Server (:20212)"
|
||||||
|
C[SSE Endpoint<br/>/mcp/sse]
|
||||||
|
D[Message Handler<br/>/mcp/messages]
|
||||||
|
E[OpenAPI Spec<br/>/mcp/sse/openapi.json]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "NetAlertX API Server (:20211)"
|
||||||
|
F[Device APIs<br/>/devices/*]
|
||||||
|
G[Network Tools<br/>/nettools/*]
|
||||||
|
H[Events API<br/>/events/*]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "Backend"
|
||||||
|
I[SQLite Database]
|
||||||
|
J[Network Scanners]
|
||||||
|
K[Plugin System]
|
||||||
|
end
|
||||||
|
|
||||||
|
A -.->|Bearer Auth| C
|
||||||
|
B -.->|Bearer Auth| C
|
||||||
|
C --> D
|
||||||
|
C --> E
|
||||||
|
D --> F
|
||||||
|
D --> G
|
||||||
|
D --> H
|
||||||
|
F --> I
|
||||||
|
G --> J
|
||||||
|
H --> I
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Authentication
|
||||||
|
|
||||||
|
MCP endpoints use the same **Bearer token authentication** as REST endpoints:
|
||||||
|
|
||||||
|
```http
|
||||||
|
Authorization: Bearer <API_TOKEN>
|
||||||
|
```
|
||||||
|
|
||||||
|
Unauthorized requests return HTTP 403:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "ERROR: Not authorized",
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MCP Connection Endpoint
|
||||||
|
|
||||||
|
### Server-Sent Events (SSE)
|
||||||
|
|
||||||
|
* **GET/POST** `/mcp/sse`
|
||||||
|
|
||||||
|
Main MCP connection endpoint for AI clients. Establishes a persistent connection using Server-Sent Events for real-time communication between AI assistants and NetAlertX.
|
||||||
|
|
||||||
|
**Connection Example**:
|
||||||
|
|
||||||
|
```javascript
// NOTE: the browser-native EventSource API does not support custom
// request headers, so the Bearer token must be sent via fetch() with a
// streaming reader (or an EventSource polyfill that accepts headers).
const response = await fetch('/mcp/sse', {
  headers: {
    'Authorization': 'Bearer <API_TOKEN>'
  }
});

const reader = response.body
  .pipeThrough(new TextDecoderStream())
  .getReader();

while (true) {
  const { value, done } = await reader.read();
  if (done) break;
  console.log('MCP Response:', value);
}
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## OpenAPI Specification
|
||||||
|
|
||||||
|
### Get MCP Tools Specification
|
||||||
|
|
||||||
|
* **GET** `/mcp/sse/openapi.json`
|
||||||
|
|
||||||
|
Returns the OpenAPI specification for all available MCP tools, describing the parameters and schemas for each tool.
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"openapi": "3.0.0",
|
||||||
|
"info": {
|
||||||
|
"title": "NetAlertX Tools",
|
||||||
|
"version": "1.1.0"
|
||||||
|
},
|
||||||
|
"servers": [{"url": "/"}],
|
||||||
|
"paths": {
|
||||||
|
"/devices/by-status": {
|
||||||
|
"post": {"operationId": "list_devices"}
|
||||||
|
},
|
||||||
|
"/device/{mac}": {
|
||||||
|
"post": {"operationId": "get_device_info"}
|
||||||
|
},
|
||||||
|
"/devices/search": {
|
||||||
|
"post": {"operationId": "search_devices"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Available MCP Tools
|
||||||
|
|
||||||
|
### Device Management Tools
|
||||||
|
|
||||||
|
| Tool | Endpoint | Description |
|
||||||
|
|------|----------|-------------|
|
||||||
|
| `list_devices` | `/devices/by-status` | List devices by online status |
|
||||||
|
| `get_device_info` | `/device/{mac}` | Get detailed device information |
|
||||||
|
| `search_devices` | `/devices/search` | Search devices by MAC, name, or IP |
|
||||||
|
| `get_latest_device` | `/devices/latest` | Get most recently connected device |
|
||||||
|
| `set_device_alias` | `/device/{mac}/set-alias` | Set device friendly name |
|
||||||
|
|
||||||
|
### Network Tools
|
||||||
|
|
||||||
|
| Tool | Endpoint | Description |
|
||||||
|
|------|----------|-------------|
|
||||||
|
| `trigger_scan` | `/nettools/trigger-scan` | Trigger network discovery scan to find new devices. |
|
||||||
|
| `run_nmap_scan` | `/nettools/nmap` | Perform NMAP scan on a target to identify open ports. |
|
||||||
|
| `get_open_ports` | `/device/open_ports` | Get stored NMAP open ports. Use `run_nmap_scan` first if empty. |
|
||||||
|
| `wol_wake_device` | `/nettools/wakeonlan` | Wake device using Wake-on-LAN |
|
||||||
|
| `get_network_topology` | `/devices/network/topology` | Get network topology map |
|
||||||
|
|
||||||
|
### Event & Monitoring Tools
|
||||||
|
|
||||||
|
| Tool | Endpoint | Description |
|
||||||
|
|------|----------|-------------|
|
||||||
|
| `get_recent_alerts` | `/events/recent` | Get events from last 24 hours |
|
||||||
|
| `get_last_events` | `/events/last` | Get 10 most recent events |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Tool Usage Examples
|
||||||
|
|
||||||
|
### Search Devices Tool
|
||||||
|
|
||||||
|
**Tool Call**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "1",
|
||||||
|
"method": "tools/call",
|
||||||
|
"params": {
|
||||||
|
"name": "search_devices",
|
||||||
|
"arguments": {
|
||||||
|
"query": "192.168.1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "1",
|
||||||
|
"result": {
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "{\n \"success\": true,\n \"devices\": [\n {\n \"devName\": \"Router\",\n \"devMac\": \"AA:BB:CC:DD:EE:FF\",\n \"devLastIP\": \"192.168.1.1\"\n }\n ]\n}"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"isError": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Trigger Network Scan Tool
|
||||||
|
|
||||||
|
**Tool Call**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "2",
|
||||||
|
"method": "tools/call",
|
||||||
|
"params": {
|
||||||
|
"name": "trigger_scan",
|
||||||
|
"arguments": {
|
||||||
|
"type": "ARPSCAN"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "2",
|
||||||
|
"result": {
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "{\n \"success\": true,\n \"message\": \"Scan triggered for type: ARPSCAN\"\n}"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"isError": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Wake-on-LAN Tool
|
||||||
|
|
||||||
|
**Tool Call**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "3",
|
||||||
|
"method": "tools/call",
|
||||||
|
"params": {
|
||||||
|
"name": "wol_wake_device",
|
||||||
|
"arguments": {
|
||||||
|
"devMac": "AA:BB:CC:DD:EE:FF"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Integration with AI Assistants
|
||||||
|
|
||||||
|
### Claude Desktop Integration
|
||||||
|
|
||||||
|
Add to your Claude Desktop configuration file (`claude_desktop_config.json`):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcp": {
|
||||||
|
"servers": {
|
||||||
|
"netalertx": {
|
||||||
|
"command": "node",
|
||||||
|
"args": ["/path/to/mcp-client.js"],
|
||||||
|
"env": {
|
||||||
|
"NETALERTX_URL": "http://your-server:<GRAPHQL_PORT>",
|
||||||
|
"NETALERTX_TOKEN": "your-api-token"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Generic MCP Client
|
||||||
|
|
||||||
|
```python
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
from mcp import ClientSession, StdioServerParameters
|
||||||
|
from mcp.client.stdio import stdio_client
|
||||||
|
|
||||||
|
async def main():
|
||||||
|
# Connect to NetAlertX MCP server
|
||||||
|
server_params = StdioServerParameters(
|
||||||
|
command="curl",
|
||||||
|
args=[
|
||||||
|
"-N", "-H", "Authorization: Bearer <API_TOKEN>",
|
||||||
|
"http://your-server:<GRAPHQL_PORT>/mcp/sse"
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
async with stdio_client(server_params) as (read, write):
|
||||||
|
async with ClientSession(read, write) as session:
|
||||||
|
# Initialize connection
|
||||||
|
await session.initialize()
|
||||||
|
|
||||||
|
# List available tools
|
||||||
|
tools = await session.list_tools()
|
||||||
|
print(f"Available tools: {[t.name for t in tools.tools]}")
|
||||||
|
|
||||||
|
# Call a tool
|
||||||
|
result = await session.call_tool("search_devices", {"query": "router"})
|
||||||
|
print(f"Search result: {result}")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
asyncio.run(main())
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
MCP tool calls return structured error information:
|
||||||
|
|
||||||
|
**Error Response**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "1",
|
||||||
|
"result": {
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "Error calling tool: Device not found"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"isError": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Common Error Types**:
|
||||||
|
- `401/403` - Authentication failure
|
||||||
|
- `400` - Invalid parameters or missing required fields
|
||||||
|
- `404` - Resource not found (device, scan results, etc.)
|
||||||
|
- `500` - Internal server error
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
* MCP endpoints require the same API token authentication as REST endpoints
|
||||||
|
* All MCP tools return JSON responses wrapped in MCP protocol format
|
||||||
|
* Server-Sent Events maintain persistent connections for real-time updates
|
||||||
|
* Tool parameters match their REST endpoint equivalents
|
||||||
|
* Error responses include both HTTP status codes and descriptive messages
|
||||||
|
* MCP bridge automatically handles request/response serialization
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
* [Main API Overview](API.md) - Core REST API documentation
|
||||||
|
* [Device API](API_DEVICE.md) - Individual device management
|
||||||
|
* [Devices Collection API](API_DEVICES.md) - Bulk device operations
|
||||||
|
* [Network Tools API](API_NETTOOLS.md) - Wake-on-LAN, scans, network utilities
|
||||||
|
* [Events API](API_EVENTS.md) - Event logging and monitoring
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
# Net Tools API Endpoints
|
# Net Tools API Endpoints
|
||||||
|
|
||||||
The Net Tools API provides **network diagnostic utilities**, including Wake-on-LAN, traceroute, speed testing, DNS resolution, nmap scanning, and internet connection information.
|
The Net Tools API provides **network diagnostic utilities**, including Wake-on-LAN, traceroute, speed testing, DNS resolution, nmap scanning, internet connection information, and network interface info.
|
||||||
|
|
||||||
All endpoints require **authorization** via Bearer token.
|
All endpoints require **authorization** via Bearer token.
|
||||||
|
|
||||||
@@ -190,6 +190,51 @@ All endpoints require **authorization** via Bearer token.
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
### 7. Network Interfaces
|
||||||
|
|
||||||
|
* **GET** `/nettools/interfaces`
|
||||||
|
Fetches the list of network interfaces on the system, including IPv4/IPv6 addresses, MAC, MTU, state (up/down), and RX/TX byte counters.
|
||||||
|
|
||||||
|
**Response** (success):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"interfaces": {
|
||||||
|
"eth0": {
|
||||||
|
"name": "eth0",
|
||||||
|
"short": "eth0",
|
||||||
|
"type": "ethernet",
|
||||||
|
"state": "up",
|
||||||
|
"mtu": 1500,
|
||||||
|
"mac": "00:11:32:EF:A5:6B",
|
||||||
|
"ipv4": ["192.168.1.82/24"],
|
||||||
|
"ipv6": ["fe80::211:32ff:feef:a56c/64"],
|
||||||
|
"rx_bytes": 18488221,
|
||||||
|
"tx_bytes": 1443944
|
||||||
|
},
|
||||||
|
"lo": {
|
||||||
|
"name": "lo",
|
||||||
|
"short": "lo",
|
||||||
|
"type": "loopback",
|
||||||
|
"state": "up",
|
||||||
|
"mtu": 65536,
|
||||||
|
"mac": null,
|
||||||
|
"ipv4": ["127.0.0.1/8"],
|
||||||
|
"ipv6": ["::1/128"],
|
||||||
|
"rx_bytes": 123456,
|
||||||
|
"tx_bytes": 123456
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Error Responses**:
|
||||||
|
|
||||||
|
* Command failure or parsing error → HTTP 500
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Example `curl` Requests
|
## Example `curl` Requests
|
||||||
|
|
||||||
**Wake-on-LAN**:
|
**Wake-on-LAN**:
|
||||||
@@ -241,3 +286,21 @@ curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/nettools/nmap" \
|
|||||||
curl "http://<server_ip>:<GRAPHQL_PORT>/nettools/internetinfo" \
|
curl "http://<server_ip>:<GRAPHQL_PORT>/nettools/internetinfo" \
|
||||||
-H "Authorization: Bearer <API_TOKEN>"
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Network Interfaces**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl "http://<server_ip>:<GRAPHQL_PORT>/nettools/interfaces" \
|
||||||
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MCP Tools
|
||||||
|
|
||||||
|
Network tools are available as **MCP Tools** for AI assistant integration:
|
||||||
|
|
||||||
|
* `wol_wake_device`, `trigger_scan`, `get_open_ports`
|
||||||
|
|
||||||
|
📖 See [MCP Server Bridge API](API_MCP.md) for AI integration details.
|
||||||
|
|
||||||
|
|||||||
@@ -149,11 +149,11 @@ You can access the following files:
|
|||||||
|
|
||||||
| File name | Description |
|
| File name | Description |
|
||||||
|----------------------|----------------------|
|
|----------------------|----------------------|
|
||||||
| `notification_json_final.json` | The json version of the last notification (e.g. used for webhooks - [sample JSON](https://github.com/jokob-sk/NetAlertX/blob/main/front/report_templates/webhook_json_sample.json)). |
|
| `notification_json_final.json` | The json version of the last notification (e.g. used for webhooks - [sample JSON](https://github.com/netalertx/NetAlertX/blob/main/front/report_templates/webhook_json_sample.json)). |
|
||||||
| `table_devices.json` | All of the available Devices detected by the app. |
|
| `table_devices.json` | All of the available Devices detected by the app. |
|
||||||
| `table_plugins_events.json` | The list of the unprocessed (pending) notification events (plugins_events DB table). |
|
| `table_plugins_events.json` | The list of the unprocessed (pending) notification events (plugins_events DB table). |
|
||||||
| `table_plugins_history.json` | The list of notification events history. |
|
| `table_plugins_history.json` | The list of notification events history. |
|
||||||
| `table_plugins_objects.json` | The content of the plugins_objects table. Find more info on the [Plugin system here](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md)|
|
| `table_plugins_objects.json` | The content of the plugins_objects table. Find more info on the [Plugin system here](https://docs.netalertx.com/PLUGINS)|
|
||||||
| `language_strings.json` | The content of the language_strings table, which in turn is loaded from the plugins `config.json` definitions. |
|
| `language_strings.json` | The content of the language_strings table, which in turn is loaded from the plugins `config.json` definitions. |
|
||||||
| `table_custom_endpoint.json` | A custom endpoint generated by the SQL query specified by the `API_CUSTOM_SQL` setting. |
|
| `table_custom_endpoint.json` | A custom endpoint generated by the SQL query specified by the `API_CUSTOM_SQL` setting. |
|
||||||
| `table_settings.json` | The content of the settings table. |
|
| `table_settings.json` | The content of the settings table. |
|
||||||
|
|||||||
@@ -106,23 +106,26 @@ curl -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/sessions/delete" \
|
|||||||
"success": true,
|
"success": true,
|
||||||
"sessions": [
|
"sessions": [
|
||||||
{
|
{
|
||||||
"ses_MAC": "AA:BB:CC:DD:EE:FF",
|
"sesMac": "AA:BB:CC:DD:EE:FF",
|
||||||
"ses_Connection": "2025-08-01 10:00",
|
"sesDateTimeConnection": "2025-08-01 10:00",
|
||||||
"ses_Disconnection": "2025-08-01 12:00",
|
"sesDateTimeDisconnection": "2025-08-01 12:00",
|
||||||
"ses_Duration": "2h 0m",
|
"sesDuration": "2h 0m",
|
||||||
"ses_IP": "192.168.1.10",
|
"sesIp": "192.168.1.10",
|
||||||
"ses_Info": ""
|
"sesAdditionalInfo": ""
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
#### `curl` Example
|
#### `curl` Example
|
||||||
|
|
||||||
|
**get sessions for mac**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/list?mac=AA:BB:CC:DD:EE:FF&start_date=2025-08-01&end_date=2025-08-21" \
|
curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/list?mac=AA:BB:CC:DD:EE:FF&start_date=2025-08-01&end_date=2025-08-21" \
|
||||||
-H "Authorization: Bearer <API_TOKEN>" \
|
-H "Authorization: Bearer <API_TOKEN>" \
|
||||||
-H "Accept: application/json"
|
-H "Accept: application/json"
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### Calendar View of Sessions
|
### Calendar View of Sessions
|
||||||
@@ -191,12 +194,12 @@ curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/calendar?start=2025-08-0
|
|||||||
"success": true,
|
"success": true,
|
||||||
"sessions": [
|
"sessions": [
|
||||||
{
|
{
|
||||||
"ses_MAC": "AA:BB:CC:DD:EE:FF",
|
"sesMac": "AA:BB:CC:DD:EE:FF",
|
||||||
"ses_Connection": "2025-08-01 10:00",
|
"sesDateTimeConnection": "2025-08-01 10:00",
|
||||||
"ses_Disconnection": "2025-08-01 12:00",
|
"sesDateTimeDisconnection": "2025-08-01 12:00",
|
||||||
"ses_Duration": "2h 0m",
|
"sesDuration": "2h 0m",
|
||||||
"ses_IP": "192.168.1.10",
|
"sesIp": "192.168.1.10",
|
||||||
"ses_Info": ""
|
"sesAdditionalInfo": ""
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -221,15 +224,33 @@ curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/AA:BB:CC:DD:EE:FF?period
|
|||||||
* `type` → Event type (`all`, `sessions`, `missing`, `voided`, `new`, `down`)
|
* `type` → Event type (`all`, `sessions`, `missing`, `voided`, `new`, `down`)
|
||||||
Default: `all`
|
Default: `all`
|
||||||
* `period` → Period to retrieve events (`7 days`, `1 month`, etc.)
|
* `period` → Period to retrieve events (`7 days`, `1 month`, etc.)
|
||||||
|
* `page` → Page number, 1-based (default: `1`)
|
||||||
|
* `limit` → Rows per page, max 1000 (default: `100`)
|
||||||
|
* `search` → Free-text search filter across all columns
|
||||||
|
* `sortCol` → Column index to sort by, 0-based (default: `0`)
|
||||||
|
* `sortDir` → Sort direction: `asc` or `desc` (default: `desc`)
|
||||||
|
|
||||||
**Example:**
|
**Example:**
|
||||||
|
|
||||||
```
|
```
|
||||||
/sessions/session-events?type=all&period=7 days
|
/sessions/session-events?type=all&period=7 days&page=1&limit=25&sortCol=3&sortDir=desc
|
||||||
```
|
```
|
||||||
|
|
||||||
**Response:**
|
**Response:**
|
||||||
Returns a list of events or sessions with formatted connection, disconnection, duration, and IP information.
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"data": [...],
|
||||||
|
"total": 150,
|
||||||
|
"recordsFiltered": 150
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
| ----------------- | ---- | ------------------------------------------------- |
|
||||||
|
| `data` | list | Paginated rows (each row is a list of values). |
|
||||||
|
| `total` | int | Total rows before search filter. |
|
||||||
|
| `recordsFiltered` | int | Total rows after search filter (before paging). |
|
||||||
|
|
||||||
#### `curl` Example
|
#### `curl` Example
|
||||||
|
|
||||||
|
|||||||
78
docs/API_SSE.md
Normal file
78
docs/API_SSE.md
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
# SSE (Server-Sent Events)
|
||||||
|
|
||||||
|
Real-time app state updates via Server-Sent Events. Reduces server load ~95% vs polling.
|
||||||
|
|
||||||
|
## Endpoints
|
||||||
|
|
||||||
|
| Endpoint | Method | Purpose |
|
||||||
|
|----------|--------|---------|
|
||||||
|
| `/sse/state` | GET | Stream state updates (requires Bearer token) |
|
||||||
|
| `/sse/stats` | GET | Debug: connected clients, queued events |
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Connect to SSE Stream
|
||||||
|
```bash
|
||||||
|
curl -H "Authorization: Bearer YOUR_API_TOKEN" \
|
||||||
|
http://localhost:5000/sse/state
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Connection Stats
|
||||||
|
```bash
|
||||||
|
curl -H "Authorization: Bearer YOUR_API_TOKEN" \
|
||||||
|
http://localhost:5000/sse/stats
|
||||||
|
```
|
||||||
|
|
||||||
|
## Event Types
|
||||||
|
|
||||||
|
- `state_update` - App state changed (e.g., "Scanning", "Processing")
|
||||||
|
- `unread_notifications_count_update` - Number of unread notifications changed (count: int)
|
||||||
|
|
||||||
|
## Backend Integration
|
||||||
|
|
||||||
|
Broadcasts automatically triggered in `app_state.py` via `broadcast_state_update()`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from api_server.sse_broadcast import broadcast_state_update
|
||||||
|
|
||||||
|
# Called on every state change - no additional code needed
|
||||||
|
broadcast_state_update(current_state="Scanning", settings_imported=time.time())
|
||||||
|
```
|
||||||
|
|
||||||
|
## Frontend Integration
|
||||||
|
|
||||||
|
Auto-enabled via `sse_manager.js`:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// In browser console:
|
||||||
|
netAlertXStateManager.getStats().then(stats => {
|
||||||
|
console.log("Connected clients:", stats.connected_clients);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Fallback Behavior
|
||||||
|
|
||||||
|
- If SSE fails after 3 attempts, automatically switches to polling
|
||||||
|
- Polling starts at 1s, backs off to 30s max
|
||||||
|
- No user-visible difference in functionality
|
||||||
|
|
||||||
|
## Files
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `server/api_server/sse_endpoint.py` | SSE endpoints & event queue |
|
||||||
|
| `server/api_server/sse_broadcast.py` | Broadcast helper functions |
|
||||||
|
| `front/js/sse_manager.js` | Client-side SSE connection manager |
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
| Issue | Solution |
|
||||||
|
|-------|----------|
|
||||||
|
| Connection refused | Check backend running, API token correct |
|
||||||
|
| No events received | Verify `broadcast_state_update()` is called on state changes |
|
||||||
|
| High memory | Events not processed fast enough, check client logs |
|
||||||
|
| Using polling instead of SSE | Normal fallback - check browser console for errors |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
## Authelia support
|
## Authelia support
|
||||||
|
|
||||||
> [!WARNING]
|
> [!NOTE]
|
||||||
>
|
> This is community-contributed content. Due to environment, setup, or networking differences, results may vary. Please open a PR with improvements rather than creating an issue, as this content is not actively maintained.
|
||||||
> This is community contributed content and work in progress. Contributions are welcome.
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
theme: dark
|
theme: dark
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> To back up 99% of your configuration, back up at least the `/data/config` folder.
|
> To back up 99% of your configuration, back up at least the `/data/config` folder.
|
||||||
> Database definitions can change between releases, so the safest method is to restore backups using the **same app version** they were taken from, then upgrade incrementally.
|
> Database definitions can change between releases, so the safest method is to restore backups using the **same app version** they were taken from, then upgrade incrementally by following the [Migration documentation](./MIGRATION.md).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -13,7 +13,7 @@ There are four key artifacts you can use to back up your NetAlertX configuration
|
|||||||
| File | Description | Limitations |
|
| File | Description | Limitations |
|
||||||
| ------------------------ | ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| ------------------------ | ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| `/db/app.db` | The application database | Might be in an uncommitted state or corrupted |
|
| `/db/app.db` | The application database | Might be in an uncommitted state or corrupted |
|
||||||
| `/config/app.conf` | Configuration file | Can be overridden using the [`APP_CONF_OVERRIDE`](https://github.com/jokob-sk/NetAlertX/tree/main/dockerfiles#docker-environment-variables) variable |
|
| `/config/app.conf` | Configuration file | Can be overridden using the [`APP_CONF_OVERRIDE`](https://github.com/netalertx/NetAlertX/tree/main/dockerfiles#docker-environment-variables) variable |
|
||||||
| `/config/devices.csv` | CSV file containing device data | Does not include historical data |
|
| `/config/devices.csv` | CSV file containing device data | Does not include historical data |
|
||||||
| `/config/workflows.json` | JSON file containing your workflows | N/A |
|
| `/config/workflows.json` | JSON file containing your workflows | N/A |
|
||||||
|
|
||||||
@@ -37,7 +37,7 @@ This includes settings for:
|
|||||||
|
|
||||||
### Device Data
|
### Device Data
|
||||||
|
|
||||||
Stored in `/data/config/devices_<timestamp>.csv` or `/data/config/devices.csv`, created by the [CSV Backup `CSVBCKP` Plugin](https://github.com/jokob-sk/NetAlertX/tree/main/front/plugins/csv_backup).
|
Stored in `/data/config/devices_<timestamp>.csv` or `/data/config/devices.csv`, created by the [CSV Backup `CSVBCKP` Plugin](https://github.com/netalertx/NetAlertX/tree/main/front/plugins/csv_backup).
|
||||||
Contains:
|
Contains:
|
||||||
|
|
||||||
* Device names, icons, and categories
|
* Device names, icons, and categories
|
||||||
|
|||||||
1
docs/CNAME
Normal file
1
docs/CNAME
Normal file
@@ -0,0 +1 @@
|
|||||||
|
docs.netalertx.com
|
||||||
@@ -120,3 +120,23 @@ With `ARPSCAN` scans some devices might flip IP addresses after each scan trigge
|
|||||||
See how to prevent IP flipping in the [ARPSCAN plugin guide](/front/plugins/arp_scan/README.md).
|
See how to prevent IP flipping in the [ARPSCAN plugin guide](/front/plugins/arp_scan/README.md).
|
||||||
|
|
||||||
Alternatively adjust your [notification settings](./NOTIFICATIONS.md) to prevent false positives by filtering out events or devices.
|
Alternatively adjust your [notification settings](./NOTIFICATIONS.md) to prevent false positives by filtering out events or devices.
|
||||||
|
|
||||||
|
#### Multiple NICs on Same Host Reporting Same IP
|
||||||
|
|
||||||
|
On systems with multiple NICs (like a Proxmox server), each NIC has its own MAC address. Sometimes NetAlertX can incorrectly assign the same IP to all NICs, causing false device mappings. This is due to the way ARP responses are handled by the OS and cannot be overridden directly in NetAlertX.
|
||||||
|
|
||||||
|
**Resolution (Linux-based systems, e.g., Proxmox):**
|
||||||
|
|
||||||
|
Run the following commands on the host to fix ARP behavior:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo sysctl -w net.ipv4.conf.all.arp_ignore=1
|
||||||
|
sudo sysctl -w net.ipv4.conf.all.arp_announce=2
|
||||||
|
```
|
||||||
|
|
||||||
|
This ensures each NIC responds correctly to ARP requests and prevents NetAlertX from misassigning IPs.
|
||||||
|
|
||||||
|
> For setups with multiple interfaces on the same switch, consider [workflows](./WORKFLOWS.md), [device exclusions](./NOTIFICATIONS.md), or [dummy devices](./DEVICE_MANAGEMENT.md) as additional workarounds.
|
||||||
|
> See [Feature Requests](https://github.com/netalertx/netalertx/issues) for reporting edge cases.
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,10 @@
|
|||||||
# Community Guides
|
# Community Guides
|
||||||
|
|
||||||
Use the official installation guides first and use community content as supplementary material. Open an issue or PR if you'd like to add your link to the list 🙏 (Ordered by last update time)
|
> [!NOTE]
|
||||||
|
> This is community-contributed content. Due to environment, setup, or networking differences, results may vary. Please open a PR with improvements rather than creating an issue, as this content is not actively maintained.
|
||||||
|
|
||||||
|
|
||||||
|
Use the official installation guides first and use community content as supplementary material. (Ordered by last update time)
|
||||||
|
|
||||||
- ▶ [Discover & Monitor Your Network with This Self-Hosted Open Source Tool - Lawrence Systems](https://www.youtube.com/watch?v=R3b5cxLZMpo) (June 2025)
|
- ▶ [Discover & Monitor Your Network with This Self-Hosted Open Source Tool - Lawrence Systems](https://www.youtube.com/watch?v=R3b5cxLZMpo) (June 2025)
|
||||||
- ▶ [Home Lab Network Monitoring - Scotti-BYTE Enterprise Consulting Services](https://www.youtube.com/watch?v=0DryhzrQSJA) (July 2024)
|
- ▶ [Home Lab Network Monitoring - Scotti-BYTE Enterprise Consulting Services](https://www.youtube.com/watch?v=0DryhzrQSJA) (July 2024)
|
||||||
@@ -13,3 +17,5 @@ Use the official installation guides at first and use community content as suppl
|
|||||||
- ▶ [Pi.Alert auf Synology & Docker by - Jürgen Barth](https://www.youtube.com/watch?v=-ouvA2UNu-A) (March 2023)
|
- ▶ [Pi.Alert auf Synology & Docker by - Jürgen Barth](https://www.youtube.com/watch?v=-ouvA2UNu-A) (March 2023)
|
||||||
- ▶ [Top Docker Container for Home Server Security - VirtualizationHowto](https://www.youtube.com/watch?v=tY-w-enLF6Q) (March 2023)
|
- ▶ [Top Docker Container for Home Server Security - VirtualizationHowto](https://www.youtube.com/watch?v=tY-w-enLF6Q) (March 2023)
|
||||||
- ▶ [Pi.Alert or WatchYourLAN can alert you to unknown devices appearing on your WiFi or LAN network - Danie van der Merwe](https://www.youtube.com/watch?v=v6an9QG2xF0) (November 2022)
|
- ▶ [Pi.Alert or WatchYourLAN can alert you to unknown devices appearing on your WiFi or LAN network - Danie van der Merwe](https://www.youtube.com/watch?v=v6an9QG2xF0) (November 2022)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -13,31 +13,6 @@ This functionality allows you to define **custom properties** for devices, which
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Defining Custom Properties
|
|
||||||
|
|
||||||
Custom properties are structured as a list of objects, where each property includes the following fields:
|
|
||||||
|
|
||||||
| Field | Description |
|
|
||||||
|--------------------|-----------------------------------------------------------------------------|
|
|
||||||
| `CUSTPROP_icon` | The icon (Base64-encoded HTML) displayed for the property. |
|
|
||||||
| `CUSTPROP_type` | The action type (e.g., `show_notes`, `link`, `delete_dev`). |
|
|
||||||
| `CUSTPROP_name` | A short name or title for the property. |
|
|
||||||
| `CUSTPROP_args` | Arguments for the action (e.g., URL or modal text). |
|
|
||||||
| `CUSTPROP_notes` | Additional notes or details displayed when applicable. |
|
|
||||||
| `CUSTPROP_show` | A boolean to control visibility (`true` to show on the listing page). |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Available Action Types
|
|
||||||
|
|
||||||
- **Show Notes**: Displays a modal with a title and additional notes.
|
|
||||||
- **Example**: Show firmware details or custom messages.
|
|
||||||
- **Link**: Redirects to a specified URL in the current browser tab. (**Arguments** Needs to contain the full URL.)
|
|
||||||
- **Link (New Tab)**: Opens a specified URL in a new browser tab. (**Arguments** Needs to contain the full URL.)
|
|
||||||
- **Delete Device**: Deletes the device using its MAC address.
|
|
||||||
- **Run Plugin**: Placeholder for executing custom plugins (not implemented yet).
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Usage on the Device Listing Page
|
## Usage on the Device Listing Page
|
||||||
|
|
||||||
@@ -74,6 +49,33 @@ Visible properties (`CUSTPROP_show: true`) are displayed as interactive icons in
|
|||||||
3. **Device Removal**:
|
3. **Device Removal**:
|
||||||
- Enable device removal functionality using `CUSTPROP_type: delete_dev`.
|
- Enable device removal functionality using `CUSTPROP_type: delete_dev`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Defining Custom Properties
|
||||||
|
|
||||||
|
Custom properties are structured as a list of objects, where each property includes the following fields:
|
||||||
|
|
||||||
|
| Field | Description |
|
||||||
|
|--------------------|-----------------------------------------------------------------------------|
|
||||||
|
| `CUSTPROP_icon` | The icon (Base64-encoded HTML) displayed for the property. |
|
||||||
|
| `CUSTPROP_type` | The action type (e.g., `show_notes`, `link`, `delete_dev`). |
|
||||||
|
| `CUSTPROP_name` | A short name or title for the property. |
|
||||||
|
| `CUSTPROP_args` | Arguments for the action (e.g., URL or modal text). |
|
||||||
|
| `CUSTPROP_notes` | Additional notes or details displayed when applicable. |
|
||||||
|
| `CUSTPROP_show` | A boolean to control visibility (`true` to show on the listing page). |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Available Action Types
|
||||||
|
|
||||||
|
- **Show Notes**: Displays a modal with a title and additional notes.
|
||||||
|
- **Example**: Show firmware details or custom messages.
|
||||||
|
- **Link**: Redirects to a specified URL in the current browser tab. (**Arguments** Needs to contain the full URL.)
|
||||||
|
- **Link (New Tab)**: Opens a specified URL in a new browser tab. (**Arguments** Needs to contain the full URL.)
|
||||||
|
- **Delete Device**: Deletes the device using its MAC address.
|
||||||
|
- **Run Plugin**: Placeholder for executing custom plugins (not implemented yet).
|
||||||
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
|
|||||||
@@ -23,6 +23,7 @@
|
|||||||
| `devLogEvents` | Whether events related to the device should be logged. | `0` |
|
| `devLogEvents` | Whether events related to the device should be logged. | `0` |
|
||||||
| `devAlertEvents` | Whether alerts should be generated for events. | `1` |
|
| `devAlertEvents` | Whether alerts should be generated for events. | `1` |
|
||||||
| `devAlertDown` | Whether an alert should be sent when the device goes down. | `0` |
|
| `devAlertDown` | Whether an alert should be sent when the device goes down. | `0` |
|
||||||
|
| `devCanSleep` | Whether the device can enter a sleep window. When `1`, offline periods within the `NTFPRCS_sleep_time` window are shown as **Sleeping** instead of **Down** and no down alert is fired. | `0` |
|
||||||
| `devSkipRepeated` | Whether to skip repeated alerts for this device. | `1` |
|
| `devSkipRepeated` | Whether to skip repeated alerts for this device. | `1` |
|
||||||
| `devLastNotification` | Timestamp of the last notification sent for this device. | `2025-03-22 12:07:26+11:00` |
|
| `devLastNotification` | Timestamp of the last notification sent for this device. | `2025-03-22 12:07:26+11:00` |
|
||||||
| `devPresentLastScan` | Whether the device was present during the last scan. | `1` |
|
| `devPresentLastScan` | Whether the device was present during the last scan. | `1` |
|
||||||
@@ -42,6 +43,12 @@
|
|||||||
| `devParentRelType` | The type of relationship between the current device and it's parent node. By default, selecting `nic` will hide it from lists. | `nic` |
|
| `devParentRelType` | The type of relationship between the current device and it's parent node. By default, selecting `nic` will hide it from lists. | `nic` |
|
||||||
| `devReqNicsOnline` | If all NICs are required to be online to mark the current device online. | `0` |
|
| `devReqNicsOnline` | If all NICs are required to be online to mark the current device online. | `0` |
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> `DevicesView` extends the `Devices` table with two computed fields that are never persisted:
|
||||||
|
> - `devIsSleeping` (`1` when `devCanSleep=1`, device is offline, and `devLastConnection` is within the `NTFPRCS_sleep_time` window).
|
||||||
|
> - `devFlapping` (`1` when the device has changed state more than the flap threshold times in the trailing window).
|
||||||
|
> - `devStatus` — derived string: `On-line`, `Sleeping`, `Down`, or `Off-line`.
|
||||||
|
|
||||||
|
|
||||||
To understand how values of these fields influence application behavior, such as Notifications or Network topology, see also:
|
To understand how values of these fields influence application behavior, such as Notifications or Network topology, see also:
|
||||||
|
|
||||||
|
|||||||
@@ -38,9 +38,22 @@ All application settings can also be initialized via the `APP_CONF_OVERRIDE` doc
|
|||||||
|
|
||||||
There are several ways to check if the GraphQL server is running.
|
There are several ways to check if the GraphQL server is running.
|
||||||
|
|
||||||
|
## Flask debug mode (environment)
|
||||||
|
|
||||||
|
You can control whether the Flask development debugger is enabled by setting the environment variable `FLASK_DEBUG` (default: `False`). Enabling debug mode will turn on the interactive debugger which may expose a remote code execution (RCE) vector if the server is reachable; **only enable this for local development** and never in production. Valid truthy values are: `1`, `true`, `yes`, `on` (case-insensitive).
|
||||||
|
|
||||||
|
In the running container you can set this variable via Docker Compose or your environment, for example:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
environment:
|
||||||
|
- FLASK_DEBUG=1
|
||||||
|
```
|
||||||
|
|
||||||
|
When enabled, the GraphQL server startup logs will indicate the debug setting.
|
||||||
|
|
||||||
### Init Check
|
### Init Check
|
||||||
|
|
||||||
You can navigate to Maintenance -> Init Check to see if `isGraphQLServerRunning` is ticked:
|
You can navigate to System Info -> Init Check to see if `isGraphQLServerRunning` is ticked:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|||||||
@@ -9,7 +9,6 @@ Check the the HTTP response of the failing backend call by following these steps
|
|||||||
|
|
||||||
- Copy the URL causing the error and enter it in the address bar of your browser directly and hit enter. The copied URLs could look something like this (notice the query strings at the end):
|
- Copy the URL causing the error and enter it in the address bar of your browser directly and hit enter. The copied URLs could look something like this (notice the query strings at the end):
|
||||||
- `http://<server>:20211/api/table_devices.json?nocache=1704141103121`
|
- `http://<server>:20211/api/table_devices.json?nocache=1704141103121`
|
||||||
- `http://<server>:20211/php/server/devices.php?action=getDevicesTotals`
|
|
||||||
|
|
||||||
|
|
||||||
- Post the error response in the existing issue thread on GitHub or create a new issue and include the redacted response of the failing query.
|
- Post the error response in the existing issue thread on GitHub or create a new issue and include the redacted response of the failing query.
|
||||||
|
|||||||
@@ -1,13 +1,13 @@
|
|||||||
# Troubleshooting plugins
|
# Troubleshooting plugins
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> Before troubleshooting, please ensure you have the right [Debugging and LOG_LEVEL set](./DEBUG_TIPS.md).
|
> Before troubleshooting, please ensure you have the right [Debugging and LOG_LEVEL set](./DEBUG_TIPS.md) in Settings.
|
||||||
|
|
||||||
## High-level overview
|
## High-level overview
|
||||||
|
|
||||||
If a Plugin supplies data to the main app it's done either via a SQL query or via a script that updates the `last_result.log` file in the plugin log folder (`app/log/plugins/`).
|
If a Plugin supplies data to the main app it's done either via a SQL query or via a script that updates the `last_result.log` file in the plugin log folder (`app/log/plugins/`).
|
||||||
|
|
||||||
For a more in-depth overview on how plugins work check the [Plugins development docs](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md).
|
For a more in-depth overview on how plugins work check the [Plugins development docs](./PLUGINS_DEV.md).
|
||||||
|
|
||||||
### Prerequisites
|
### Prerequisites
|
||||||
|
|
||||||
@@ -22,13 +22,28 @@ For a more in-depth overview on how plugins work check the [Plugins development
|
|||||||
|
|
||||||
#### Incorrect input data
|
#### Incorrect input data
|
||||||
|
|
||||||
Input data from the plugin might cause mapping issues in specific edge cases. Look for a corresponding section in the `app.log` file, for example notice the first line of the execution run of the `PIHOLE` plugin below:
|
Input data from the plugin might cause mapping issues in specific edge cases. Look for a corresponding section in the `app.log` file, and search for `[Scheduler] run for PLUGINNAME: YES`, so for ICMP you would look for `[Scheduler] run for ICMP: YES`. You can find examples of useful logs below. If your issue is related to a plugin, and you don't include a log section with this data, we can't help you to resolve your issue.
|
||||||
|
|
||||||
|
##### ICMP log example
|
||||||
|
|
||||||
```
|
```
|
||||||
17:31:05 [Scheduler] - Scheduler run for PIHOLE: YES
|
20:39:04 [Scheduler] run for ICMP: YES
|
||||||
|
20:39:04 [ICMP] fping skipping 192.168.1.124 : [2], timed out (NaN avg, 100% loss)
|
||||||
|
20:39:04 [ICMP] adding 192.168.1.123 from 192.168.1.123 : [2], 64 bytes, 20.1 ms (8.22 avg, 0% loss)
|
||||||
|
20:39:04 [ICMP] fping skipping 192.168.1.157 : [1], timed out (NaN avg, 100% loss)
|
||||||
|
20:39:04 [ICMP] adding 192.168.1.79 from 192.168.1.79 : [2], 64 bytes, 48.3 ms (60.9 avg, 0% loss)
|
||||||
|
20:39:04 [ICMP] fping skipping 192.168.1.128 : [2], timed out (NaN avg, 100% loss)
|
||||||
|
20:39:04 [ICMP] fping skipping 192.168.1.129 : [2], timed out (NaN avg, 100% loss)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
##### PIHOLE log example
|
||||||
|
|
||||||
|
```
|
||||||
|
17:31:05 [Scheduler] run for PIHOLE: YES
|
||||||
17:31:05 [Plugin utils] ---------------------------------------------
|
17:31:05 [Plugin utils] ---------------------------------------------
|
||||||
17:31:05 [Plugin utils] display_name: PiHole (Device sync)
|
17:31:05 [Plugin utils] display_name: PiHole (Device sync)
|
||||||
17:31:05 [Plugins] CMD: SELECT n.hwaddr AS Object_PrimaryID, {s-quote}null{s-quote} AS Object_SecondaryID, datetime() AS DateTime, na.ip AS Watched_Value1, n.lastQuery AS Watched_Value2, na.name AS Watched_Value3, n.macVendor AS Watched_Value4, {s-quote}null{s-quote} AS Extra, n.hwaddr AS ForeignKey FROM EXTERNAL_PIHOLE.Network AS n LEFT JOIN EXTERNAL_PIHOLE.Network_Addresses AS na ON na.network_id = n.id WHERE n.hwaddr NOT LIKE {s-quote}ip-%{s-quote} AND n.hwaddr is not {s-quote}00:00:00:00:00:00{s-quote} AND na.ip is not null
|
17:31:05 [Plugins] CMD: SELECT n.hwaddr AS objectPrimaryId, {s-quote}null{s-quote} AS objectSecondaryId, datetime() AS DateTime, na.ip AS watchedValue1, n.lastQuery AS watchedValue2, na.name AS watchedValue3, n.macVendor AS watchedValue4, {s-quote}null{s-quote} AS Extra, n.hwaddr AS ForeignKey FROM EXTERNAL_PIHOLE.Network AS n LEFT JOIN EXTERNAL_PIHOLE.Network_Addresses AS na ON na.network_id = n.id WHERE n.hwaddr NOT LIKE {s-quote}ip-%{s-quote} AND n.hwaddr is not {s-quote}00:00:00:00:00:00{s-quote} AND na.ip is not null
|
||||||
17:31:05 [Plugins] setTyp: subnets
|
17:31:05 [Plugins] setTyp: subnets
|
||||||
17:31:05 [Plugin utils] Flattening the below array
|
17:31:05 [Plugin utils] Flattening the below array
|
||||||
17:31:05 ['192.168.1.0/24 --interface=eth1']
|
17:31:05 ['192.168.1.0/24 --interface=eth1']
|
||||||
@@ -37,7 +52,7 @@ Input data from the plugin might cause mapping issues in specific edge cases. Lo
|
|||||||
17:31:05 [Plugins] Convert to Base64: True
|
17:31:05 [Plugins] Convert to Base64: True
|
||||||
17:31:05 [Plugins] base64 value: b'MTkyLjE2OC4xLjAvMjQgLS1pbnRlcmZhY2U9ZXRoMQ=='
|
17:31:05 [Plugins] base64 value: b'MTkyLjE2OC4xLjAvMjQgLS1pbnRlcmZhY2U9ZXRoMQ=='
|
||||||
17:31:05 [Plugins] Timeout: 10
|
17:31:05 [Plugins] Timeout: 10
|
||||||
17:31:05 [Plugins] Executing: SELECT n.hwaddr AS Object_PrimaryID, 'null' AS Object_SecondaryID, datetime() AS DateTime, na.ip AS Watched_Value1, n.lastQuery AS Watched_Value2, na.name AS Watched_Value3, n.macVendor AS Watched_Value4, 'null' AS Extra, n.hwaddr AS ForeignKey FROM EXTERNAL_PIHOLE.Network AS n LEFT JOIN EXTERNAL_PIHOLE.Network_Addresses AS na ON na.network_id = n.id WHERE n.hwaddr NOT LIKE 'ip-%' AND n.hwaddr is not '00:00:00:00:00:00' AND na.ip is not null
|
17:31:05 [Plugins] Executing: SELECT n.hwaddr AS objectPrimaryId, 'null' AS objectSecondaryId, datetime() AS DateTime, na.ip AS watchedValue1, n.lastQuery AS watchedValue2, na.name AS watchedValue3, n.macVendor AS watchedValue4, 'null' AS Extra, n.hwaddr AS ForeignKey FROM EXTERNAL_PIHOLE.Network AS n LEFT JOIN EXTERNAL_PIHOLE.Network_Addresses AS na ON na.network_id = n.id WHERE n.hwaddr NOT LIKE 'ip-%' AND n.hwaddr is not '00:00:00:00:00:00' AND na.ip is not null
|
||||||
🔻
|
🔻
|
||||||
17:31:05 [Plugins] SUCCESS, received 2 entries
|
17:31:05 [Plugins] SUCCESS, received 2 entries
|
||||||
17:31:05 [Plugins] sqlParam entries: [(0, 'PIHOLE', '01:01:01:01:01:01', 'null', 'null', '2023-12-25 06:31:05', '172.30.0.1', 0, 'aaaa', 'vvvvvvvvv', 'not-processed', 'null', 'null', '01:01:01:01:01:01'), (0, 'PIHOLE', '02:42:ac:1e:00:02', 'null', 'null', '2023-12-25 06:31:05', '172.30.0.2', 0, 'dddd', 'vvvvv2222', 'not-processed', 'null', 'null', '02:42:ac:1e:00:02')]
|
17:31:05 [Plugins] sqlParam entries: [(0, 'PIHOLE', '01:01:01:01:01:01', 'null', 'null', '2023-12-25 06:31:05', '172.30.0.1', 0, 'aaaa', 'vvvvvvvvv', 'not-processed', 'null', 'null', '01:01:01:01:01:01'), (0, 'PIHOLE', '02:42:ac:1e:00:02', 'null', 'null', '2023-12-25 06:31:05', '172.30.0.2', 0, 'dddd', 'vvvvv2222', 'not-processed', 'null', 'null', '02:42:ac:1e:00:02')]
|
||||||
@@ -54,13 +69,13 @@ Input data from the plugin might cause mapping issues in specific edge cases. Lo
|
|||||||
17:31:05 [Plugin utils] In pluginObjects there are 2 events with the status "missing-in-last-scan"
|
17:31:05 [Plugin utils] In pluginObjects there are 2 events with the status "missing-in-last-scan"
|
||||||
17:31:05 [Plugin utils] In pluginObjects there are 2 events with the status "watched-not-changed"
|
17:31:05 [Plugin utils] In pluginObjects there are 2 events with the status "watched-not-changed"
|
||||||
17:31:05 [Plugins] Mapping objects to database table: CurrentScan
|
17:31:05 [Plugins] Mapping objects to database table: CurrentScan
|
||||||
17:31:05 [Plugins] SQL query for mapping: INSERT into CurrentScan ( "cur_MAC", "cur_IP", "cur_LastQuery", "cur_Name", "cur_Vendor", "cur_ScanMethod") VALUES ( ?, ?, ?, ?, ?, ?)
|
17:31:05 [Plugins] SQL query for mapping: INSERT into CurrentScan ( "scanMac", "scanLastIP", "scanLastQuery", "scanName", "scanVendor", "scanSourcePlugin") VALUES ( ?, ?, ?, ?, ?, ?)
|
||||||
17:31:05 [Plugins] SQL sqlParams for mapping: [('01:01:01:01:01:01', '172.30.0.1', 0, 'aaaa', 'vvvvvvvvv', 'PIHOLE'), ('02:42:ac:1e:00:02', '172.30.0.2', 0, 'dddd', 'vvvvv2222', 'PIHOLE')]
|
17:31:05 [Plugins] SQL sqlParams for mapping: [('01:01:01:01:01:01', '172.30.0.1', 0, 'aaaa', 'vvvvvvvvv', 'PIHOLE'), ('02:42:ac:1e:00:02', '172.30.0.2', 0, 'dddd', 'vvvvv2222', 'PIHOLE')]
|
||||||
🔺
|
🔺
|
||||||
17:31:05 [API] Update API starting
|
17:31:05 [API] Update API starting
|
||||||
17:31:06 [API] Updating table_plugins_history.json file in /api
|
17:31:06 [API] Updating table_plugins_history.json file in /api
|
||||||
```
|
```
|
||||||
|
> [!NOTE]
|
||||||
> The debug output between the 🔻red arrows🔺 is important for debugging (arrows added only to highlight the section on this page, they are not available in the actual debug log)
|
> The debug output between the 🔻red arrows🔺 is important for debugging (arrows added only to highlight the section on this page, they are not available in the actual debug log)
|
||||||
|
|
||||||
In the above output notice the section logging how many events are produced by the plugin:
|
In the above output notice the section logging how many events are produced by the plugin:
|
||||||
@@ -80,12 +95,11 @@ These values, if formatted correctly, will also show up in the UI:
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
### Sharing application state
|
### Sharing application state
|
||||||
|
|
||||||
Sometimes specific log sections are needed to debug issues. The Devices and CurrentScan table data is sometimes needed to figure out what's wrong.
|
Sometimes specific log sections are needed to debug issues. The Devices and CurrentScan table data is sometimes needed to figure out what's wrong.
|
||||||
|
|
||||||
1. Please set `LOG_LEVEL` to `trace` (Disable it once you have the info as this produces big log files).
|
1. Please set `LOG_LEVEL` to `trace` in the Settings (Disable it once you have the info as this produces big log files).
|
||||||
2. Wait for the issue to occur.
|
2. Wait for the issue to occur.
|
||||||
3. Search for `================ DEVICES table content ================` in your logs.
|
3. Search for `================ DEVICES table content ================` in your logs.
|
||||||
4. Search for `================ CurrentScan table content ================` in your logs.
|
4. Search for `================ CurrentScan table content ================` in your logs.
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ Please follow tips 1 - 4 to get a more detailed error.
|
|||||||
|
|
||||||
## 1. More Logging
|
## 1. More Logging
|
||||||
|
|
||||||
When debugging an issue always set the highest log level:
|
When debugging an issue always set the highest log level in **Settings -> Core**:
|
||||||
|
|
||||||
`LOG_LEVEL='trace'`
|
`LOG_LEVEL='trace'`
|
||||||
|
|
||||||
@@ -21,7 +21,7 @@ docker run \
|
|||||||
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
|
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
|
||||||
-e PORT=20211 \
|
-e PORT=20211 \
|
||||||
-e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
|
-e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
|
||||||
ghcr.io/jokob-sk/netalertx:latest
|
ghcr.io/netalertx/netalertx:latest
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -34,11 +34,11 @@ Note: Your `/local_data_dir` should contain a `config` and `db` folder.
|
|||||||
|
|
||||||
If possible, check if your issue got fixed in the `_dev` image before opening a new issue. The container is:
|
If possible, check if your issue got fixed in the `_dev` image before opening a new issue. The container is:
|
||||||
|
|
||||||
`ghcr.io/jokob-sk/netalertx-dev:latest`
|
`ghcr.io/netalertx/netalertx-dev:latest`
|
||||||
|
|
||||||
> ⚠ Please backup your DB and config beforehand!
|
> ⚠ Please backup your DB and config beforehand!
|
||||||
|
|
||||||
Please also search [open issues](https://github.com/jokob-sk/NetAlertX/issues).
|
Please also search [open issues](https://github.com/netalertx/NetAlertX/issues).
|
||||||
|
|
||||||
## 4. Disable restart behavior
|
## 4. Disable restart behavior
|
||||||
|
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ The database and device structure may change with new releases. When using the C
|
|||||||

|

|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> The file containing a list of Devices including the Network relationships between Network Nodes and connected devices. You can also trigger this by accessing this URL: `<server>:20211/php/server/devices.php?action=ExportCSV` or via the `CSV Backup` plugin. (💡 You can schedule this)
|
> The file containing a list of Devices including the Network relationships between Network Nodes and connected devices. You can also trigger this with the `CSV Backup` plugin. (💡 You can schedule this)
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|||||||
@@ -8,13 +8,17 @@ This set of settings allows you to group Devices under different views. The Arch
|
|||||||
|
|
||||||
## Status Colors
|
## Status Colors
|
||||||
|
|
||||||

|
| Icon | Status | Image | Description |
|
||||||
|
|-----------|------------------------|-----------------------------------------------------------------------|-----------------------------------------------------------------------------------------------|
|
||||||
1. 🔌 Online (Green) = A device that is no longer marked as a "New Device".
|
| <i class="fa-solid fa-plug"></i> | Online (Green) |  | A device that is no longer marked as a "New Device". |
|
||||||
2. 🔌 New (Green) = A newly discovered device that is online and is still marked as a "New Device".
|
| <i class="fa-solid fa-plug"></i> | New (Green) |  | A newly discovered device that is online and is still marked as a "New Device". |
|
||||||
3. ✖ New (Grey) = Same as No.2 but device is now offline.
|
| <i class="fa-solid fa-plug-circle-exclamation"></i> | Online (Orange) |  | The device is online, but unstable and flapping (3 status changes in the last hour). |
|
||||||
4. ✖ Offline (Grey) = A device that was not detected online in the last scan.
|
| <i class="fa-solid fa-xmark"></i> | New (Grey) |  | Same as "New (Green)" but the device is now offline. |
|
||||||
5. ⚠ Down (Red) = A device that has "Alert Down" marked and has been offline for the time set in the Setting `NTFPRCS_alert_down_time`.
|
| <i class="fa-solid fa-box-archive"></i> | New (Grey) |  | Same as "New (Green)" but the device is now offline and archived. |
|
||||||
|
| <i class="fa-solid fa-xmark"></i> | Offline (Grey) |  | A device that was not detected online in the last scan. |
|
||||||
|
| <i class="fa-solid fa-box-archive"></i> | Archived (Grey) |  | An archived device that was not detected online in the last scan. |
|
||||||
|
| <i class="fa-solid fa-moon"></i> | Sleeping (Aqua) |  | A device with **Can Sleep** enabled that has gone offline within the `NTFPRCS_sleep_time` window. No down alert is fired while the device is in this state. See [Notifications](./NOTIFICATIONS.md#device-settings). |
|
||||||
|
| <i class="fa-solid fa-triangle-exclamation"></i> | Down (Red) |  | A device marked as "Alert Down" and offline for the duration set in `NTFPRCS_alert_down_time`.|
|
||||||
|
|
||||||
|
|
||||||
See also [Notification guide](./NOTIFICATIONS.md).
|
See also [Notification guide](./NOTIFICATIONS.md).
|
||||||
164
docs/DEVICE_FIELD_LOCK.md
Normal file
164
docs/DEVICE_FIELD_LOCK.md
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
# Quick Reference Guide - Device Field Lock/Unlock System
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
The device field lock/unlock system allows you to protect specific device fields from being automatically overwritten by scanning plugins. When you lock a field, NetAlertX remembers your choice and prevents plugins from changing that value until you unlock it.
|
||||||
|
|
||||||
|
**Use case:** You've manually corrected a device name or port number and want to keep it that way, even when plugins discover different values.
|
||||||
|
|
||||||
|
## Tracked Fields
|
||||||
|
|
||||||
|
These are the ONLY fields that can be locked:
|
||||||
|
|
||||||
|
- `devName` - Device hostname/alias
|
||||||
|
- `devVendor` - Device manufacturer
|
||||||
|
- `devSSID` - WiFi network name
|
||||||
|
- `devParentMAC` - Parent/gateway MAC
|
||||||
|
- `devParentPort` - Parent device port
|
||||||
|
- `devParentRelType` - Relationship type (e.g., "gateway")
|
||||||
|
- `devVlan` - VLAN identifier
|
||||||
|
|
||||||
|
Additional fields that are tracked (and their source is displayed in the UI if available):
|
||||||
|
|
||||||
|
- `devMac`
|
||||||
|
- `devLastIP`
|
||||||
|
- `devFQDN`
|
||||||
|
|
||||||
|
## Source Values Explained
|
||||||
|
|
||||||
|
Each locked field has a "source" indicator that shows you why the value is protected:
|
||||||
|
|
||||||
|
| Indicator | Meaning | Can It Change? |
|
||||||
|
|-----------|---------|---|
|
||||||
|
| 🔒 **LOCKED** | You locked this field | No, until you unlock it |
|
||||||
|
| ✏️ **USER** | You edited this field | No, plugins can't overwrite |
|
||||||
|
| 📡 **NEWDEV** | Default/unset value | Yes, plugins can update |
|
||||||
|
| 📡 **Plugin name** | Last updated by a plugin (e.g., UNIFIAPI) | Yes, plugins can update if field in SET_ALWAYS |
|
||||||
|
|
||||||
|
Overwrite rules are determined by these source values: fields marked `LOCKED` or `USER` are protected from plugin updates, while `NEWDEV` and plugin-sourced values can be overwritten.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> You can bulk-unlock devices in the [Multi-edit](./DEVICES_BULK_EDITING.md) dialog. This removes all `USER` and `LOCKED` values from all `*Source` fields of selected devices.
|
||||||
|
|
||||||
|
## Usage Examples
|
||||||
|
|
||||||
|
### Lock a Field (Prevent Plugin Changes)
|
||||||
|
|
||||||
|
1. Navigate to **Device Details** for the device
|
||||||
|
2. Find the field you want to protect (e.g., device name)
|
||||||
|
3. Click the **lock button** (🔒) next to the field
|
||||||
|
4. The button changes to **unlock** (🔓)
|
||||||
|
5. That field is now protected
|
||||||
|
|
||||||
|
### Unlock a Field (Allow Plugin Updates)
|
||||||
|
|
||||||
|
1. Go to **Device Details**
|
||||||
|
2. Find the locked field (shows 🔓)
|
||||||
|
3. Click the **unlock button** (🔓)
|
||||||
|
4. The button changes back to **lock** (🔒)
|
||||||
|
5. Plugins can now update that field again
|
||||||
|
|
||||||
|
## Common Scenarios
|
||||||
|
|
||||||
|
### Scenario 1: You've Named Your Device and Want to Keep the Name
|
||||||
|
|
||||||
|
1. You manually edit device name to "Living Room Smart TV"
|
||||||
|
2. A scanning plugin later discovers it as "Unknown Device" or "DEVICE-ABC123"
|
||||||
|
3. **Solution:** Lock the device name field
|
||||||
|
4. Your custom name is preserved even after future scans
|
||||||
|
|
||||||
|
### Scenario 2: You Lock a Field, But It Still Changes
|
||||||
|
|
||||||
|
**This means the field source is USER or LOCKED (protected).** Check:
|
||||||
|
- Is it showing the lock icon? (If yes, it's protected)
|
||||||
|
- Wait a moment—sometimes changes take a few seconds to display
|
||||||
|
- Try refreshing the page
|
||||||
|
|
||||||
|
### Scenario 3: You Want to Let Plugins Update Again
|
||||||
|
|
||||||
|
1. Find the device with locked fields
|
||||||
|
2. Click the unlock button (🔓) next to each field
|
||||||
|
3. Refresh the page
|
||||||
|
4. Next time a plugin runs, it can update that field
|
||||||
|
|
||||||
|
## What Happens When You Lock a Field
|
||||||
|
|
||||||
|
- ✅ Your custom value is kept
|
||||||
|
- ✅ Future plugin scans won't overwrite it
|
||||||
|
- ✅ You can still manually edit it anytime after unlocking
|
||||||
|
- ✅ Lock persists across plugin runs
|
||||||
|
- ✅ Other users can see it's locked
|
||||||
|
|
||||||
|
## What Happens When You Unlock a Field
|
||||||
|
|
||||||
|
- ✅ Plugins can update it again on next scan
|
||||||
|
- ✅ If a plugin has a new value, it will be applied
|
||||||
|
- ✅ You can lock it again anytime
|
||||||
|
- ✅ Your manual edits are still saved in the database
|
||||||
|
|
||||||
|
## Error Messages & Solutions
|
||||||
|
|
||||||
|
| Message | What It Means | What to Do |
|
||||||
|
|---------|--------------|-----------|
|
||||||
|
| "Field cannot be locked" | You tried to lock a field that doesn't support locking | Only lock the fields listed above |
|
||||||
|
| "Device not found" | The device MAC address doesn't exist | Verify the device hasn't been deleted |
|
||||||
|
| Lock button doesn't work | Network or permission issue | Refresh the page and try again |
|
||||||
|
| Unexpected field changed | Field might have been unlocked | Check if field shows unlock icon (🔓) |
|
||||||
|
|
||||||
|
## Quick Tips
|
||||||
|
|
||||||
|
- **Lock names you manually corrected** to keep them stable
|
||||||
|
- **Leave discovery fields (vendor, FQDN) unlocked** for automatic updates
|
||||||
|
- **Use locks sparingly**—they prevent automatic data enrichment
|
||||||
|
- **Check the source indicator** (colored badge) to understand field origin
|
||||||
|
- **Lock buttons only appear for devices that are saved** (not for new devices being created)
|
||||||
|
|
||||||
|
## When to Lock vs. When NOT to Lock
|
||||||
|
|
||||||
|
### ✅ **Good reasons to lock:**
|
||||||
|
|
||||||
|
- You've customized the device name and it's correct
|
||||||
|
- You've set a static IP and it shouldn't change
|
||||||
|
- You've configured VLAN information
|
||||||
|
- You know the parent device and don't want it auto-corrected
|
||||||
|
|
||||||
|
### ❌ **Bad reasons to lock:**
|
||||||
|
|
||||||
|
- The value seems wrong—edit it first, then lock
|
||||||
|
- You want to prevent data from another source—use field lock, not to hide problems
|
||||||
|
- You're trying to force a value the system disagrees with
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
**Lock button not appearing:**
|
||||||
|
|
||||||
|
- Confirm the field is one of the tracked fields (see list above)
|
||||||
|
- Confirm the device is already saved (new devices don't show lock buttons)
|
||||||
|
- Refresh the page
|
||||||
|
|
||||||
|
**Lock button is there but click doesn't work:**
|
||||||
|
|
||||||
|
- Check your internet connection
|
||||||
|
- Check you have permission to edit devices
|
||||||
|
- Look at browser console (F12 > Console tab) for error messages
|
||||||
|
- Try again in a few seconds
|
||||||
|
|
||||||
|
**Field still changes after locking:**
|
||||||
|
|
||||||
|
- Double-check the lock icon shows
|
||||||
|
- Reload the page—the change might be a display issue
|
||||||
|
- Check if you accidentally unlocked it
|
||||||
|
- Open an issue if it persists
|
||||||
|
|
||||||
|
## See also
|
||||||
|
|
||||||
|
- [Device locking](./DEVICE_FIELD_LOCK.md)
|
||||||
|
- [Device source fields](./DEVICE_SOURCE_FIELDS.md)
|
||||||
|
- [API Device Endpoints Documentation](./API_DEVICE.md)
|
||||||
|
- [Authoritative Field Updates System](./PLUGINS_DEV.md#authoritative-fields)
|
||||||
|
- [Plugin Configuration Reference](./PLUGINS_DEV_CONFIG.md)
|
||||||
|
- [Device locking APIs](API_DEVICE_FIELD_LOCK.md)
|
||||||
|
- [Device management](DEVICE_MANAGEMENT.md)
|
||||||
|
|
||||||
@@ -13,7 +13,7 @@ The Main Info section is where most of the device identifiable information is st
|
|||||||
|
|
||||||
- **MAC**: MAC address of the device. Not editable, unless creating a new dummy device.
|
- **MAC**: MAC address of the device. Not editable, unless creating a new dummy device.
|
||||||
- **Last IP**: IP address of the device. Not editable, unless creating a new dummy device.
|
- **Last IP**: IP address of the device. Not editable, unless creating a new dummy device.
|
||||||
- **Name**: Friendly device name. Autodetected via various 🆎 Name discovery [plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md). The app attaches `(IP match)` if the name is discovered via an IP match and not MAC match which could mean the name could be incorrect as IPs might change.
|
- **Name**: Friendly device name. Autodetected via various 🆎 Name discovery [plugins](https://docs.netalertx.com/PLUGINS). The app attaches `(IP match)` if the name is discovered via an IP match and not MAC match which could mean the name could be incorrect as IPs might change.
|
||||||
- **Icon**: Partially autodetected. Select an existing or [add a custom icon](./ICONS.md). You can also auto-apply the same icon on all devices of the same type.
|
- **Icon**: Partially autodetected. Select an existing or [add a custom icon](./ICONS.md). You can also auto-apply the same icon on all devices of the same type.
|
||||||
- **Owner**: Device owner (The list is self-populated with existing owners and you can add custom values).
|
- **Owner**: Device owner (The list is self-populated with existing owners and you can add custom values).
|
||||||
- **Type**: Select a device type from the dropdown list (`Smartphone`, `Tablet`,
|
- **Type**: Select a device type from the dropdown list (`Smartphone`, `Tablet`,
|
||||||
@@ -39,12 +39,45 @@ The **MAC** field and the **Last IP** field will then become editable.
|
|||||||

|

|
||||||
|
|
||||||
|
|
||||||
> [!NOTE]
|
## Dummy or Manually Created Device Status
|
||||||
>
|
|
||||||
> You can couple this with the `ICMP` plugin which can be used to monitor the status of these devices, if they are actual devices reachable with the `ping` command. If not, you can use a loopback IP address so they appear online, such as `0.0.0.0` or `127.0.0.1`.
|
You can control a dummy device’s status either via `ICMP` (automatic) or the `Force Status` field (manual). Choose based on whether the device is real and how important **data hygiene** is.
|
||||||
|
|
||||||
|
### `ICMP` (Real Devices)
|
||||||
|
|
||||||
|
Use a real IP that responds to ping so status is updated automatically.
|
||||||
|
|
||||||
|
### `Force Status` (Best for Data Hygiene)
|
||||||
|
|
||||||
|
Manually set the status when the device is not reachable or is purely logical.
|
||||||
|
This keeps your data clean and avoids fake IPs.
|
||||||
|
|
||||||
|
### Loopback IP (`127.0.0.1`, `0.0.0.0`)
|
||||||
|
|
||||||
|
Use when you want the device to always appear online via `ICMP`.
|
||||||
|
Note this simulates reachability and introduces artificial data. This approach might be preferred, if you want to filter and distinguish dummy devices based on IP when filtering your asset lists.
|
||||||
|
|
||||||
|
|
||||||
## Copying data from an existing device.
|
## Copying data from an existing device.
|
||||||
|
|
||||||
To speed up device population you can also copy data from an existing device. This can be done from the **Tools** tab on the Device details.
|
To speed up device population you can also copy data from an existing device. This can be done from the **Tools** tab on the Device details.
|
||||||
|
|
||||||
|
## Field Locking (Preventing Plugin Overwrites)
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
NetAlertX allows you to "lock" specific device fields to prevent plugins from automatically overwriting your custom values. This is useful when you've manually corrected information that might be discovered differently by discovery plugins.
|
||||||
|
|
||||||
|
### Quick Start
|
||||||
|
|
||||||
|
1. Open a device for editing
|
||||||
|
2. Click the **lock button** (🔒) next to any tracked field
|
||||||
|
3. The field is now protected—plugins cannot change it until you unlock it
|
||||||
|
|
||||||
|
### See Also
|
||||||
|
|
||||||
|
- **For Users:** [Quick Reference - Device Field Lock/Unlock](DEVICE_FIELD_LOCK.md) - How to use field locking
|
||||||
|
- **For Developers:** [API Device Field Lock Documentation](API_DEVICE_FIELD_LOCK.md) - Technical API reference
|
||||||
|
- **For Plugin Developers:** [Plugin Field Configuration (SET_ALWAYS/SET_EMPTY)](PLUGINS_DEV_CONFIG.md) - Configure which fields plugins can update
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
67
docs/DEVICE_SOURCE_FIELDS.md
Normal file
67
docs/DEVICE_SOURCE_FIELDS.md
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# Understanding Device Source Fields and Field Updates
|
||||||
|
|
||||||
|
When the system scans a network, it finds various details about devices (like names, IP addresses, and manufacturers). To ensure the data remains accurate without accidentally overwriting manual changes, the system uses a set of "Source Rules."
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## The "Protection" Levels
|
||||||
|
|
||||||
|
Every piece of information for a device has a **Source**. This source determines whether a new scan is allowed to change that value.
|
||||||
|
|
||||||
|
| Source Status | Description | Can a Scan Overwrite it? |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| **USER** | You manually entered this value. | **Never** |
|
||||||
|
| **LOCKED** | This value is pinned and protected. | **Never** |
|
||||||
|
| **NEWDEV** | This value was initialized from `NEWDEV` plugin settings. | **Always** |
|
||||||
|
| **(Plugin Name)** | The value was found by a specific scanner (e.g., `NBTSCAN`). | **Only if specific rules are met** |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## How Scans Update Information
|
||||||
|
|
||||||
|
If a field is **not** protected by a `USER` or `LOCKED` status, the system follows these rules to decide if it should update the info:
|
||||||
|
|
||||||
|
### 1. The "Empty Field" Rule (Default)
|
||||||
|
|
||||||
|
By default, the system is cautious. It will only fill in a piece of information if the current field is **empty** (showing as "unknown," "0.0.0.0," or blank). For example, it won't change an existing name unless you tell it to.
|
||||||
|
|
||||||
|
### 2. SET_ALWAYS
|
||||||
|
|
||||||
|
Some plugins are configured to be "authoritative." If a field is in the **SET_ALWAYS** setting of a plugin:
|
||||||
|
|
||||||
|
* The scanner will **always** overwrite the current value with the new one.
|
||||||
|
* *Note: It will still never overwrite a `USER` or `LOCKED` field.*
|
||||||
|
|
||||||
|
### 3. SET_EMPTY
|
||||||
|
|
||||||
|
If a field is in the **SET_EMPTY** list:
|
||||||
|
|
||||||
|
* The scanner will **only** provide a value if the current field is currently empty.
|
||||||
|
* This is used for fields where we want to "fill in the blanks" but never change a value once it has been established by any source.
|
||||||
|
|
||||||
|
### 4. Automatic Overrides (Live Tracking)
|
||||||
|
|
||||||
|
Some fields, like **IP Addresses** (`devLastIP`) and **Full Domain Names** (`devFQDN`), are set to automatically update whenever they change. This ensures that if a device moves to a new IP on your network, the system reflects that change immediately without you having to do anything.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Summary of Field Logic
|
||||||
|
|
||||||
|
| If the current value is... | And the Scan finds... | Does it update? |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| **USER / LOCKED** | Anything | **No** |
|
||||||
|
| **Empty** | A new value | **Yes** |
|
||||||
|
| **A "Plugin" value** | A different value | **No** (Unless `SET_ALWAYS` is on) |
|
||||||
|
| **An IP Address** | A different IP | **Yes** (Updates automatically) |
|
||||||
|
|
||||||
|
## See also:
|
||||||
|
|
||||||
|
- [Device locking](./DEVICE_FIELD_LOCK.md)
|
||||||
|
- [Device source fields](./DEVICE_SOURCE_FIELDS.md)
|
||||||
|
- [API Device Endpoints Documentation](./API_DEVICE.md)
|
||||||
|
- [Authoritative Field Updates System](./PLUGINS_DEV.md#authoritative-fields)
|
||||||
|
- [Plugin Configuration Reference](./PLUGINS_DEV_CONFIG.md)
|
||||||
|
- [Device locking APIs](API_DEVICE_FIELD_LOCK.md)
|
||||||
|
- [Device management](DEVICE_MANAGEMENT.md)
|
||||||
@@ -43,7 +43,7 @@ The following steps will guide you to set up your environment for local developm
|
|||||||
### 1. Download the code:
|
### 1. Download the code:
|
||||||
|
|
||||||
- `mkdir /development`
|
- `mkdir /development`
|
||||||
- `cd /development && git clone https://github.com/jokob-sk/NetAlertX.git`
|
- `cd /development && git clone https://github.com/netalertx/NetAlertX.git`
|
||||||
|
|
||||||
### 2. Create a DEV .env_dev file
|
### 2. Create a DEV .env_dev file
|
||||||
|
|
||||||
@@ -77,7 +77,7 @@ Create a folder `netalertx` in the `APP_DATA_LOCATION` (in this example in `/vol
|
|||||||
|
|
||||||
You can then modify the python script without restarting/rebuilding the container every time. Additionally, you can trigger a plugin run via the UI:
|
You can then modify the python script without restarting/rebuilding the container every time. Additionally, you can trigger a plugin run via the UI:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
## Tips
|
## Tips
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
# NetAlertX and Docker Compose
|
# NetAlertX and Docker Compose
|
||||||
|
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
> ⚠️ **Important:** The docker-compose has recently changed. Carefully read the [Migration guide](https://jokob-sk.github.io/NetAlertX/MIGRATION/?h=migrat#12-migration-from-netalertx-v25524) for detailed instructions.
|
> ⚠️ **Important:** The docker-compose has recently changed. Carefully read the [Migration guide](https://docs.netalertx.com/MIGRATION/?h=migrat#12-migration-from-netalertx-v25524) for detailed instructions.
|
||||||
|
|
||||||
Great care is taken to ensure NetAlertX meets the needs of everyone while being flexible enough for anyone. This document outlines how you can configure your docker-compose. There are many settings, so we recommend using the Baseline Docker Compose as-is, or modifying it for your system.
|
Great care is taken to ensure NetAlertX meets the needs of everyone while being flexible enough for anyone. This document outlines how you can configure your docker-compose. There are many settings, so we recommend using the Baseline Docker Compose as-is, or modifying it for your system.
|
||||||
|
|
||||||
@@ -17,7 +17,7 @@ services:
|
|||||||
netalertx:
|
netalertx:
|
||||||
#use an environmental variable to set host networking mode if needed
|
#use an environmental variable to set host networking mode if needed
|
||||||
container_name: netalertx # The name when you docker container ls
|
container_name: netalertx # The name when you docker container ls
|
||||||
image: ghcr.io/jokob-sk/netalertx-dev:latest
|
image: ghcr.io/netalertx/netalertx:latest
|
||||||
network_mode: ${NETALERTX_NETWORK_MODE:-host} # Use host networking for ARP scanning and other services
|
network_mode: ${NETALERTX_NETWORK_MODE:-host} # Use host networking for ARP scanning and other services
|
||||||
|
|
||||||
read_only: true # Make the container filesystem read-only
|
read_only: true # Make the container filesystem read-only
|
||||||
@@ -27,6 +27,20 @@ services:
|
|||||||
- NET_ADMIN # Required for ARP scanning
|
- NET_ADMIN # Required for ARP scanning
|
||||||
- NET_RAW # Required for raw socket operations
|
- NET_RAW # Required for raw socket operations
|
||||||
- NET_BIND_SERVICE # Required to bind to privileged ports (nbtscan)
|
- NET_BIND_SERVICE # Required to bind to privileged ports (nbtscan)
|
||||||
|
- CHOWN # Required for root-entrypoint to chown /data + /tmp before dropping privileges
|
||||||
|
- SETUID # Required for root-entrypoint to switch to non-root user
|
||||||
|
- SETGID # Required for root-entrypoint to switch to non-root group
|
||||||
|
# --- ARP FLUX MITIGATION ---
|
||||||
|
# Note: When using `network_mode: host`, these sysctls require the
|
||||||
|
# NET_ADMIN capability to be applied to the host namespace.
|
||||||
|
#
|
||||||
|
# If your environment restricts capabilities, or you prefer to configure
|
||||||
|
# them on the Host OS, REMOVE the sysctls block below and apply via:
|
||||||
|
# sudo sysctl -w net.ipv4.conf.all.arp_ignore=1 net.ipv4.conf.all.arp_announce=2
|
||||||
|
# ---------------------------
|
||||||
|
sysctls: # ARP flux mitigation (reduces duplicate/ambiguous ARP behavior on host networking)
|
||||||
|
net.ipv4.conf.all.arp_ignore: 1
|
||||||
|
net.ipv4.conf.all.arp_announce: 2
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
- type: volume # Persistent Docker-managed named volume for config + database
|
- type: volume # Persistent Docker-managed named volume for config + database
|
||||||
@@ -51,24 +65,26 @@ services:
|
|||||||
# - path/on/host/to/dhcp.file:/resources/dhcp.file
|
# - path/on/host/to/dhcp.file:/resources/dhcp.file
|
||||||
|
|
||||||
# tmpfs mount consolidates writable state for a read-only container and improves performance
|
# tmpfs mount consolidates writable state for a read-only container and improves performance
|
||||||
# uid=20211 and gid=20211 is the netalertx user inside the container
|
# uid/gid default to the service user (NETALERTX_UID/GID, default 20211)
|
||||||
# mode=1700 grants rwx------ permissions to the netalertx user only
|
# mode=1700 grants rwx------ permissions to the runtime user only
|
||||||
tmpfs:
|
tmpfs:
|
||||||
# Comment out to retain logs between container restarts - this has a server performance impact.
|
# Comment out to retain logs between container restarts - this has a server performance impact.
|
||||||
- "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
- "/tmp:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
||||||
|
|
||||||
# Retain logs - comment out tmpfs /tmp if you want to retain logs between container restarts
|
# Retain logs - comment out tmpfs /tmp if you want to retain logs between container restarts
|
||||||
# Please note if you remove the /tmp mount, you must create and maintain sub-folder mounts.
|
# Please note if you remove the /tmp mount, you must create and maintain sub-folder mounts.
|
||||||
# - /path/on/host/log:/tmp/log
|
# - /path/on/host/log:/tmp/log
|
||||||
# - "/tmp/api:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
# - "/tmp/api:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
||||||
# - "/tmp/nginx:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
# - "/tmp/nginx:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
||||||
# - "/tmp/run:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
# - "/tmp/run:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces
|
LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces
|
||||||
PORT: ${PORT:-20211} # Application port
|
PORT: ${PORT:-20211} # Application port
|
||||||
GRAPHQL_PORT: ${GRAPHQL_PORT:-20212} # GraphQL API port (passed into APP_CONF_OVERRIDE at runtime)
|
GRAPHQL_PORT: ${GRAPHQL_PORT:-20212} # GraphQL API port (passed into APP_CONF_OVERRIDE at runtime)
|
||||||
# NETALERTX_DEBUG: ${NETALERTX_DEBUG:-0} # 0=kill all services and restart if any dies. 1 keeps running dead services.
|
# NETALERTX_DEBUG: ${NETALERTX_DEBUG:-0} # 0=kill all services and restart if any dies. 1 keeps running dead services.
|
||||||
|
# PUID: 20211 # Runtime PUID override, set to 0 to run as root
|
||||||
|
# PGID: 20211 # Runtime PGID override
|
||||||
|
|
||||||
# Resource limits to prevent resource exhaustion
|
# Resource limits to prevent resource exhaustion
|
||||||
mem_limit: 2048m # Maximum memory usage
|
mem_limit: 2048m # Maximum memory usage
|
||||||
@@ -76,7 +92,6 @@ services:
|
|||||||
cpu_shares: 512 # Relative CPU weight for CPU contention scenarios
|
cpu_shares: 512 # Relative CPU weight for CPU contention scenarios
|
||||||
pids_limit: 512 # Limit the number of processes/threads to prevent fork bombs
|
pids_limit: 512 # Limit the number of processes/threads to prevent fork bombs
|
||||||
logging:
|
logging:
|
||||||
driver: "json-file" # Use JSON file logging driver
|
|
||||||
options:
|
options:
|
||||||
max-size: "10m" # Rotate log files after they reach 10MB
|
max-size: "10m" # Rotate log files after they reach 10MB
|
||||||
max-file: "3" # Keep a maximum of 3 log files
|
max-file: "3" # Keep a maximum of 3 log files
|
||||||
@@ -94,6 +109,9 @@ Run or re-run it:
|
|||||||
docker compose up --force-recreate
|
docker compose up --force-recreate
|
||||||
```
|
```
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Runtime UID/GID: The image ships with a service user `netalertx` (UID/GID 20211) and a readonly lock owner also at 20211 for 004/005 immutability. If you override the runtime user (compose `user:` or `NETALERTX_UID/GID` vars), ensure your `/data` volume and tmpfs mounts use matching `uid/gid` so startup checks and writable paths succeed.
|
||||||
|
|
||||||
### Customize with Environmental Variables
|
### Customize with Environmental Variables
|
||||||
|
|
||||||
You can override the default settings by passing environmental variables to the `docker compose up` command.
|
You can override the default settings by passing environmental variables to the `docker compose up` command.
|
||||||
@@ -168,10 +186,6 @@ Now, any files created by NetAlertX in `/data/config` will appear in your `/loca
|
|||||||
|
|
||||||
This same method works for mounting other things, like custom plugins or enterprise NGINX files, as shown in the commented-out examples in the baseline file.
|
This same method works for mounting other things, like custom plugins or enterprise NGINX files, as shown in the commented-out examples in the baseline file.
|
||||||
|
|
||||||
## Example Configuration Summaries
|
|
||||||
|
|
||||||
Here are the essential modifications for common alternative setups.
|
|
||||||
|
|
||||||
### Example 2: External `.env` File for Paths
|
### Example 2: External `.env` File for Paths
|
||||||
|
|
||||||
This method is useful for keeping your paths and other settings separate from your main compose file, making it more portable.
|
This method is useful for keeping your paths and other settings separate from your main compose file, making it more portable.
|
||||||
|
|||||||
@@ -1,13 +1,14 @@
|
|||||||
[](https://hub.docker.com/r/jokobsk/netalertx)
|
[](https://hub.docker.com/r/jokobsk/netalertx)
|
||||||
[](https://hub.docker.com/r/jokobsk/netalertx)
|
[](https://hub.docker.com/r/jokobsk/netalertx)
|
||||||
[](https://github.com/jokob-sk/NetAlertX/releases)
|
[](https://github.com/netalertx/NetAlertX/releases)
|
||||||
[](https://discord.gg/NczTUTWyRr)
|
[](https://discord.gg/NczTUTWyRr)
|
||||||
[](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
[](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
||||||
|
|
||||||
# NetAlertX - Network scanner & notification framework
|
# NetAlertX - Network Visibility & Asset Intelligence Framework
|
||||||
|
|
||||||
| [📑 Docker guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_INSTALLATION.md) | [🚀 Releases](https://github.com/jokob-sk/NetAlertX/releases) | [📚 Docs](https://jokob-sk.github.io/NetAlertX/) | [🔌 Plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md) | [🤖 Ask AI](https://gurubase.io/g/netalertx)
|
---
|
||||||
|----------------------| ----------------------| ----------------------| ----------------------| ----------------------|
|
### || [Docker guide](https://docs.netalertx.com/DOCKER_INSTALLATION) || [Releases](https://github.com/netalertx/NetAlertX/releases) || [Docs](https://docs.netalertx.com/) || [Plugins](https://docs.netalertx.com/PLUGINS) || [Website](https://netalertx.com)
|
||||||
|
---
|
||||||
|
|
||||||
<a href="https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/docs/img/GENERAL/github_social_image.jpg" target="_blank">
|
<a href="https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/docs/img/GENERAL/github_social_image.jpg" target="_blank">
|
||||||
<img src="https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/docs/img/GENERAL/github_social_image.jpg" width="1000px" />
|
<img src="https://raw.githubusercontent.com/jokob-sk/NetAlertX/main/docs/img/GENERAL/github_social_image.jpg" width="1000px" />
|
||||||
@@ -16,24 +17,26 @@
|
|||||||
Head to [https://netalertx.com/](https://netalertx.com/) for more gifs and screenshots 📷.
|
Head to [https://netalertx.com/](https://netalertx.com/) for more gifs and screenshots 📷.
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> There is also an experimental 🧪 [bare-metal install](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HW_INSTALL.md) method available.
|
> There is also an experimental 🧪 [bare-metal install](https://docs.netalertx.com/HW_INSTALL) method available.
|
||||||
|
|
||||||
## 📕 Basic Usage
|
## 📕 Basic Usage
|
||||||
|
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
> You will have to run the container on the `host` network and specify `SCAN_SUBNETS` unless you use other [plugin scanners](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md). The initial scan can take a few minutes, so please wait 5-10 minutes for the initial discovery to finish.
|
> You will have to run the container on the `host` network and specify `SCAN_SUBNETS` unless you use other [plugin scanners](https://docs.netalertx.com/PLUGINS). The initial scan can take a few minutes, so please wait 5-10 minutes for the initial discovery to finish.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run -d --rm --network=host \
|
docker run -d --rm --network=host \
|
||||||
-v /local_data_dir:/data \
|
-v /local_data_dir:/data \
|
||||||
-v /etc/localtime:/etc/localtime \
|
-v /etc/localtime:/etc/localtime \
|
||||||
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
|
--tmpfs /tmp:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700 \
|
||||||
-e PORT=20211 \
|
-e PORT=20211 \
|
||||||
-e APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"} \
|
-e APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"} \
|
||||||
ghcr.io/jokob-sk/netalertx:latest
|
ghcr.io/netalertx/netalertx:latest
|
||||||
```
|
```
|
||||||
|
|
||||||
See alternative [docker-compose examples](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_COMPOSE.md).
|
> Runtime UID/GID: The image defaults to a service user `netalertx` (UID/GID 20211). A separate readonly lock owner also uses UID/GID 20211 for 004/005 immutability. You can override the runtime UID/GID at build (ARG) or run (`--user` / compose `user:`) but must align writable mounts (`/data`, `/tmp*`) and tmpfs `uid/gid` to that choice.
|
||||||
|
|
||||||
|
See alternative [docker-compose examples](https://docs.netalertx.com/DOCKER_COMPOSE).
|
||||||
|
|
||||||
### Default ports
|
### Default ports
|
||||||
|
|
||||||
@@ -44,11 +47,13 @@ See alternative [docked-compose examples](https://github.com/jokob-sk/NetAlertX/
|
|||||||
|
|
||||||
### Docker environment variables
|
### Docker environment variables
|
||||||
|
|
||||||
| Variable | Description | Example Value |
|
| Variable | Description | Example/Default Value |
|
||||||
| :------------- |:------------------------| -----:|
|
| :------------- |:------------------------| -----:|
|
||||||
|
| `PUID` |Runtime UID override, set to `0` to run as root. | `20211` |
|
||||||
|
| `PGID` |Runtime GID override | `20211` |
|
||||||
| `PORT` |Port of the web interface | `20211` |
|
| `PORT` |Port of the web interface | `20211` |
|
||||||
| `LISTEN_ADDR` |Set the specific IP Address for the listener address for the nginx webserver (web interface). This could be useful when using multiple subnets to hide the web interface from all untrusted networks. | `0.0.0.0` |
|
| `LISTEN_ADDR` |Set the specific IP Address for the listener address for the nginx webserver (web interface). This could be useful when using multiple subnets to hide the web interface from all untrusted networks. | `0.0.0.0` |
|
||||||
|`LOADED_PLUGINS` | Default [plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md) to load. Plugins cannot be loaded with `APP_CONF_OVERRIDE`, you need to use this variable instead and then specify the plugins settings with `APP_CONF_OVERRIDE`. | `["PIHOLE","ASUSWRT"]` |
|
|`LOADED_PLUGINS` | Default [plugins](https://docs.netalertx.com/PLUGINS) to load. Plugins cannot be loaded with `APP_CONF_OVERRIDE`, you need to use this variable instead and then specify the plugins settings with `APP_CONF_OVERRIDE`. | `["PIHOLE","ASUSWRT"]` |
|
||||||
|`APP_CONF_OVERRIDE` | JSON override for settings (except `LOADED_PLUGINS`). | `{"SCAN_SUBNETS":"['192.168.1.0/24 --interface=eth1']","GRAPHQL_PORT":"20212"}` |
|
|`APP_CONF_OVERRIDE` | JSON override for settings (except `LOADED_PLUGINS`). | `{"SCAN_SUBNETS":"['192.168.1.0/24 --interface=eth1']","GRAPHQL_PORT":"20212"}` |
|
||||||
|`ALWAYS_FRESH_INSTALL` | ⚠ If `true` will delete the content of the `/db` & `/config` folders. For testing purposes. Can be coupled with [watchtower](https://github.com/containrrr/watchtower) to have an always freshly installed `netalertx`/`netalertx-dev` image. | `true` |
|
|`ALWAYS_FRESH_INSTALL` | ⚠ If `true` will delete the content of the `/db` & `/config` folders. For testing purposes. Can be coupled with [watchtower](https://github.com/containrrr/watchtower) to have an always freshly installed `netalertx`/`netalertx-dev` image. | `true` |
|
||||||
|
|
||||||
@@ -57,16 +62,16 @@ See alternative [docked-compose examples](https://github.com/jokob-sk/NetAlertX/
|
|||||||
### Docker paths
|
### Docker paths
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> See also [Backup strategies](https://github.com/jokob-sk/NetAlertX/blob/main/docs/BACKUPS.md).
|
> See also [Backup strategies](https://docs.netalertx.com/BACKUPS).
|
||||||
|
|
||||||
| Required | Path | Description |
|
| Required | Path | Description |
|
||||||
| :------------- | :------------- | :-------------|
|
| :------------- | :------------- | :-------------|
|
||||||
| ✅ | `:/data` | Folder which needs to contain a `/db` and `/config` sub-folders. |
|
| ✅ | `:/data` | Folder which needs to contain a `/db` and `/config` sub-folders. |
|
||||||
| ✅ | `/etc/localtime:/etc/localtime:ro` | Ensuring the timezone is the same as on the server. |
|
| ✅ | `/etc/localtime:/etc/localtime:ro` | Ensuring the timezone is the same as on the server. |
|
||||||
| | `:/tmp/log` | Logs folder useful for debugging if you have issues setting up the container |
|
| | `:/tmp/log` | Logs folder useful for debugging if you have issues setting up the container |
|
||||||
| | `:/tmp/api` | The [API endpoint](https://github.com/jokob-sk/NetAlertX/blob/main/docs/API.md) containing static (but regularly updated) json and other files. Path configurable via `NETALERTX_API` environment variable. |
|
| | `:/tmp/api` | The [API endpoint](https://docs.netalertx.com/API) containing static (but regularly updated) json and other files. Path configurable via `NETALERTX_API` environment variable. |
|
||||||
| | `:/app/front/plugins/<plugin>/ignore_plugin` | Map a file `ignore_plugin` to ignore a plugin. Plugins can be soft-disabled via settings. More in the [Plugin docs](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md). |
|
| | `:/app/front/plugins/<plugin>/ignore_plugin` | Map a file `ignore_plugin` to ignore a plugin. Plugins can be soft-disabled via settings. More in the [Plugin docs](https://docs.netalertx.com/PLUGINS). |
|
||||||
| | `:/etc/resolv.conf` | Use a custom `resolv.conf` file for [better name resolution](https://github.com/jokob-sk/NetAlertX/blob/main/docs/REVERSE_DNS.md). |
|
| | `:/etc/resolv.conf` | Use a custom `resolv.conf` file for [better name resolution](https://docs.netalertx.com/REVERSE_DNS). |
|
||||||
|
|
||||||
### Folder structure
|
### Folder structure
|
||||||
|
|
||||||
@@ -83,40 +88,41 @@ data
|
|||||||
If you are facing permissions issues run the following commands on your server. This will change the owner and assure sufficient access to the database and config files that are stored in the `/local_data_dir/db` and `/local_data_dir/config` folders (replace `local_data_dir` with the location where your `/db` and `/config` folders are located).
|
If you are facing permissions issues run the following commands on your server. This will change the owner and assure sufficient access to the database and config files that are stored in the `/local_data_dir/db` and `/local_data_dir/config` folders (replace `local_data_dir` with the location where your `/db` and `/config` folders are located).
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo chown -R 20211:20211 /local_data_dir
|
# Use the runtime UID/GID you intend to run with (default 20211:20211)
|
||||||
|
sudo chown -R ${NETALERTX_UID:-20211}:${NETALERTX_GID:-20211} /local_data_dir
|
||||||
sudo chmod -R a+rwx /local_data_dir
|
sudo chmod -R a+rwx /local_data_dir
|
||||||
```
|
```
|
||||||
|
|
||||||
### Initial setup
|
### Initial setup
|
||||||
|
|
||||||
- If unavailable, the app generates a default `app.conf` and `app.db` file on the first run.
|
- If unavailable, the app generates a default `app.conf` and `app.db` file on the first run.
|
||||||
- The preferred way is to manage the configuration via the Settings section in the UI, if UI is inaccessible you can modify [app.conf](https://github.com/jokob-sk/NetAlertX/tree/main/back) in the `/data/config/` folder directly
|
- The preferred way is to manage the configuration via the Settings section in the UI, if UI is inaccessible you can modify [app.conf](https://github.com/netalertx/NetAlertX/tree/main/back) in the `/data/config/` folder directly
|
||||||
|
|
||||||
|
|
||||||
#### Setting up scanners
|
#### Setting up scanners
|
||||||
|
|
||||||
You have to specify which network(s) should be scanned. This is done by entering subnets that are accessible from the host. If you use the default `ARPSCAN` plugin, you have to specify at least one valid subnet and interface in the `SCAN_SUBNETS` setting. See the documentation on [How to set up multiple SUBNETS, VLANs and what are limitations](https://github.com/jokob-sk/NetAlertX/blob/main/docs/SUBNETS.md) for troubleshooting and more advanced scenarios.
|
You have to specify which network(s) should be scanned. This is done by entering subnets that are accessible from the host. If you use the default `ARPSCAN` plugin, you have to specify at least one valid subnet and interface in the `SCAN_SUBNETS` setting. See the documentation on [How to set up multiple SUBNETS, VLANs and what are limitations](https://docs.netalertx.com/SUBNETS) for troubleshooting and more advanced scenarios.
|
||||||
|
|
||||||
If you are running PiHole you can synchronize devices directly. Check the [PiHole configuration guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PIHOLE_GUIDE.md) for details.
|
If you are running PiHole you can synchronize devices directly. Check the [PiHole configuration guide](https://docs.netalertx.com/PIHOLE_GUIDE) for details.
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> You can bulk-import devices via the [CSV import method](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEVICES_BULK_EDITING.md).
|
> You can bulk-import devices via the [CSV import method](https://docs.netalertx.com/DEVICES_BULK_EDITING).
|
||||||
|
|
||||||
#### Community guides
|
#### Community guides
|
||||||
|
|
||||||
You can read or watch several [community configuration guides](https://github.com/jokob-sk/NetAlertX/blob/main/docs/COMMUNITY_GUIDES.md) in Chinese, Korean, German, or French.
|
You can read or watch several [community configuration guides](https://docs.netalertx.com/COMMUNITY_GUIDES) in Chinese, Korean, German, or French.
|
||||||
|
|
||||||
> Please note these might be outdated. Rely on official documentation first.
|
> Please note these might be outdated. Rely on official documentation first.
|
||||||
|
|
||||||
#### Common issues
|
#### Common issues
|
||||||
|
|
||||||
- Before creating a new issue, please check if a similar issue was [already resolved](https://github.com/jokob-sk/NetAlertX/issues?q=is%3Aissue+is%3Aclosed).
|
- Before creating a new issue, please check if a similar issue was [already resolved](https://github.com/netalertx/NetAlertX/issues?q=is%3Aissue+is%3Aclosed).
|
||||||
- Check also common issues and [debugging tips](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEBUG_TIPS.md).
|
- Check also common issues and [debugging tips](https://docs.netalertx.com/DEBUG_TIPS).
|
||||||
|
|
||||||
## 💙 Support me
|
## 💙 Support me
|
||||||
|
|
||||||
| [](https://github.com/sponsors/jokob-sk) | [](https://www.buymeacoffee.com/jokobsk) | [](https://www.patreon.com/user?u=84385063) |
|
| [](https://github.com/sponsors/jokob-sk) | [](https://www.buymeacoffee.com/jokobsk) |
|
||||||
| --- | --- | --- |
|
| --- | --- |
|
||||||
|
|
||||||
- Bitcoin: `1N8tupjeCK12qRVU2XrV17WvKK7LCawyZM`
|
- Bitcoin: `1N8tupjeCK12qRVU2XrV17WvKK7LCawyZM`
|
||||||
- Ethereum: `0x6e2749Cb42F4411bc98501406BdcD82244e3f9C7`
|
- Ethereum: `0x6e2749Cb42F4411bc98501406BdcD82244e3f9C7`
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
# The NetAlertX Container Operator's Guide
|
# The NetAlertX Container Operator's Guide
|
||||||
|
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
> ⚠️ **Important:** The docker-compose has recently changed. Carefully read the [Migration guide](https://jokob-sk.github.io/NetAlertX/MIGRATION/?h=migrat#12-migration-from-netalertx-v25524) for detailed instructions.
|
> ⚠️ **Important:** The docker-compose has recently changed. Carefully read the [Migration guide](https://docs.netalertx.com/MIGRATION/?h=migrat#12-migration-from-netalertx-v25524) for detailed instructions.
|
||||||
|
|
||||||
This guide assumes you are starting with the official `docker-compose.yml` file provided with the project. We strongly recommend you start with or migrate to this file as your baseline and modify it to suit your specific needs (e.g., changing file paths). While there are many ways to configure NetAlertX, the default file is designed to meet the mandatory security baseline with layer-2 networking capabilities while operating securely and without startup warnings.
|
This guide assumes you are starting with the official `docker-compose.yml` file provided with the project. We strongly recommend you start with or migrate to this file as your baseline and modify it to suit your specific needs (e.g., changing file paths). While there are many ways to configure NetAlertX, the default file is designed to meet the mandatory security baseline with layer-2 networking capabilities while operating securely and without startup warnings.
|
||||||
|
|
||||||
|
|||||||
@@ -35,9 +35,9 @@ services:
|
|||||||
netalertx:
|
netalertx:
|
||||||
container_name: netalertx
|
container_name: netalertx
|
||||||
# Use this line for stable release
|
# Use this line for stable release
|
||||||
image: "ghcr.io/jokob-sk/netalertx:latest"
|
image: "ghcr.io/netalertx/netalertx:latest"
|
||||||
# Or, use this for the latest development build
|
# Or, use this for the latest development build
|
||||||
# image: "ghcr.io/jokob-sk/netalertx-dev:latest"
|
# image: "ghcr.io/netalertx/netalertx-dev:latest"
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
cap_drop: # Drop all capabilities for enhanced security
|
cap_drop: # Drop all capabilities for enhanced security
|
||||||
@@ -46,6 +46,9 @@ services:
|
|||||||
- NET_RAW
|
- NET_RAW
|
||||||
- NET_ADMIN
|
- NET_ADMIN
|
||||||
- NET_BIND_SERVICE
|
- NET_BIND_SERVICE
|
||||||
|
- CHOWN
|
||||||
|
- SETUID
|
||||||
|
- SETGID
|
||||||
volumes:
|
volumes:
|
||||||
- ${APP_FOLDER}/netalertx/config:/data/config
|
- ${APP_FOLDER}/netalertx/config:/data/config
|
||||||
- ${APP_FOLDER}/netalertx/db:/data/db
|
- ${APP_FOLDER}/netalertx/db:/data/db
|
||||||
@@ -69,6 +72,13 @@ In the **Environment variables** section of Portainer, add the following:
|
|||||||
* `PORT=22022` (or another port if needed)
|
* `PORT=22022` (or another port if needed)
|
||||||
* `APP_CONF_OVERRIDE={"GRAPHQL_PORT":"22023"}` (optional advanced settings, otherwise the backend API server PORT defaults to `20212`)
|
* `APP_CONF_OVERRIDE={"GRAPHQL_PORT":"22023"}` (optional advanced settings, otherwise the backend API server PORT defaults to `20212`)
|
||||||
|
|
||||||
|
Additional environment variables (advanced / testing):
|
||||||
|
|
||||||
|
* `SKIP_TESTS=1` — when set, the container entrypoint will skip all startup checks and print the message `Skipping startup checks as SKIP_TESTS is set.`. Useful for automated test runs or CI where the container should not perform environment-specific checks.
|
||||||
|
* `SKIP_STARTUP_CHECKS="<check names>"` — space-delimited list of specific startup checks to skip. Names are the human-friendly names derived from files in `/entrypoint.d` (remove the leading numeric prefix and file extension). Example: `SKIP_STARTUP_CHECKS="mandatory folders"` will skip `30-mandatory-folders.sh`.
|
||||||
|
|
||||||
|
Note: these variables are primarily useful for non-production scenarios (testing, CI, or specific deployments) and are processed by the entrypoint scripts. See `entrypoint.sh` and `entrypoint.d/*` for exact behaviour and available check names.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 5. Ensure permissions
|
## 5. Ensure permissions
|
||||||
|
|||||||
@@ -1,5 +1,9 @@
|
|||||||
# Docker Swarm Deployment Guide (IPvlan)
|
# Docker Swarm Deployment Guide (IPvlan)
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> This is community-contributed. Due to environment, setup, or networking differences, results may vary. Please open a PR to improve it instead of creating an issue, as the maintainer is not actively maintaining it.
|
||||||
|
|
||||||
|
|
||||||
This guide describes how to deploy **NetAlertX** in a **Docker Swarm** environment using an `ipvlan` network. This enables the container to receive a LAN IP address directly, which is ideal for network monitoring.
|
This guide describes how to deploy **NetAlertX** in a **Docker Swarm** environment using an `ipvlan` network. This enables the container to receive a LAN IP address directly, which is ideal for network monitoring.
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -40,7 +44,7 @@ Use the following Compose snippet to deploy NetAlertX with a **static LAN IP** a
|
|||||||
```yaml
|
```yaml
|
||||||
services:
|
services:
|
||||||
netalertx:
|
netalertx:
|
||||||
image: ghcr.io/jokob-sk/netalertx:latest
|
image: ghcr.io/netalertx/netalertx:latest
|
||||||
...
|
...
|
||||||
networks:
|
networks:
|
||||||
swarm-ipvlan:
|
swarm-ipvlan:
|
||||||
@@ -68,4 +72,3 @@ networks:
|
|||||||
* Make sure the assigned IP (`192.168.1.240` above) is not in use or managed by DHCP.
|
* Make sure the assigned IP (`192.168.1.240` above) is not in use or managed by DHCP.
|
||||||
* You may also use a node label constraint instead of `node.role == manager` for more control.
|
* You may also use a node label constraint instead of `node.role == manager` for more control.
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user