mirror of
https://github.com/jokob-sk/NetAlertX.git
synced 2026-03-30 23:03:03 -07:00
Compare commits
949 Commits
4cec88aaad
...
next_relea
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3f80d2e57f | ||
|
|
b18cf98266 | ||
|
|
77369c3ce8 | ||
|
|
cd0a3f6de0 | ||
|
|
13e91731be | ||
|
|
7ef19b1c12 | ||
|
|
4daead1f8f | ||
|
|
48454f6f2f | ||
|
|
7305fd78e3 | ||
|
|
ec3e4c8988 | ||
|
|
250e533655 | ||
|
|
37730301f4 | ||
|
|
7278ee8cfa | ||
|
|
fa22523a0b | ||
|
|
7569923481 | ||
|
|
d7c7bd2cd2 | ||
|
|
b311113575 | ||
|
|
43984132c4 | ||
|
|
0a7ecb5b7c | ||
|
|
c7399215ec | ||
|
|
0bb6db155b | ||
|
|
7221b4ba96 | ||
|
|
c4904739b2 | ||
|
|
67cab9d606 | ||
|
|
f75c53fc5d | ||
|
|
bff87f4d61 | ||
|
|
6f7d2c3253 | ||
|
|
0766fb2de6 | ||
|
|
d19cb3d679 | ||
|
|
9b71c210b2 | ||
|
|
c9cb1f3fba | ||
|
|
78a8030c6a | ||
|
|
b5b0bcc766 | ||
|
|
13515603e4 | ||
|
|
518608cffc | ||
|
|
df3ca50c5c | ||
|
|
93fc126da2 | ||
|
|
a60ec9ed3a | ||
|
|
e1d206ca74 | ||
|
|
2771a6e9c2 | ||
|
|
aba1ddd3df | ||
|
|
165c9d3baa | ||
|
|
0b0c88f712 | ||
|
|
d49abd9d02 | ||
|
|
abf024d4d3 | ||
|
|
4eb5947ceb | ||
|
|
1d1a8045a0 | ||
|
|
f8c09d35a7 | ||
|
|
d8d090404e | ||
|
|
5a6de6d832 | ||
|
|
05b63cb730 | ||
|
|
2921614eac | ||
|
|
17d95d802f | ||
|
|
a0048980b8 | ||
|
|
89811cd133 | ||
|
|
b854206599 | ||
|
|
a532c98115 | ||
|
|
da23880eb1 | ||
|
|
c73ce839f2 | ||
|
|
5c0f29b97c | ||
|
|
f1496b483b | ||
|
|
ba26f34191 | ||
|
|
37f8a44cb3 | ||
|
|
76a259d9e5 | ||
|
|
1923a063f0 | ||
|
|
01b6b9f04a | ||
|
|
ea77112315 | ||
|
|
b19973130e | ||
|
|
ffbcc2ad25 | ||
|
|
c533c2267c | ||
|
|
ac407bd86e | ||
|
|
1da3c146d2 | ||
|
|
9fe8090a1b | ||
|
|
3ba1b69c1e | ||
|
|
da4d8a9675 | ||
|
|
0f20fb38f0 | ||
|
|
8361f0ac99 | ||
|
|
99de69e30d | ||
|
|
4637ec6350 | ||
|
|
2a4e6ba5e1 | ||
|
|
5be7bbe07d | ||
|
|
e8c43af7b6 | ||
|
|
27f34963be | ||
|
|
594c2fe015 | ||
|
|
14362d20bd | ||
|
|
4f239be8a3 | ||
|
|
5a65d807a8 | ||
|
|
f3bf37bb24 | ||
|
|
b7e1cb1f9d | ||
|
|
b4510663f7 | ||
|
|
dd564b235b | ||
|
|
04db68ea6c | ||
|
|
550f59b34f | ||
|
|
6e8a3d8a58 | ||
|
|
c89b2ded26 | ||
|
|
9f964be0c3 | ||
|
|
d2bc8410a7 | ||
|
|
ab74307ed1 | ||
|
|
8ab9d9f395 | ||
|
|
c1d53ff93f | ||
|
|
a329c5b541 | ||
|
|
0555105473 | ||
|
|
b0aa5d0e45 | ||
|
|
93df52f70c | ||
|
|
95f411d92a | ||
|
|
bc4f419927 | ||
|
|
3a73817048 | ||
|
|
67aa46f1cf | ||
|
|
da63acb675 | ||
|
|
50125f0700 | ||
|
|
6724d250d4 | ||
|
|
3e237bb452 | ||
|
|
15807b7ab9 | ||
|
|
0497c2891e | ||
|
|
8e6efc3008 | ||
|
|
deb0d16c3d | ||
|
|
a94f3d7222 | ||
|
|
d9608b4760 | ||
|
|
584aba2c7b | ||
|
|
ea5585a8ef | ||
|
|
c1adfd35f3 | ||
|
|
66532c54a1 | ||
|
|
a6ce4174fe | ||
|
|
247a967e9b | ||
|
|
dbe65b2a27 | ||
|
|
563cb4ba20 | ||
|
|
3d4aba4b39 | ||
|
|
b96ace0447 | ||
|
|
e15c68d189 | ||
|
|
f5e411d5d5 | ||
|
|
f727580798 | ||
|
|
11499a6890 | ||
|
|
85badb0760 | ||
|
|
814ba02d1c | ||
|
|
e57fd2e81e | ||
|
|
4dc2a63ebb | ||
|
|
6b320877ec | ||
|
|
43667a3bc4 | ||
|
|
4d0b7c944f | ||
|
|
9894009455 | ||
|
|
0e18e34918 | ||
|
|
d9c263d506 | ||
|
|
58e32a5b43 | ||
|
|
24e2036bde | ||
|
|
b74b803d6c | ||
|
|
173ffbe3b2 | ||
|
|
d2ebe0d452 | ||
|
|
4c0d5c7376 | ||
|
|
686a713aa8 | ||
|
|
9d64665599 | ||
|
|
63cef590d6 | ||
|
|
00042ab594 | ||
|
|
786cc5ee33 | ||
|
|
0b32a06178 | ||
|
|
1fa381429d | ||
|
|
fae61174a7 | ||
|
|
d06301ac80 | ||
|
|
f4bc9c93c3 | ||
|
|
0172ab4311 | ||
|
|
f1fc9f24b1 | ||
|
|
c192f2c032 | ||
|
|
a309f99c3d | ||
|
|
54e9d52126 | ||
|
|
8fc78f02e9 | ||
|
|
123f715241 | ||
|
|
446545e7eb | ||
|
|
14625926f9 | ||
|
|
c7e754966e | ||
|
|
4316a436eb | ||
|
|
fe22659794 | ||
|
|
cb0b3b607d | ||
|
|
53b2596902 | ||
|
|
1a364e2fe2 | ||
|
|
2f1e5068e3 | ||
|
|
57118bc9bd | ||
|
|
25a81556e3 | ||
|
|
39f617be5f | ||
|
|
c4c966ffa7 | ||
|
|
f88aefe022 | ||
|
|
54db347b94 | ||
|
|
2ae87fca38 | ||
|
|
8224363c45 | ||
|
|
eb399ec193 | ||
|
|
70645e7ef3 | ||
|
|
0e94dcb091 | ||
|
|
a26137800d | ||
|
|
63810bc536 | ||
|
|
57d451fcf4 | ||
|
|
bf6218e836 | ||
|
|
e9efabd562 | ||
|
|
eb0f705587 | ||
|
|
2559702a6a | ||
|
|
6bbfc0637c | ||
|
|
688d49b5ae | ||
|
|
ab7df4384e | ||
|
|
2018636bf8 | ||
|
|
50f341e84f | ||
|
|
32c21b01bb | ||
|
|
05c332867b | ||
|
|
12b0d911ff | ||
|
|
04884a264b | ||
|
|
2742414123 | ||
|
|
876cd4bbe1 | ||
|
|
91775deaa3 | ||
|
|
7075091569 | ||
|
|
f63658af7d | ||
|
|
774c123804 | ||
|
|
32e2d571a0 | ||
|
|
f2af4ffdb8 | ||
|
|
bc97a80375 | ||
|
|
fa36adb015 | ||
|
|
264cae3338 | ||
|
|
b594472f30 | ||
|
|
6d98ee9c2a | ||
|
|
1181b56b16 | ||
|
|
4b58f3b23f | ||
|
|
e61bf097ac | ||
|
|
64dbf8a3ba | ||
|
|
5685a67483 | ||
|
|
c1e6a69e05 | ||
|
|
3587169791 | ||
|
|
fd71527b09 | ||
|
|
9676111ceb | ||
|
|
60036a49c2 | ||
|
|
60ccfc734d | ||
|
|
c91532f3de | ||
|
|
aeaab6d408 | ||
|
|
5e492bc81e | ||
|
|
db689ac269 | ||
|
|
bb39bde9dd | ||
|
|
46781ed71a | ||
|
|
a313b0ccc5 | ||
|
|
2765e441a5 | ||
|
|
eb35e80916 | ||
|
|
4e7df766eb | ||
|
|
e741ff51b5 | ||
|
|
a81255fb18 | ||
|
|
5caa240fcd | ||
|
|
888d39d2fb | ||
|
|
b57d36607a | ||
|
|
70c3530a5c | ||
|
|
7af850cb56 | ||
|
|
9ac8f6fe34 | ||
|
|
933004e792 | ||
|
|
45157b6156 | ||
|
|
a560009611 | ||
|
|
e0d4e9ea9c | ||
|
|
249d12ded4 | ||
|
|
e899f657c5 | ||
|
|
3036cd04fc | ||
|
|
3d3abe7e53 | ||
|
|
a088f4580a | ||
|
|
75c7d6c015 | ||
|
|
d434cc5315 | ||
|
|
cedbd59897 | ||
|
|
b703397543 | ||
|
|
9c4e02f565 | ||
|
|
3510afec7a | ||
|
|
ed44c68d54 | ||
|
|
30c832b14e | ||
|
|
d7f17c8e78 | ||
|
|
8538c87fef | ||
|
|
1bacb59044 | ||
|
|
827b5d2ad3 | ||
|
|
e70bbdb78e | ||
|
|
946ad00253 | ||
|
|
3734c43284 | ||
|
|
0ce4e5f70c | ||
|
|
6bc2de6e24 | ||
|
|
09b42166cc | ||
|
|
dbe490a042 | ||
|
|
5996e70f60 | ||
|
|
15366a7f2e | ||
|
|
d5d1684ef9 | ||
|
|
c1141fc9a8 | ||
|
|
d38dcda35b | ||
|
|
ac5224747e | ||
|
|
5c23bde21c | ||
|
|
8e83d9b67d | ||
|
|
30c004eb77 | ||
|
|
c074ce1b11 | ||
|
|
5e40ea83d9 | ||
|
|
2124c2e1e2 | ||
|
|
1b6dc94bae | ||
|
|
76d37edc63 | ||
|
|
984b5cd780 | ||
|
|
a8ec97d782 | ||
|
|
5b64c96065 | ||
|
|
7cb17286db | ||
|
|
433600d36c | ||
|
|
250b5a3f51 | ||
|
|
50e74076bb | ||
|
|
1139e0e190 | ||
|
|
7caa6a1949 | ||
|
|
b87a8d683e | ||
|
|
a1a6c7e1cf | ||
|
|
8211816b37 | ||
|
|
0f0a09fb28 | ||
|
|
5081767b6e | ||
|
|
81202ce07e | ||
|
|
22bb936f16 | ||
|
|
034ee688fb | ||
|
|
fe7e91c515 | ||
|
|
f7fa857cae | ||
|
|
1a9ae626e5 | ||
|
|
7b22c0a5dd | ||
|
|
36d5f5b434 | ||
|
|
a70354997d | ||
|
|
9ca5375652 | ||
|
|
f43517b9a5 | ||
|
|
5095edd5d8 | ||
|
|
dc6b57a581 | ||
|
|
b2501d98a5 | ||
|
|
8a5d3b1548 | ||
|
|
bc46cba528 | ||
|
|
92029badaa | ||
|
|
f726820883 | ||
|
|
b45804f177 | ||
|
|
6d03d58c78 | ||
|
|
39637350b3 | ||
|
|
0b104caf7a | ||
|
|
0ac0dccba1 | ||
|
|
016e1d89af | ||
|
|
96687058ed | ||
|
|
d52799a49e | ||
|
|
db8a086c42 | ||
|
|
6f64a96baf | ||
|
|
e592bdaf9e | ||
|
|
f91d897787 | ||
|
|
2954b929a6 | ||
|
|
d6457a53a0 | ||
|
|
900e418be9 | ||
|
|
56ba8864da | ||
|
|
4c9c89050b | ||
|
|
87b15fbeb9 | ||
|
|
9d0627c5c3 | ||
|
|
77fd017d90 | ||
|
|
d3b3f8babb | ||
|
|
53962bc38b | ||
|
|
d404c45843 | ||
|
|
53c7cea690 | ||
|
|
7056bcbba0 | ||
|
|
f52a7c112a | ||
|
|
a41111c5f7 | ||
|
|
596f52f097 | ||
|
|
c201a83474 | ||
|
|
371fb04710 | ||
|
|
53f7a71286 | ||
|
|
604bbbaa5b | ||
|
|
0c08659d65 | ||
|
|
7aa547ed90 | ||
|
|
5a49b97821 | ||
|
|
42be7c4263 | ||
|
|
4506aa3b1f | ||
|
|
cc8a695943 | ||
|
|
a6f9b56abb | ||
|
|
8dfc0e096c | ||
|
|
8640b8c282 | ||
|
|
405c1c37cb | ||
|
|
ad6c3fe176 | ||
|
|
e1059b6937 | ||
|
|
1e1d4cd045 | ||
|
|
a868a7ed8e | ||
|
|
ed4e0388cc | ||
|
|
fa40880c05 | ||
|
|
2d6e357fe5 | ||
|
|
6244daebcf | ||
|
|
17e563aa29 | ||
|
|
37d90414fb | ||
|
|
2211419c5b | ||
|
|
229ea770cb | ||
|
|
52ac9fce41 | ||
|
|
fe6598b9af | ||
|
|
f54ba4817e | ||
|
|
a95b635601 | ||
|
|
1011652959 | ||
|
|
928317d16f | ||
|
|
e126e1f85f | ||
|
|
596a30fe01 | ||
|
|
d748480e66 | ||
|
|
1f5d6f96a4 | ||
|
|
2086e78a39 | ||
|
|
7faaa630a1 | ||
|
|
46d866b5ee | ||
|
|
af2a89f4ff | ||
|
|
e649bcfe25 | ||
|
|
dc2a56aac3 | ||
|
|
0fd3bd6974 | ||
|
|
14a92ad2f8 | ||
|
|
6eba0314fe | ||
|
|
8ac5b14403 | ||
|
|
09a809985b | ||
|
|
29a8cf0294 | ||
|
|
0df9759606 | ||
|
|
c474d12cc0 | ||
|
|
c05e7c72ee | ||
|
|
5dba6bf292 | ||
|
|
6388afbb1e | ||
|
|
b4348c18b6 | ||
|
|
1ed9082123 | ||
|
|
db95f2c6c0 | ||
|
|
d9602da975 | ||
|
|
12cebbb483 | ||
|
|
ecd0ca89c7 | ||
|
|
f202b506c3 | ||
|
|
6916cd7611 | ||
|
|
cc55e58efb | ||
|
|
f65aafa2c0 | ||
|
|
0b8f3887c0 | ||
|
|
2bd80d19db | ||
|
|
fed621f690 | ||
|
|
bc40ecd2c0 | ||
|
|
5a11c3738d | ||
|
|
f144f65f45 | ||
|
|
e46f556df7 | ||
|
|
3d82af8cbc | ||
|
|
19b40de1de | ||
|
|
31530fb46e | ||
|
|
46bbc6e335 | ||
|
|
07b5b5cf56 | ||
|
|
54a481f459 | ||
|
|
9d6004d23d | ||
|
|
c3d3826448 | ||
|
|
6cfc5efb88 | ||
|
|
67b307f0e7 | ||
|
|
f0960d2b84 | ||
|
|
5fd789f295 | ||
|
|
72c29a0d2d | ||
|
|
fe6aa55419 | ||
|
|
973de8d407 | ||
|
|
7324047f64 | ||
|
|
a9c323b4a9 | ||
|
|
a6a9540979 | ||
|
|
108c26440a | ||
|
|
c162030fb8 | ||
|
|
cf919e6b27 | ||
|
|
8b1fe734c4 | ||
|
|
d24411fa53 | ||
|
|
f173325b7b | ||
|
|
5d28f49165 | ||
|
|
148bee3ed5 | ||
|
|
c0f4fe9e12 | ||
|
|
858868b5f2 | ||
|
|
4ae94f4644 | ||
|
|
3288eef048 | ||
|
|
d56875c73b | ||
|
|
bb1061192e | ||
|
|
a5fc49027a | ||
|
|
76d63de9d6 | ||
|
|
7432cddc9b | ||
|
|
ad3bfbade0 | ||
|
|
2e91e5eaf7 | ||
|
|
52a5972b49 | ||
|
|
b0a9f5f688 | ||
|
|
c00c4f6730 | ||
|
|
a398b91e66 | ||
|
|
9ec4e26df1 | ||
|
|
4619a13bcb | ||
|
|
2292f904b8 | ||
|
|
ff206b8fc7 | ||
|
|
a3062105fd | ||
|
|
e61133c557 | ||
|
|
f8f70141c8 | ||
|
|
1ec499dfb0 | ||
|
|
96e4909bf0 | ||
|
|
27f7bfd129 | ||
|
|
3342427ec2 | ||
|
|
4991b058d3 | ||
|
|
8ea84a22e9 | ||
|
|
899017fdd8 | ||
|
|
abfe452996 | ||
|
|
3775e21dc7 | ||
|
|
2acc180fd5 | ||
|
|
be381488aa | ||
|
|
9da1d2a456 | ||
|
|
44a7f15440 | ||
|
|
cafa36f627 | ||
|
|
49e689f022 | ||
|
|
422a048806 | ||
|
|
97bc220866 | ||
|
|
319731b664 | ||
|
|
ea2c5184a9 | ||
|
|
c843ea5575 | ||
|
|
3109b5d253 | ||
|
|
fcbe4ae88a | ||
|
|
9f1d04bcd4 | ||
|
|
54d01f0a65 | ||
|
|
97e684dba4 | ||
|
|
478b018fa5 | ||
|
|
3ee21ac830 | ||
|
|
22695a633c | ||
|
|
3b203536b8 | ||
|
|
1e289e94e3 | ||
|
|
beb101bd2c | ||
|
|
ecaacec9c9 | ||
|
|
3ee690d391 | ||
|
|
ddebc2418f | ||
|
|
6c2a843f9a | ||
|
|
bb0c0e1c74 | ||
|
|
866ce566d7 | ||
|
|
fd0037e66b | ||
|
|
640bbd95c1 | ||
|
|
5e46e7889f | ||
|
|
ecea1d1fbd | ||
|
|
100e67156e | ||
|
|
cea3369b5e | ||
|
|
284260d5f3 | ||
|
|
12d69d50b1 | ||
|
|
b49adaf717 | ||
|
|
f8f1d6ef76 | ||
|
|
45a78dc426 | ||
|
|
5146d405a7 | ||
|
|
61c2cc6c3a | ||
|
|
d0279585ef | ||
|
|
6bc2f34351 | ||
|
|
52ada3f6d5 | ||
|
|
4b69226f89 | ||
|
|
afe276e7bb | ||
|
|
313de80c8f | ||
|
|
9d377d7527 | ||
|
|
30247c9df0 | ||
|
|
6919fdc522 | ||
|
|
e56dd4e4cb | ||
|
|
c45af09fd7 | ||
|
|
0035834c54 | ||
|
|
8a2c48931b | ||
|
|
08700d7455 | ||
|
|
2fa2624852 | ||
|
|
e3bd54944a | ||
|
|
f81cf6d513 | ||
|
|
1010a81b15 | ||
|
|
c34416cc59 | ||
|
|
29ba1936ad | ||
|
|
5840f41761 | ||
|
|
ce00bd8120 | ||
|
|
dc1cdfc7ba | ||
|
|
cf280ee6da | ||
|
|
28701ab435 | ||
|
|
f2d5e3254f | ||
|
|
9cff96ed62 | ||
|
|
08db1c658e | ||
|
|
ccbac347aa | ||
|
|
fa3d40c904 | ||
|
|
dc3571d0df | ||
|
|
153e9f4db7 | ||
|
|
2f61f132ec | ||
|
|
f6767df889 | ||
|
|
7992e91f44 | ||
|
|
4bb18f6b5d | ||
|
|
5eaeffca04 | ||
|
|
0eb2368712 | ||
|
|
bc2cfb9384 | ||
|
|
0ceb589935 | ||
|
|
b4c5112951 | ||
|
|
bac819b066 | ||
|
|
d3a2e94cc4 | ||
|
|
324397b3e2 | ||
|
|
5a0332bba5 | ||
|
|
6deb83a53d | ||
|
|
8c2a582cfc | ||
|
|
5c8c1e6b24 | ||
|
|
9b285f6fa8 | ||
|
|
686c07bb41 | ||
|
|
ed2ae8da66 | ||
|
|
954a7bb7c5 | ||
|
|
067c975791 | ||
|
|
f9c0e1dd60 | ||
|
|
7cfffd0b84 | ||
|
|
a6844019a1 | ||
|
|
474f095723 | ||
|
|
f69ed72c09 | ||
|
|
bd22861646 | ||
|
|
9d9de3df01 | ||
|
|
18c1acc173 | ||
|
|
9234943dba | ||
|
|
bd73b3b904 | ||
|
|
6dc30bb7dd | ||
|
|
206c2e76d0 | ||
|
|
8458bbb0ed | ||
|
|
2bdf25ca59 | ||
|
|
63222f4503 | ||
|
|
c8c70d27ff | ||
|
|
3cb55eb35c | ||
|
|
75ee015864 | ||
|
|
689cd09567 | ||
|
|
dbf527f2bf | ||
|
|
a1a90daf19 | ||
|
|
09325608f8 | ||
|
|
c244cc6ce9 | ||
|
|
19f4d3e34e | ||
|
|
edf3d6961c | ||
|
|
a14c97dbab | ||
|
|
ab6e520fd6 | ||
|
|
90b662ccb7 | ||
|
|
d691f79a14 | ||
|
|
afd0cd1619 | ||
|
|
483ddb4d14 | ||
|
|
419f55c298 | ||
|
|
165053e628 | ||
|
|
130c40609d | ||
|
|
15679a6a21 | ||
|
|
a52cf764d2 | ||
|
|
8452902703 | ||
|
|
bdf89dc927 | ||
|
|
29785ece48 | ||
|
|
7c441afd4a | ||
|
|
934b849ada | ||
|
|
95413d5b76 | ||
|
|
bd54e2d053 | ||
|
|
f4d39fcd65 | ||
|
|
d849583dd5 | ||
|
|
6aa4e13b54 | ||
|
|
52135e8288 | ||
|
|
dc673ecce5 | ||
|
|
8e7381809e | ||
|
|
494f01048e | ||
|
|
7b15329a02 | ||
|
|
07277985b1 | ||
|
|
00a1875665 | ||
|
|
49a075ca9d | ||
|
|
44eba4c6c3 | ||
|
|
82041f391f | ||
|
|
cf81ef4b4c | ||
|
|
730e8b856f | ||
|
|
0f1b19bddc | ||
|
|
0792e9f9c9 | ||
|
|
77803c18be | ||
|
|
51e31d8854 | ||
|
|
739f17474f | ||
|
|
28dd9fb5f2 | ||
|
|
041dfd3e6d | ||
|
|
44dc5fa280 | ||
|
|
fc16c6618b | ||
|
|
e6194564b8 | ||
|
|
c86d0c8772 | ||
|
|
efd797aa04 | ||
|
|
307d39be8b | ||
|
|
0c4698f02e | ||
|
|
16375abb51 | ||
|
|
8426b9bc2e | ||
|
|
2ee43d4c2c | ||
|
|
7be4760979 | ||
|
|
4fe0def9f0 | ||
|
|
3de61dc29e | ||
|
|
1dd5512265 | ||
|
|
e359ea072e | ||
|
|
059612185e | ||
|
|
9b37e66920 | ||
|
|
bdb9377061 | ||
|
|
f549db3ea9 | ||
|
|
3cf856f1c2 | ||
|
|
fc3178c0b3 | ||
|
|
24b204612b | ||
|
|
f8d8a745fe | ||
|
|
850d93ed62 | ||
|
|
1932b2d03a | ||
|
|
348002c3ab | ||
|
|
19cc5b0406 | ||
|
|
c15f621ad4 | ||
|
|
6e194185ed | ||
|
|
a01ccaec94 | ||
|
|
1eca02a0f4 | ||
|
|
039189ff4b | ||
|
|
44c2297c25 | ||
|
|
54e8a2fe00 | ||
|
|
186d082508 | ||
|
|
1bd6fd5a1d | ||
|
|
f3aebbfb31 | ||
|
|
eb125a84fe | ||
|
|
30294ef9bc | ||
|
|
218c427552 | ||
|
|
7edf85718b | ||
|
|
3b1b853b14 | ||
|
|
ffdde451d6 | ||
|
|
494451b316 | ||
|
|
eb414b7e70 | ||
|
|
ee5de27413 | ||
|
|
d119708538 | ||
|
|
a8cac85a11 | ||
|
|
fbb5dcf11c | ||
|
|
9b0c916bba | ||
|
|
aef1f89ca4 | ||
|
|
a8eb9bb9fb | ||
|
|
ef9601edf1 | ||
|
|
3ac5726dcc | ||
|
|
8ea63cdb56 | ||
|
|
4a9dc3a86f | ||
|
|
ccc4346a0d | ||
|
|
935453add8 | ||
|
|
95e9315c88 | ||
|
|
1f355ada4d | ||
|
|
24c806005f | ||
|
|
492c6e3883 | ||
|
|
df40116ed0 | ||
|
|
f9b724931f | ||
|
|
0889741864 | ||
|
|
e17f355fbc | ||
|
|
4c068f7570 | ||
|
|
5cd4139d01 | ||
|
|
70c65a17b3 | ||
|
|
daa720ab94 | ||
|
|
7206f7ce8f | ||
|
|
e0195f53f6 | ||
|
|
bc76c04f9e | ||
|
|
e4e7f26751 | ||
|
|
1da1e705a1 | ||
|
|
aed7a91bf0 | ||
|
|
c8d427d231 | ||
|
|
a627cc6abe | ||
|
|
5c9de70027 | ||
|
|
ed24b4dc18 | ||
|
|
899c195d27 | ||
|
|
08e6e0e15e | ||
|
|
88904dc892 | ||
|
|
4ab21f3705 | ||
|
|
ca0d61fc56 | ||
|
|
c5f29be85d | ||
|
|
95b2b42b90 | ||
|
|
18e71c847e | ||
|
|
79fa943e4e | ||
|
|
f59f44a85e | ||
|
|
ad2949f143 | ||
|
|
4472595881 | ||
|
|
d5328a3be6 | ||
|
|
23aa48eabf | ||
|
|
438ac8dfa4 | ||
|
|
7a6a021295 | ||
|
|
77659afa9e | ||
|
|
8e10f5eb66 | ||
|
|
abe3d44369 | ||
|
|
cfa21f1dc6 | ||
|
|
c38da9db0b | ||
|
|
6ba48e499c | ||
|
|
1dee812ce6 | ||
|
|
5c44fd8fea | ||
|
|
1bd6723ab9 | ||
|
|
bd691f01b1 | ||
|
|
73c8965637 | ||
|
|
dc7ff8317c | ||
|
|
624fd87ee7 | ||
|
|
cd1ce2a3d8 | ||
|
|
c6de72467e | ||
|
|
5d1c63375b | ||
|
|
8c982cd476 | ||
|
|
6ee9064676 | ||
|
|
2c75285148 | ||
|
|
ecb5c1455b | ||
|
|
17f495c444 | ||
|
|
e7f25560c8 | ||
|
|
fc4d32ebe7 | ||
|
|
b47325d06a | ||
|
|
436ac6de49 | ||
|
|
c1bd611e57 | ||
|
|
edde2596b5 | ||
|
|
da9d37c718 | ||
|
|
5bcb727305 | ||
|
|
2dc688b16c | ||
|
|
0ac9fd79b3 | ||
|
|
3d17dc47b5 | ||
|
|
ef2e7886c4 | ||
|
|
c8f3a84b92 | ||
|
|
9688fee2d2 | ||
|
|
2dcd9eda19 | ||
|
|
24187495e1 | ||
|
|
c27d25d4ab | ||
|
|
93a2dad2eb | ||
|
|
b235863644 | ||
|
|
f387f8c5b6 | ||
|
|
36e5751221 | ||
|
|
5af760f5ee | ||
|
|
dfd836527e | ||
|
|
d93a3981fa | ||
|
|
8d5a663817 | ||
|
|
fbb4a2f8b4 | ||
|
|
54bce6505b | ||
|
|
6da47cc830 | ||
|
|
9cabbf3622 | ||
|
|
6c28a08bee | ||
|
|
86e3decd4e | ||
|
|
e14e0bb9e8 | ||
|
|
b6023d1373 | ||
|
|
1812cc8ef8 | ||
|
|
e64c490c8a | ||
|
|
5df39f984a | ||
|
|
d007ed711a | ||
|
|
dfd2cf9e20 | ||
|
|
61824abb9f | ||
|
|
33c5548fe1 | ||
|
|
fd41c395ae | ||
|
|
1a980844f0 | ||
|
|
82e018e284 | ||
|
|
e0e1233b1c | ||
|
|
74677f940e | ||
|
|
21a4d20579 | ||
|
|
9634e4e0f7 | ||
|
|
00a47ab5d3 | ||
|
|
59b417705e | ||
|
|
525d082f3d | ||
|
|
ba3481759b | ||
|
|
7125cea29b | ||
|
|
8586c5a307 | ||
|
|
0d81315809 | ||
|
|
8f193f1e2c | ||
|
|
b1eef8aa09 | ||
|
|
531b66effe | ||
|
|
5e4ad10fe0 | ||
|
|
541b932b6d | ||
|
|
2bf3ff9f00 | ||
|
|
2da17f272c | ||
|
|
7bcb4586b2 | ||
|
|
d3326b3362 | ||
|
|
b9d3f430fe | ||
|
|
067336dcc1 | ||
|
|
8acb0a876a | ||
|
|
d1be41eca4 | ||
|
|
00e953a7ce | ||
|
|
b9ef9ad041 | ||
|
|
e90fbf17d3 | ||
|
|
139447b253 | ||
|
|
fa9fc2c8e3 | ||
|
|
30071c6848 | ||
|
|
b0bd3c8191 | ||
|
|
c753da9e15 | ||
|
|
4770ee5942 | ||
|
|
5cd53bc8f9 | ||
|
|
5e47ccc9ef | ||
|
|
f5d7c0f9a0 | ||
|
|
35b7e80be4 | ||
|
|
07eeac0a0b | ||
|
|
240d86bf1e | ||
|
|
274fd50a92 | ||
|
|
bbf49c3686 | ||
|
|
e3458630ba | ||
|
|
2f6f1e49e9 | ||
|
|
4f5a40ffce | ||
|
|
f5aea55b29 | ||
|
|
e3e7e2f52e | ||
|
|
872ac1ce0f | ||
|
|
ebeb7a07af | ||
|
|
5c14b34a8b | ||
|
|
f0abd500d9 | ||
|
|
8503cb86f1 | ||
|
|
5f0b670a82 | ||
|
|
9df814e351 | ||
|
|
88509ce8c2 | ||
|
|
995c371f48 | ||
|
|
aee5e04b9f | ||
|
|
e0c96052bb | ||
|
|
fd5235dd0a | ||
|
|
f3de66a287 | ||
|
|
9a4fb35ea5 | ||
|
|
a1ad904042 | ||
|
|
81ff1da756 | ||
|
|
85c9b0b99b | ||
|
|
4ccac66a73 | ||
|
|
c7b9fdaff2 | ||
|
|
c7dcc20a1d | ||
|
|
bb365a5e81 | ||
|
|
e2633d0251 | ||
|
|
09c40e76b2 | ||
|
|
abc3e71440 | ||
|
|
d13596c35c | ||
|
|
7d5dcf061c | ||
|
|
6206e483a9 | ||
|
|
f1ecc61de3 | ||
|
|
92a6a3a916 | ||
|
|
8a89f3b340 | ||
|
|
a93e87493f | ||
|
|
c7032bceba | ||
|
|
0cd7528284 | ||
|
|
2309b8eb3f | ||
|
|
dbd1bdabc2 | ||
|
|
093d595fc5 | ||
|
|
c38758d61a | ||
|
|
6034b12af6 | ||
|
|
972654dc78 | ||
|
|
ec417b0dac | ||
|
|
2e9352dc12 | ||
|
|
566b263d0a | ||
|
|
61b42b4fea | ||
|
|
a45de018fb | ||
|
|
bfe6987867 | ||
|
|
b6567ab5fc | ||
|
|
f71c2fbe94 | ||
|
|
aeb03f50ba | ||
|
|
734db423ee | ||
|
|
4f47dbfe14 | ||
|
|
d23bf45310 | ||
|
|
9c366881f1 | ||
|
|
9dd482618b | ||
|
|
84cc01566d | ||
|
|
ac7b912b45 | ||
|
|
62852f1b2f | ||
|
|
b659a0f06d | ||
|
|
fb3620a378 | ||
|
|
9d56e13818 | ||
|
|
43c5a11271 | ||
|
|
ac957ce599 | ||
|
|
3567906fcd | ||
|
|
be6801d98f | ||
|
|
bb9b242d0a | ||
|
|
5f27d3b9aa | ||
|
|
93af0e9d19 | ||
|
|
398e2a896f | ||
|
|
a98bac331d | ||
|
|
9f6086e5cf | ||
|
|
c5a1f19567 | ||
|
|
6d70a8a71d | ||
|
|
4161261c43 | ||
|
|
179821a527 | ||
|
|
2028b1a6e3 | ||
|
|
5b871865db | ||
|
|
76bcec335d | ||
|
|
8483a741b4 | ||
|
|
68c8e16828 | ||
|
|
76150b2ca7 | ||
|
|
5cf8a25bae | ||
|
|
593aa16f17 | ||
|
|
af9793c2ed | ||
|
|
552d2a8286 | ||
|
|
7822b11d51 | ||
|
|
cbe5a4a732 | ||
|
|
58de31d0ea | ||
|
|
5c06dc68c6 | ||
|
|
44d65cca96 | ||
|
|
71e0d13bef | ||
|
|
30269a6a73 | ||
|
|
6374219e05 | ||
|
|
6e745fc6d1 | ||
|
|
85aa04c490 | ||
|
|
1fd8d97d56 | ||
|
|
286d5555d2 | ||
|
|
57096a9258 | ||
|
|
c08eb1dbba | ||
|
|
746f1a8922 | ||
|
|
0845b7f445 | ||
|
|
a6fffe06b7 | ||
|
|
ea8cea16c5 | ||
|
|
5452b7287b | ||
|
|
80d7ef7f24 | ||
|
|
dc4da5b4c9 | ||
|
|
59477e7b38 | ||
|
|
6dd7251c84 | ||
|
|
c52e44f90c | ||
|
|
db18ca76b4 | ||
|
|
288427c939 | ||
|
|
90a07c61eb | ||
|
|
13341e35c9 | ||
|
|
4c92a941a8 |
9
.coderabbit.yaml
Normal file
9
.coderabbit.yaml
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
reviews:
|
||||||
|
profile: "chill"
|
||||||
|
estimate_code_review_effort: false
|
||||||
|
auto_review:
|
||||||
|
enabled: true
|
||||||
|
high_level_summary: true
|
||||||
|
issue_enrichment:
|
||||||
|
auto_enrich:
|
||||||
|
enabled: false
|
||||||
@@ -4,16 +4,16 @@
|
|||||||
# The NetAlertX Dockerfile has 3 stages:
|
# The NetAlertX Dockerfile has 3 stages:
|
||||||
#
|
#
|
||||||
# Stage 1. Builder - NetAlertX Requires special tools and packages to build our virtual environment, but
|
# Stage 1. Builder - NetAlertX Requires special tools and packages to build our virtual environment, but
|
||||||
# which are not needed in future stages. We build the builder and extract the venv for runner to use as
|
# which are not needed in future stages. We build the builder and extract the venv for runner to use as
|
||||||
# a base.
|
# a base.
|
||||||
#
|
#
|
||||||
# Stage 2. Runner builds the bare minimum requirements to create an operational NetAlertX. The primary
|
# Stage 2. Runner builds the bare minimum requirements to create an operational NetAlertX. The primary
|
||||||
# reason for breaking at this stage is it leaves the system in a proper state for devcontainer operation
|
# reason for breaking at this stage is it leaves the system in a proper state for devcontainer operation
|
||||||
# This image also provides a break-out point for uses who wish to execute the anti-pattern of using a
|
# This image also provides a break-out point for uses who wish to execute the anti-pattern of using a
|
||||||
# docker container as a VM for experimentation and various development patterns.
|
# docker container as a VM for experimentation and various development patterns.
|
||||||
#
|
#
|
||||||
# Stage 3. Hardened removes root, sudoers, folders, permissions, and locks the system down into a read-only
|
# Stage 3. Hardened removes root, sudoers, folders, permissions, and locks the system down into a read-only
|
||||||
# compatible image. While NetAlertX does require some read-write operations, this image can guarantee the
|
# compatible image. While NetAlertX does require some read-write operations, this image can guarantee the
|
||||||
# code pushed out by the project is the only code which will run on the system after each container restart.
|
# code pushed out by the project is the only code which will run on the system after each container restart.
|
||||||
# It reduces the chance of system hijacking and operates with all modern security protocols in place as is
|
# It reduces the chance of system hijacking and operates with all modern security protocols in place as is
|
||||||
# expected from a security appliance.
|
# expected from a security appliance.
|
||||||
@@ -29,13 +29,26 @@ ENV PATH="/opt/venv/bin:$PATH"
|
|||||||
|
|
||||||
# Install build dependencies
|
# Install build dependencies
|
||||||
COPY requirements.txt /tmp/requirements.txt
|
COPY requirements.txt /tmp/requirements.txt
|
||||||
RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git \
|
# hadolint ignore=DL3018
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
bash \
|
||||||
|
shadow \
|
||||||
|
python3 \
|
||||||
|
python3-dev \
|
||||||
|
py3-psutil \
|
||||||
|
gcc \
|
||||||
|
musl-dev \
|
||||||
|
libffi-dev \
|
||||||
|
openssl-dev \
|
||||||
|
git \
|
||||||
|
rust \
|
||||||
|
cargo \
|
||||||
&& python -m venv /opt/venv
|
&& python -m venv /opt/venv
|
||||||
|
|
||||||
# Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy
|
# Upgrade pip/wheel/setuptools and install Python packages
|
||||||
# into hardened stage without worrying about permissions and keeps image size small. Keeping the commands
|
# hadolint ignore=DL3013, DL3042
|
||||||
# together makes for a slightly smaller image size.
|
RUN python -m pip install --upgrade pip setuptools wheel && \
|
||||||
RUN pip install -r /tmp/requirements.txt && \
|
pip install --prefer-binary --no-cache-dir -r /tmp/requirements.txt && \
|
||||||
chmod -R u-rwx,g-rwx /opt
|
chmod -R u-rwx,g-rwx /opt
|
||||||
|
|
||||||
# second stage is the main runtime stage with just the minimum required to run the application
|
# second stage is the main runtime stage with just the minimum required to run the application
|
||||||
@@ -43,17 +56,25 @@ RUN pip install -r /tmp/requirements.txt && \
|
|||||||
FROM alpine:3.22 AS runner
|
FROM alpine:3.22 AS runner
|
||||||
|
|
||||||
ARG INSTALL_DIR=/app
|
ARG INSTALL_DIR=/app
|
||||||
|
# Runtime service account (override at build; container user can still be overridden at run time)
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
# Read-only lock owner (separate from service account to avoid UID/GID collisions)
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
# NetAlertX app directories
|
# NetAlertX app directories
|
||||||
ENV NETALERTX_APP=${INSTALL_DIR}
|
ENV NETALERTX_APP=${INSTALL_DIR}
|
||||||
ENV NETALERTX_CONFIG=${NETALERTX_APP}/config
|
ENV NETALERTX_DATA=/data
|
||||||
|
ENV NETALERTX_CONFIG=${NETALERTX_DATA}/config
|
||||||
ENV NETALERTX_FRONT=${NETALERTX_APP}/front
|
ENV NETALERTX_FRONT=${NETALERTX_APP}/front
|
||||||
|
ENV NETALERTX_PLUGINS=${NETALERTX_FRONT}/plugins
|
||||||
ENV NETALERTX_SERVER=${NETALERTX_APP}/server
|
ENV NETALERTX_SERVER=${NETALERTX_APP}/server
|
||||||
ENV NETALERTX_API=${NETALERTX_APP}/api
|
ENV NETALERTX_API=/tmp/api
|
||||||
ENV NETALERTX_DB=${NETALERTX_APP}/db
|
ENV NETALERTX_DB=${NETALERTX_DATA}/db
|
||||||
ENV NETALERTX_DB_FILE=${NETALERTX_DB}/app.db
|
ENV NETALERTX_DB_FILE=${NETALERTX_DB}/app.db
|
||||||
ENV NETALERTX_BACK=${NETALERTX_APP}/back
|
ENV NETALERTX_BACK=${NETALERTX_APP}/back
|
||||||
ENV NETALERTX_LOG=${NETALERTX_APP}/log
|
ENV NETALERTX_LOG=/tmp/log
|
||||||
ENV NETALERTX_PLUGINS_LOG=${NETALERTX_LOG}/plugins
|
ENV NETALERTX_PLUGINS_LOG=${NETALERTX_LOG}/plugins
|
||||||
ENV NETALERTX_CONFIG_FILE=${NETALERTX_CONFIG}/app.conf
|
ENV NETALERTX_CONFIG_FILE=${NETALERTX_CONFIG}/app.conf
|
||||||
|
|
||||||
@@ -69,7 +90,8 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
|
|||||||
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
|
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
|
||||||
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
|
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
|
||||||
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
|
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
|
||||||
ENV LOG_CROND=${NETALERTX_LOG}/crond.log
|
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
|
||||||
|
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log
|
||||||
|
|
||||||
# System Services configuration files
|
# System Services configuration files
|
||||||
ENV ENTRYPOINT_CHECKS=/entrypoint.d
|
ENV ENTRYPOINT_CHECKS=/entrypoint.d
|
||||||
@@ -77,48 +99,50 @@ ENV SYSTEM_SERVICES=/services
|
|||||||
ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
|
ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
|
||||||
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
|
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
|
||||||
ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
|
ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
|
||||||
ENV SYSTEM_NGINX_CONFIG_FILE=${SYSTEM_NGINX_CONFIG}/nginx.conf
|
ENV SYSTEM_NGINX_CONFIG_TEMPLATE=${SYSTEM_NGINX_CONFIG}/netalertx.conf.template
|
||||||
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=${SYSTEM_NGINX_CONFIG}/conf.active
|
ENV SYSTEM_SERVICES_CONFIG_CRON=${SYSTEM_SERVICES_CONFIG}/cron
|
||||||
|
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
|
||||||
|
ENV SYSTEM_SERVICES_ACTIVE_CONFIG_FILE=${SYSTEM_SERVICES_ACTIVE_CONFIG}/nginx.conf
|
||||||
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
|
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
|
||||||
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
|
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
|
||||||
ENV SYSTEM_SERVICES_CROND=${SYSTEM_SERVICES_CONFIG}/crond
|
ENV SYSTEM_SERVICES_RUN=/tmp/run
|
||||||
ENV SYSTEM_SERVICES_RUN=${SYSTEM_SERVICES}/run
|
|
||||||
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
|
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
|
||||||
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
|
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
|
||||||
ENV PHP_FPM_CONFIG_FILE=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.conf
|
ENV PHP_FPM_CONFIG_FILE=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.conf
|
||||||
ENV READ_ONLY_FOLDERS="${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER} ${SYSTEM_SERVICES} \
|
ENV READ_ONLY_FOLDERS="${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER} ${SYSTEM_SERVICES} \
|
||||||
${SYSTEM_SERVICES_CONFIG} ${ENTRYPOINT_CHECKS}"
|
${SYSTEM_SERVICES_CONFIG} ${ENTRYPOINT_CHECKS}"
|
||||||
ENV READ_WRITE_FOLDERS="${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_API} ${NETALERTX_LOG} \
|
ENV READ_WRITE_FOLDERS="${NETALERTX_DATA} ${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_API} \
|
||||||
${NETALERTX_PLUGINS_LOG} ${SYSTEM_SERVICES_RUN} ${SYSTEM_SERVICES_RUN_TMP} \
|
${NETALERTX_LOG} ${NETALERTX_PLUGINS_LOG} ${SYSTEM_SERVICES_RUN} \
|
||||||
${SYSTEM_SERVICES_RUN_LOG}"
|
${SYSTEM_SERVICES_RUN_TMP} ${SYSTEM_SERVICES_RUN_LOG} \
|
||||||
|
${SYSTEM_SERVICES_ACTIVE_CONFIG}"
|
||||||
|
|
||||||
#Python environment
|
#Python environment
|
||||||
ENV PYTHONUNBUFFERED=1
|
ENV PYTHONUNBUFFERED=1
|
||||||
ENV VIRTUAL_ENV=/opt/venv
|
ENV VIRTUAL_ENV=/opt/venv
|
||||||
ENV VIRTUAL_ENV_BIN=/opt/venv/bin
|
ENV VIRTUAL_ENV_BIN=/opt/venv/bin
|
||||||
ENV PYTHONPATH=${NETALERTX_APP}:${NETALERTX_SERVER}:${VIRTUAL_ENV}/lib/python3.12/site-packages
|
ENV PYTHONPATH=${NETALERTX_APP}:${NETALERTX_SERVER}:${NETALERTX_PLUGINS}:${VIRTUAL_ENV}/lib/python3.12/site-packages
|
||||||
ENV PATH="${SYSTEM_SERVICES}:${VIRTUAL_ENV_BIN}:$PATH"
|
ENV PATH="${SYSTEM_SERVICES}:${VIRTUAL_ENV_BIN}:$PATH"
|
||||||
|
|
||||||
# App Environment
|
# App Environment
|
||||||
ENV LISTEN_ADDR=0.0.0.0
|
ENV LISTEN_ADDR=0.0.0.0
|
||||||
ENV PORT=20211
|
ENV PORT=20211
|
||||||
ENV NETALERTX_DEBUG=0
|
ENV NETALERTX_DEBUG=0
|
||||||
ENV VENDORSPATH=/app/back/ieee-oui.txt
|
ENV VENDORSPATH=/app/back/ieee-oui.txt
|
||||||
ENV VENDORSPATH_NEWEST=/services/run/tmp/ieee-oui.txt
|
ENV VENDORSPATH_NEWEST=${SYSTEM_SERVICES_RUN_TMP}/ieee-oui.txt
|
||||||
ENV ENVIRONMENT=alpine
|
ENV ENVIRONMENT=alpine
|
||||||
ENV READ_ONLY_USER=readonly READ_ONLY_GROUP=readonly
|
ENV READ_ONLY_USER=readonly READ_ONLY_GROUP=readonly
|
||||||
ENV NETALERTX_USER=netalertx NETALERTX_GROUP=netalertx
|
ENV NETALERTX_USER=netalertx NETALERTX_GROUP=netalertx
|
||||||
ENV LANG=C.UTF-8
|
ENV LANG=C.UTF-8
|
||||||
|
|
||||||
|
|
||||||
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \
|
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap fping \
|
||||||
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
||||||
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
|
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 py3-psutil envsubst \
|
||||||
nginx shadow && \
|
nginx supercronic shadow su-exec jq && \
|
||||||
rm -Rf /var/cache/apk/* && \
|
rm -Rf /var/cache/apk/* && \
|
||||||
rm -Rf /etc/nginx && \
|
rm -Rf /etc/nginx && \
|
||||||
addgroup -g 20211 ${NETALERTX_GROUP} && \
|
addgroup -g ${NETALERTX_GID} ${NETALERTX_GROUP} && \
|
||||||
adduser -u 20211 -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \
|
adduser -u ${NETALERTX_UID} -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \
|
||||||
apk del shadow
|
apk del shadow
|
||||||
|
|
||||||
|
|
||||||
@@ -128,77 +152,98 @@ COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} install/production-filesystem/
|
|||||||
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 back ${NETALERTX_BACK}
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 back ${NETALERTX_BACK}
|
||||||
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 front ${NETALERTX_FRONT}
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 front ${NETALERTX_FRONT}
|
||||||
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 server ${NETALERTX_SERVER}
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 server ${NETALERTX_SERVER}
|
||||||
RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 755 ${NETALERTX_API} \
|
|
||||||
${NETALERTX_LOG} ${SYSTEM_SERVICES_RUN_TMP} ${SYSTEM_SERVICES_RUN_LOG} && \
|
# Create required folders with correct ownership and permissions
|
||||||
|
RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \
|
||||||
sh -c "find ${NETALERTX_APP} -type f \( -name '*.sh' -o -name 'speedtest-cli' \) \
|
sh -c "find ${NETALERTX_APP} -type f \( -name '*.sh' -o -name 'speedtest-cli' \) \
|
||||||
-exec chmod 750 {} \;"
|
-exec chmod 750 {} \;"
|
||||||
|
|
||||||
# Copy the virtualenv from the builder stage
|
# Copy version information into the image
|
||||||
COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION
|
||||||
|
|
||||||
|
# Copy the virtualenv from the builder stage (owned by readonly lock owner)
|
||||||
|
COPY --from=builder --chown=${READONLY_UID}:${READONLY_GID} ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
||||||
|
|
||||||
|
|
||||||
# Initialize each service with the dockerfiles/init-*.sh scripts, once.
|
# Initialize each service with the dockerfiles/init-*.sh scripts, once.
|
||||||
# This is done after the copy of the venv to ensure the venv is in place
|
# This is done after the copy of the venv to ensure the venv is in place
|
||||||
# although it may be quicker to do it before the copy, it keeps the image
|
# although it may be quicker to do it before the copy, it keeps the image
|
||||||
# layers smaller to do it after.
|
# layers smaller to do it after.
|
||||||
RUN apk add libcap && \
|
# hadolint ignore=DL3018
|
||||||
setcap cap_net_raw+ep /bin/busybox && \
|
RUN for vfile in .VERSION; do \
|
||||||
|
if [ ! -f "${NETALERTX_APP}/${vfile}" ]; then \
|
||||||
|
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/${vfile}"; \
|
||||||
|
fi; \
|
||||||
|
chown ${READONLY_UID}:${READONLY_GID} "${NETALERTX_APP}/${vfile}"; \
|
||||||
|
done && \
|
||||||
|
apk add --no-cache libcap && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
|
||||||
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
|
setcap cap_net_raw,cap_net_admin+eip "$(readlink -f ${VIRTUAL_ENV_BIN}/python)" && \
|
||||||
/bin/sh /build/init-nginx.sh && \
|
/bin/sh /build/init-nginx.sh && \
|
||||||
/bin/sh /build/init-php-fpm.sh && \
|
/bin/sh /build/init-php-fpm.sh && \
|
||||||
/bin/sh /build/init-crond.sh && \
|
/bin/sh /build/init-cron.sh && \
|
||||||
/bin/sh /build/init-backend.sh && \
|
/bin/sh /build/init-backend.sh && \
|
||||||
rm -rf /build && \
|
rm -rf /build && \
|
||||||
apk del libcap && \
|
apk del libcap && \
|
||||||
date +%s > ${NETALERTX_FRONT}/buildtimestamp.txt
|
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
|
||||||
|
|
||||||
|
|
||||||
ENTRYPOINT ["/bin/sh","/entrypoint.sh"]
|
ENTRYPOINT ["/bin/bash","/entrypoint.sh"]
|
||||||
|
|
||||||
# Final hardened stage to improve security by setting least possible permissions and removing sudo access.
|
# Final hardened stage to improve security by setting least possible permissions and removing sudo access.
|
||||||
# When complete, if the image is compromised, there's not much that can be done with it.
|
# When complete, if the image is compromised, there's not much that can be done with it.
|
||||||
# This stage is separate from Runner stage so that devcontainer can use the Runner stage.
|
# This stage is separate from Runner stage so that devcontainer can use the Runner stage.
|
||||||
FROM runner AS hardened
|
FROM runner AS hardened
|
||||||
|
|
||||||
|
# Re-declare UID/GID args for this stage
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
ENV UMASK=0077
|
ENV UMASK=0077
|
||||||
|
|
||||||
# Create readonly user and group with no shell access.
|
# Create readonly user and group with no shell access.
|
||||||
# Readonly user marks folders that are created by NetAlertX, but should not be modified.
|
# Readonly user marks folders that are created by NetAlertX, but should not be modified.
|
||||||
# AI may claim this is stupid, but it's actually least possible permissions as
|
# AI may claim this is stupid, but it's actually least possible permissions as
|
||||||
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
|
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
|
||||||
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
|
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
|
||||||
RUN addgroup -g 20212 ${READ_ONLY_GROUP} && \
|
RUN addgroup -g ${READONLY_GID} "${READ_ONLY_GROUP}" && \
|
||||||
adduser -u 20212 -G ${READ_ONLY_GROUP} -D -h /app ${READ_ONLY_USER}
|
adduser -u ${READONLY_UID} -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"
|
||||||
|
|
||||||
|
|
||||||
# reduce permissions to minimum necessary for all NetAlertX files and folders
|
# reduce permissions to minimum necessary for all NetAlertX files and folders
|
||||||
# Permissions 005 and 004 are not typos, they enable read-only. Everyone can
|
# Permissions 005 and 004 are not typos, they enable read-only. Everyone can
|
||||||
# read the read-only files, and nobody can write to them, even the readonly user.
|
# read the read-only files, and nobody can write to them, even the readonly user.
|
||||||
|
|
||||||
|
# hadolint ignore=SC2114
|
||||||
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
||||||
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
||||||
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
||||||
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \
|
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \
|
||||||
chown -R ${NETALERTX_USER}:${NETALERTX_GROUP} ${READ_WRITE_FOLDERS} && \
|
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /root-entrypoint.sh /opt /opt/venv && \
|
||||||
chmod -R 600 ${READ_WRITE_FOLDERS} && \
|
chmod 005 /entrypoint.sh /root-entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
||||||
find ${READ_WRITE_FOLDERS} -type d -exec chmod 700 {} + && \
|
# Do not bake first-run artifacts into the image. If present, Docker volume copy-up
|
||||||
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \
|
# will persist restrictive ownership/modes into fresh named volumes, breaking
|
||||||
chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
# arbitrary non-root UID/GID runs.
|
||||||
for dir in ${READ_WRITE_FOLDERS}; do \
|
rm -f \
|
||||||
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 "$dir"; \
|
"${NETALERTX_CONFIG}/app.conf" \
|
||||||
done && \
|
"${NETALERTX_DB_FILE}" \
|
||||||
|
"${NETALERTX_DB_FILE}-shm" \
|
||||||
|
"${NETALERTX_DB_FILE}-wal" || true && \
|
||||||
apk del apk-tools && \
|
apk del apk-tools && \
|
||||||
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
|
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
|
||||||
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \
|
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \
|
||||||
/srv /media && \
|
/srv /media && \
|
||||||
sed -i "/^\(${READ_ONLY_USER}\|${NETALERTX_USER}\):/!d" /etc/passwd && \
|
# Preserve root and system identities so hardened entrypoint never needs to patch /etc/passwd or /etc/group at runtime.
|
||||||
sed -i "/^\(${READ_ONLY_GROUP}\|${NETALERTX_GROUP}\):/!d" /etc/group && \
|
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
||||||
echo -ne '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
USER "0"
|
||||||
|
|
||||||
USER netalertx
|
# Call root-entrypoint.sh which drops priviliges to run entrypoint.sh.
|
||||||
|
ENTRYPOINT ["/root-entrypoint.sh"]
|
||||||
|
|
||||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
||||||
CMD /services/healthcheck.sh
|
CMD /services/healthcheck.sh
|
||||||
@@ -211,11 +256,15 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
|||||||
# .devcontainer/scripts/generate-configs.sh
|
# .devcontainer/scripts/generate-configs.sh
|
||||||
# The generator appends this stage to produce .devcontainer/Dockerfile.
|
# The generator appends this stage to produce .devcontainer/Dockerfile.
|
||||||
# Prefer to place dev-only setup here; use setup.sh only for runtime fixes.
|
# Prefer to place dev-only setup here; use setup.sh only for runtime fixes.
|
||||||
|
# Permissions in devcontainer should be of a brutalist nature. They will be
|
||||||
|
# Open and wide to avoid permission issues during development allowing max
|
||||||
|
# flexibility.
|
||||||
|
|
||||||
|
# hadolint ignore=DL3006
|
||||||
FROM runner AS netalertx-devcontainer
|
FROM runner AS netalertx-devcontainer
|
||||||
ENV INSTALL_DIR=/app
|
ENV INSTALL_DIR=/app
|
||||||
|
|
||||||
ENV PYTHONPATH=/workspaces/NetAlertX/test:/workspaces/NetAlertX/server:/app:/app/server:/opt/venv/lib/python3.12/site-packages:/usr/lib/python3.12/site-packages
|
ENV PYTHONPATH=${PYTHONPATH}:/workspaces/NetAlertX/test:/workspaces/NetAlertX/server:/usr/lib/python3.12/site-packages
|
||||||
ENV PATH=/services:${PATH}
|
ENV PATH=/services:${PATH}
|
||||||
ENV PHP_INI_SCAN_DIR=/services/config/php/conf.d:/etc/php83/conf.d
|
ENV PHP_INI_SCAN_DIR=/services/config/php/conf.d:/etc/php83/conf.d
|
||||||
ENV LISTEN_ADDR=0.0.0.0
|
ENV LISTEN_ADDR=0.0.0.0
|
||||||
@@ -225,18 +274,39 @@ ENV PYDEVD_DISABLE_FILE_VALIDATION=1
|
|||||||
COPY .devcontainer/resources/devcontainer-overlay/ /
|
COPY .devcontainer/resources/devcontainer-overlay/ /
|
||||||
USER root
|
USER root
|
||||||
# Install common tools, create user, and set up sudo
|
# Install common tools, create user, and set up sudo
|
||||||
|
|
||||||
|
# Ensure entrypoint scripts stay executable in the devcontainer (avoids 126 errors)
|
||||||
|
RUN chmod +x /entrypoint.sh /root-entrypoint.sh /entrypoint.d/*.sh && \
|
||||||
|
chmod +x /entrypoint.d/35-apply-conf-override.sh
|
||||||
|
|
||||||
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
|
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
|
||||||
pytest-cov fish shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
|
pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
|
||||||
docker-cli-compose
|
docker-cli-compose shellcheck py3-psutil chromium chromium-chromedriver
|
||||||
|
|
||||||
|
# Install hadolint (Dockerfile linter)
|
||||||
|
RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
|
||||||
|
chmod +x /usr/local/bin/hadolint
|
||||||
|
|
||||||
RUN install -d -o netalertx -g netalertx -m 755 /services/php/modules && \
|
RUN install -d -o netalertx -g netalertx -m 755 /services/php/modules && \
|
||||||
cp -a /usr/lib/php83/modules/. /services/php/modules/ && \
|
cp -a /usr/lib/php83/modules/. /services/php/modules/ && \
|
||||||
echo "${NETALERTX_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
echo "${NETALERTX_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
||||||
RUN mkdir /workspaces && \
|
ENV SHELL=/bin/zsh
|
||||||
install -d -o netalertx -g netalertx -m 777 /services/run/logs && \
|
|
||||||
install -d -o netalertx -g netalertx -m 777 /app/run/tmp/client_body && \
|
RUN mkdir -p /workspaces && \
|
||||||
sed -i -e 's|:/app:|:/workspaces:|' /etc/passwd && \
|
install -d -m 777 /data /data/config /data/db && \
|
||||||
|
install -d -m 777 /tmp/log /tmp/log/plugins /tmp/api /tmp/run /tmp/nginx && \
|
||||||
|
install -d -m 777 /tmp/nginx/active-config /tmp/nginx/client_body /tmp/nginx/config && \
|
||||||
|
install -d -m 777 /tmp/nginx/fastcgi /tmp/nginx/proxy /tmp/nginx/scgi /tmp/nginx/uwsgi && \
|
||||||
|
install -d -m 777 /tmp/run/tmp /tmp/run/logs && \
|
||||||
|
chmod 777 /workspaces && \
|
||||||
|
chown -R netalertx:netalertx /data && \
|
||||||
|
chmod 666 /data/config/app.conf /data/db/app.db && \
|
||||||
|
chmod 1777 /tmp && \
|
||||||
|
install -d -o root -g root -m 1777 /tmp/.X11-unix && \
|
||||||
|
mkdir -p /home/netalertx && \
|
||||||
|
chown netalertx:netalertx /home/netalertx && \
|
||||||
|
sed -i -e 's#/app:#/workspaces:#' /etc/passwd && \
|
||||||
find /opt/venv -type d -exec chmod o+rwx {} \;
|
find /opt/venv -type d -exec chmod o+rwx {} \;
|
||||||
|
|
||||||
USER netalertx
|
USER netalertx
|
||||||
ENTRYPOINT ["/bin/sh","-c","sleep infinity"]
|
ENTRYPOINT ["/bin/sh","-c","sleep infinity"]
|
||||||
|
|||||||
37
.devcontainer/NetAlertX.code-workspace
Normal file
37
.devcontainer/NetAlertX.code-workspace
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
{
|
||||||
|
"folders": [
|
||||||
|
{
|
||||||
|
"name": "NetAlertX Source",
|
||||||
|
"path": "/workspaces/NetAlertX"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "💾 NetAlertX Data",
|
||||||
|
"path": "/data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "🔍 Active NetAlertX log",
|
||||||
|
"path": "/tmp/log"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "🌐 Active NetAlertX nginx",
|
||||||
|
"path": "/tmp/nginx"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "📊 Active NetAlertX api",
|
||||||
|
"path": "/tmp/api"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "⚙️ Active NetAlertX run",
|
||||||
|
"path": "/tmp/run"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"settings": {
|
||||||
|
"terminal.integrated.suggest.enabled": true,
|
||||||
|
"terminal.integrated.defaultProfile.linux": "zsh",
|
||||||
|
"terminal.integrated.profiles.linux": {
|
||||||
|
"zsh": {
|
||||||
|
"path": "/usr/bin/fish"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -19,6 +19,17 @@ Common workflows (F1->Tasks: Run Task)
|
|||||||
- Backend (GraphQL/Flask): `.devcontainer/scripts/restart-backend.sh` starts it under debugpy and logs to `/app/log/app.log`
|
- Backend (GraphQL/Flask): `.devcontainer/scripts/restart-backend.sh` starts it under debugpy and logs to `/app/log/app.log`
|
||||||
- Frontend (nginx + PHP-FPM): Started via setup.sh; can be restarted by the task "Start Frontend (nginx and PHP-FPM)".
|
- Frontend (nginx + PHP-FPM): Started via setup.sh; can be restarted by the task "Start Frontend (nginx and PHP-FPM)".
|
||||||
|
|
||||||
|
Production Container Evaulation
|
||||||
|
1. F1 → Tasks: Shutdown services ([Dev Container] Stop Frontend & Backend Services)
|
||||||
|
2. F1 → Tasks: Docker system and build prune ([Any] Docker system and build Prune)
|
||||||
|
3. F1 → Remote: Close Unused Forwarded Ports (VS Code command)
|
||||||
|
4. F1 → Tasks: Build & Launch Production (Build & Launch Prodcution Docker
|
||||||
|
5. visit http://localhost:20211
|
||||||
|
|
||||||
|
Unit tests
|
||||||
|
1. F1 → Tasks: Rebuild test container ([Any] Build Unit Test Docker image)
|
||||||
|
2. F1 → Test: Run all tests
|
||||||
|
|
||||||
Testing
|
Testing
|
||||||
- pytest is installed via Alpine packages (py3-pytest, py3-pytest-cov).
|
- pytest is installed via Alpine packages (py3-pytest, py3-pytest-cov).
|
||||||
- PYTHONPATH includes workspace and venv site-packages so tests can import `server/*` modules and third-party libs.
|
- PYTHONPATH includes workspace and venv site-packages so tests can import `server/*` modules and third-party libs.
|
||||||
|
|||||||
26
.devcontainer/WORKSPACE.md
Normal file
26
.devcontainer/WORKSPACE.md
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
# NetAlertX Multi-Folder Workspace
|
||||||
|
|
||||||
|
This repository uses a multi-folder workspace configuration to provide easy access to runtime directories.
|
||||||
|
|
||||||
|
## Opening the Multi-Folder Workspace
|
||||||
|
|
||||||
|
After the devcontainer builds, open the workspace file to access all folders:
|
||||||
|
|
||||||
|
1. **File** → **Open Workspace from File**
|
||||||
|
2. Select `NetAlertX.code-workspace`
|
||||||
|
|
||||||
|
Or use Command Palette (Ctrl+Shift+P / Cmd+Shift+P):
|
||||||
|
- Type: `Workspaces: Open Workspace from File`
|
||||||
|
- Select `NetAlertX.code-workspace`
|
||||||
|
|
||||||
|
## Workspace Folders
|
||||||
|
|
||||||
|
The workspace includes:
|
||||||
|
- **NetAlertX** - Main source code
|
||||||
|
- **/tmp** - Runtime temporary files
|
||||||
|
- **/tmp/api** - API response cache (JSON files)
|
||||||
|
- **/tmp/log** - Application and plugin logs
|
||||||
|
|
||||||
|
## Testing Configuration
|
||||||
|
|
||||||
|
Pytest is configured to only discover tests in the main `test/` directory, not in `/tmp` folders.
|
||||||
@@ -2,6 +2,8 @@
|
|||||||
"name": "NetAlertX DevContainer",
|
"name": "NetAlertX DevContainer",
|
||||||
"remoteUser": "netalertx",
|
"remoteUser": "netalertx",
|
||||||
"workspaceFolder": "/workspaces/NetAlertX",
|
"workspaceFolder": "/workspaces/NetAlertX",
|
||||||
|
"workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/NetAlertX,type=bind,consistency=cached",
|
||||||
|
"onCreateCommand": "mkdir -p /tmp/api /tmp/log",
|
||||||
"build": {
|
"build": {
|
||||||
"dockerfile": "./Dockerfile", // Dockerfile generated by script
|
"dockerfile": "./Dockerfile", // Dockerfile generated by script
|
||||||
"context": "../", // Context is the root of the repository
|
"context": "../", // Context is the root of the repository
|
||||||
@@ -10,7 +12,8 @@
|
|||||||
"capAdd": [
|
"capAdd": [
|
||||||
"SYS_ADMIN", // For mounting ramdisks
|
"SYS_ADMIN", // For mounting ramdisks
|
||||||
"NET_ADMIN", // For network interface configuration
|
"NET_ADMIN", // For network interface configuration
|
||||||
"NET_RAW" // For raw packet manipulation
|
"NET_RAW", // For raw packet manipulation
|
||||||
|
"NET_BIND_SERVICE" // For privileged port binding (e.g., UDP 137)
|
||||||
],
|
],
|
||||||
"runArgs": [
|
"runArgs": [
|
||||||
"--security-opt",
|
"--security-opt",
|
||||||
@@ -23,7 +26,7 @@
|
|||||||
// even within this container and connect to them as needed.
|
// even within this container and connect to them as needed.
|
||||||
// "--network=host",
|
// "--network=host",
|
||||||
],
|
],
|
||||||
"mounts": [
|
"mounts": [
|
||||||
"source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" //used for testing various conditions in docker
|
"source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" //used for testing various conditions in docker
|
||||||
],
|
],
|
||||||
// ATTENTION: If running with --network=host, COMMENT `forwardPorts` OR ELSE THERE WILL BE NO WEBUI!
|
// ATTENTION: If running with --network=host, COMMENT `forwardPorts` OR ELSE THERE WILL BE NO WEBUI!
|
||||||
@@ -44,11 +47,12 @@
|
|||||||
},
|
},
|
||||||
|
|
||||||
"postCreateCommand": {
|
"postCreateCommand": {
|
||||||
"Install Pip Requirements": "/opt/venv/bin/pip3 install pytest docker debugpy"
|
"Install Pip Requirements": "/opt/venv/bin/pip3 install pytest docker debugpy selenium",
|
||||||
|
"Workspace Instructions": "printf '\n\n<> DevContainer Ready! Starting Services...\n\n📁 To access /tmp folders in the workspace:\n File → Open Workspace from File → NetAlertX.code-workspace\n\n📖 See .devcontainer/WORKSPACE.md for details\n\n'"
|
||||||
},
|
},
|
||||||
"postStartCommand": {
|
"postStartCommand": {
|
||||||
"Start Environment":"${containerWorkspaceFolder}/.devcontainer/scripts/setup.sh",
|
"Build test-container":"echo To speed up tests, building test container in background... && setsid docker buildx build -t netalertx-test . > /tmp/build.log 2>&1 && echo '🧪 Unit Test Docker image built: netalertx-test' &",
|
||||||
"Build test-container":"echo building netalertx-test container in background. check /tmp/build.log for progress. && setsid docker buildx build -t netalertx-test . > /tmp/build.log 2>&1 &"
|
"Start Environment":"${containerWorkspaceFolder}/.devcontainer/scripts/setup.sh"
|
||||||
},
|
},
|
||||||
"customizations": {
|
"customizations": {
|
||||||
"vscode": {
|
"vscode": {
|
||||||
@@ -59,7 +63,6 @@
|
|||||||
"bmewburn.vscode-intelephense-client",
|
"bmewburn.vscode-intelephense-client",
|
||||||
"xdebug.php-debug",
|
"xdebug.php-debug",
|
||||||
"ms-python.vscode-pylance",
|
"ms-python.vscode-pylance",
|
||||||
"pamaron.pytest-runner",
|
|
||||||
"coderabbit.coderabbit-vscode",
|
"coderabbit.coderabbit-vscode",
|
||||||
"ms-python.black-formatter",
|
"ms-python.black-formatter",
|
||||||
"jeff-hykin.better-dockerfile-syntax",
|
"jeff-hykin.better-dockerfile-syntax",
|
||||||
@@ -70,15 +73,27 @@
|
|||||||
"esbenp.prettier-vscode",
|
"esbenp.prettier-vscode",
|
||||||
"eamodio.gitlens",
|
"eamodio.gitlens",
|
||||||
"alexcvzz.vscode-sqlite",
|
"alexcvzz.vscode-sqlite",
|
||||||
"yzhang.markdown-all-in-one",
|
"mkhl.shfmt",
|
||||||
"mkhl.shfmt"
|
"charliermarsh.ruff",
|
||||||
|
"ms-python.flake8",
|
||||||
|
"exiasr.hadolint",
|
||||||
|
"timonwong.shellcheck"
|
||||||
],
|
],
|
||||||
"settings": {
|
"settings": {
|
||||||
"terminal.integrated.cwd": "${containerWorkspaceFolder}",
|
"terminal.integrated.cwd": "${containerWorkspaceFolder}",
|
||||||
|
"terminal.integrated.profiles.linux": {
|
||||||
|
"zsh": {
|
||||||
|
"path": "/bin/zsh",
|
||||||
|
"args": ["-l"]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"terminal.integrated.defaultProfile.linux": "zsh",
|
||||||
|
|
||||||
// Python testing configuration
|
// Python testing configuration
|
||||||
"python.testing.pytestEnabled": true,
|
"python.testing.pytestEnabled": true,
|
||||||
"python.testing.unittestEnabled": false,
|
"python.testing.unittestEnabled": false,
|
||||||
"python.testing.pytestArgs": ["test"],
|
"python.testing.pytestArgs": ["test"],
|
||||||
|
"python.testing.cwd": "${containerWorkspaceFolder}",
|
||||||
// Make sure we discover tests and import server correctly
|
// Make sure we discover tests and import server correctly
|
||||||
"python.analysis.extraPaths": [
|
"python.analysis.extraPaths": [
|
||||||
"/workspaces/NetAlertX",
|
"/workspaces/NetAlertX",
|
||||||
|
|||||||
@@ -3,11 +3,15 @@
|
|||||||
# .devcontainer/scripts/generate-configs.sh
|
# .devcontainer/scripts/generate-configs.sh
|
||||||
# The generator appends this stage to produce .devcontainer/Dockerfile.
|
# The generator appends this stage to produce .devcontainer/Dockerfile.
|
||||||
# Prefer to place dev-only setup here; use setup.sh only for runtime fixes.
|
# Prefer to place dev-only setup here; use setup.sh only for runtime fixes.
|
||||||
|
# Permissions in devcontainer should be of a brutalist nature. They will be
|
||||||
|
# Open and wide to avoid permission issues during development allowing max
|
||||||
|
# flexibility.
|
||||||
|
|
||||||
|
# hadolint ignore=DL3006
|
||||||
FROM runner AS netalertx-devcontainer
|
FROM runner AS netalertx-devcontainer
|
||||||
ENV INSTALL_DIR=/app
|
ENV INSTALL_DIR=/app
|
||||||
|
|
||||||
ENV PYTHONPATH=/workspaces/NetAlertX/test:/workspaces/NetAlertX/server:/app:/app/server:/opt/venv/lib/python3.12/site-packages:/usr/lib/python3.12/site-packages
|
ENV PYTHONPATH=${PYTHONPATH}:/workspaces/NetAlertX/test:/workspaces/NetAlertX/server:/usr/lib/python3.12/site-packages
|
||||||
ENV PATH=/services:${PATH}
|
ENV PATH=/services:${PATH}
|
||||||
ENV PHP_INI_SCAN_DIR=/services/config/php/conf.d:/etc/php83/conf.d
|
ENV PHP_INI_SCAN_DIR=/services/config/php/conf.d:/etc/php83/conf.d
|
||||||
ENV LISTEN_ADDR=0.0.0.0
|
ENV LISTEN_ADDR=0.0.0.0
|
||||||
@@ -17,18 +21,39 @@ ENV PYDEVD_DISABLE_FILE_VALIDATION=1
|
|||||||
COPY .devcontainer/resources/devcontainer-overlay/ /
|
COPY .devcontainer/resources/devcontainer-overlay/ /
|
||||||
USER root
|
USER root
|
||||||
# Install common tools, create user, and set up sudo
|
# Install common tools, create user, and set up sudo
|
||||||
|
|
||||||
|
# Ensure entrypoint scripts stay executable in the devcontainer (avoids 126 errors)
|
||||||
|
RUN chmod +x /entrypoint.sh /root-entrypoint.sh /entrypoint.d/*.sh && \
|
||||||
|
chmod +x /entrypoint.d/35-apply-conf-override.sh
|
||||||
|
|
||||||
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
|
RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \
|
||||||
pytest-cov fish shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
|
pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \
|
||||||
docker-cli-compose
|
docker-cli-compose shellcheck py3-psutil chromium chromium-chromedriver
|
||||||
|
|
||||||
|
# Install hadolint (Dockerfile linter)
|
||||||
|
RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \
|
||||||
|
chmod +x /usr/local/bin/hadolint
|
||||||
|
|
||||||
RUN install -d -o netalertx -g netalertx -m 755 /services/php/modules && \
|
RUN install -d -o netalertx -g netalertx -m 755 /services/php/modules && \
|
||||||
cp -a /usr/lib/php83/modules/. /services/php/modules/ && \
|
cp -a /usr/lib/php83/modules/. /services/php/modules/ && \
|
||||||
echo "${NETALERTX_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
echo "${NETALERTX_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
||||||
RUN mkdir /workspaces && \
|
ENV SHELL=/bin/zsh
|
||||||
install -d -o netalertx -g netalertx -m 777 /services/run/logs && \
|
|
||||||
install -d -o netalertx -g netalertx -m 777 /app/run/tmp/client_body && \
|
RUN mkdir -p /workspaces && \
|
||||||
sed -i -e 's|:/app:|:/workspaces:|' /etc/passwd && \
|
install -d -m 777 /data /data/config /data/db && \
|
||||||
|
install -d -m 777 /tmp/log /tmp/log/plugins /tmp/api /tmp/run /tmp/nginx && \
|
||||||
|
install -d -m 777 /tmp/nginx/active-config /tmp/nginx/client_body /tmp/nginx/config && \
|
||||||
|
install -d -m 777 /tmp/nginx/fastcgi /tmp/nginx/proxy /tmp/nginx/scgi /tmp/nginx/uwsgi && \
|
||||||
|
install -d -m 777 /tmp/run/tmp /tmp/run/logs && \
|
||||||
|
chmod 777 /workspaces && \
|
||||||
|
chown -R netalertx:netalertx /data && \
|
||||||
|
chmod 666 /data/config/app.conf /data/db/app.db && \
|
||||||
|
chmod 1777 /tmp && \
|
||||||
|
install -d -o root -g root -m 1777 /tmp/.X11-unix && \
|
||||||
|
mkdir -p /home/netalertx && \
|
||||||
|
chown netalertx:netalertx /home/netalertx && \
|
||||||
|
sed -i -e 's#/app:#/workspaces:#' /etc/passwd && \
|
||||||
find /opt/venv -type d -exec chmod o+rwx {} \;
|
find /opt/venv -type d -exec chmod o+rwx {} \;
|
||||||
|
|
||||||
USER netalertx
|
USER netalertx
|
||||||
ENTRYPOINT ["/bin/sh","-c","sleep infinity"]
|
ENTRYPOINT ["/bin/sh","-c","sleep infinity"]
|
||||||
|
|||||||
@@ -1,118 +0,0 @@
|
|||||||
# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh
|
|
||||||
# Generated from: install/production-filesystem/services/config/nginx/netalertx.conf.template
|
|
||||||
|
|
||||||
# Set number of worker processes automatically based on number of CPU cores.
|
|
||||||
worker_processes auto;
|
|
||||||
|
|
||||||
# Enables the use of JIT for regular expressions to speed-up their processing.
|
|
||||||
pcre_jit on;
|
|
||||||
|
|
||||||
# Configures default error logger.
|
|
||||||
error_log /app/log/nginx-error.log warn;
|
|
||||||
|
|
||||||
events {
|
|
||||||
# The maximum number of simultaneous connections that can be opened by
|
|
||||||
# a worker process.
|
|
||||||
worker_connections 1024;
|
|
||||||
}
|
|
||||||
|
|
||||||
http {
|
|
||||||
|
|
||||||
# Mapping of temp paths for various nginx modules.
|
|
||||||
client_body_temp_path /services/run/tmp/client_body;
|
|
||||||
proxy_temp_path /services/run/tmp/proxy;
|
|
||||||
fastcgi_temp_path /services/run/tmp/fastcgi;
|
|
||||||
uwsgi_temp_path /services/run/tmp/uwsgi;
|
|
||||||
scgi_temp_path /services/run/tmp/scgi;
|
|
||||||
|
|
||||||
# Includes mapping of file name extensions to MIME types of responses
|
|
||||||
# and defines the default type.
|
|
||||||
include /services/config/nginx/mime.types;
|
|
||||||
default_type application/octet-stream;
|
|
||||||
|
|
||||||
# Name servers used to resolve names of upstream servers into addresses.
|
|
||||||
# It's also needed when using tcpsocket and udpsocket in Lua modules.
|
|
||||||
#resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001];
|
|
||||||
|
|
||||||
# Don't tell nginx version to the clients. Default is 'on'.
|
|
||||||
server_tokens off;
|
|
||||||
|
|
||||||
# Specifies the maximum accepted body size of a client request, as
|
|
||||||
# indicated by the request header Content-Length. If the stated content
|
|
||||||
# length is greater than this size, then the client receives the HTTP
|
|
||||||
# error code 413. Set to 0 to disable. Default is '1m'.
|
|
||||||
client_max_body_size 1m;
|
|
||||||
|
|
||||||
# Sendfile copies data between one FD and other from within the kernel,
|
|
||||||
# which is more efficient than read() + write(). Default is off.
|
|
||||||
sendfile on;
|
|
||||||
|
|
||||||
# Causes nginx to attempt to send its HTTP response head in one packet,
|
|
||||||
# instead of using partial frames. Default is 'off'.
|
|
||||||
tcp_nopush on;
|
|
||||||
|
|
||||||
|
|
||||||
# Enables the specified protocols. Default is TLSv1 TLSv1.1 TLSv1.2.
|
|
||||||
# TIP: If you're not obligated to support ancient clients, remove TLSv1.1.
|
|
||||||
ssl_protocols TLSv1.2 TLSv1.3;
|
|
||||||
|
|
||||||
# Path of the file with Diffie-Hellman parameters for EDH ciphers.
|
|
||||||
# TIP: Generate with: `openssl dhparam -out /etc/ssl/nginx/dh2048.pem 2048`
|
|
||||||
#ssl_dhparam /etc/ssl/nginx/dh2048.pem;
|
|
||||||
|
|
||||||
# Specifies that our cipher suits should be preferred over client ciphers.
|
|
||||||
# Default is 'off'.
|
|
||||||
ssl_prefer_server_ciphers on;
|
|
||||||
|
|
||||||
# Enables a shared SSL cache with size that can hold around 8000 sessions.
|
|
||||||
# Default is 'none'.
|
|
||||||
ssl_session_cache shared:SSL:2m;
|
|
||||||
|
|
||||||
# Specifies a time during which a client may reuse the session parameters.
|
|
||||||
# Default is '5m'.
|
|
||||||
ssl_session_timeout 1h;
|
|
||||||
|
|
||||||
# Disable TLS session tickets (they are insecure). Default is 'on'.
|
|
||||||
ssl_session_tickets off;
|
|
||||||
|
|
||||||
|
|
||||||
# Enable gzipping of responses.
|
|
||||||
gzip on;
|
|
||||||
|
|
||||||
# Set the Vary HTTP header as defined in the RFC 2616. Default is 'off'.
|
|
||||||
gzip_vary on;
|
|
||||||
|
|
||||||
|
|
||||||
# Specifies the main log format.
|
|
||||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
|
||||||
'$status $body_bytes_sent "$http_referer" '
|
|
||||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
|
||||||
|
|
||||||
# Sets the path, format, and configuration for a buffered log write.
|
|
||||||
access_log /app/log/nginx-access.log main;
|
|
||||||
|
|
||||||
|
|
||||||
# Virtual host config
|
|
||||||
server {
|
|
||||||
listen 0.0.0.0:20211 default_server;
|
|
||||||
large_client_header_buffers 4 16k;
|
|
||||||
root /app/front;
|
|
||||||
index index.php;
|
|
||||||
add_header X-Forwarded-Prefix "/app" always;
|
|
||||||
|
|
||||||
|
|
||||||
location ~* \.php$ {
|
|
||||||
# Set Cache-Control header to prevent caching on the first load
|
|
||||||
add_header Cache-Control "no-store";
|
|
||||||
fastcgi_pass unix:/services/run/php.sock;
|
|
||||||
include /services/config/nginx/fastcgi_params;
|
|
||||||
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
|
|
||||||
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
|
|
||||||
|
|
||||||
fastcgi_param PHP_VALUE "xdebug.remote_enable=1";
|
|
||||||
fastcgi_connect_timeout 75;
|
|
||||||
fastcgi_send_timeout 600;
|
|
||||||
fastcgi_read_timeout 600;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -3,7 +3,7 @@ extension_dir="/services/php/modules"
|
|||||||
|
|
||||||
[xdebug]
|
[xdebug]
|
||||||
xdebug.mode=develop,debug
|
xdebug.mode=develop,debug
|
||||||
xdebug.log=/app/log/xdebug.log
|
xdebug.log=/tmp/log/xdebug.log
|
||||||
xdebug.log_level=7
|
xdebug.log_level=7
|
||||||
xdebug.client_host=127.0.0.1
|
xdebug.client_host=127.0.0.1
|
||||||
xdebug.client_port=9003
|
xdebug.client_port=9003
|
||||||
|
|||||||
@@ -0,0 +1,47 @@
|
|||||||
|
# NetAlertX devcontainer zsh configuration
|
||||||
|
# Keep this lightweight and deterministic so shells behave consistently.
|
||||||
|
|
||||||
|
export PATH="$HOME/.local/bin:$PATH"
|
||||||
|
export EDITOR=vim
|
||||||
|
export SHELL=/bin/zsh
|
||||||
|
|
||||||
|
# Start inside the workspace if it exists
|
||||||
|
if [ -d "/workspaces/NetAlertX" ]; then
|
||||||
|
cd /workspaces/NetAlertX
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Enable basic completion and prompt helpers
|
||||||
|
autoload -Uz compinit promptinit colors
|
||||||
|
colors
|
||||||
|
compinit -u
|
||||||
|
promptinit
|
||||||
|
|
||||||
|
# Friendly prompt with virtualenv awareness
|
||||||
|
setopt PROMPT_SUBST
|
||||||
|
|
||||||
|
_venv_segment() {
|
||||||
|
if [ -n "$VIRTUAL_ENV" ]; then
|
||||||
|
printf '(%s) ' "${VIRTUAL_ENV:t}"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
PROMPT='%F{green}$(_venv_segment)%f%F{cyan}%n@%m%f %F{yellow}%~%f %# '
|
||||||
|
RPROMPT='%F{magenta}$(git rev-parse --abbrev-ref HEAD 2>/dev/null)%f'
|
||||||
|
|
||||||
|
# Sensible defaults
|
||||||
|
setopt autocd
|
||||||
|
setopt correct
|
||||||
|
setopt extendedglob
|
||||||
|
HISTFILE="$HOME/.zsh_history"
|
||||||
|
HISTSIZE=5000
|
||||||
|
SAVEHIST=5000
|
||||||
|
|
||||||
|
alias ll='ls -alF'
|
||||||
|
alias la='ls -A'
|
||||||
|
alias gs='git status -sb'
|
||||||
|
alias gp='git pull --ff-only'
|
||||||
|
|
||||||
|
# Ensure pyenv/virtualenv activate hooks adjust the prompt cleanly
|
||||||
|
if [ -f "$HOME/.zshrc.local" ]; then
|
||||||
|
source "$HOME/.zshrc.local"
|
||||||
|
fi
|
||||||
180
.devcontainer/scripts/coderabbit-pr-parser.py
Normal file
180
.devcontainer/scripts/coderabbit-pr-parser.py
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import textwrap
|
||||||
|
|
||||||
|
# Default Configuration
|
||||||
|
REPO = "jokob-sk/NetAlertX"
|
||||||
|
DEFAULT_PR_NUM = "1405"
|
||||||
|
|
||||||
|
|
||||||
|
def get_pr_threads(pr_num):
|
||||||
|
"""Fetches unresolved review threads using GitHub GraphQL API."""
|
||||||
|
# Validate PR number early to avoid passing invalid values to subprocess
|
||||||
|
try:
|
||||||
|
pr_int = int(pr_num)
|
||||||
|
if pr_int <= 0:
|
||||||
|
raise ValueError
|
||||||
|
except Exception:
|
||||||
|
print(f"Error: Invalid PR number: {pr_num}. Must be a positive integer.")
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
query = """
|
||||||
|
query($owner: String!, $name: String!, $number: Int!) {
|
||||||
|
repository(owner: $owner, name: $name) {
|
||||||
|
pullRequest(number: $number) {
|
||||||
|
reviewThreads(last: 100) {
|
||||||
|
nodes {
|
||||||
|
isResolved
|
||||||
|
isOutdated
|
||||||
|
comments(first: 1) {
|
||||||
|
nodes {
|
||||||
|
body
|
||||||
|
author { login }
|
||||||
|
path
|
||||||
|
line
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
owner, name = REPO.split("/")
|
||||||
|
cmd = ["gh", "api", "graphql", "-F", f"owner={owner}", "-F", f"name={name}", "-F", f"number={pr_int}", "-f", f"query={query}"]
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=60)
|
||||||
|
return json.loads(result.stdout)
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
print(f"Error: Command timed out after 60 seconds: {' '.join(cmd)}")
|
||||||
|
sys.exit(1)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
print(f"Error fetching PR threads: {e.stderr}")
|
||||||
|
sys.exit(1)
|
||||||
|
except FileNotFoundError:
|
||||||
|
print("Error: 'gh' CLI not found. Please install GitHub CLI.")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def clean_block(text):
|
||||||
|
"""Cleans up markdown/HTML noise from text."""
|
||||||
|
# Remove HTML comments
|
||||||
|
text = re.sub(r"<!--.*?-->", "", text, flags=re.DOTALL)
|
||||||
|
# Remove metadata lines
|
||||||
|
text = re.sub(r"^\s*Status:\s*\w+", "", text, flags=re.MULTILINE)
|
||||||
|
# Remove code block fences
|
||||||
|
text = text.replace("```diff", "").replace("```", "")
|
||||||
|
# Flatten whitespace
|
||||||
|
lines = [line.strip() for line in text.split("\n") if line.strip()]
|
||||||
|
return " ".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def extract_ai_tasks(text):
|
||||||
|
"""Extracts tasks specifically from the 'Fix all issues with AI agents' block."""
|
||||||
|
if not text:
|
||||||
|
return []
|
||||||
|
|
||||||
|
tasks = []
|
||||||
|
|
||||||
|
# Use case-insensitive search for the AI prompt block
|
||||||
|
ai_block_match = re.search(r"(?i)Prompt for AI Agents.*?\n```(.*?)```", text, re.DOTALL)
|
||||||
|
|
||||||
|
if ai_block_match:
|
||||||
|
ai_text = ai_block_match.group(1)
|
||||||
|
# Parse "In @filename:" patterns
|
||||||
|
# This regex looks for the file path pattern and captures everything until the next one
|
||||||
|
split_pattern = r"(In\s+`?@[\w\-\./]+`?:)"
|
||||||
|
parts = re.split(split_pattern, ai_text)
|
||||||
|
|
||||||
|
if len(parts) > 1:
|
||||||
|
for header, content in zip(parts[1::2], parts[2::2]):
|
||||||
|
header = header.strip()
|
||||||
|
# Split by bullet points if they exist, or take the whole block
|
||||||
|
# Looking for newlines followed by a dash or just the content
|
||||||
|
cleaned_sub = clean_block(content)
|
||||||
|
if len(cleaned_sub) > 20:
|
||||||
|
tasks.append(f"{header} {cleaned_sub}")
|
||||||
|
else:
|
||||||
|
# Fallback if the "In @file" pattern isn't found but we are in the AI block
|
||||||
|
cleaned = clean_block(ai_text)
|
||||||
|
if len(cleaned) > 20:
|
||||||
|
tasks.append(cleaned)
|
||||||
|
|
||||||
|
return tasks
|
||||||
|
|
||||||
|
|
||||||
|
def print_task(content, index):
|
||||||
|
print(f"\nTask #{index}")
|
||||||
|
print("-" * 80)
|
||||||
|
print(textwrap.fill(content, width=80))
|
||||||
|
print("-" * 80)
|
||||||
|
print("1. Plan of action(very brief):")
|
||||||
|
print("2. Actions taken (very brief):")
|
||||||
|
print("3. quality checks")
|
||||||
|
print("- [ ] Issue fully addressed")
|
||||||
|
print("- [ ] Unit tests pass")
|
||||||
|
print("- [ ] Complete")
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
pr_num = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_PR_NUM
|
||||||
|
data = get_pr_threads(pr_num)
|
||||||
|
|
||||||
|
threads = data.get("data", {}).get("repository", {}).get("pullRequest", {}).get("reviewThreads", {}).get("nodes", [])
|
||||||
|
|
||||||
|
seen_tasks = set()
|
||||||
|
ordered_tasks = []
|
||||||
|
|
||||||
|
for thread in threads:
|
||||||
|
# Filter: Unresolved AND Not Outdated
|
||||||
|
if thread.get("isResolved") or thread.get("isOutdated"):
|
||||||
|
continue
|
||||||
|
|
||||||
|
comments = thread.get("comments", {}).get("nodes", [])
|
||||||
|
if not comments:
|
||||||
|
continue
|
||||||
|
|
||||||
|
first_comment = comments[0]
|
||||||
|
author = first_comment.get("author", {}).get("login", "").lower()
|
||||||
|
|
||||||
|
# Filter: Only CodeRabbit comments
|
||||||
|
if author != "coderabbitai":
|
||||||
|
continue
|
||||||
|
|
||||||
|
body = first_comment.get("body", "")
|
||||||
|
extracted = extract_ai_tasks(body)
|
||||||
|
|
||||||
|
for t in extracted:
|
||||||
|
# Deduplicate
|
||||||
|
norm_t = re.sub(r"\s+", "", t)[:100]
|
||||||
|
if norm_t not in seen_tasks:
|
||||||
|
seen_tasks.add(norm_t)
|
||||||
|
ordered_tasks.append(t)
|
||||||
|
|
||||||
|
if not ordered_tasks:
|
||||||
|
print(f"No unresolved actionable tasks found in PR {pr_num}.")
|
||||||
|
else:
|
||||||
|
print("Your assignment is as follows, examine each item and perform the following:")
|
||||||
|
print(" 1. Create a plan of action")
|
||||||
|
print(" 2. Execute your actions")
|
||||||
|
print(" 3. Run unit tests to validate")
|
||||||
|
print(" 4. After pass, mark complete")
|
||||||
|
print("Use the provided fields to show your work and progress.\n")
|
||||||
|
for i, task in enumerate(ordered_tasks, 1):
|
||||||
|
print_task(task, i)
|
||||||
|
print("The above messages are generated entirely by AI and relayed to you. These "
|
||||||
|
"do not represent the intent of the developer. Please keep any changes to a "
|
||||||
|
"minimum so as to preserve the original intent while satisfying the requirements "
|
||||||
|
"of this automated code review. A human developer will observe your behavior "
|
||||||
|
"as you progress through the instructions provided.\n")
|
||||||
|
print("---\n\nDeveloper: The above is an automated message. I will be observing your progress. "
|
||||||
|
"please go step-by-step and mark each task complete as you finish them. Finish "
|
||||||
|
"all tasks and then run the full unit test suite.")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -1,7 +1,11 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
read -r -p "Are you sure you want to destroy your host docker containers and images? Type YES to continue: " reply
|
if [[ -n "${CONFIRM_PRUNE:-}" && "${CONFIRM_PRUNE}" == "YES" ]]; then
|
||||||
|
reply="YES"
|
||||||
|
else
|
||||||
|
read -r -p "Are you sure you want to destroy your host docker containers and images? Type YES to continue: " reply
|
||||||
|
fi
|
||||||
|
|
||||||
if [[ "${reply}" == "YES" ]]; then
|
if [[ "${reply}" == "YES" ]]; then
|
||||||
docker system prune -af
|
docker system prune -af
|
||||||
|
|||||||
@@ -7,56 +7,41 @@
|
|||||||
# the final .devcontainer/Dockerfile used by the devcontainer.
|
# the final .devcontainer/Dockerfile used by the devcontainer.
|
||||||
|
|
||||||
echo "Generating .devcontainer/Dockerfile"
|
echo "Generating .devcontainer/Dockerfile"
|
||||||
SCRIPT_DIR="$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)"
|
SCRIPT_PATH=$(set -- "$0"; dirname -- "$1")
|
||||||
|
SCRIPT_DIR=$(cd "$SCRIPT_PATH" && pwd -P)
|
||||||
DEVCONTAINER_DIR="${SCRIPT_DIR%/scripts}"
|
DEVCONTAINER_DIR="${SCRIPT_DIR%/scripts}"
|
||||||
ROOT_DIR="${DEVCONTAINER_DIR%/.devcontainer}"
|
ROOT_DIR="${DEVCONTAINER_DIR%/.devcontainer}"
|
||||||
|
|
||||||
OUT_FILE="${DEVCONTAINER_DIR}/Dockerfile"
|
OUT_FILE="${DEVCONTAINER_DIR}/Dockerfile"
|
||||||
|
|
||||||
echo "Adding base Dockerfile from $ROOT_DIR..."
|
echo "Adding base Dockerfile from $ROOT_DIR and merging to devcontainer-Dockerfile"
|
||||||
|
{
|
||||||
|
|
||||||
echo "# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh" > "$OUT_FILE"
|
echo "# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh"
|
||||||
echo "" >> "$OUT_FILE"
|
echo ""
|
||||||
echo "# ---/Dockerfile---" >> "$OUT_FILE"
|
echo "# ---/Dockerfile---"
|
||||||
|
|
||||||
cat "${ROOT_DIR}/Dockerfile" >> "$OUT_FILE"
|
cat "${ROOT_DIR}/Dockerfile"
|
||||||
|
|
||||||
echo "" >> "$OUT_FILE"
|
echo ""
|
||||||
echo "# ---/resources/devcontainer-Dockerfile---" >> "$OUT_FILE"
|
echo "# ---/resources/devcontainer-Dockerfile---"
|
||||||
echo "" >> "$OUT_FILE"
|
echo ""
|
||||||
|
cat "${DEVCONTAINER_DIR}/resources/devcontainer-Dockerfile"
|
||||||
|
} > "$OUT_FILE"
|
||||||
|
|
||||||
echo "Adding devcontainer-Dockerfile from $DEVCONTAINER_DIR/resources..."
|
echo "Generated $OUT_FILE using root dir $ROOT_DIR"
|
||||||
cat "${DEVCONTAINER_DIR}/resources/devcontainer-Dockerfile" >> "$OUT_FILE"
|
|
||||||
|
|
||||||
echo "Generated $OUT_FILE using root dir $ROOT_DIR" >&2
|
# Passive Gemini MCP config
|
||||||
|
TOKEN=$(grep '^API_TOKEN=' /data/config/app.conf 2>/dev/null | cut -d"'" -f2)
|
||||||
|
if [ -n "${TOKEN}" ]; then
|
||||||
|
mkdir -p "${ROOT_DIR}/.gemini"
|
||||||
|
[ -f "${ROOT_DIR}/.gemini/settings.json" ] || echo "{}" > "${ROOT_DIR}/.gemini/settings.json"
|
||||||
|
jq --arg t "$TOKEN" '.mcpServers["netalertx-devcontainer"] = {url: "http://127.0.0.1:20212/mcp/sse", headers: {Authorization: ("Bearer " + $t)}}' "${ROOT_DIR}/.gemini/settings.json" > "${ROOT_DIR}/.gemini/settings.json.tmp" && mv "${ROOT_DIR}/.gemini/settings.json.tmp" "${ROOT_DIR}/.gemini/settings.json"
|
||||||
|
|
||||||
# Generate devcontainer nginx config from production template
|
# VS Code MCP config
|
||||||
echo "Generating devcontainer nginx config"
|
mkdir -p "${ROOT_DIR}/.vscode"
|
||||||
NGINX_TEMPLATE="${ROOT_DIR}/install/production-filesystem/services/config/nginx/netalertx.conf.template"
|
[ -f "${ROOT_DIR}/.vscode/mcp.json" ] || echo "{}" > "${ROOT_DIR}/.vscode/mcp.json"
|
||||||
NGINX_OUT="${DEVCONTAINER_DIR}/resources/devcontainer-overlay/services/config/nginx/netalertx.conf.template"
|
jq --arg t "$TOKEN" '.servers["netalertx-devcontainer"] = {type: "sse", url: "http://127.0.0.1:20212/mcp/sse", headers: {Authorization: ("Bearer " + $t)}}' "${ROOT_DIR}/.vscode/mcp.json" > "${ROOT_DIR}/.vscode/mcp.json.tmp" && mv "${ROOT_DIR}/.vscode/mcp.json.tmp" "${ROOT_DIR}/.vscode/mcp.json"
|
||||||
|
fi
|
||||||
# Create output directory if it doesn't exist
|
|
||||||
mkdir -p "$(dirname "$NGINX_OUT")"
|
|
||||||
|
|
||||||
# Start with header comment
|
|
||||||
cat > "$NGINX_OUT" << 'EOF'
|
|
||||||
# DO NOT MODIFY THIS FILE DIRECTLY. IT IS AUTO-GENERATED BY .devcontainer/scripts/generate-configs.sh
|
|
||||||
# Generated from: install/production-filesystem/services/config/nginx/netalertx.conf.template
|
|
||||||
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Process the template: replace listen directive and inject Xdebug params
|
|
||||||
sed 's/${LISTEN_ADDR}:${PORT}/0.0.0.0:20211/g' "$NGINX_TEMPLATE" | \
|
|
||||||
awk '
|
|
||||||
/fastcgi_param SCRIPT_NAME \$fastcgi_script_name;/ {
|
|
||||||
print $0
|
|
||||||
print ""
|
|
||||||
print " fastcgi_param PHP_VALUE \"xdebug.remote_enable=1\";"
|
|
||||||
next
|
|
||||||
}
|
|
||||||
{ print }
|
|
||||||
' >> "$NGINX_OUT"
|
|
||||||
|
|
||||||
echo "Generated $NGINX_OUT from $NGINX_TEMPLATE" >&2
|
|
||||||
|
|
||||||
echo "Done."
|
echo "Done."
|
||||||
78
.devcontainer/scripts/load-devices.sh
Executable file
78
.devcontainer/scripts/load-devices.sh
Executable file
@@ -0,0 +1,78 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
|
||||||
|
if [ -n "${CSV_PATH:-}" ]; then
|
||||||
|
: # user provided CSV_PATH
|
||||||
|
else
|
||||||
|
# Portable mktemp fallback: try GNU coreutils first, then busybox-style
|
||||||
|
if mktemp --version >/dev/null 2>&1; then
|
||||||
|
CSV_PATH="$(mktemp --tmpdir netalertx-devices-XXXXXX.csv 2>/dev/null || mktemp /tmp/netalertx-devices-XXXXXX.csv)"
|
||||||
|
else
|
||||||
|
CSV_PATH="$(mktemp -t netalertx-devices.XXXXXX 2>/dev/null || mktemp /tmp/netalertx-devices-XXXXXX.csv)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
DEVICE_COUNT="${DEVICE_COUNT:-255}"
|
||||||
|
SEED="${SEED:-20211}"
|
||||||
|
NETWORK_CIDR="${NETWORK_CIDR:-192.168.50.0/22}"
|
||||||
|
DB_DIR="${NETALERTX_DB:-/data/db}"
|
||||||
|
DB_FILE="${DB_DIR%/}/app.db"
|
||||||
|
|
||||||
|
# Ensure we are inside the devcontainer
|
||||||
|
"${SCRIPT_DIR}/isDevContainer.sh" >/dev/null
|
||||||
|
|
||||||
|
if [ ! -f "${DB_FILE}" ]; then
|
||||||
|
echo "[load-devices] Database not found at ${DB_FILE}. Is the devcontainer initialized?" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! command -v sqlite3 >/dev/null 2>&1; then
|
||||||
|
echo "[load-devices] sqlite3 is required but not installed." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if ! command -v python3 >/dev/null 2>&1; then
|
||||||
|
echo "[load-devices] python3 is required but not installed." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if ! command -v curl >/dev/null 2>&1; then
|
||||||
|
echo "[load-devices] curl is required but not installed." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Generate synthetic device inventory CSV
|
||||||
|
python3 "${REPO_ROOT}/scripts/generate-device-inventory.py" \
|
||||||
|
--output "${CSV_PATH}" \
|
||||||
|
--devices "${DEVICE_COUNT}" \
|
||||||
|
--seed "${SEED}" \
|
||||||
|
--network "${NETWORK_CIDR}" >/dev/null
|
||||||
|
|
||||||
|
echo "[load-devices] CSV generated at ${CSV_PATH} (devices=${DEVICE_COUNT}, seed=${SEED})"
|
||||||
|
|
||||||
|
API_TOKEN="$(sqlite3 "${DB_FILE}" "SELECT setValue FROM Settings WHERE setKey='API_TOKEN';")"
|
||||||
|
GRAPHQL_PORT="$(sqlite3 "${DB_FILE}" "SELECT setValue FROM Settings WHERE setKey='GRAPHQL_PORT';")"
|
||||||
|
|
||||||
|
if [ -z "${API_TOKEN}" ] || [ -z "${GRAPHQL_PORT}" ]; then
|
||||||
|
echo "[load-devices] Failed to read API_TOKEN or GRAPHQL_PORT from ${DB_FILE}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
IMPORT_URL="http://localhost:${GRAPHQL_PORT}/devices/import"
|
||||||
|
|
||||||
|
HTTP_CODE=$(curl -sS -o /tmp/load-devices-response.json -w "%{http_code}" \
|
||||||
|
-X POST "${IMPORT_URL}" \
|
||||||
|
-H "Authorization: Bearer ${API_TOKEN}" \
|
||||||
|
-F "file=@${CSV_PATH}")
|
||||||
|
|
||||||
|
if [ "${HTTP_CODE}" != "200" ]; then
|
||||||
|
echo "[load-devices] Import failed with HTTP ${HTTP_CODE}. Response:" >&2
|
||||||
|
cat /tmp/load-devices-response.json >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Fetch totals for a quick sanity check
|
||||||
|
TOTALS=$(curl -sS -H "Authorization: Bearer ${API_TOKEN}" "http://localhost:${GRAPHQL_PORT}/devices/totals" || true)
|
||||||
|
|
||||||
|
echo "[load-devices] Import succeeded (HTTP ${HTTP_CODE})."
|
||||||
|
echo "[load-devices] Devices totals: ${TOTALS}"
|
||||||
|
echo "[load-devices] Done. CSV kept at ${CSV_PATH}"
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
# shellcheck shell=sh
|
|
||||||
# Simple helper to run pytest inside the devcontainer with correct paths
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
# Ensure we run from the workspace root
|
|
||||||
cd /workspaces/NetAlertX
|
|
||||||
|
|
||||||
# Make sure PYTHONPATH includes server and workspace
|
|
||||||
export PYTHONPATH="/workspaces/NetAlertX:/workspaces/NetAlertX/server:/app:/app/server:${PYTHONPATH:-}"
|
|
||||||
|
|
||||||
# Default to running the full test suite under /workspaces/NetAlertX/test
|
|
||||||
pytest -q --maxfail=1 --disable-warnings test "$@"
|
|
||||||
@@ -1,184 +1,105 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# Runtime setup for devcontainer (executed after container starts).
|
# NetAlertX Devcontainer Setup Script
|
||||||
# Prefer building setup into resources/devcontainer-Dockerfile when possible.
|
#
|
||||||
# Use this script for runtime-only adjustments (permissions, sockets, ownership,
|
# This script forcefully resets all runtime state for a single-user devcontainer.
|
||||||
# and services managed without init) that are difficult at build time.
|
# It is intentionally idempotent: every run wipes and recreates all relevant folders,
|
||||||
id
|
# symlinks, and files, so the environment is always fresh and predictable.
|
||||||
|
#
|
||||||
# Define variables (paths, ports, environment)
|
# - No conditional logic: everything is (re)created, overwritten, or reset unconditionally.
|
||||||
|
# - No security hardening: this is for disposable, local dev use only.
|
||||||
export APP_DIR="/app"
|
# - No checks for existing files, mounts, or processes—just do the work.
|
||||||
export APP_COMMAND="/workspaces/NetAlertX/.devcontainer/scripts/restart-backend.sh"
|
#
|
||||||
export PHP_FPM_BIN="/usr/sbin/php-fpm83"
|
# If you add new runtime files or folders, add them to the creation/reset section below.
|
||||||
export CROND_BIN="/usr/sbin/crond -f"
|
#
|
||||||
|
# Do not add if-then logic or error handling for missing/existing files. Simplicity is the goal.
|
||||||
|
|
||||||
|
|
||||||
export ALWAYS_FRESH_INSTALL=false
|
SOURCE_DIR=${SOURCE_DIR:-/workspaces/NetAlertX}
|
||||||
export INSTALL_DIR=/app
|
PY_SITE_PACKAGES="${VIRTUAL_ENV:-/opt/venv}/lib/python3.12/site-packages"
|
||||||
export LOGS_LOCATION=/app/logs
|
|
||||||
export CONF_FILE="app.conf"
|
LOG_FILES=(
|
||||||
export DB_FILE="app.db"
|
LOG_APP
|
||||||
export FULL_FILEDB_PATH="${INSTALL_DIR}/db/${DB_FILE}"
|
LOG_APP_FRONT
|
||||||
export OUI_FILE="/usr/share/arp-scan/ieee-oui.txt" # Define the path to ieee-oui.txt and ieee-iab.txt
|
LOG_STDOUT
|
||||||
export TZ=Europe/Paris
|
LOG_STDERR
|
||||||
export PORT=20211
|
LOG_EXECUTION_QUEUE
|
||||||
export SOURCE_DIR="/workspaces/NetAlertX"
|
LOG_APP_PHP_ERRORS
|
||||||
|
LOG_IP_CHANGES
|
||||||
|
LOG_CRON
|
||||||
|
LOG_REPORT_OUTPUT_TXT
|
||||||
|
LOG_REPORT_OUTPUT_HTML
|
||||||
|
LOG_REPORT_OUTPUT_JSON
|
||||||
|
LOG_DB_IS_LOCKED
|
||||||
|
LOG_NGINX_ERROR
|
||||||
|
)
|
||||||
|
sudo chmod 666 /var/run/docker.sock 2>/dev/null || true
|
||||||
|
sudo chown "$(id -u)":"$(id -g)" /workspaces
|
||||||
|
sudo chmod 755 /workspaces
|
||||||
|
|
||||||
|
killall php-fpm83 nginx crond python3 2>/dev/null || true
|
||||||
|
|
||||||
|
# Mount ramdisks for volatile data
|
||||||
|
sudo mount -t tmpfs -o size=100m,mode=0777 tmpfs /tmp/log 2>/dev/null || true
|
||||||
|
sudo mount -t tmpfs -o size=50m,mode=0777 tmpfs /tmp/api 2>/dev/null || true
|
||||||
|
sudo mount -t tmpfs -o size=50m,mode=0777 tmpfs /tmp/run 2>/dev/null || true
|
||||||
|
sudo mount -t tmpfs -o size=50m,mode=0777 tmpfs /tmp/nginx 2>/dev/null || true
|
||||||
|
|
||||||
|
sudo chmod 777 /tmp/log /tmp/api /tmp/run /tmp/nginx
|
||||||
|
|
||||||
|
# Create critical subdirectories immediately after tmpfs mount
|
||||||
|
sudo install -d -m 777 /tmp/run/tmp
|
||||||
|
sudo install -d -m 777 /tmp/log/plugins
|
||||||
|
|
||||||
|
|
||||||
ensure_docker_socket_access() {
|
sudo rm -rf /entrypoint.d
|
||||||
local socket="/var/run/docker.sock"
|
sudo ln -s "${SOURCE_DIR}/install/production-filesystem/entrypoint.d" /entrypoint.d
|
||||||
if [ ! -S "${socket}" ]; then
|
|
||||||
echo "docker socket not present; skipping docker group configuration"
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
local sock_gid
|
sudo rm -rf /services
|
||||||
sock_gid=$(stat -c '%g' "${socket}" 2>/dev/null || true)
|
sudo ln -s "${SOURCE_DIR}/install/production-filesystem/services" /services
|
||||||
if [ -z "${sock_gid}" ]; then
|
|
||||||
echo "unable to determine docker socket gid; skipping docker group configuration"
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
local group_entry=""
|
sudo rm -rf "${NETALERTX_APP}"
|
||||||
if command -v getent >/dev/null 2>&1; then
|
sudo ln -s "${SOURCE_DIR}/" "${NETALERTX_APP}"
|
||||||
group_entry=$(getent group "${sock_gid}" 2>/dev/null || true)
|
|
||||||
else
|
|
||||||
group_entry=$(grep -E ":${sock_gid}:" /etc/group 2>/dev/null || true)
|
|
||||||
fi
|
|
||||||
|
|
||||||
local group_name=""
|
for dir in "${NETALERTX_DATA}" "${NETALERTX_CONFIG}" "${NETALERTX_DB}"; do
|
||||||
if [ -n "${group_entry}" ]; then
|
sudo install -d -m 777 "${dir}"
|
||||||
group_name=$(echo "${group_entry}" | cut -d: -f1)
|
done
|
||||||
else
|
|
||||||
group_name="docker-host"
|
|
||||||
sudo addgroup -g "${sock_gid}" "${group_name}" 2>/dev/null || group_name=$(grep -E ":${sock_gid}:" /etc/group | head -n1 | cut -d: -f1)
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "${group_name}" ]; then
|
for dir in \
|
||||||
echo "failed to resolve group for docker socket gid ${sock_gid}; skipping docker group configuration"
|
"${SYSTEM_SERVICES_RUN_LOG}" \
|
||||||
return
|
"${SYSTEM_SERVICES_ACTIVE_CONFIG}" \
|
||||||
fi
|
"${NETALERTX_PLUGINS_LOG}" \
|
||||||
|
"${SYSTEM_SERVICES_RUN_TMP}" \
|
||||||
if ! id -nG netalertx | tr ' ' '\n' | grep -qx "${group_name}"; then
|
"/tmp/nginx/client_body" \
|
||||||
sudo addgroup netalertx "${group_name}" 2>/dev/null || true
|
"/tmp/nginx/proxy" \
|
||||||
fi
|
"/tmp/nginx/fastcgi" \
|
||||||
}
|
"/tmp/nginx/uwsgi" \
|
||||||
|
"/tmp/nginx/scgi"; do
|
||||||
|
sudo install -d -m 777 "${dir}"
|
||||||
|
done
|
||||||
|
|
||||||
|
|
||||||
main() {
|
for var in "${LOG_FILES[@]}"; do
|
||||||
echo "=== NetAlertX Development Container Setup ==="
|
path=${!var}
|
||||||
killall php-fpm83 nginx crond python3 2>/dev/null
|
dir=$(dirname "${path}")
|
||||||
sleep 1
|
sudo install -d -m 777 "${dir}"
|
||||||
echo "Setting up ${SOURCE_DIR}..."
|
touch "${path}"
|
||||||
ensure_docker_socket_access
|
done
|
||||||
sudo chown $(id -u):$(id -g) /workspaces
|
|
||||||
sudo chmod 755 /workspaces
|
|
||||||
configure_source
|
|
||||||
|
|
||||||
echo "--- Starting Development Services ---"
|
|
||||||
configure_php
|
|
||||||
|
|
||||||
|
printf '0\n' | sudo tee "${LOG_DB_IS_LOCKED}" >/dev/null
|
||||||
|
sudo chmod 777 "${LOG_DB_IS_LOCKED}"
|
||||||
|
|
||||||
start_services
|
sudo pkill -f python3 2>/dev/null || true
|
||||||
}
|
|
||||||
|
|
||||||
isRamDisk() {
|
sudo chown -R "${NETALERTX_USER}:${NETALERTX_GROUP}" "${NETALERTX_APP}"
|
||||||
if [ -z "$1" ] || [ ! -d "$1" ]; then
|
date +%s | sudo tee "${NETALERTX_FRONT}/buildtimestamp.txt" >/dev/null
|
||||||
echo "Usage: isRamDisk <directory>" >&2
|
|
||||||
return 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
local fstype
|
sudo chmod 755 "${NETALERTX_APP}"
|
||||||
fstype=$(df -T "$1" | awk 'NR==2 {print $2}')
|
|
||||||
|
|
||||||
if [ "$fstype" = "tmpfs" ] || [ "$fstype" = "ramfs" ]; then
|
sudo chmod +x /entrypoint.sh
|
||||||
return 0 # Success (is a ramdisk)
|
setsid bash /entrypoint.sh &
|
||||||
else
|
sleep 1
|
||||||
return 1 # Failure (is not a ramdisk)
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Setup source directory
|
|
||||||
configure_source() {
|
|
||||||
echo "[1/4] Configuring System..."
|
|
||||||
echo " -> Setting up /services permissions"
|
|
||||||
sudo chown -R netalertx /services
|
|
||||||
|
|
||||||
echo "[2/4] Configuring Source..."
|
|
||||||
echo " -> Cleaning up previous instances"
|
|
||||||
|
|
||||||
test -e ${NETALERTX_LOG} && sudo umount "${NETALERTX_LOG}" 2>/dev/null || true
|
|
||||||
test -e ${NETALERTX_API} && sudo umount "${NETALERTX_API}" 2>/dev/null || true
|
|
||||||
test -e ${NETALERTX_APP} && sudo rm -Rf ${NETALERTX_APP}/
|
|
||||||
|
|
||||||
echo " -> Linking source to ${NETALERTX_APP}"
|
|
||||||
sudo ln -s ${SOURCE_DIR}/ ${NETALERTX_APP}
|
|
||||||
|
|
||||||
echo " -> Mounting ramdisks for /log and /api"
|
|
||||||
mkdir -p ${NETALERTX_LOG} ${NETALERTX_API}
|
|
||||||
sudo mount -o uid=$(id -u netalertx),gid=$(id -g netalertx),mode=775 -t tmpfs -o size=256M tmpfs "${NETALERTX_LOG}"
|
|
||||||
sudo mount -o uid=$(id -u netalertx),gid=$(id -g netalertx),mode=775 -t tmpfs -o size=256M tmpfs "${NETALERTX_API}"
|
|
||||||
mkdir -p ${NETALERTX_PLUGINS_LOG}
|
|
||||||
touch ${NETALERTX_PLUGINS_LOG}/.dockerignore ${NETALERTX_API}/.dockerignore
|
|
||||||
# tmpfs mounts configured with netalertx ownership and 775 permissions above
|
|
||||||
|
|
||||||
touch /app/log/nginx_error.log
|
|
||||||
echo " -> Empty log"|tee ${INSTALL_DIR}/log/app.log \
|
|
||||||
${INSTALL_DIR}/log/app_front.log \
|
|
||||||
${INSTALL_DIR}/log/stdout.log
|
|
||||||
touch ${INSTALL_DIR}/log/stderr.log \
|
|
||||||
${INSTALL_DIR}/log/execution_queue.log
|
|
||||||
echo 0 > ${INSTALL_DIR}/log/db_is_locked.log
|
|
||||||
for f in ${INSTALL_DIR}/log/*.log; do
|
|
||||||
sudo chown netalertx:www-data $f
|
|
||||||
sudo chmod 664 $f
|
|
||||||
echo "" > $f
|
|
||||||
done
|
|
||||||
|
|
||||||
mkdir -p /app/log/plugins
|
|
||||||
sudo chown -R netalertx:www-data ${INSTALL_DIR}
|
|
||||||
|
|
||||||
|
|
||||||
while ps ax | grep -v grep | grep python3 > /dev/null; do
|
|
||||||
killall python3 &>/dev/null
|
|
||||||
sleep 0.2
|
|
||||||
done
|
|
||||||
sudo chmod 777 /opt/venv/lib/python3.12/site-packages/ && \
|
|
||||||
sudo chmod 005 /opt/venv/lib/python3.12/site-packages/
|
|
||||||
sudo chmod 666 /var/run/docker.sock
|
|
||||||
|
|
||||||
echo " -> Updating build timestamp"
|
|
||||||
date +%s > ${NETALERTX_FRONT}/buildtimestamp.txt
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
# configure_php: configure PHP-FPM and enable dev debug options
|
|
||||||
configure_php() {
|
|
||||||
echo "[3/4] Configuring PHP-FPM..."
|
|
||||||
sudo chown -R netalertx:netalertx ${SYSTEM_SERVICES_RUN} 2>/dev/null || true
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
# start_services: start crond, PHP-FPM, nginx and the application
|
|
||||||
start_services() {
|
|
||||||
echo "[4/4] Starting services"
|
|
||||||
|
|
||||||
sudo chmod +x /entrypoint.sh
|
|
||||||
setsid bash /entrypoint.sh&
|
|
||||||
sleep 1
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
sudo chmod 755 /app/
|
|
||||||
echo "Development $(git rev-parse --short=8 HEAD)"| sudo tee /app/.VERSION
|
|
||||||
# Run the main function
|
|
||||||
main
|
|
||||||
|
|
||||||
# create a services readme file
|
|
||||||
echo "This folder is auto-generated by the container and devcontainer setup.sh script." > /services/README.md
|
|
||||||
echo "Any changes here will be lost on rebuild. To make permanent changes, edit files in .devcontainer or production filesystem and rebuild the container." >> /services/README.md
|
|
||||||
echo "Only make temporary/test changes in this folder, then perform a rebuild to reset." >> /services/README.md
|
|
||||||
|
|
||||||
|
echo "Development $(git rev-parse --short=8 HEAD)" | sudo tee "${NETALERTX_APP}/.VERSION" >/dev/null
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
.dockerignore
|
.dockerignore
|
||||||
|
**/.dockerignore
|
||||||
.env
|
.env
|
||||||
.git
|
.git
|
||||||
.github
|
.github
|
||||||
|
|||||||
1
.env
1
.env
@@ -6,7 +6,6 @@ LOGS_LOCATION=/path/to/docker_logs
|
|||||||
|
|
||||||
#ENVIRONMENT VARIABLES
|
#ENVIRONMENT VARIABLES
|
||||||
|
|
||||||
TZ=Europe/Paris
|
|
||||||
PORT=20211
|
PORT=20211
|
||||||
|
|
||||||
#DEVELOPMENT VARIABLES
|
#DEVELOPMENT VARIABLES
|
||||||
|
|||||||
3
.flake8
Normal file
3
.flake8
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
[flake8]
|
||||||
|
max-line-length = 180
|
||||||
|
ignore = E221,E222,E251,E203
|
||||||
31
.gemini/skills/devcontainer-management/SKILL.md
Normal file
31
.gemini/skills/devcontainer-management/SKILL.md
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
---
|
||||||
|
name: devcontainer-management
|
||||||
|
description: Guide for identifying, managing, and running commands within the NetAlertX development container. Use this when asked to run commands, testing, setup scripts, or troubleshoot container issues.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Devcontainer Management
|
||||||
|
|
||||||
|
When starting a session or performing tasks requiring the runtime environment, you must identify and use the active development container.
|
||||||
|
|
||||||
|
## Finding the Container
|
||||||
|
|
||||||
|
Run `docker ps` to list running containers. Look for an image name containing `vsc-netalertx` or similar.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker ps --format "table {{.ID}}\t{{.Image}}\t{{.Status}}\t{{.Names}}" | grep netalertx
|
||||||
|
```
|
||||||
|
|
||||||
|
- **If no container is found:** Inform the user. You cannot run integration tests or backend logic without it.
|
||||||
|
- **If multiple containers are found:** Ask the user to clarify which one to use (e.g., provide the Container ID).
|
||||||
|
|
||||||
|
## Running Commands in the Container
|
||||||
|
|
||||||
|
Prefix commands with `docker exec <CONTAINER_ID>` to run them inside the environment. Use the scripts in `/services/` to control backend and other processes.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec <CONTAINER_ID> bash /workspaces/NetAlertX/.devcontainer/scripts/setup.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
*Note: This script wipes `/tmp` ramdisks, resets DBs, and restarts services (python server, cron,php-fpm, nginx).*
|
||||||
|
|
||||||
|
```
|
||||||
52
.gemini/skills/mcp-activation/SKILL.md
Normal file
52
.gemini/skills/mcp-activation/SKILL.md
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
---
|
||||||
|
name: mcp-activation
|
||||||
|
description: Enables live interaction with the NetAlertX runtime. This skill configures the Model Context Protocol (MCP) connection, granting full API access for debugging, troubleshooting, and real-time operations including database queries, network scans, and device management.
|
||||||
|
---
|
||||||
|
|
||||||
|
# MCP Activation Skill
|
||||||
|
|
||||||
|
This skill configures the NetAlertX development environment to expose the Model Context Protocol (MCP) server to AI agents.
|
||||||
|
|
||||||
|
## Why use this?
|
||||||
|
|
||||||
|
By default, agents only have access to the static codebase (files). To perform dynamic actions—such as:
|
||||||
|
- **Querying the database** (e.g., getting device lists, events)
|
||||||
|
- **Triggering actions** (e.g., network scans, Wake-on-LAN)
|
||||||
|
- **Validating runtime state** (e.g., checking if a fix actually works)
|
||||||
|
|
||||||
|
...you need access to the **MCP Server** running inside the container. This skill sets up the necessary authentication tokens and connection configs to bridge your agent to that live server.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
1. **Devcontainer:** You must be connected to the NetAlertX devcontainer.
|
||||||
|
2. **Server Running:** The backend server must be running (to generate `app.conf` with the API token).
|
||||||
|
|
||||||
|
## Activation Steps
|
||||||
|
|
||||||
|
1. **Activate Devcontainer Skill:**
|
||||||
|
If you are not already inside the container, activate the management skill:
|
||||||
|
```text
|
||||||
|
activate_skill("devcontainer-management")
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Generate Configurations:**
|
||||||
|
Run the configuration generation script *inside* the container. This script extracts the API Token and creates the necessary settings files (`.gemini/settings.json` and `.vscode/mcp.json`).
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run inside the container
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/generate-configs.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Apply Changes:**
|
||||||
|
|
||||||
|
* **For Gemini CLI:**
|
||||||
|
The agent session must be **restarted** to load the new `.gemini/settings.json`.
|
||||||
|
> "I have generated the MCP configuration. Please **restart this session** to activate the `netalertx-devcontainer` tools."
|
||||||
|
|
||||||
|
* **For VS Code (GitHub Copilot / Cline):**
|
||||||
|
The VS Code window must be **reloaded** to pick up the new `.vscode/mcp.json`.
|
||||||
|
> "I have generated the MCP configuration. Please run **'Developer: Reload Window'** in VS Code to activate the MCP server."
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
After restarting, you should see new tools available (e.g., `netalertx-devcontainer__get_devices`).
|
||||||
15
.gemini/skills/project-navigation/SKILL.md
Normal file
15
.gemini/skills/project-navigation/SKILL.md
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
---
|
||||||
|
name: project-navigation
|
||||||
|
description: Reference for the NetAlertX codebase structure, key file paths, and configuration locations. Use this when exploring the codebase or looking for specific components like the backend entry point, frontend files, or database location.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Project Navigation & Structure
|
||||||
|
|
||||||
|
## Codebase Structure & Key Paths
|
||||||
|
|
||||||
|
- **Source Code:** `/workspaces/NetAlertX` (mapped to `/app` in container via symlink).
|
||||||
|
- **Backend Entry:** `server/api_server/api_server_start.py` (Flask) and `server/__main__.py`.
|
||||||
|
- **Frontend:** `front/` (PHP/JS).
|
||||||
|
- **Plugins:** `front/plugins/`.
|
||||||
|
- **Config:** `/data/config/app.conf` (runtime) or `back/app.conf` (default).
|
||||||
|
- **Database:** `/data/db/app.db` (SQLite).
|
||||||
78
.gemini/skills/testing-workflow/SKILL.md
Normal file
78
.gemini/skills/testing-workflow/SKILL.md
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
---
|
||||||
|
name: testing-workflow
|
||||||
|
description: Read before running tests. Detailed instructions for single, standard unit tests (fast), full suites (slow), handling authentication, and obtaining the API Token. Tests must be run when a job is complete.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Testing Workflow
|
||||||
|
After code is developed, tests must be run to ensure the integrity of the final result.
|
||||||
|
|
||||||
|
**Crucial:** Tests MUST be run inside the container to access the correct runtime environment (DB, Config, Dependencies).
|
||||||
|
|
||||||
|
## 0. Pre-requisites: Environment Check
|
||||||
|
|
||||||
|
Before running any tests, verify you are inside the development container:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ls -d /workspaces/NetAlertX
|
||||||
|
```
|
||||||
|
|
||||||
|
**IF** this directory does not exist, you are likely on the host machine. You **MUST** immediately activate the `devcontainer-management` skill to enter the container or run commands inside it.
|
||||||
|
|
||||||
|
```text
|
||||||
|
activate_skill("devcontainer-management")
|
||||||
|
```
|
||||||
|
|
||||||
|
## 1. Full Test Suite (MANDATORY DEFAULT)
|
||||||
|
|
||||||
|
Unless the user **explicitly** requests "fast" or "quick" tests, you **MUST** run the full test suite. **Do not** optimize for time. Comprehensive coverage is the priority over speed.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX; pytest test/
|
||||||
|
```
|
||||||
|
|
||||||
|
## 2. Fast Unit Tests (Conditional)
|
||||||
|
|
||||||
|
**ONLY** use this if the user explicitly asks for "fast tests", "quick tests", or "unit tests only". This **excludes** slow tests marked with `docker` or `feature_complete`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX; pytest test/ -m 'not docker and not feature_complete'
|
||||||
|
```
|
||||||
|
|
||||||
|
## 3. Running Specific Tests
|
||||||
|
|
||||||
|
To run a specific file or folder:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX; pytest test/<path_to_test>
|
||||||
|
```
|
||||||
|
|
||||||
|
*Example:*
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX; pytest test/api_endpoints/test_mcp_extended_endpoints.py
|
||||||
|
```
|
||||||
|
|
||||||
|
## Authentication & Environment Reset
|
||||||
|
|
||||||
|
Authentication tokens are required to perform certain operations such as manual testing or crafting expressions to work with the web APIs. After making code changes, you MUST reset the environment to ensure the new code is running and verify you have the latest `API_TOKEN`.
|
||||||
|
|
||||||
|
1. **Reset Environment:** Run the setup script inside the container.
|
||||||
|
```bash
|
||||||
|
bash /workspaces/NetAlertX/.devcontainer/scripts/setup.sh
|
||||||
|
```
|
||||||
|
2. **Wait for Stabilization:** Wait at least 5 seconds for services (nginx, python server, etc.) to start.
|
||||||
|
```bash
|
||||||
|
sleep 5
|
||||||
|
```
|
||||||
|
3. **Obtain Token:** Retrieve the current token from the container.
|
||||||
|
```bash
|
||||||
|
python3 -c "from helper import get_setting_value; print(get_setting_value('API_TOKEN'))"
|
||||||
|
```
|
||||||
|
|
||||||
|
The retrieved token MUST be used in all subsequent API or test calls requiring authentication.
|
||||||
|
|
||||||
|
### Troubleshooting
|
||||||
|
|
||||||
|
If tests fail with 403 Forbidden or empty tokens:
|
||||||
|
1. Verify server is running and use the setup script (`/workspaces/NetAlertX/.devcontainer/scripts/setup.sh`) if required.
|
||||||
|
2. Verify `app.conf` inside the container: `cat /data/config/app.conf`
|
||||||
|
3. Verify Python can read it: `python3 -c "from helper import get_setting_value; print(get_setting_value('API_TOKEN'))"`
|
||||||
1
.github/FUNDING.yml
vendored
1
.github/FUNDING.yml
vendored
@@ -1,3 +1,2 @@
|
|||||||
github: jokob-sk
|
github: jokob-sk
|
||||||
patreon: netalertx
|
|
||||||
buy_me_a_coffee: jokobsk
|
buy_me_a_coffee: jokobsk
|
||||||
|
|||||||
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
blank_issues_enabled: false
|
||||||
|
contact_links:
|
||||||
|
- name: 💬 Discussions
|
||||||
|
url: https://github.com/netalertx/NetAlertX/discussions
|
||||||
|
about: Ask questions or start discussions here.
|
||||||
|
- name: 🗯 Discord
|
||||||
|
url: https://discord.com/invite/NczTUTWyRr
|
||||||
|
about: Ask the community for help.
|
||||||
@@ -1,7 +1,11 @@
|
|||||||
name: Documentation Feedback 📝
|
name: ✍ Documentation Feedback
|
||||||
description: Suggest improvements, clarify inconsistencies, or report issues related to the documentation.
|
description: Suggest improvements, clarify inconsistencies, or report issues related to the documentation.
|
||||||
labels: ['documentation 📚']
|
labels: ['documentation 📚']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Is there an existing issue for this?
|
label: Is there an existing issue for this?
|
||||||
@@ -14,7 +18,7 @@ body:
|
|||||||
label: What document or section does this relate to?
|
label: What document or section does this relate to?
|
||||||
description: |
|
description: |
|
||||||
Please include a link to the file and section, if applicable. Be specific about what part of the documentation you are referencing.
|
Please include a link to the file and section, if applicable. Be specific about what part of the documentation you are referencing.
|
||||||
placeholder: e.g. https://github.com/jokob-sk/NetAlertX/blob/main/docs/FRONTEND_DEVELOPMENT.md
|
placeholder: e.g. https://docs.netalertx.com/FRONTEND_DEVELOPMENT
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
@@ -49,7 +53,7 @@ body:
|
|||||||
required: false
|
required: false
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Can I help implement this? 👩💻👨💻
|
label: Can I help implement this? 👩💻👨💻
|
||||||
description: The maintainer can provide guidance and review your changes.
|
description: The maintainer can provide guidance and review your changes.
|
||||||
options:
|
options:
|
||||||
- label: "Yes, I’d like to help implement the improvement"
|
- label: "Yes, I’d like to help implement the improvement"
|
||||||
|
|||||||
33
.github/ISSUE_TEMPLATE/enhancement-request.yml
vendored
33
.github/ISSUE_TEMPLATE/enhancement-request.yml
vendored
@@ -1,33 +0,0 @@
|
|||||||
name: Enhancement Request
|
|
||||||
description: Propose an improvement to an existing feature or UX behavior.
|
|
||||||
labels: ['enhancement ♻️']
|
|
||||||
body:
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Is there an existing issue for this?
|
|
||||||
options:
|
|
||||||
- label: I have searched existing open and closed issues
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: What is the enhancement?
|
|
||||||
description: Describe the change or optimization you’d like to see to an existing feature.
|
|
||||||
placeholder: e.g. Make scan intervals configurable from UI instead of just `app.conf`
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: What problem does this solve or improve?
|
|
||||||
description: Describe why this change would improve user experience or project maintainability.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Additional context or examples
|
|
||||||
description: |
|
|
||||||
Screenshots? Comparisons? Reference repos?
|
|
||||||
required: false
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Are you willing to help implement this?
|
|
||||||
options:
|
|
||||||
- label: "Yes"
|
|
||||||
- label: "No"
|
|
||||||
16
.github/ISSUE_TEMPLATE/feature_request.yml → .github/ISSUE_TEMPLATE/feature-request.yml
vendored
Executable file → Normal file
16
.github/ISSUE_TEMPLATE/feature_request.yml → .github/ISSUE_TEMPLATE/feature-request.yml
vendored
Executable file → Normal file
@@ -1,11 +1,15 @@
|
|||||||
name: Feature Request
|
name: 🎁 Feature Request
|
||||||
description: 'Suggest an idea for NetAlertX'
|
description: 'Suggest an idea for NetAlertX'
|
||||||
labels: ['Feature request ➕']
|
labels: ['Feature request ➕']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Is there an existing issue for this?
|
label: Is there an existing issue for this?
|
||||||
description: Please search to see if an open or closed issue already exists for the feature you are requesting.
|
description: Please search to see if an open or closed issue already exists for the feature you are requesting.
|
||||||
options:
|
options:
|
||||||
- label: I have searched the existing open and closed issues
|
- label: I have searched the existing open and closed issues
|
||||||
required: true
|
required: true
|
||||||
@@ -32,21 +36,21 @@ body:
|
|||||||
label: Anything else?
|
label: Anything else?
|
||||||
description: |
|
description: |
|
||||||
Links? References? Mockups? Anything that will give us more context about the feature you are encountering!
|
Links? References? Mockups? Anything that will give us more context about the feature you are encountering!
|
||||||
|
|
||||||
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Am I willing to test this? 🧪
|
label: Am I willing to test this? 🧪
|
||||||
description: I rely on the community to test unreleased features. If you are requesting a feature, please be willing to test it within 48h of test request. Otherwise, the feature might be pulled from the code base.
|
description: I rely on the community to test unreleased features. If you are requesting a feature, please be willing to test it within 48h of test request. Otherwise, the feature might be pulled from the code base.
|
||||||
options:
|
options:
|
||||||
- label: I will do my best to test this feature on the `netlertx-dev` image when requested within 48h and report bugs to help deliver a great user experience for everyone and not to break existing installations.
|
- label: I will do my best to test this feature on the `netlertx-dev` image when requested within 48h and report bugs to help deliver a great user experience for everyone and not to break existing installations.
|
||||||
required: true
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Can I help implement this? 👩💻👨💻
|
label: Can I help implement this? 👩💻👨💻
|
||||||
description: The maintainer will provide guidance and help. The implementer will read the PR guidelines https://jokob-sk.github.io/NetAlertX/DEV_ENV_SETUP/
|
description: The maintainer will provide guidance and help. The implementer will read the PR guidelines https://docs.netalertx.com/DEV_ENV_SETUP/
|
||||||
options:
|
options:
|
||||||
- label: "Yes"
|
- label: "Yes"
|
||||||
- label: "No"
|
- label: "No"
|
||||||
86
.github/ISSUE_TEMPLATE/i-have-an-issue.yml
vendored
86
.github/ISSUE_TEMPLATE/i-have-an-issue.yml
vendored
@@ -1,18 +1,36 @@
|
|||||||
name: Bug Report
|
name: 🐛 Bug Report
|
||||||
description: 'When submitting an issue enable LOG_LEVEL="trace" and have a look at the docs.'
|
description: 'When submitting an issue enable LOG_LEVEL="trace" and have a look at the docs.'
|
||||||
labels: ['bug 🐛']
|
labels: ['bug 🐛']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
|
- type: dropdown
|
||||||
|
id: installation_type
|
||||||
|
attributes:
|
||||||
|
label: What installation are you running?
|
||||||
|
options:
|
||||||
|
- Production (netalertx) 📦
|
||||||
|
- Dev (netalertx-dev) 👩💻
|
||||||
|
- Home Assistant (addon) 🏠
|
||||||
|
- Home Assistant fa (full-access addon) 🏠
|
||||||
|
- Bare-metal (community only support - Check Discord) ❗
|
||||||
|
- Proxmox (community only support - Check Discord) ❗
|
||||||
|
- Unraid (community only support - Check Discord) ❗
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Is there an existing issue for this?
|
label: Is there an existing issue for this?
|
||||||
description: Please search to see if an open or closed issue already exists for the bug you encountered.
|
description: Please search to see if an open or closed issue already exists for the bug you encountered.
|
||||||
options:
|
options:
|
||||||
- label: I have searched the existing open and closed issues and I checked the docs https://jokob-sk.github.io/NetAlertX/
|
- label: I have searched the existing open and closed issues and I checked the docs https://docs.netalertx.com/
|
||||||
required: true
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: The issue occurs in the following browsers. Select at least 2.
|
label: The issue occurs in the following browsers. Select at least 2.
|
||||||
description: This step helps me understand if this is a cache or browser-specific issue.
|
description: This step helps me understand if this is a cache or browser-specific issue.
|
||||||
options:
|
options:
|
||||||
- label: "Firefox"
|
- label: "Firefox"
|
||||||
- label: "Chrome"
|
- label: "Chrome"
|
||||||
@@ -44,9 +62,9 @@ body:
|
|||||||
required: false
|
required: false
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: app.conf
|
label: Relevant `app.conf` settings
|
||||||
description: |
|
description: |
|
||||||
Paste your `app.conf` (remove personal info)
|
Paste relevant `app.conf`settings (remove sensitive info)
|
||||||
render: python
|
render: python
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
@@ -54,37 +72,41 @@ body:
|
|||||||
attributes:
|
attributes:
|
||||||
label: docker-compose.yml
|
label: docker-compose.yml
|
||||||
description: |
|
description: |
|
||||||
Paste your `docker-compose.yml`
|
Paste your `docker-compose.yml`
|
||||||
render: python
|
render: yaml
|
||||||
validations:
|
|
||||||
required: false
|
|
||||||
- type: dropdown
|
|
||||||
id: installation_type
|
|
||||||
attributes:
|
|
||||||
label: What installation are you running?
|
|
||||||
options:
|
|
||||||
- Production (netalertx)
|
|
||||||
- Dev (netalertx-dev)
|
|
||||||
- Home Assistant (addon)
|
|
||||||
- Home Assistant fa (full-access addon)
|
|
||||||
- Bare-metal (community only support - Check Discord)
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: app.log
|
|
||||||
description: |
|
|
||||||
Logs with debug enabled (https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEBUG_TIPS.md) ⚠
|
|
||||||
***Generally speaking, all bug reports should have logs provided.***
|
|
||||||
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
|
||||||
Additionally, any additional info? Screenshots? References? Anything that will give us more context about the issue you are encountering!
|
|
||||||
You can use `tail -100 /app/log/app.log` in the container if you have trouble getting to the log files.
|
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Debug enabled
|
label: Debug or Trace enabled
|
||||||
description: I confirm I enabled `debug`
|
description: I confirm I set `LOG_LEVEL` to `debug` or `trace`
|
||||||
options:
|
options:
|
||||||
- label: I have read and followed the steps in the wiki link above and provided the required debug logs and the log section covers the time when the issue occurs.
|
- label: I have read and followed the steps in the wiki link above and provided the required debug logs and the log section covers the time when the issue occurs.
|
||||||
required: true
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Relevant `app.log` section
|
||||||
|
value: |
|
||||||
|
```
|
||||||
|
PASTE LOG HERE. Using the triple backticks preserves format.
|
||||||
|
```
|
||||||
|
description: |
|
||||||
|
Logs with debug enabled (https://docs.netalertx.com/DEBUG_TIPS) ⚠
|
||||||
|
***Generally speaking, all bug reports should have logs provided.***
|
||||||
|
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
||||||
|
Additionally, any additional info? Screenshots? References? Anything that will give us more context about the issue you are encountering!
|
||||||
|
You can use `tail -100 /app/log/app.log` in the container if you have trouble getting to the log files or send them to netalertx@gmail.com with the issue number.
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Docker Logs
|
||||||
|
description: |
|
||||||
|
You can retrieve the logs from Portainer -> Containers -> your NetAlertX container -> Logs or by running `sudo docker logs netalertx`.
|
||||||
|
value: |
|
||||||
|
```
|
||||||
|
PASTE DOCKER LOG HERE. Using the triple backticks preserves format.
|
||||||
|
```
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
|||||||
@@ -1,37 +0,0 @@
|
|||||||
name: Refactor / Code Quality Request ♻️
|
|
||||||
description: Suggest improvements to code structure, style, or maintainability.
|
|
||||||
labels: ['enhancement ♻️']
|
|
||||||
body:
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Is there an existing issue for this?
|
|
||||||
description: Please check if a similar request already exists.
|
|
||||||
options:
|
|
||||||
- label: I have searched the existing open and closed issues
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: What part of the code needs refactoring or improvement?
|
|
||||||
description: Specify files, modules, or components.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Describe the proposed changes
|
|
||||||
description: Explain the refactoring or quality improvements you suggest.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Why is this improvement needed?
|
|
||||||
description: Benefits such as maintainability, readability, performance, or scalability.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Additional context or examples
|
|
||||||
description: Any relevant links, references, or related issues.
|
|
||||||
required: false
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Can you help implement this change?
|
|
||||||
options:
|
|
||||||
- label: Yes
|
|
||||||
- label: No
|
|
||||||
8
.github/ISSUE_TEMPLATE/security-report.yml
vendored
8
.github/ISSUE_TEMPLATE/security-report.yml
vendored
@@ -1,13 +1,17 @@
|
|||||||
name: Security Report 🔐
|
name: 🔐 Security Report
|
||||||
description: Report a security vulnerability or concern privately.
|
description: Report a security vulnerability or concern privately.
|
||||||
labels: ['security 🔐']
|
labels: ['security 🔐']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
- type: markdown
|
- type: markdown
|
||||||
attributes:
|
attributes:
|
||||||
value: |
|
value: |
|
||||||
**Important:** For security reasons, please do **not** post sensitive security issues publicly in the issue tracker.
|
**Important:** For security reasons, please do **not** post sensitive security issues publicly in the issue tracker.
|
||||||
Instead, send details to our security contact email: [jokob@duck.com](mailto:jokob@duck.com).
|
Instead, send details to our security contact email: [jokob@duck.com](mailto:jokob@duck.com).
|
||||||
|
|
||||||
We appreciate your responsible disclosure.
|
We appreciate your responsible disclosure.
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
|
|||||||
46
.github/ISSUE_TEMPLATE/setup-help.yml
vendored
46
.github/ISSUE_TEMPLATE/setup-help.yml
vendored
@@ -1,22 +1,40 @@
|
|||||||
name: Setup help
|
name: 📥 Setup help
|
||||||
description: 'When submitting an issue enable LOG_LEVEL="trace" and re-search first.'
|
description: 'When submitting an issue enable LOG_LEVEL="trace" and re-search first.'
|
||||||
labels: ['Setup 📥']
|
labels: ['Setup 📥']
|
||||||
body:
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
<!-- NETALERTX_TEMPLATE -->
|
||||||
|
- type: dropdown
|
||||||
|
id: installation_type
|
||||||
|
attributes:
|
||||||
|
label: What installation are you running?
|
||||||
|
options:
|
||||||
|
- Production (netalertx) 📦
|
||||||
|
- Dev (netalertx-dev) 👩💻
|
||||||
|
- Home Assistant (addon) 🏠
|
||||||
|
- Home Assistant fa (full-access addon) 🏠
|
||||||
|
- Bare-metal (community only support - Check Discord) ❗
|
||||||
|
- Proxmox (community only support - Check Discord) ❗
|
||||||
|
- Unraid (community only support - Check Discord) ❗
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Did I research?
|
label: Did I research?
|
||||||
description: Please confirm you checked the usual places before opening a setup support request.
|
description: Please confirm you checked the usual places before opening a setup support request.
|
||||||
options:
|
options:
|
||||||
- label: I have searched the docs https://jokob-sk.github.io/NetAlertX/
|
- label: I have searched the docs https://docs.netalertx.com/
|
||||||
required: true
|
required: true
|
||||||
- label: I have searched the existing open and closed issues
|
- label: I have searched the existing open and closed issues
|
||||||
required: true
|
required: true
|
||||||
- label: I confirm my SCAN_SUBNETS is configured and tested as per https://github.com/jokob-sk/NetAlertX/blob/main/docs/SUBNETS.md
|
- label: I confirm my SCAN_SUBNETS is configured and tested as per https://docs.netalertx.com/SUBNETS
|
||||||
required: true
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: The issue occurs in the following browsers. Select at least 2.
|
label: The issue occurs in the following browsers. Select at least 2.
|
||||||
description: This step helps me understand if this is a cache or browser-specific issue.
|
description: This step helps me understand if this is a cache or browser-specific issue.
|
||||||
options:
|
options:
|
||||||
- label: "Firefox"
|
- label: "Firefox"
|
||||||
- label: "Chrome"
|
- label: "Chrome"
|
||||||
@@ -32,38 +50,26 @@ body:
|
|||||||
attributes:
|
attributes:
|
||||||
label: Relevant settings you changed
|
label: Relevant settings you changed
|
||||||
description: |
|
description: |
|
||||||
Paste a screenshot or setting values of the settings you changed.
|
Paste a screenshot or setting values of the settings you changed.
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: docker-compose.yml
|
label: docker-compose.yml
|
||||||
description: |
|
description: |
|
||||||
Paste your `docker-compose.yml`
|
Paste your `docker-compose.yml`
|
||||||
render: python
|
render: python
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
- type: dropdown
|
|
||||||
id: installation_type
|
|
||||||
attributes:
|
|
||||||
label: What installation are you running?
|
|
||||||
options:
|
|
||||||
- Production (netalertx)
|
|
||||||
- Dev (netalertx-dev)
|
|
||||||
- Home Assistant (addon)
|
|
||||||
- Home Assistant fa (full-access addon)
|
|
||||||
- Bare-metal (community only support - Check Discord)
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: app.log
|
label: app.log
|
||||||
description: |
|
description: |
|
||||||
Logs with debug enabled (https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEBUG_TIPS.md) ⚠
|
Logs with debug enabled (https://docs.netalertx.com/DEBUG_TIPS) ⚠
|
||||||
***Generally speaking, all bug reports should have logs provided.***
|
***Generally speaking, all bug reports should have logs provided.***
|
||||||
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
||||||
Additionally, any additional info? Screenshots? References? Anything that will give us more context about the issue you are encountering!
|
Additionally, any additional info? Screenshots? References? Anything that will give us more context about the issue you are encountering!
|
||||||
You can use `tail -100 /app/log/app.log` in the container if you have trouble getting to the log files.
|
You can use `tail -100 /app/log/app.log` in the container if you have trouble getting to the log files.
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
|
|||||||
36
.github/ISSUE_TEMPLATE/translation-request.yml
vendored
36
.github/ISSUE_TEMPLATE/translation-request.yml
vendored
@@ -1,36 +0,0 @@
|
|||||||
name: Translation / Localization Request 🌐
|
|
||||||
description: Suggest adding or improving translations or localization support.
|
|
||||||
labels: ['enhancement 🌐']
|
|
||||||
body:
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Have you checked for existing translation efforts or related issues?
|
|
||||||
options:
|
|
||||||
- label: I have searched existing open and closed issues
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Language(s) involved
|
|
||||||
description: Specify the language(s) this request pertains to.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Describe the translation or localization improvement
|
|
||||||
description: Examples include adding new language support, fixing translation errors, or improving formatting.
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Why is this important for the project or users?
|
|
||||||
description: Describe the benefits or target audience.
|
|
||||||
required: false
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Additional context or references
|
|
||||||
description: Link to files, previous translation PRs, or external resources.
|
|
||||||
required: false
|
|
||||||
- type: checkboxes
|
|
||||||
attributes:
|
|
||||||
label: Can you help with translation or review?
|
|
||||||
options:
|
|
||||||
- label: Yes
|
|
||||||
- label: No
|
|
||||||
101
.github/copilot-instructions.md
vendored
Executable file → Normal file
101
.github/copilot-instructions.md
vendored
Executable file → Normal file
@@ -1,74 +1,49 @@
|
|||||||
# NetAlertX AI Assistant Instructions
|
### ROLE: NETALERTX ARCHITECT & STRICT CODE AUDITOR
|
||||||
This is NetAlertX — network monitoring & alerting. NetAlertX provides Network inventory, awareness, insight, categorization, intruder and presence detection. This is a heavily community-driven project, welcoming of all contributions.
|
You are a cynical Security Engineer and Core Maintainer of NetAlertX. Your goal is to deliver verified, secure, and production-ready solutions.
|
||||||
|
|
||||||
You are expected to be concise, opinionated, and biased toward security and simplicity.
|
### MANDATORY BEHAVIORAL OVERRIDES
|
||||||
|
1. **Obsessive Verification:** Never provide a solution without proof of correctness. Write test cases or validation immediately after writing functions.
|
||||||
|
2. **Anti-Laziness Protocol:** No placeholders. Output full, functional blocks every time.
|
||||||
|
3. **Priority Hierarchy:** Correctness > Completeness > Speed.
|
||||||
|
4. **Mantra:** "Job's not done 'till unit tests run."
|
||||||
|
|
||||||
## Architecture (what runs where)
|
---
|
||||||
- Backend (Python): main loop + GraphQL/REST endpoints orchestrate scans, plugins, workflows, notifications, and JSON export.
|
|
||||||
- Key: `server/__main__.py`, `server/plugin.py`, `server/initialise.py`, `server/api_server/api_server_start.py`
|
|
||||||
- Data (SQLite): persistent state in `db/app.db`; helpers in `server/database.py` and `server/db/*`.
|
|
||||||
- Frontend (Nginx + PHP + JS): UI reads JSON, triggers execution queue events.
|
|
||||||
- Key: `front/`, `front/js/common.js`, `front/php/server/*.php`
|
|
||||||
- Plugins (Python): acquisition/enrichment/publishers under `front/plugins/*` with `config.json` manifests.
|
|
||||||
- Messaging/Workflows: `server/messaging/*`, `server/workflows/*`
|
|
||||||
- API JSON Cache for UI: generated under `api/*.json`
|
|
||||||
|
|
||||||
Backend loop phases (see `server/__main__.py` and `server/plugin.py`): `once`, `schedule`, `always_after_scan`, `before_name_updates`, `on_new_device`, `on_notification`, plus ad‑hoc `run` via execution queue. Plugins execute as scripts that write result logs for ingestion.
|
# NetAlertX
|
||||||
|
|
||||||
## Plugin patterns that matter
|
Network monitoring & alerting. Provides inventory, awareness, insight, categorization, intruder and presence detection.
|
||||||
- Manifest lives at `front/plugins/<code_name>/config.json`; `code_name` == folder, `unique_prefix` drives settings and filenames (e.g., `ARPSCAN`).
|
|
||||||
- Control via settings: `<PREF>_RUN` (phase), `<PREF>_RUN_SCHD` (cron-like), `<PREF>_CMD` (script path), `<PREF>_RUN_TIMEOUT`, `<PREF>_WATCH` (diff columns).
|
|
||||||
- Data contract: scripts write `/app/log/plugins/last_result.<PREF>.log` (pipe‑delimited: 9 required cols + optional 4). Use `front/plugins/plugin_helper.py`’s `Plugin_Objects` to sanitize text and normalize MACs, then `write_result_file()`.
|
|
||||||
- Device import: define `database_column_definitions` when creating/updating devices; watched fields trigger notifications.
|
|
||||||
|
|
||||||
### Standard Plugin Formats
|
## Architecture
|
||||||
* publisher: Sends notifications to services. Runs `on_notification`. Data source: self.
|
|
||||||
* dev scanner: Creates devices and manages online/offline status. Runs on `schedule`. Data source: self / SQLite DB.
|
|
||||||
* name discovery: Discovers device names via various protocols. Runs `before_name_updates` or on `schedule`. Data source: self.
|
|
||||||
* importer: Imports devices from another service. Runs on `schedule`. Data source: self / SQLite DB.
|
|
||||||
* system: Provides core system functionality. Runs on `schedule` or is always on. Data source: self / Template.
|
|
||||||
* other: Miscellaneous plugins. Runs at various times. Data source: self / Template.
|
|
||||||
|
|
||||||
### Plugin logging & outputs
|
- **Backend (Python):** `server/__main__.py`, `server/plugin.py`, `server/api_server/api_server_start.py`
|
||||||
- Use logging as shown in other plugins.
|
- **Backend Config:** `/data/config/app.conf`
|
||||||
- Collect results with `Plugin_Objects.add_object(...)` during processing and call `plugin_objects.write_result_file()` exactly once at the end of the script.
|
- **Data (SQLite):** `/data/db/app.db`; helpers in `server/db/*`
|
||||||
- Prefer to log a brief summary before writing (e.g., total objects added) to aid troubleshooting; keep logs concise at `info` level and use `verbose` or `debug` for extra context.
|
- **Frontend (Nginx + PHP + JS):** `front/`
|
||||||
|
- **Plugins (Python):** `front/plugins/*` with `config.json` manifests
|
||||||
|
|
||||||
- Do not write ad‑hoc files for results; the only consumable output is `last_result.<PREF>.log` generated by `Plugin_Objects`.
|
## Skills
|
||||||
## API/Endpoints quick map
|
|
||||||
- Flask app: `server/api_server/api_server_start.py` exposes routes like `/device/<mac>`, `/devices`, `/devices/export/{csv,json}`, `/devices/import`, `/devices/totals`, `/devices/by-status`, plus `nettools`, `events`, `sessions`, `dbquery`, `metrics`, `sync`.
|
|
||||||
- Authorization: all routes expect header `Authorization: Bearer <API_TOKEN>` via `get_setting_value('API_TOKEN')`.
|
|
||||||
|
|
||||||
## Conventions & helpers to reuse
|
Procedural knowledge lives in `.github/skills/`. Load the appropriate skill when performing these tasks:
|
||||||
- Settings: add/modify via `ccd()` in `server/initialise.py` or per‑plugin manifest. Never hardcode ports or secrets; use `get_setting_value()`.
|
|
||||||
- Logging: use `logger.mylog(level, [message])`; levels: none/minimal/verbose/debug/trace.
|
|
||||||
- Time/MAC/strings: `helper.py` (`timeNowTZ`, `normalize_mac`, sanitizers). Validate MACs before DB writes.
|
|
||||||
- DB helpers: prefer `server/db/db_helper.py` functions (e.g., `get_table_json`, device condition helpers) over raw SQL in new paths.
|
|
||||||
|
|
||||||
## Dev workflow (devcontainer)
|
| Task | Skill |
|
||||||
- Services: use tasks to (re)start backend and nginx/PHP-FPM. Backend runs with debugpy on 5678; attach a Python debugger if needed.
|
|------|-------|
|
||||||
- Run a plugin manually: `python3 front/plugins/<code_name>/script.py` (ensure `sys.path` includes `/app/front/plugins` and `/app/server` like the template).
|
| Run tests, check failures | `testing-workflow` |
|
||||||
- Testing: pytest available via Alpine packages. Tests live in `test/`; app code is under `server/`. PYTHONPATH is preconfigured to include workspace and `/opt/venv` site‑packages.
|
| Start/stop/restart services | `devcontainer-services` |
|
||||||
|
| Wipe database, fresh start | `database-reset` |
|
||||||
|
| Load sample devices | `sample-data` |
|
||||||
|
| Build Docker images | `docker-build` |
|
||||||
|
| Reprovision devcontainer | `devcontainer-setup` |
|
||||||
|
| Create or run plugins | `plugin-run-development` |
|
||||||
|
| Analyze PR comments | `pr-analysis` |
|
||||||
|
| Clean Docker resources | `docker-prune` |
|
||||||
|
| Generate devcontainer configs | `devcontainer-configs` |
|
||||||
|
| Create API endpoints | `api-development` |
|
||||||
|
| Logging conventions | `logging-standards` |
|
||||||
|
| Settings and config | `settings-management` |
|
||||||
|
| Find files and paths | `project-navigation` |
|
||||||
|
| Coding standards | `code-standards` |
|
||||||
|
|
||||||
## What “done right” looks like
|
## Execution Protocol
|
||||||
- When adding a plugin, start from `front/plugins/__template`, implement with `plugin_helper`, define manifest settings, and wire phase via `<PREF>_RUN`. Verify logs in `/app/log/plugins/` and data in `api/*.json`.
|
|
||||||
- When introducing new config, define it once (core `ccd()` or plugin manifest) and read it via helpers everywhere.
|
|
||||||
- When exposing new server functionality, add endpoints in `server/api_server/*` and keep authorization consistent; update UI by reading/writing JSON cache rather than bypassing the pipeline.
|
|
||||||
|
|
||||||
## Useful references
|
- **Before running tests:** Always use `testFailure` tool first to gather current failures.
|
||||||
- Docs: `docs/PLUGINS_DEV.md`, `docs/SETTINGS_SYSTEM.md`, `docs/API_*.md`, `docs/DEBUG_*.md`
|
- **Docker tests are slow.** Examine existing failures before changing tests or Dockerfiles.
|
||||||
- Logs: backend `/app/log/app.log`, plugin logs under `/app/log/plugins/`, nginx/php logs under `/var/log/*`
|
|
||||||
|
|
||||||
## Assistant expectations:
|
|
||||||
- Be concise, opinionated, and biased toward security and simplicity.
|
|
||||||
- Reference concrete files/paths/environmental variables.
|
|
||||||
- Use existing helpers/settings.
|
|
||||||
- Offer a quick validation step (log line, API hit, or JSON export) for anything you add.
|
|
||||||
- Be blunt about risks and when you offer suggestions ensure they're also blunt,
|
|
||||||
- Ask for confirmation before making changes that run code or change multiple files.
|
|
||||||
- Make statements actionable and specific; propose exact edits.
|
|
||||||
- Request confirmation before applying changes that affect more than a single, clearly scoped line or file.
|
|
||||||
- Ask the user to debug something for an actionable value if you're unsure.
|
|
||||||
- Be sure to offer choices when appropriate.
|
|
||||||
- Always understand the intent of the user's request and undo/redo as needed.
|
|
||||||
- Above all, use the simplest possible code that meets the need so it can be easily audited and maintained.
|
|
||||||
|
|||||||
69
.github/skills/api-development/SKILL.md
vendored
Normal file
69
.github/skills/api-development/SKILL.md
vendored
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
---
|
||||||
|
name: api-development
|
||||||
|
description: Develop and extend NetAlertX REST API endpoints. Use this when asked to create endpoint, add API route, implement API, or modify API responses.
|
||||||
|
---
|
||||||
|
|
||||||
|
# API Development
|
||||||
|
|
||||||
|
## Entry Point
|
||||||
|
|
||||||
|
Flask app: `server/api_server/api_server_start.py`
|
||||||
|
|
||||||
|
## Existing Routes
|
||||||
|
|
||||||
|
- `/device/<mac>` - Single device operations
|
||||||
|
- `/devices` - Device list
|
||||||
|
- `/devices/export/{csv,json}` - Export devices
|
||||||
|
- `/devices/import` - Import devices
|
||||||
|
- `/devices/totals` - Device counts
|
||||||
|
- `/devices/by-status` - Devices grouped by status
|
||||||
|
- `/nettools` - Network utilities
|
||||||
|
- `/events` - Event log
|
||||||
|
- `/sessions` - Session management
|
||||||
|
- `/dbquery` - Database queries
|
||||||
|
- `/metrics` - Prometheus metrics
|
||||||
|
- `/sync` - Synchronization
|
||||||
|
|
||||||
|
## Authorization
|
||||||
|
|
||||||
|
All routes require header:
|
||||||
|
|
||||||
|
```
|
||||||
|
Authorization: Bearer <API_TOKEN>
|
||||||
|
```
|
||||||
|
|
||||||
|
Retrieve token via `get_setting_value('API_TOKEN')`.
|
||||||
|
|
||||||
|
## Response Contract
|
||||||
|
|
||||||
|
**MANDATORY:** All responses must include `"success": true|false`
|
||||||
|
|
||||||
|
```python
|
||||||
|
return {"success": False, "error": "Description of what went wrong"}
|
||||||
|
```
|
||||||
|
|
||||||
|
On success:
|
||||||
|
|
||||||
|
```python
|
||||||
|
return {"success": True, "data": result}
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
return {"success": False, "error": "Description of what went wrong"}
|
||||||
|
```
|
||||||
|
|
||||||
|
On success:
|
||||||
|
|
||||||
|
```python
|
||||||
|
return {"success": True, "data": result}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
**Exception:** The legacy `/device/<mac>` GET endpoint does not follow this contract to maintain backward compatibility with the UI.
|
||||||
|
|
||||||
|
## Adding New Endpoints
|
||||||
|
|
||||||
|
1. Add route in `server/api_server/` directory
|
||||||
|
2. Follow authorization pattern
|
||||||
|
3. Return proper response contract
|
||||||
|
4. Update UI to read/write JSON cache (don't bypass pipeline)
|
||||||
60
.github/skills/authentication/SKILL.md
vendored
Normal file
60
.github/skills/authentication/SKILL.md
vendored
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-authentication-tokens
|
||||||
|
description: Manage and troubleshoot API tokens and authentication-related secrets. Use this when you need to find, rotate, verify, or debug authentication issues (401/403) in NetAlertX.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Authentication
|
||||||
|
|
||||||
|
## Purpose ✅
|
||||||
|
Explain how to locate, validate, rotate, and troubleshoot API tokens and related authentication settings used by NetAlertX.
|
||||||
|
|
||||||
|
## Pre-Flight Check (MANDATORY) ⚠️
|
||||||
|
1. Ensure the backend is running (use devcontainer services or `ps`/systemd checks).
|
||||||
|
2. Verify the `API_TOKEN` setting can be read with Python (see below).
|
||||||
|
3. If a token-related error occurs, gather logs (`/tmp/log/app.log`, nginx logs) before changing secrets.
|
||||||
|
|
||||||
|
## Retrieve the API token (Python — preferred) 🐍
|
||||||
|
Always use Python helpers to read secrets to avoid accidental exposure in shells or logs:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from helper import get_setting_value
|
||||||
|
token = get_setting_value("API_TOKEN")
|
||||||
|
```
|
||||||
|
|
||||||
|
If you must inspect from a running container (read-only), use:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec <CONTAINER_ID> python3 -c "from helper import get_setting_value; print(get_setting_value('API_TOKEN'))"
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also check the runtime config file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec <CONTAINER_ID> grep API_TOKEN /data/config/app.conf
|
||||||
|
```
|
||||||
|
|
||||||
|
## Rotate / Generate a new token 🔁
|
||||||
|
- Preferred: Use the web UI (Settings / System) and click **Generate** for the `API_TOKEN` field — this updates the value safely and immediately.
|
||||||
|
- Manual: Edit `/data/config/app.conf` and restart the backend if required (use the existing devcontainer service tasks).
|
||||||
|
- After rotation: verify the value with `get_setting_value('API_TOKEN')` and update any clients or sync nodes to use the new token.
|
||||||
|
|
||||||
|
## Troubleshooting 401 / 403 Errors 🔍
|
||||||
|
1. Confirm backend is running and reachable.
|
||||||
|
2. Confirm `get_setting_value('API_TOKEN')` returns a non-empty value.
|
||||||
|
3. Ensure client requests send the header exactly: `Authorization: Bearer <API_TOKEN>`.
|
||||||
|
4. Check `/tmp/log/app.log` and plugin logs (e.g., sync plugin) for "Incorrect API Token" messages.
|
||||||
|
5. If using multiple nodes, ensure the token matches across nodes for sync operations.
|
||||||
|
6. If token appears missing or incorrect, rotate via UI or update `app.conf` and re-verify.
|
||||||
|
|
||||||
|
## Best Practices & Security 🔐
|
||||||
|
- Never commit tokens to source control or paste them in public issues. Redact tokens when sharing logs.
|
||||||
|
- Rotate tokens when a secret leak is suspected or per your security policy.
|
||||||
|
- Use `get_setting_value()` in tests and scripts — do not hardcode secrets.
|
||||||
|
|
||||||
|
## Related Skills & Docs 📚
|
||||||
|
- `testing-workflow` — how to use `API_TOKEN` in tests
|
||||||
|
- `settings-management` — where settings live and how they are managed
|
||||||
|
- Docs: `docs/API.md`, `docs/API_OLD.md`, `docs/API_SSE.md`
|
||||||
|
|
||||||
|
---
|
||||||
|
_Last updated: 2026-01-23_
|
||||||
81
.github/skills/code-standards/SKILL.md
vendored
Normal file
81
.github/skills/code-standards/SKILL.md
vendored
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-code-standards
|
||||||
|
description: NetAlertX coding standards and conventions. Use this when writing code, reviewing code, or implementing features.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Code Standards
|
||||||
|
|
||||||
|
- ask me to review before going to each next step (mention n step out of x) (AI only)
|
||||||
|
- before starting, prepare implementation plan (AI only)
|
||||||
|
- ask me to review it and ask any clarifying questions first
|
||||||
|
- add test creation as last step - follow repo architecture patterns - do not place in the root of /test
|
||||||
|
- code has to be maintainable, no duplicate code
|
||||||
|
- follow DRY principle - maintainability of code is more important than speed of implementation
|
||||||
|
- code files should be less than 500 LOC for better maintainability
|
||||||
|
- DB columns must not contain underscores, use camelCase instead (e.g., deviceInstanceId, not device_instance_id)
|
||||||
|
|
||||||
|
## File Length
|
||||||
|
|
||||||
|
Keep code files under 500 lines. Split larger files into modules.
|
||||||
|
|
||||||
|
## DRY Principle
|
||||||
|
|
||||||
|
Do not re-implement functionality. Reuse existing methods or refactor to create shared methods.
|
||||||
|
|
||||||
|
## Database Access
|
||||||
|
|
||||||
|
- Never access DB directly from application layers
|
||||||
|
- Use `server/db/db_helper.py` functions (e.g., `get_table_json`)
|
||||||
|
- Implement new functionality in handlers (e.g., `DeviceInstance` in `server/models/device_instance.py`)
|
||||||
|
|
||||||
|
## MAC Address Handling
|
||||||
|
|
||||||
|
Always validate and normalize MACs before DB writes:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from plugin_helper import normalize_mac
|
||||||
|
|
||||||
|
mac = normalize_mac(raw_mac)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Subprocess Safety
|
||||||
|
|
||||||
|
**MANDATORY:** All subprocess calls must set explicit timeouts.
|
||||||
|
|
||||||
|
```python
|
||||||
|
result = subprocess.run(cmd, timeout=60) # Minimum 60s
|
||||||
|
```
|
||||||
|
|
||||||
|
Nested subprocess calls need their own timeout—outer timeout won't save you.
|
||||||
|
|
||||||
|
## Time Utilities
|
||||||
|
|
||||||
|
```python
|
||||||
|
from utils.datetime_utils import timeNowUTC
|
||||||
|
|
||||||
|
timestamp = timeNowUTC()
|
||||||
|
```
|
||||||
|
|
||||||
|
This is the ONLY function that calls datetime.datetime.now() in the entire codebase.
|
||||||
|
|
||||||
|
⚠️ CRITICAL: ALL database timestamps MUST be stored in UTC
|
||||||
|
This is the SINGLE SOURCE OF TRUTH for current time in NetAlertX
|
||||||
|
Use timeNowUTC() for DB writes (returns UTC string by default)
|
||||||
|
Use timeNowUTC(as_string=False) for datetime operations (scheduling, comparisons, logging)
|
||||||
|
|
||||||
|
## String Sanitization
|
||||||
|
|
||||||
|
Use sanitizers from `server/helper.py` before storing user input. MAC addresses are always lowercased and normalized. IP addresses should be validated.
|
||||||
|
|
||||||
|
## Devcontainer Constraints
|
||||||
|
|
||||||
|
- Never `chmod` or `chown` during operations
|
||||||
|
- Everything is already writable
|
||||||
|
- If permissions needed, fix `.devcontainer/scripts/setup.sh`
|
||||||
|
|
||||||
|
## Path Hygiene
|
||||||
|
|
||||||
|
- Use environment variables for runtime paths
|
||||||
|
- `/data` for persistent config/db
|
||||||
|
- `/tmp` for runtime logs/api/nginx state
|
||||||
|
- Never hardcode `/data/db` or use relative paths
|
||||||
38
.github/skills/database-reset/SKILL.md
vendored
Normal file
38
.github/skills/database-reset/SKILL.md
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
name: reset-netalertx-database
|
||||||
|
description: Wipe and regenerate the NetAlertX database and config. Use this when asked to reset database, wipe db, fresh database, clean slate, or start fresh.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Database Reset
|
||||||
|
|
||||||
|
Completely wipes devcontainer database and config, then regenerates from scratch.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
killall 'python3' || true
|
||||||
|
sleep 1
|
||||||
|
rm -rf /data/db/* /data/config/*
|
||||||
|
bash /entrypoint.d/15-first-run-config.sh
|
||||||
|
bash /entrypoint.d/20-first-run-db.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## What This Does
|
||||||
|
|
||||||
|
1. Kills backend to release database locks
|
||||||
|
2. Deletes all files in `/data/db/` and `/data/config/`
|
||||||
|
3. Runs first-run config provisioning
|
||||||
|
4. Runs first-run database initialization
|
||||||
|
|
||||||
|
## After Reset
|
||||||
|
|
||||||
|
Run the startup script to restart services:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/setup.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Database Location
|
||||||
|
|
||||||
|
- Runtime: `/data/db/app.db` (SQLite)
|
||||||
|
- Config: `/data/config/app.conf`
|
||||||
28
.github/skills/devcontainer-configs/SKILL.md
vendored
Normal file
28
.github/skills/devcontainer-configs/SKILL.md
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-devcontainer-configs
|
||||||
|
description: Generate devcontainer configuration files. Use this when asked to generate devcontainer configs, update devcontainer template, or regenerate devcontainer.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Devcontainer Config Generation
|
||||||
|
|
||||||
|
Generates devcontainer configs from the template. Must be run after changes to devcontainer configuration.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/generate-configs.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## What It Does
|
||||||
|
|
||||||
|
Combines and merges template configurations into the final config used by VS Code.
|
||||||
|
|
||||||
|
## When to Run
|
||||||
|
|
||||||
|
- After modifying `.devcontainer/` template files
|
||||||
|
- After changing devcontainer features or settings
|
||||||
|
- Before committing devcontainer changes
|
||||||
|
|
||||||
|
## Note
|
||||||
|
|
||||||
|
This affects only the devcontainer configuration. It has no bearing on the production or test Docker image.
|
||||||
50
.github/skills/devcontainer-services/SKILL.md
vendored
Normal file
50
.github/skills/devcontainer-services/SKILL.md
vendored
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
---
|
||||||
|
name: restarting-netalertx-services
|
||||||
|
description: Control NetAlertX services inside the devcontainer. Use this when asked to start backend, start frontend, start nginx, start php-fpm, start crond, stop services, restart services, or check if services are running.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Devcontainer Services
|
||||||
|
|
||||||
|
You operate inside the devcontainer. Do not use `docker exec`.
|
||||||
|
|
||||||
|
## Start Backend (Python)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/services/start-backend.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Backend runs with debugpy on port 5678 for debugging. Takes ~5 seconds to be ready.
|
||||||
|
|
||||||
|
## Start Frontend (nginx + PHP-FPM)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/services/start-php-fpm.sh &
|
||||||
|
/services/start-nginx.sh &
|
||||||
|
```
|
||||||
|
|
||||||
|
Launches almost instantly.
|
||||||
|
|
||||||
|
## Start Scheduler (CronD)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/services/start-crond.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Stop All Services
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pkill -f 'php-fpm83|nginx|crond|python3' || true
|
||||||
|
```
|
||||||
|
|
||||||
|
## Check Running Services
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pgrep -a 'python3|nginx|php-fpm|crond'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Service Ports
|
||||||
|
|
||||||
|
- Frontend (nginx): 20211
|
||||||
|
- Backend API: 20212
|
||||||
|
- GraphQL: 20212
|
||||||
|
- Debugpy: 5678
|
||||||
36
.github/skills/devcontainer-setup/SKILL.md
vendored
Normal file
36
.github/skills/devcontainer-setup/SKILL.md
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-idempotent-setup
|
||||||
|
description: Reprovision and reset the devcontainer environment. Use this when asked to re-run startup, reprovision, setup devcontainer, fix permissions, or reset runtime state.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Devcontainer Setup
|
||||||
|
|
||||||
|
The setup script forcefully resets all runtime state. It is idempotent—every run wipes and recreates all relevant folders, symlinks, and files.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/setup.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## What It Does
|
||||||
|
|
||||||
|
1. Kills all services (php-fpm, nginx, crond, python3)
|
||||||
|
2. Mounts tmpfs ramdisks for `/tmp/log`, `/tmp/api`, `/tmp/run`, `/tmp/nginx`
|
||||||
|
3. Creates critical subdirectories
|
||||||
|
4. Links `/entrypoint.d` and `/app` symlinks
|
||||||
|
5. Creates `/data`, `/data/config`, `/data/db` directories
|
||||||
|
6. Creates all log files
|
||||||
|
7. Runs `/entrypoint.sh` to start services
|
||||||
|
8. Writes version to `.VERSION`
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
- After modifying setup scripts
|
||||||
|
- After container rebuild
|
||||||
|
- When environment is in broken state
|
||||||
|
- After database reset
|
||||||
|
|
||||||
|
## Philosophy
|
||||||
|
|
||||||
|
No conditional logic. Everything is recreated unconditionally. If something doesn't work, run setup again.
|
||||||
38
.github/skills/docker-build/SKILL.md
vendored
Normal file
38
.github/skills/docker-build/SKILL.md
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-docker-build
|
||||||
|
description: Build Docker images for testing or production. Use this when asked to build container, build image, docker build, build test image, or launch production container.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Docker Build
|
||||||
|
|
||||||
|
## Build Unit Test Image
|
||||||
|
|
||||||
|
Required after container/Dockerfile changes. Tests won't see changes until image is rebuilt.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker buildx build -t netalertx-test .
|
||||||
|
```
|
||||||
|
|
||||||
|
Build time: ~30 seconds (or ~90s if venv stage changes)
|
||||||
|
|
||||||
|
## Build and Launch Production Container
|
||||||
|
|
||||||
|
Before launching, stop devcontainer services first to free ports.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX
|
||||||
|
docker compose up -d --build --force-recreate
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pre-Launch Checklist
|
||||||
|
|
||||||
|
1. Stop devcontainer services: `pkill -f 'php-fpm83|nginx|crond|python3'`
|
||||||
|
2. Close VS Code forwarded ports
|
||||||
|
3. Run the build command
|
||||||
|
|
||||||
|
## Production Container Details
|
||||||
|
|
||||||
|
- Image: `netalertx:latest`
|
||||||
|
- Container name: `netalertx`
|
||||||
|
- Network mode: host
|
||||||
|
- Ports: 20211 (UI), 20212 (API/GraphQL)
|
||||||
32
.github/skills/docker-prune/SKILL.md
vendored
Normal file
32
.github/skills/docker-prune/SKILL.md
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-docker-prune
|
||||||
|
description: Clean up unused Docker resources. Use this when asked to prune docker, clean docker, remove unused images, free disk space, or docker cleanup. DANGEROUS operation. Requires human confirmation.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Docker Prune
|
||||||
|
|
||||||
|
**DANGER:** This destroys containers, images, volumes, and networks. Any stopped container will be wiped and data will be lost.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/confirm-docker-prune.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## What Gets Deleted
|
||||||
|
|
||||||
|
- All stopped containers
|
||||||
|
- All unused images
|
||||||
|
- All unused volumes
|
||||||
|
- All unused networks
|
||||||
|
|
||||||
|
## When to Use
|
||||||
|
|
||||||
|
- Disk space is low
|
||||||
|
- Build cache is corrupted
|
||||||
|
- Clean slate needed for testing
|
||||||
|
- After many image rebuilds
|
||||||
|
|
||||||
|
## Safety
|
||||||
|
|
||||||
|
The script requires explicit confirmation before proceeding.
|
||||||
34
.github/skills/mcp-activation/SKILL.md
vendored
Normal file
34
.github/skills/mcp-activation/SKILL.md
vendored
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
---
|
||||||
|
name: mcp-activation
|
||||||
|
description: Enables live interaction with the NetAlertX runtime. This skill configures the Model Context Protocol (MCP) connection, granting full API access for debugging, troubleshooting, and real-time operations including database queries, network scans, and device management.
|
||||||
|
---
|
||||||
|
|
||||||
|
# MCP Activation Skill
|
||||||
|
|
||||||
|
This skill configures the environment to expose the Model Context Protocol (MCP) server to AI agents running inside the devcontainer.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
This skill assumes you are already running within the NetAlertX devcontainer.
|
||||||
|
|
||||||
|
1. **Generate Configurations:**
|
||||||
|
Run the configuration generation script to extract the API Token and update the VS Code MCP settings.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
/workspaces/NetAlertX/.devcontainer/scripts/generate-configs.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Reload Window:**
|
||||||
|
Request the user to reload the VS Code window to activate the new tools.
|
||||||
|
> I have generated the MCP configuration. Please run the **'Developer: Reload Window'** command to activate the MCP server tools.
|
||||||
|
> In VS Code: open the Command Palette (Windows/Linux: Ctrl+Shift+P, macOS: Cmd+Shift+P), type Developer: Reload Window, press Enter — or click the Reload button if a notification appears. 🔁
|
||||||
|
> After you reload, tell me “Window reloaded” (or just “reloaded”) and I’ll continue.
|
||||||
|
|
||||||
|
|
||||||
|
## Why use this?
|
||||||
|
|
||||||
|
Access the live runtime API to perform operations that are not possible through static file analysis:
|
||||||
|
- **Query the database**
|
||||||
|
- **Trigger network scans**
|
||||||
|
- **Manage devices and events**
|
||||||
|
- **Troubleshoot real-time system state**
|
||||||
85
.github/skills/plugin-run-development/SKILL.md
vendored
Normal file
85
.github/skills/plugin-run-development/SKILL.md
vendored
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-plugin-run-development
|
||||||
|
description: Create and run NetAlertX plugins. Use this when asked to create plugin, run plugin, test plugin, plugin development, or execute plugin script.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Plugin Development
|
||||||
|
|
||||||
|
## Expected Workflow for Running Plugins
|
||||||
|
|
||||||
|
1. Read this skill document for context and instructions.
|
||||||
|
2. Find the plugin in `front/plugins/<code_name>/`.
|
||||||
|
3. Read the plugin's `config.json` and `script.py` to understand its functionality and settings.
|
||||||
|
4. Formulate and run the command: `python3 front/plugins/<code_name>/script.py`.
|
||||||
|
5. Retrieve the result from the plugin log folder (`/tmp/log/plugins/last_result.<PREF>.log`) quickly, as the backend may delete it after processing.
|
||||||
|
|
||||||
|
## Run a Plugin Manually
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 front/plugins/<code_name>/script.py
|
||||||
|
```
|
||||||
|
|
||||||
|
Ensure `sys.path` includes `/app/front/plugins` and `/app/server` (as in the template).
|
||||||
|
|
||||||
|
## Plugin Structure
|
||||||
|
|
||||||
|
```text
|
||||||
|
front/plugins/<code_name>/
|
||||||
|
├── config.json # Manifest with settings
|
||||||
|
├── script.py # Main script
|
||||||
|
└── ...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Manifest Location
|
||||||
|
|
||||||
|
`front/plugins/<code_name>/config.json`
|
||||||
|
|
||||||
|
- `code_name` == folder name
|
||||||
|
- `unique_prefix` drives settings and filenames (e.g., `ARPSCAN`)
|
||||||
|
|
||||||
|
## Settings Pattern
|
||||||
|
|
||||||
|
- `<PREF>_RUN`: execution phase
|
||||||
|
- `<PREF>_RUN_SCHD`: cron-like schedule
|
||||||
|
- `<PREF>_CMD`: script path
|
||||||
|
- `<PREF>_RUN_TIMEOUT`: timeout in seconds
|
||||||
|
- `<PREF>_WATCH`: columns to watch for changes
|
||||||
|
|
||||||
|
## Data Contract
|
||||||
|
|
||||||
|
Scripts write to `/tmp/log/plugins/last_result.<PREF>.log`
|
||||||
|
|
||||||
|
**Important:** The backend will almost immediately process this result file and delete it after ingestion. If you need to inspect the output, run the plugin and immediately retrieve the result file before the backend processes it.
|
||||||
|
|
||||||
|
Use `front/plugins/plugin_helper.py`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from plugin_helper import Plugin_Objects
|
||||||
|
|
||||||
|
plugin_objects = Plugin_Objects()
|
||||||
|
plugin_objects.add_object(...) # During processing
|
||||||
|
plugin_objects.write_result_file() # Exactly once at end
|
||||||
|
```
|
||||||
|
|
||||||
|
## Execution Phases
|
||||||
|
|
||||||
|
- `once`: runs once at startup
|
||||||
|
- `schedule`: runs on cron schedule
|
||||||
|
- `always_after_scan`: runs after every scan
|
||||||
|
- `before_name_updates`: runs before name resolution
|
||||||
|
- `on_new_device`: runs when new device detected
|
||||||
|
- `on_notification`: runs when notification triggered
|
||||||
|
|
||||||
|
## Plugin Formats
|
||||||
|
|
||||||
|
| Format | Purpose | Runs |
|
||||||
|
|--------|---------|------|
|
||||||
|
| publisher | Send notifications | `on_notification` |
|
||||||
|
| dev scanner | Create/manage devices | `schedule` |
|
||||||
|
| name discovery | Discover device names | `before_name_updates` |
|
||||||
|
| importer | Import from services | `schedule` |
|
||||||
|
| system | Core functionality | `schedule` |
|
||||||
|
|
||||||
|
## Starting Point
|
||||||
|
|
||||||
|
Copy from `front/plugins/__template` and customize.
|
||||||
59
.github/skills/project-navigation/SKILL.md
vendored
Normal file
59
.github/skills/project-navigation/SKILL.md
vendored
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
---
|
||||||
|
name: about-netalertx-project-structure
|
||||||
|
description: Navigate the NetAlertX codebase structure. Use this when asked about file locations, project structure, where to find code, or key paths.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Project Navigation
|
||||||
|
|
||||||
|
## Key Paths
|
||||||
|
|
||||||
|
| Component | Path |
|
||||||
|
|-----------|------|
|
||||||
|
| Workspace root | `/workspaces/NetAlertX` |
|
||||||
|
| Backend entry | `server/__main__.py` |
|
||||||
|
| API server | `server/api_server/api_server_start.py` |
|
||||||
|
| Plugin system | `server/plugin.py` |
|
||||||
|
| Initialization | `server/initialise.py` |
|
||||||
|
| Frontend | `front/` |
|
||||||
|
| Frontend JS | `front/js/common.js` |
|
||||||
|
| Frontend PHP | `front/php/server/*.php` |
|
||||||
|
| Plugins | `front/plugins/` |
|
||||||
|
| Plugin template | `front/plugins/__template` |
|
||||||
|
| Database helpers | `server/db/db_helper.py` |
|
||||||
|
| Device model | `server/models/device_instance.py` |
|
||||||
|
| Messaging | `server/messaging/` |
|
||||||
|
| Workflows | `server/workflows/` |
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
NetAlertX uses a frontend–backend architecture: the frontend runs on **PHP + Nginx** (see `front/`), the backend is implemented in **Python** (see `server/`), and scheduled tasks are managed by a **supercronic** scheduler that runs periodic jobs.
|
||||||
|
|
||||||
|
## Runtime Paths
|
||||||
|
|
||||||
|
| Data | Path |
|
||||||
|
|------|------|
|
||||||
|
| Config (runtime) | `/data/config/app.conf` |
|
||||||
|
| Config (default) | `back/app.conf` |
|
||||||
|
| Database | `/data/db/app.db` |
|
||||||
|
| API JSON cache | `/tmp/api/*.json` |
|
||||||
|
| Logs | `/tmp/log/` |
|
||||||
|
| Plugin logs | `/tmp/log/plugins/` |
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
Use these NETALERTX_* instead of hardcoding paths. Examples:
|
||||||
|
|
||||||
|
- `NETALERTX_DB`
|
||||||
|
- `NETALERTX_LOG`
|
||||||
|
- `NETALERTX_CONFIG`
|
||||||
|
- `NETALERTX_DATA`
|
||||||
|
- `NETALERTX_APP`
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
| Topic | Path |
|
||||||
|
|-------|------|
|
||||||
|
| Plugin development | `docs/PLUGINS_DEV.md` |
|
||||||
|
| System settings | `docs/SETTINGS_SYSTEM.md` |
|
||||||
|
| API docs | `docs/API_*.md` |
|
||||||
|
| Debug guides | `docs/DEBUG_*.md` |
|
||||||
31
.github/skills/sample-data/SKILL.md
vendored
Normal file
31
.github/skills/sample-data/SKILL.md
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-sample-data
|
||||||
|
description: Load synthetic device data into the devcontainer. Use this when asked to load sample devices, seed data, import test devices, populate database, or generate test data.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Sample Data Loading
|
||||||
|
|
||||||
|
Generates synthetic device inventory and imports it via the `/devices/import` API endpoint.
|
||||||
|
|
||||||
|
## Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd /workspaces/NetAlertX/.devcontainer/scripts
|
||||||
|
./load-devices.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Environment
|
||||||
|
|
||||||
|
- `CSV_PATH`: defaults to `/tmp/netalertx-devices.csv`
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Backend must be running
|
||||||
|
- API must be accessible
|
||||||
|
|
||||||
|
## What It Does
|
||||||
|
|
||||||
|
1. Generates synthetic device records (MAC addresses, IPs, names, vendors)
|
||||||
|
2. Creates CSV file at `$CSV_PATH`
|
||||||
|
3. POSTs to `/devices/import` endpoint
|
||||||
|
4. Devices appear in database and UI
|
||||||
39
.github/skills/settings-management/SKILL.md
vendored
Normal file
39
.github/skills/settings-management/SKILL.md
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-settings-management
|
||||||
|
description: Manage NetAlertX configuration settings. Use this when asked to add setting, read config, get_setting_value, ccd, or configure options.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Settings Management
|
||||||
|
|
||||||
|
## Reading Settings
|
||||||
|
|
||||||
|
```python
|
||||||
|
from helper import get_setting_value
|
||||||
|
|
||||||
|
value = get_setting_value('SETTING_NAME')
|
||||||
|
```
|
||||||
|
|
||||||
|
Never hardcode ports, secrets, or configuration values. Always use `get_setting_value()`.
|
||||||
|
|
||||||
|
## Adding Core Settings
|
||||||
|
|
||||||
|
Use `ccd()` in `server/initialise.py`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
ccd('SETTING_NAME', 'default_value', 'description')
|
||||||
|
```
|
||||||
|
|
||||||
|
## Adding Plugin Settings
|
||||||
|
|
||||||
|
Define in plugin's `config.json` manifest under the settings section.
|
||||||
|
|
||||||
|
## Config Files
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `/data/config/app.conf` | Runtime config (modified by app) |
|
||||||
|
| `back/app.conf` | Default config (template) |
|
||||||
|
|
||||||
|
## Environment Override
|
||||||
|
|
||||||
|
Use `APP_CONF_OVERRIDE` environment variable for settings that must be set before startup.
|
||||||
61
.github/skills/testing-workflow/SKILL.md
vendored
Normal file
61
.github/skills/testing-workflow/SKILL.md
vendored
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
---
|
||||||
|
name: netalertx-testing-workflow
|
||||||
|
description: Run and debug tests in the NetAlertX devcontainer. Use this when asked to run tests, check test failures, debug failing tests, or execute pytest.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Testing Workflow
|
||||||
|
|
||||||
|
## Pre-Flight Check (MANDATORY)
|
||||||
|
|
||||||
|
Before running any tests, always check for existing failures first:
|
||||||
|
|
||||||
|
1. Use the `testFailure` tool to gather current failure information
|
||||||
|
2. Review the failures to understand what's already broken
|
||||||
|
3. Only then proceed with test execution
|
||||||
|
|
||||||
|
## Running Tests
|
||||||
|
|
||||||
|
Use VS Code's testing interface or the `runTests` tool with appropriate parameters:
|
||||||
|
|
||||||
|
- To run all tests: invoke runTests without file filter
|
||||||
|
- To run specific test file: invoke runTests with the test file path
|
||||||
|
- To run failed tests only: invoke runTests with `--lf` flag
|
||||||
|
|
||||||
|
## Test Location
|
||||||
|
|
||||||
|
Tests live in `test/` directory. App code is under `server/`.
|
||||||
|
|
||||||
|
PYTHONPATH is preconfigured to include the following which should meet all needs:
|
||||||
|
- `/app` # the primary location where python runs in the production system
|
||||||
|
- `/app/server` # symbolic link to /wprkspaces/NetAlertX/server
|
||||||
|
- `/app/front/plugins` # symbolic link to /workspaces/NetAlertX/front/plugins
|
||||||
|
- `/opt/venv/lib/pythonX.Y/site-packages`
|
||||||
|
- `/workspaces/NetAlertX/test`
|
||||||
|
- `/workspaces/NetAlertX/server`
|
||||||
|
- `/workspaces/NetAlertX`
|
||||||
|
- `/usr/lib/pythonX.Y/site-packages`
|
||||||
|
|
||||||
|
## Authentication in Tests
|
||||||
|
|
||||||
|
Retrieve `API_TOKEN` using Python (not shell):
|
||||||
|
|
||||||
|
```python
|
||||||
|
from helper import get_setting_value
|
||||||
|
token = get_setting_value("API_TOKEN")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting 403 Forbidden
|
||||||
|
|
||||||
|
1. Ensure backend is running (use devcontainer-services skill)
|
||||||
|
2. Verify config loaded: `get_setting_value("API_TOKEN")` returns non-empty
|
||||||
|
3. Re-run startup if needed (use devcontainer-setup skill)
|
||||||
|
|
||||||
|
## Docker Test Image
|
||||||
|
|
||||||
|
If container changes affect tests, rebuild the test image first:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker buildx build -t netalertx-test .
|
||||||
|
```
|
||||||
|
|
||||||
|
This takes ~30 seconds unless venv stage changes (~90s).
|
||||||
116
.github/workflows/code-checks.yml
vendored
Normal file
116
.github/workflows/code-checks.yml
vendored
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
name: ✅ Code checks
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- '*.*.*'
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
check-url-paths:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: 🚨 Ensure DELETE FROM CurrentScan is not commented out
|
||||||
|
run: |
|
||||||
|
echo "🔍 Checking that DELETE FROM CurrentScan is not commented out..."
|
||||||
|
|
||||||
|
MATCHES=$(grep -RInE '^[[:space:]]*#[[:space:]]*db\.sql\.execute\("DELETE FROM CurrentScan"\)' \
|
||||||
|
--include="*.py" .) || true
|
||||||
|
|
||||||
|
if [ -n "$MATCHES" ]; then
|
||||||
|
echo "❌ Found commented-out DELETE FROM CurrentScan call:"
|
||||||
|
echo "$MATCHES"
|
||||||
|
echo
|
||||||
|
echo "This line must NOT be commented out in committed code."
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
echo "✅ DELETE FROM CurrentScan is active."
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Check for incorrect absolute '/php/' URLs in frontend code
|
||||||
|
run: |
|
||||||
|
echo "🔍 Checking for incorrect absolute '/php/' URLs (should be 'php/' or './php/')..."
|
||||||
|
|
||||||
|
MATCHES=$(grep -rE "['\"]/php/" --include=\*.{js,php,html} ./front \
|
||||||
|
| grep -E "\.get|\.post|\.ajax|fetch|url\s*:") || true
|
||||||
|
|
||||||
|
if [ -n "$MATCHES" ]; then
|
||||||
|
echo "$MATCHES"
|
||||||
|
echo "❌ Found incorrectly absolute '/php/' URLs. Use 'php/' or './php/' for relative paths."
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
echo "✅ No bad '/php/' URLs found."
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
- name: Check Python syntax
|
||||||
|
run: |
|
||||||
|
set -e
|
||||||
|
echo "🔍 Checking Python syntax..."
|
||||||
|
find . -name "*.py" -print0 | xargs -0 -n1 python3 -m py_compile
|
||||||
|
|
||||||
|
lint:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.11'
|
||||||
|
|
||||||
|
- name: Install linting tools
|
||||||
|
run: |
|
||||||
|
# Python linting
|
||||||
|
pip install flake8
|
||||||
|
# Docker linting
|
||||||
|
wget -O /tmp/hadolint https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64
|
||||||
|
chmod +x /tmp/hadolint
|
||||||
|
# PHP and shellcheck for syntax checking
|
||||||
|
sudo apt-get update && sudo apt-get install -y php-cli shellcheck
|
||||||
|
|
||||||
|
- name: Shell check
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
echo "🔍 Checking shell scripts..."
|
||||||
|
find . -name "*.sh" -exec shellcheck {} \;
|
||||||
|
|
||||||
|
- name: Python lint
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
echo "🔍 Linting Python code..."
|
||||||
|
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
|
||||||
|
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
|
||||||
|
|
||||||
|
- name: PHP check
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
echo "🔍 Checking PHP syntax..."
|
||||||
|
find . -name "*.php" -exec php -l {} \;
|
||||||
|
|
||||||
|
- name: Docker lint
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
echo "🔍 Linting Dockerfiles..."
|
||||||
|
/tmp/hadolint --config .hadolint.yaml Dockerfile* || true
|
||||||
|
|
||||||
|
docker-tests:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Run Docker-based tests
|
||||||
|
run: |
|
||||||
|
echo "🐳 Running Docker-based tests..."
|
||||||
|
chmod +x ./scripts/run_tests_in_docker_environment.sh
|
||||||
|
./scripts/run_tests_in_docker_environment.sh
|
||||||
41
.github/workflows/code_checks.yml
vendored
41
.github/workflows/code_checks.yml
vendored
@@ -1,41 +0,0 @@
|
|||||||
name: Code checks
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
tags:
|
|
||||||
- '*.*.*'
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
check-url-paths:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Check for incorrect absolute '/php/' URLs in frontend code
|
|
||||||
run: |
|
|
||||||
echo "🔍 Checking for incorrect absolute '/php/' URLs (should be 'php/' or './php/')..."
|
|
||||||
|
|
||||||
MATCHES=$(grep -rE "['\"]\/php\/" --include=\*.{js,php,html} ./front | grep -E "\.get|\.post|\.ajax|fetch|url\s*:") || true
|
|
||||||
|
|
||||||
if [ -n "$MATCHES" ]; then
|
|
||||||
echo "$MATCHES"
|
|
||||||
echo "❌ Found incorrectly absolute '/php/' URLs. Use 'php/' or './php/' for relative paths."
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "✅ No bad '/php/' URLs found."
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
- name: Check Python syntax
|
|
||||||
run: |
|
|
||||||
set -e
|
|
||||||
echo "🔍 Checking Python syntax..."
|
|
||||||
find . -name "*.py" -print0 | xargs -0 -n1 python3 -m py_compile
|
|
||||||
|
|
||||||
25
.github/workflows/docker_cache-cleaner.yml
vendored
25
.github/workflows/docker_cache-cleaner.yml
vendored
@@ -1,25 +0,0 @@
|
|||||||
name: 🤖Automation - ci-package-cleaner
|
|
||||||
|
|
||||||
on:
|
|
||||||
|
|
||||||
workflow_dispatch: # manual option
|
|
||||||
|
|
||||||
# schedule:
|
|
||||||
# - cron: '15 22 * * 1' # every Monday 10.15pm UTC (~11.15am Tuesday NZT)
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
|
|
||||||
package-cleaner:
|
|
||||||
name: package-cleaner
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 5
|
|
||||||
permissions:
|
|
||||||
packages: write
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- uses: actions/delete-package-versions@v4
|
|
||||||
with:
|
|
||||||
package-name: netalertx
|
|
||||||
package-type: container
|
|
||||||
min-versions-to-keep: 0
|
|
||||||
delete-only-untagged-versions: true
|
|
||||||
57
.github/workflows/docker_dev.yml
vendored
57
.github/workflows/docker_dev.yml
vendored
@@ -1,25 +1,29 @@
|
|||||||
name: docker
|
name: 🐳 👩💻 docker dev
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- next_release
|
- main
|
||||||
tags:
|
tags:
|
||||||
- '*.*.*'
|
- '*.*.*'
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches:
|
||||||
- next_release
|
- main
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docker_dev:
|
docker_dev:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 30
|
timeout-minutes: 90
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
if: >
|
if: >
|
||||||
contains(github.event.head_commit.message, 'PUSHPROD') != 'True' &&
|
!contains(github.event.head_commit.message, 'PUSHPROD') &&
|
||||||
github.repository == 'jokob-sk/NetAlertX'
|
(
|
||||||
|
github.repository == 'jokob-sk/NetAlertX' ||
|
||||||
|
github.repository == 'netalertx/NetAlertX'
|
||||||
|
)
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
@@ -30,26 +34,43 @@ jobs:
|
|||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
# --- Generate timestamped dev version
|
||||||
|
- name: Generate timestamp version
|
||||||
|
id: timestamp
|
||||||
|
run: |
|
||||||
|
ts=$(date -u +'%Y%m%d-%H%M%S')
|
||||||
|
echo "version=dev-${ts}" >> $GITHUB_OUTPUT
|
||||||
|
echo "Generated version: dev-${ts}"
|
||||||
|
|
||||||
- name: Set up dynamic build ARGs
|
- name: Set up dynamic build ARGs
|
||||||
id: getargs
|
id: getargs
|
||||||
run: echo "version=$(cat ./stable/VERSION)" >> $GITHUB_OUTPUT
|
run: echo "version=$(cat ./stable/VERSION)" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Get release version
|
- name: Get release version
|
||||||
id: get_version
|
id: get_version
|
||||||
run: echo "version=Dev" >> $GITHUB_OUTPUT
|
run: echo "version=Dev" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
# --- debug output
|
||||||
|
- name: Debug version
|
||||||
|
run: |
|
||||||
|
echo "GITHUB_REF: $GITHUB_REF"
|
||||||
|
echo "Version: '${{ steps.get_version.outputs.version }}'"
|
||||||
|
|
||||||
|
# --- Write the timestamped version to .VERSION file
|
||||||
- name: Create .VERSION file
|
- name: Create .VERSION file
|
||||||
run: echo "${{ steps.get_version.outputs.version }}" >> .VERSION
|
run: echo "${{ steps.timestamp.outputs.version }}" > .VERSION
|
||||||
|
|
||||||
- name: Docker meta
|
- name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
|
ghcr.io/netalertx/netalertx-dev
|
||||||
ghcr.io/jokob-sk/netalertx-dev
|
ghcr.io/jokob-sk/netalertx-dev
|
||||||
jokobsk/netalertx-dev
|
jokobsk/netalertx-dev
|
||||||
tags: |
|
tags: |
|
||||||
type=raw,value=latest
|
type=raw,value=latest
|
||||||
|
type=raw,value=${{ steps.timestamp.outputs.version }}
|
||||||
type=ref,event=branch
|
type=ref,event=branch
|
||||||
type=ref,event=pr
|
type=ref,event=pr
|
||||||
type=semver,pattern={{version}}
|
type=semver,pattern={{version}}
|
||||||
@@ -57,12 +78,20 @@ jobs:
|
|||||||
type=semver,pattern={{major}}
|
type=semver,pattern={{major}}
|
||||||
type=sha
|
type=sha
|
||||||
|
|
||||||
- name: Log in to Github Container Registry (GHCR)
|
- name: Login GHCR (netalertx org)
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login GHCR (jokob-sk legacy)
|
||||||
|
if: github.event_name != 'pull_request'
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: jokob-sk
|
username: jokob-sk
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GHCR_JOKOBSK_PAT }}
|
||||||
|
|
||||||
- name: Log in to DockerHub
|
- name: Log in to DockerHub
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
@@ -72,10 +101,12 @@ jobs:
|
|||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6
|
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
|
cache-from: type=gha
|
||||||
|
cache-to: type=gha,mode=max
|
||||||
|
|||||||
112
.github/workflows/docker_dev_unsafe.yml
vendored
Normal file
112
.github/workflows/docker_dev_unsafe.yml
vendored
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
name: 🐳 ⚠ docker-unsafe from next_release branch
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- next_release
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- next_release
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
docker_dev_unsafe:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
timeout-minutes: 90
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
if: >
|
||||||
|
!contains(github.event.head_commit.message, 'PUSHPROD') &&
|
||||||
|
(
|
||||||
|
github.repository == 'jokob-sk/NetAlertX' ||
|
||||||
|
github.repository == 'netalertx/NetAlertX'
|
||||||
|
)
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
# --- Generate timestamped dev version
|
||||||
|
- name: Generate timestamp version
|
||||||
|
id: timestamp
|
||||||
|
run: |
|
||||||
|
ts=$(date -u +'%Y%m%d-%H%M%S')
|
||||||
|
echo "version=dev-${ts}" >> $GITHUB_OUTPUT
|
||||||
|
echo "Generated version: dev-${ts}"
|
||||||
|
|
||||||
|
- name: Set up dynamic build ARGs
|
||||||
|
id: getargs
|
||||||
|
run: echo "version=$(cat ./stable/VERSION)" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Get release version
|
||||||
|
id: get_version
|
||||||
|
run: echo "version=Dev" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
# --- debug output
|
||||||
|
- name: Debug version
|
||||||
|
run: |
|
||||||
|
echo "GITHUB_REF: $GITHUB_REF"
|
||||||
|
echo "Version: '${{ steps.get_version.outputs.version }}'"
|
||||||
|
|
||||||
|
# --- Write the timestamped version to .VERSION file
|
||||||
|
- name: Create .VERSION file
|
||||||
|
run: echo "${{ steps.timestamp.outputs.version }}" > .VERSION
|
||||||
|
|
||||||
|
- name: Docker meta
|
||||||
|
id: meta
|
||||||
|
uses: docker/metadata-action@v5
|
||||||
|
with:
|
||||||
|
images: |
|
||||||
|
ghcr.io/netalertx/netalertx-dev-unsafe
|
||||||
|
jokobsk/netalertx-dev-unsafe
|
||||||
|
tags: |
|
||||||
|
type=raw,value=unsafe
|
||||||
|
type=raw,value=${{ steps.timestamp.outputs.version }}
|
||||||
|
type=ref,event=branch
|
||||||
|
type=ref,event=pr
|
||||||
|
type=sha
|
||||||
|
|
||||||
|
- name: Login GHCR (netalertx org)
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login GHCR (jokob-sk legacy)
|
||||||
|
if: github.event_name != 'pull_request'
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: jokob-sk
|
||||||
|
password: ${{ secrets.GHCR_JOKOBSK_PAT }}
|
||||||
|
|
||||||
|
- name: Log in to DockerHub
|
||||||
|
if: github.event_name != 'pull_request'
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6
|
||||||
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
|
labels: |
|
||||||
|
org.opencontainers.image.title=NetAlertX Dev Unsafe
|
||||||
|
org.opencontainers.image.description=EXPERIMENTAL BUILD – NOT SUPPORTED – DATA LOSS POSSIBLE
|
||||||
|
org.opencontainers.image.version=${{ steps.timestamp.outputs.version }}
|
||||||
|
netalertx.stability=unsafe
|
||||||
|
netalertx.support=none
|
||||||
|
netalertx.data_risk=high
|
||||||
|
cache-from: type=gha
|
||||||
|
cache-to: type=gha,mode=max
|
||||||
61
.github/workflows/docker_prod.yml
vendored
61
.github/workflows/docker_prod.yml
vendored
@@ -6,21 +6,20 @@
|
|||||||
# GitHub recommends pinning actions to a commit SHA.
|
# GitHub recommends pinning actions to a commit SHA.
|
||||||
# To get a newer version, you will need to update the SHA.
|
# To get a newer version, you will need to update the SHA.
|
||||||
# You can also reference a tag or branch, but the action may change without warning.
|
# You can also reference a tag or branch, but the action may change without warning.
|
||||||
|
name: 🐳 🚀 Publish Docker image
|
||||||
name: Publish Docker image
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
release:
|
release:
|
||||||
types: [published]
|
types: [published]
|
||||||
tags:
|
|
||||||
- '*.[1-9]+[0-9]?.[1-9]+*'
|
|
||||||
jobs:
|
jobs:
|
||||||
docker:
|
docker:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 30
|
timeout-minutes: 90
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -31,42 +30,53 @@ jobs:
|
|||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
- name: Set up dynamic build ARGs
|
# --- Get release version from tag
|
||||||
id: getargs
|
|
||||||
run: echo "version=$(cat ./stable/VERSION)" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Get release version
|
- name: Get release version
|
||||||
id: get_version
|
id: get_version
|
||||||
run: echo "::set-output name=version::${GITHUB_REF#refs/tags/}"
|
run: echo "version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
|
||||||
|
# --- debug output
|
||||||
|
- name: Debug version
|
||||||
|
run: |
|
||||||
|
echo "GITHUB_REF: $GITHUB_REF"
|
||||||
|
echo "Version: '${{ steps.get_version.outputs.version }}'"
|
||||||
|
|
||||||
|
# --- Write version to .VERSION file
|
||||||
- name: Create .VERSION file
|
- name: Create .VERSION file
|
||||||
run: echo "${{ steps.get_version.outputs.version }}" >> .VERSION
|
run: echo -n "${{ steps.get_version.outputs.version }}" > .VERSION
|
||||||
|
|
||||||
|
# --- Generate Docker metadata and tags
|
||||||
- name: Docker meta
|
- name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
# list of Docker images to use as base name for tags
|
|
||||||
images: |
|
images: |
|
||||||
|
ghcr.io/netalertx/netalertx
|
||||||
ghcr.io/jokob-sk/netalertx
|
ghcr.io/jokob-sk/netalertx
|
||||||
jokobsk/netalertx
|
jokobsk/netalertx
|
||||||
# generate Docker tags based on the following events/attributes
|
|
||||||
tags: |
|
tags: |
|
||||||
type=semver,pattern={{version}},value=${{ inputs.version }}
|
type=semver,pattern={{version}},value=${{ steps.get_version.outputs.version }}
|
||||||
type=semver,pattern={{major}}.{{minor}},value=${{ inputs.version }}
|
type=semver,pattern={{major}}.{{minor}},value=${{ steps.get_version.outputs.version }}
|
||||||
type=semver,pattern={{major}},value=${{ inputs.version }}
|
type=semver,pattern={{major}},value=${{ steps.get_version.outputs.version }}
|
||||||
type=ref,event=branch,suffix=-{{ sha }}
|
type=raw,value=latest
|
||||||
type=ref,event=pr
|
|
||||||
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') }}
|
|
||||||
|
|
||||||
- name: Log in to Github Container registry
|
- name: Log in to Github Container Registry (GHCR)
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login GHCR (jokob-sk legacy)
|
||||||
|
if: github.event_name != 'pull_request'
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: jokob-sk
|
username: jokob-sk
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GHCR_JOKOBSK_PAT }}
|
||||||
|
|
||||||
- name: Login to DockerHub
|
- name: Log in to DockerHub
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
@@ -74,13 +84,12 @@ jobs:
|
|||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6
|
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
# # ⚠ disable cache if build is failing to download debian packages
|
|
||||||
# cache-from: type=registry,ref=ghcr.io/jokob-sk/netalertx:buildcache
|
# cache-from: type=registry,ref=ghcr.io/jokob-sk/netalertx:buildcache
|
||||||
# cache-to: type=registry,ref=ghcr.io/jokob-sk/netalertx:buildcache,mode=max
|
# cache-to: type=registry,ref=ghcr.io/jokob-sk/netalertx:buildcache,mode=max
|
||||||
|
|||||||
81
.github/workflows/docker_rewrite.yml
vendored
81
.github/workflows/docker_rewrite.yml
vendored
@@ -1,81 +0,0 @@
|
|||||||
name: docker
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- rewrite
|
|
||||||
tags:
|
|
||||||
- '*.*.*'
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- rewrite
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
docker_rewrite:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 30
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
if: >
|
|
||||||
contains(github.event.head_commit.message, 'PUSHPROD') != 'True' &&
|
|
||||||
github.repository == 'jokob-sk/NetAlertX'
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Set up dynamic build ARGs
|
|
||||||
id: getargs
|
|
||||||
run: echo "version=$(cat ./stable/VERSION)" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Get release version
|
|
||||||
id: get_version
|
|
||||||
run: echo "version=Dev" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Create .VERSION file
|
|
||||||
run: echo "${{ steps.get_version.outputs.version }}" >> .VERSION
|
|
||||||
|
|
||||||
- name: Docker meta
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v4
|
|
||||||
with:
|
|
||||||
images: |
|
|
||||||
ghcr.io/jokob-sk/netalertx-dev-rewrite
|
|
||||||
jokobsk/netalertx-dev-rewrite
|
|
||||||
tags: |
|
|
||||||
type=raw,value=latest
|
|
||||||
type=ref,event=branch
|
|
||||||
type=ref,event=pr
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=semver,pattern={{major}}
|
|
||||||
type=sha
|
|
||||||
|
|
||||||
- name: Log in to Github Container Registry (GHCR)
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: jokob-sk
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Log in to DockerHub
|
|
||||||
if: github.event_name != 'pull_request'
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build and push
|
|
||||||
uses: docker/build-push-action@v3
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6
|
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
21
.github/workflows/label-issues.yml
vendored
21
.github/workflows/label-issues.yml
vendored
@@ -1,4 +1,4 @@
|
|||||||
name: Label Issues by Installation Type
|
name: 🏷 Label Issues by Installation Type
|
||||||
|
|
||||||
on:
|
on:
|
||||||
issues:
|
issues:
|
||||||
@@ -15,21 +15,28 @@ jobs:
|
|||||||
uses: actions/github-script@v7
|
uses: actions/github-script@v7
|
||||||
with:
|
with:
|
||||||
script: |
|
script: |
|
||||||
const body = context.payload.issue.body;
|
const body = (context.payload.issue.body || "").toLowerCase();
|
||||||
|
|
||||||
const lowerBody = body.toLowerCase();
|
// --- Check for template marker ---
|
||||||
|
const hasTemplate = body.includes('netalertx_template');
|
||||||
|
|
||||||
|
if (!hasTemplate) {
|
||||||
|
console.log("No template marker found, skipping labeling.");
|
||||||
|
return; // skip labeling
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Proceed with normal labeling ---
|
||||||
let labelsToAdd = [];
|
let labelsToAdd = [];
|
||||||
|
|
||||||
if (lowerBody.includes('bare-metal')) {
|
if (body.includes('bare-metal') || body.includes('proxmox')) {
|
||||||
labelsToAdd.push('bare-metal ❗');
|
labelsToAdd.push('bare-metal ❗');
|
||||||
}
|
}
|
||||||
|
|
||||||
if (lowerBody.includes('home assistant')) {
|
if (body.includes('home assistant')) {
|
||||||
labelsToAdd.push('Home Assistant 🏠');
|
labelsToAdd.push('Home Assistant 🏠');
|
||||||
}
|
}
|
||||||
|
|
||||||
if (lowerBody.includes('production (netalertx)') || lowerBody.includes('dev (netalertx-dev)')) {
|
if (body.includes('production (netalertx)') || body.includes('dev (netalertx-dev)')) {
|
||||||
labelsToAdd.push('Docker 🐋');
|
labelsToAdd.push('Docker 🐋');
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,4 +47,6 @@ jobs:
|
|||||||
issue_number: context.issue.number,
|
issue_number: context.issue.number,
|
||||||
labels: labelsToAdd
|
labels: labelsToAdd
|
||||||
});
|
});
|
||||||
|
|
||||||
|
console.log(`Added labels: ${labelsToAdd.join(", ")}`);
|
||||||
}
|
}
|
||||||
|
|||||||
19
.github/workflows/mkdocs.yml
vendored
19
.github/workflows/mkdocs.yml
vendored
@@ -1,9 +1,12 @@
|
|||||||
name: Deploy MkDocs
|
name: 📘 Deploy MkDocs
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main # Change if your default branch is different
|
- main
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
deploy:
|
deploy:
|
||||||
@@ -19,7 +22,17 @@ jobs:
|
|||||||
|
|
||||||
- name: Install MkDocs
|
- name: Install MkDocs
|
||||||
run: |
|
run: |
|
||||||
pip install mkdocs mkdocs-material && pip install mkdocs-github-admonitions-plugin
|
pip install \
|
||||||
|
mkdocs==1.6.0 \
|
||||||
|
mkdocs-material==9.5.21 \
|
||||||
|
mkdocs-github-admonitions-plugin==0.0.4
|
||||||
|
|
||||||
|
- name: Build MkDocs
|
||||||
|
run: mkdocs build
|
||||||
|
|
||||||
|
- name: Add CNAME
|
||||||
|
run: |
|
||||||
|
echo "docs.netalertx.com" > site/CNAME
|
||||||
|
|
||||||
- name: Deploy MkDocs
|
- name: Deploy MkDocs
|
||||||
run: mkdocs gh-deploy --force
|
run: mkdocs gh-deploy --force
|
||||||
|
|||||||
97
.github/workflows/run-all-tests.yml
vendored
Normal file
97
.github/workflows/run-all-tests.yml
vendored
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
name: 🧪 Manual Test Suite Selector
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
run_all:
|
||||||
|
description: '✅ Run ALL tests (overrides individual selectors)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_scan:
|
||||||
|
description: '📂 scan/ (Scan, Logic, Locks, IPs)'
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
run_api:
|
||||||
|
description: '📂 api_endpoints/ & server/ (Endpoints & Server)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_backend:
|
||||||
|
description: '📂 backend/ & db/ (SQL Builder, Security & Migration)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_docker_env:
|
||||||
|
description: '📂 docker_tests/ (Environment & PUID/PGID)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_ui:
|
||||||
|
description: '📂 ui/ (Selenium & Dashboard)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_plugins:
|
||||||
|
description: '📂 plugins/ (Sync insert schema-aware logic)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
run_root_files:
|
||||||
|
description: '📄 Root Test Files (WOL, Atomicity, etc.)'
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
comprehensive-test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout Code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Environment
|
||||||
|
run: sudo apt-get update && sudo apt-get install -y sqlite3
|
||||||
|
|
||||||
|
- name: Build Test Path Command
|
||||||
|
id: builder
|
||||||
|
run: |
|
||||||
|
PATHS=""
|
||||||
|
|
||||||
|
# run_all overrides everything
|
||||||
|
if [ "${{ github.event.inputs.run_all }}" == "true" ]; then
|
||||||
|
echo "final_paths=test/" >> $GITHUB_OUTPUT
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Folder Mapping with 'test/' prefix
|
||||||
|
if [ "${{ github.event.inputs.run_scan }}" == "true" ]; then PATHS="$PATHS test/scan/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_api }}" == "true" ]; then PATHS="$PATHS test/api_endpoints/ test/server/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_backend }}" == "true" ]; then PATHS="$PATHS test/backend/ test/db/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_docker_env }}" == "true" ]; then PATHS="$PATHS test/docker_tests/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_ui }}" == "true" ]; then PATHS="$PATHS test/ui/"; fi
|
||||||
|
if [ "${{ github.event.inputs.run_plugins }}" == "true" ]; then PATHS="$PATHS test/plugins/"; fi
|
||||||
|
|
||||||
|
# Root Files Mapping (files sitting directly in /test/)
|
||||||
|
if [ "${{ github.event.inputs.run_root_files }}" == "true" ]; then
|
||||||
|
PATHS="$PATHS test/test_device_atomicity.py test/test_mcp_disablement.py test/test_plugin_helper.py test/test_wol_validation.py"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If nothing is selected, default to the whole test folder
|
||||||
|
if [ -z "$PATHS" ]; then PATHS="test/"; fi
|
||||||
|
|
||||||
|
echo "final_paths=$PATHS" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Run Docker Integration Script
|
||||||
|
run: |
|
||||||
|
chmod +x ./scripts/run_tests_in_docker_environment.sh
|
||||||
|
|
||||||
|
# We update the pytest command to use the specific paths built above.
|
||||||
|
# Note: We still keep your 'not' filter to skip E2E tests unless you want them.
|
||||||
|
TARGET_PATHS="${{ steps.builder.outputs.final_paths }}"
|
||||||
|
SED_COMMAND="pytest $TARGET_PATHS -m 'not (docker or compose or feature_complete)'"
|
||||||
|
|
||||||
|
echo "🚀 Targeted Pytest Command: $SED_COMMAND"
|
||||||
|
|
||||||
|
sed -i "s|pytest -m 'not (docker or compose or feature_complete)'|$SED_COMMAND|g" ./scripts/run_tests_in_docker_environment.sh
|
||||||
|
|
||||||
|
./scripts/run_tests_in_docker_environment.sh
|
||||||
|
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker stop netalertx-test-container || true
|
||||||
|
docker rm netalertx-test-container || true
|
||||||
4
.github/workflows/social_post_on_release.yml → .github/workflows/social-post-on-release.yml
vendored
Executable file → Normal file
4
.github/workflows/social_post_on_release.yml → .github/workflows/social-post-on-release.yml
vendored
Executable file → Normal file
@@ -7,8 +7,8 @@ jobs:
|
|||||||
post-discord:
|
post-discord:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Wait for 15 minutes
|
- name: Wait for 60 minutes
|
||||||
run: sleep 900 # 15 minutes delay
|
run: sleep 3600 # 60 minutes delay
|
||||||
|
|
||||||
- name: Post to Discord
|
- name: Post to Discord
|
||||||
run: |
|
run: |
|
||||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -11,6 +11,7 @@ nohup.out
|
|||||||
config/*
|
config/*
|
||||||
.ash_history
|
.ash_history
|
||||||
.VERSION
|
.VERSION
|
||||||
|
.VERSION_PREV
|
||||||
config/pialert.conf
|
config/pialert.conf
|
||||||
config/app.conf
|
config/app.conf
|
||||||
db/*
|
db/*
|
||||||
@@ -23,6 +24,8 @@ front/api/*
|
|||||||
/api/*
|
/api/*
|
||||||
**/plugins/**/*.log
|
**/plugins/**/*.log
|
||||||
**/plugins/cloud_services/*
|
**/plugins/cloud_services/*
|
||||||
|
**/plugins/cloud_connector/*
|
||||||
|
**/plugins/heartbeat/*
|
||||||
**/%40eaDir/
|
**/%40eaDir/
|
||||||
**/@eaDir/
|
**/@eaDir/
|
||||||
|
|
||||||
@@ -43,3 +46,7 @@ front/css/cloud_services.css
|
|||||||
|
|
||||||
docker-compose.yml.ffsb42
|
docker-compose.yml.ffsb42
|
||||||
.env.omada.ffsb42
|
.env.omada.ffsb42
|
||||||
|
.venv
|
||||||
|
test_mounts/
|
||||||
|
.gemini/settings.json
|
||||||
|
.vscode/mcp.json
|
||||||
|
|||||||
2
.hadolint.yaml
Normal file
2
.hadolint.yaml
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
ignored:
|
||||||
|
- DL3018
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
import sys, importlib
|
|
||||||
mods = [
|
|
||||||
'json', 'simplejson',
|
|
||||||
'httplib', 'http.client',
|
|
||||||
'urllib2', 'urllib.request',
|
|
||||||
'Queue', 'queue',
|
|
||||||
'cStringIO', 'StringIO', 'io',
|
|
||||||
'md5', 'hashlib',
|
|
||||||
'ssl'
|
|
||||||
]
|
|
||||||
print('PYTHON_EXE:' + sys.executable)
|
|
||||||
print('PYTHON_VER:' + sys.version.replace('\n', ' '))
|
|
||||||
for m in mods:
|
|
||||||
try:
|
|
||||||
mod = importlib.import_module(m)
|
|
||||||
ver = getattr(mod, '__version__', None)
|
|
||||||
if ver is None:
|
|
||||||
# try common attributes
|
|
||||||
ver = getattr(mod, 'version', None)
|
|
||||||
info = (' version=' + str(ver)) if ver is not None else ''
|
|
||||||
print('OK %s%s' % (m, info))
|
|
||||||
except Exception as e:
|
|
||||||
print('MISSING %s %s: %s' % (m, e.__class__.__name__, e))
|
|
||||||
8
.vscode/launch.json
vendored
8
.vscode/launch.json
vendored
@@ -29,6 +29,14 @@
|
|||||||
"pathMappings": {
|
"pathMappings": {
|
||||||
"/app": "${workspaceFolder}"
|
"/app": "${workspaceFolder}"
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Python: Current File",
|
||||||
|
"type": "debugpy",
|
||||||
|
"request": "launch",
|
||||||
|
"program": "${file}",
|
||||||
|
"console": "integratedTerminal",
|
||||||
|
"justMyCode": true
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
29
.vscode/settings.json
vendored
29
.vscode/settings.json
vendored
@@ -4,20 +4,33 @@
|
|||||||
"python.testing.pytestEnabled": true,
|
"python.testing.pytestEnabled": true,
|
||||||
"python.testing.unittestEnabled": false,
|
"python.testing.unittestEnabled": false,
|
||||||
"python.testing.pytestArgs": [
|
"python.testing.pytestArgs": [
|
||||||
"test"
|
"test"
|
||||||
],
|
],
|
||||||
// Ensure VS Code uses the devcontainer virtualenv
|
// NetAlertX devcontainer uses /opt/venv; this ensures pip/pytest are available for discovery.
|
||||||
"python.defaultInterpreterPath": "/opt/venv/bin/python",
|
"python.defaultInterpreterPath": "/opt/venv/bin/python",
|
||||||
|
"python.testing.cwd": "${workspaceFolder}",
|
||||||
|
"python.testing.autoTestDiscoverOnSaveEnabled": true,
|
||||||
// Let the Python extension invoke pytest via the interpreter; avoid hardcoded paths
|
// Let the Python extension invoke pytest via the interpreter; avoid hardcoded paths
|
||||||
// Removed python.testing.pytestPath and legacy pytest.command overrides
|
// Removed python.testing.pytestPath and legacy pytest.command overrides
|
||||||
|
|
||||||
"terminal.integrated.defaultProfile.linux": "fish",
|
"terminal.integrated.defaultProfile.linux": "zsh",
|
||||||
"terminal.integrated.profiles.linux": {
|
"terminal.integrated.profiles.linux": {
|
||||||
"fish": {
|
"zsh": {
|
||||||
"path": "/usr/bin/fish"
|
"path": "/bin/zsh"
|
||||||
}
|
}
|
||||||
}
|
},
|
||||||
,
|
|
||||||
// Fallback for older VS Code versions or schema validators that don't accept custom profiles
|
// Fallback for older VS Code versions or schema validators that don't accept custom profiles
|
||||||
"terminal.integrated.shell.linux": "/usr/bin/fish"
|
"terminal.integrated.shell.linux": "/usr/bin/zsh"
|
||||||
|
,
|
||||||
|
"python.linting.flake8Enabled": true,
|
||||||
|
"python.linting.enabled": true,
|
||||||
|
"python.linting.flake8Args": [
|
||||||
|
"--config=.flake8"
|
||||||
|
],
|
||||||
|
"python.formatting.provider": "black",
|
||||||
|
"python.formatting.blackArgs": [
|
||||||
|
"--line-length=180"
|
||||||
|
],
|
||||||
|
"chat.useAgentSkills": true,
|
||||||
|
|
||||||
}
|
}
|
||||||
149
.vscode/tasks.json
vendored
149
.vscode/tasks.json
vendored
@@ -1,15 +1,31 @@
|
|||||||
{
|
{
|
||||||
"version": "2.0.0",
|
"version": "2.0.0",
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"id": "confirmPrune",
|
||||||
|
"type": "promptString",
|
||||||
|
"description": "DANGER! Type YES to confirm pruning all unused Docker resources. This will destroy containers, images, volumes, and networks!",
|
||||||
|
"default": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "prNumber",
|
||||||
|
"type": "promptString",
|
||||||
|
"description": "Enter GitHub PR Number",
|
||||||
|
"default": "1405"
|
||||||
|
}
|
||||||
|
],
|
||||||
"tasks": [
|
"tasks": [
|
||||||
{
|
{
|
||||||
"label": "[Any POSIX] Generate Devcontainer Configs",
|
"label": "[Any POSIX] Generate Devcontainer Configs",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": ".devcontainer/scripts/generate-configs.sh",
|
"command": ".devcontainer/scripts/generate-configs.sh",
|
||||||
|
"detail": "Generates devcontainer configs from the template. This must be run after changes to devcontainer to combine/merge them into the final config used by VS Code. Note- this has no bearing on the production or test image.",
|
||||||
"presentation": {
|
"presentation": {
|
||||||
"echo": true,
|
"echo": true,
|
||||||
"reveal": "always",
|
"reveal": "always",
|
||||||
"panel": "shared",
|
"panel": "shared",
|
||||||
"showReuseMessage": false
|
"showReuseMessage": false,
|
||||||
|
"group": "POSIX Tasks"
|
||||||
},
|
},
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"group": {
|
"group": {
|
||||||
@@ -24,12 +40,19 @@
|
|||||||
{
|
{
|
||||||
"label": "[Any] Docker system and build Prune",
|
"label": "[Any] Docker system and build Prune",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": ".devcontainer/scripts/confirm-docker-prune.sh",
|
"command": ".devcontainer/scripts/confirm-docker-prune.sh",
|
||||||
|
"detail": "DANGER! Prunes all unused Docker resources (images, containers, volumes, networks). Any stopped container will be wiped and data will be lost. Use with caution.",
|
||||||
|
"options": {
|
||||||
|
"env": {
|
||||||
|
"CONFIRM_PRUNE": "${input:confirmPrune}"
|
||||||
|
}
|
||||||
|
},
|
||||||
"presentation": {
|
"presentation": {
|
||||||
"echo": true,
|
"echo": true,
|
||||||
"reveal": "always",
|
"reveal": "always",
|
||||||
"panel": "shared",
|
"panel": "shared",
|
||||||
"showReuseMessage": false
|
"showReuseMessage": false,
|
||||||
|
"group": "Any"
|
||||||
},
|
},
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"group": {
|
"group": {
|
||||||
@@ -41,10 +64,36 @@
|
|||||||
"color": "terminal.ansiRed"
|
"color": "terminal.ansiRed"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"label": "[Dev Container] Load Sample Devices",
|
||||||
|
"type": "shell",
|
||||||
|
"command": "./isDevContainer.sh || exit 1; ./load-devices.sh",
|
||||||
|
"detail": "Generates a synthetic device inventory and imports it into the devcontainer database via /devices/import.",
|
||||||
|
"options": {
|
||||||
|
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts",
|
||||||
|
"env": {
|
||||||
|
"CSV_PATH": "/tmp/netalertx-devices.csv"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"presentation": {
|
||||||
|
"echo": true,
|
||||||
|
"reveal": "always",
|
||||||
|
"panel": "shared",
|
||||||
|
"showReuseMessage": false,
|
||||||
|
"clear": false,
|
||||||
|
"group": "Devcontainer"
|
||||||
|
},
|
||||||
|
"problemMatcher": [],
|
||||||
|
"icon": {
|
||||||
|
"id": "cloud-upload",
|
||||||
|
"color": "terminal.ansiYellow"
|
||||||
|
}
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"label": "[Dev Container] Re-Run Startup Script",
|
"label": "[Dev Container] Re-Run Startup Script",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": "./isDevContainer.sh || exit 1;/workspaces/NetAlertX/.devcontainer/scripts/setup.sh",
|
"command": "./isDevContainer.sh || exit 1;/workspaces/NetAlertX/.devcontainer/scripts/setup.sh",
|
||||||
|
"detail": "The startup script runs directly after the container is started. It reprovisions permissions, links folders, and performs other setup tasks. Run this if you have made changes to the setup script or need to reprovision the container.",
|
||||||
"options": {
|
"options": {
|
||||||
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
||||||
},
|
},
|
||||||
@@ -54,7 +103,6 @@
|
|||||||
"panel": "shared",
|
"panel": "shared",
|
||||||
"showReuseMessage": false
|
"showReuseMessage": false
|
||||||
},
|
},
|
||||||
|
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"icon": {
|
"icon": {
|
||||||
"id": "beaker",
|
"id": "beaker",
|
||||||
@@ -65,6 +113,7 @@
|
|||||||
"label": "[Dev Container] Start Backend (Python)",
|
"label": "[Dev Container] Start Backend (Python)",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": "./isDevContainer.sh || exit 1; /services/start-backend.sh",
|
"command": "./isDevContainer.sh || exit 1; /services/start-backend.sh",
|
||||||
|
"detail": "Restarts the NetAlertX backend (Python) service in the dev container. This may take 5 seconds to be completely ready.",
|
||||||
"options": {
|
"options": {
|
||||||
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
||||||
},
|
},
|
||||||
@@ -73,7 +122,8 @@
|
|||||||
"reveal": "always",
|
"reveal": "always",
|
||||||
"panel": "shared",
|
"panel": "shared",
|
||||||
"showReuseMessage": false,
|
"showReuseMessage": false,
|
||||||
"clear": false
|
"clear": false,
|
||||||
|
"group": "Devcontainer"
|
||||||
},
|
},
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"icon": {
|
"icon": {
|
||||||
@@ -85,6 +135,7 @@
|
|||||||
"label": "[Dev Container] Start CronD (Scheduler)",
|
"label": "[Dev Container] Start CronD (Scheduler)",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": "./isDevContainer.sh || exit 1; /services/start-crond.sh",
|
"command": "./isDevContainer.sh || exit 1; /services/start-crond.sh",
|
||||||
|
"detail": "Stops and restarts the crond service.",
|
||||||
"options": {
|
"options": {
|
||||||
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
||||||
},
|
},
|
||||||
@@ -93,7 +144,8 @@
|
|||||||
"reveal": "always",
|
"reveal": "always",
|
||||||
"panel": "shared",
|
"panel": "shared",
|
||||||
"showReuseMessage": false,
|
"showReuseMessage": false,
|
||||||
"clear": false
|
"clear": false,
|
||||||
|
"group": "Devcontainer"
|
||||||
},
|
},
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"icon": {
|
"icon": {
|
||||||
@@ -105,6 +157,7 @@
|
|||||||
"label": "[Dev Container] Start Frontend (nginx and PHP-FPM)",
|
"label": "[Dev Container] Start Frontend (nginx and PHP-FPM)",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": "./isDevContainer.sh || exit 1; /services/start-php-fpm.sh & /services/start-nginx.sh &",
|
"command": "./isDevContainer.sh || exit 1; /services/start-php-fpm.sh & /services/start-nginx.sh &",
|
||||||
|
"detail": "Stops and restarts the NetAlertX frontend services (nginx and PHP-FPM) in the dev container. This launches almost instantly.",
|
||||||
"options": {
|
"options": {
|
||||||
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
||||||
|
|
||||||
@@ -114,7 +167,8 @@
|
|||||||
"reveal": "always",
|
"reveal": "always",
|
||||||
"panel": "shared",
|
"panel": "shared",
|
||||||
"showReuseMessage": false,
|
"showReuseMessage": false,
|
||||||
"clear": false
|
"clear": false,
|
||||||
|
"group": "Devcontainer"
|
||||||
},
|
},
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"icon": {
|
"icon": {
|
||||||
@@ -126,6 +180,7 @@
|
|||||||
"label": "[Dev Container] Stop Frontend & Backend Services",
|
"label": "[Dev Container] Stop Frontend & Backend Services",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": "./isDevContainer.sh || exit 1; pkill -f 'php-fpm83|nginx|crond|python3' || true",
|
"command": "./isDevContainer.sh || exit 1; pkill -f 'php-fpm83|nginx|crond|python3' || true",
|
||||||
|
"detail": "Stops all NetAlertX services running in the dev container.",
|
||||||
"options": {
|
"options": {
|
||||||
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
||||||
},
|
},
|
||||||
@@ -133,7 +188,8 @@
|
|||||||
"echo": true,
|
"echo": true,
|
||||||
"reveal": "always",
|
"reveal": "always",
|
||||||
"panel": "shared",
|
"panel": "shared",
|
||||||
"showReuseMessage": false
|
"showReuseMessage": false,
|
||||||
|
"group": "Devcontainer"
|
||||||
},
|
},
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"icon": {
|
"icon": {
|
||||||
@@ -142,29 +198,55 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"label": "[Dev Container] List NetAlertX Ports",
|
"label": "[Any] Build Unit Test Docker image",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": "list-ports.sh",
|
"command": "docker buildx build -t netalertx-test . && echo '🧪 Unit Test Docker image built: netalertx-test'",
|
||||||
"options": {
|
"detail": "This must be run after changes to the container. Unit testing will not register changes until after this image is rebuilt. It takes about 30 seconds to build unless changes to the venv stage are made. venv takes 90s alone.",
|
||||||
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
|
||||||
},
|
|
||||||
"presentation": {
|
"presentation": {
|
||||||
"echo": true,
|
"echo": true,
|
||||||
"reveal": "always",
|
"reveal": "always",
|
||||||
"panel": "shared",
|
"panel": "shared",
|
||||||
"showReuseMessage": false
|
"showReuseMessage": false,
|
||||||
|
"group": "Any"
|
||||||
|
|
||||||
|
},
|
||||||
|
"problemMatcher": [],
|
||||||
|
"group": {
|
||||||
|
"kind": "build",
|
||||||
|
"isDefault": false
|
||||||
|
},
|
||||||
|
"icon": {
|
||||||
|
"id": "beaker",
|
||||||
|
"color": "terminal.ansiBlue"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"label": "[Dev Container] Wipe and Regenerate Database",
|
||||||
|
"type": "shell",
|
||||||
|
"command": "killall 'python3' || true && sleep 1 && rm -rf /data/db/* /data/config/* && bash /entrypoint.d/15-first-run-config.sh && bash /entrypoint.d/20-first-run-db.sh && echo '✅ Database and config wiped and regenerated'",
|
||||||
|
"detail": "Wipes devcontainer db and config. Provides a fresh start in devcontainer, run this task, then run the Rerun Startup Task",
|
||||||
|
"options": {},
|
||||||
|
"presentation": {
|
||||||
|
"echo": true,
|
||||||
|
"reveal": "always",
|
||||||
|
"panel": "shared",
|
||||||
|
"showReuseMessage": false,
|
||||||
|
"group": "Devcontainer"
|
||||||
},
|
},
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"icon": {
|
"icon": {
|
||||||
"id": "output",
|
"id": "database",
|
||||||
"color": "terminal.ansiBlue"
|
"color": "terminal.ansiRed"
|
||||||
}
|
}
|
||||||
}
|
},
|
||||||
,
|
|
||||||
{
|
{
|
||||||
"label": "[Any] Build Unit Test Docker image",
|
"label": "Build & Launch Prodcution Docker Container",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": "docker buildx build -t netalertx-test . && echo '🧪 Unit Test Docker image built: netalertx-test'",
|
"command": "docker compose up -d --build --force-recreate",
|
||||||
|
"detail": "Before launching, ensure VSCode Ports are closed and services are stopped. Tasks: Stop Frontend & Backend Services & Remote: Close Unused Forwarded Ports to ensure proper operation of the new container.",
|
||||||
|
"options": {
|
||||||
|
"cwd": "/workspaces/NetAlertX"
|
||||||
|
},
|
||||||
"presentation": {
|
"presentation": {
|
||||||
"echo": true,
|
"echo": true,
|
||||||
"reveal": "always",
|
"reveal": "always",
|
||||||
@@ -177,9 +259,34 @@
|
|||||||
"isDefault": false
|
"isDefault": false
|
||||||
},
|
},
|
||||||
"icon": {
|
"icon": {
|
||||||
"id": "beaker",
|
"id": "package",
|
||||||
"color": "terminal.ansiBlue"
|
"color": "terminal.ansiBlue"
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"label": "Analyze PR Instructions",
|
||||||
|
"type": "shell",
|
||||||
|
"command": "python3",
|
||||||
|
"detail": "Pull all of Coderabbit's suggestions from a pull request. Requires `gh auth login` first.",
|
||||||
|
"options": {
|
||||||
|
"cwd": "/workspaces/NetAlertX/.devcontainer/scripts"
|
||||||
|
},
|
||||||
|
"args": [
|
||||||
|
"/workspaces/NetAlertX/.devcontainer/scripts/coderabbit-pr-parser.py",
|
||||||
|
"${input:prNumber}"
|
||||||
|
],
|
||||||
|
"problemMatcher": [],
|
||||||
|
"presentation": {
|
||||||
|
"echo": true,
|
||||||
|
"reveal": "always",
|
||||||
|
"panel": "new",
|
||||||
|
"showReuseMessage": false,
|
||||||
|
"focus": true
|
||||||
|
},
|
||||||
|
"icon": {
|
||||||
|
"id": "comment-discussion",
|
||||||
|
"color": "terminal.ansiBlue"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,23 +1,38 @@
|
|||||||
# 🤝 Contributing to NetAlertX
|
# Contributing to NetAlertX
|
||||||
|
|
||||||
First off, **thank you** for taking the time to contribute! NetAlertX is built and improved with the help of passionate people like you.
|
First off, **thank you** for taking the time to contribute! NetAlertX is built and improved with the help of passionate people like you.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 📂 Issues, Bugs, and Feature Requests
|
## Issues, Bugs, and Feature Requests
|
||||||
|
|
||||||
Please use the [GitHub Issue Tracker](https://github.com/jokob-sk/NetAlertX/issues) for:
|
Please use the [GitHub Issue Tracker](https://github.com/netalertx/NetAlertX/issues) for:
|
||||||
- Bug reports 🐞
|
- Bug reports
|
||||||
- Feature requests 💡
|
- Feature requests
|
||||||
- Documentation feedback 📖
|
- Documentation feedback
|
||||||
|
|
||||||
Before opening a new issue:
|
Before opening a new issue:
|
||||||
- 🛑 [Check Common Issues & Debug Tips](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DEBUG_TIPS.md#common-issues)
|
- [Check Common Issues & Debug Tips](https://docs.netalertx.com/DEBUG_TIPS#common-issues)
|
||||||
- 🔍 [Search Closed Issues](https://github.com/jokob-sk/NetAlertX/issues?q=is%3Aissue+is%3Aclosed)
|
- [Search Closed Issues](https://github.com/netalertx/NetAlertX/issues?q=is%3Aissue+is%3Aclosed)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🚀 Submitting Pull Requests (PRs)
|
## Use of AI
|
||||||
|
|
||||||
|
Use of AI-assisted tools is permitted, provided all generated code is reviewed, understood, and verified before submission.
|
||||||
|
|
||||||
|
- All AI-generated code must meet the project's **quality, security, and performance standards**.
|
||||||
|
- Contributors are responsible for **fully understanding** any code they submit, regardless of how it was produced.
|
||||||
|
- Prefer **clarity and maintainability over cleverness or brevity**. Readable code is always favored over dense or obfuscated implementations.
|
||||||
|
- Follow the **DRY (Don't Repeat Yourself) principle** where appropriate, without sacrificing readability.
|
||||||
|
- Do not submit code that you cannot confidently explain or debug.
|
||||||
|
|
||||||
|
All changes must pass the **full test suite** before opening a PR.
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Submitting Pull Requests (PRs)
|
||||||
|
|
||||||
We welcome PRs to improve the code, docs, or UI!
|
We welcome PRs to improve the code, docs, or UI!
|
||||||
|
|
||||||
@@ -27,11 +42,24 @@ Please:
|
|||||||
- Follow existing **code style and structure**
|
- Follow existing **code style and structure**
|
||||||
- Provide a clear title and description for your PR
|
- Provide a clear title and description for your PR
|
||||||
- If relevant, add or update tests and documentation
|
- If relevant, add or update tests and documentation
|
||||||
- For plugins, refer to the [Plugin Dev Guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS_DEV.md)
|
- For plugins, refer to the [Plugin Dev Guide](https://docs.netalertx.com/PLUGINS_DEV)
|
||||||
|
- Switch the PR to DRAFT mode if still being worked on
|
||||||
|
- Keep PRs **focused and minimal** — avoid unrelated changes in a single PR
|
||||||
|
- PRs that do not meet these guidelines may be closed without review
|
||||||
|
|
||||||
|
## Commit Messages
|
||||||
|
|
||||||
|
- Use clear, descriptive commit messages
|
||||||
|
- Explain *why* a change was made, not just *what* changed
|
||||||
|
- Reference related issues where applicable
|
||||||
|
|
||||||
|
## Code Quality
|
||||||
|
|
||||||
|
- Read and follow the [code standards](/.github/skills/code-standards/SKILL.md)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🌟 First-Time Contributors
|
## First-Time Contributors
|
||||||
|
|
||||||
New to open source? Check out these resources:
|
New to open source? Check out these resources:
|
||||||
- [How to Fork and Submit a PR](https://opensource.guide/how-to-contribute/)
|
- [How to Fork and Submit a PR](https://opensource.guide/how-to-contribute/)
|
||||||
@@ -39,15 +67,15 @@ New to open source? Check out these resources:
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 🔐 Code of Conduct
|
## Code of Conduct
|
||||||
|
|
||||||
By participating, you agree to follow our [Code of Conduct](./CODE_OF_CONDUCT.md), which ensures a respectful and welcoming community.
|
By participating, you agree to follow our [Code of Conduct](./CODE_OF_CONDUCT.md), which ensures a respectful and welcoming community.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 📬 Contact
|
## Contact
|
||||||
|
|
||||||
If you have more in-depth questions or want to discuss contributing in other ways, feel free to reach out at:
|
If you have more in-depth questions or want to discuss contributing in other ways, feel free to reach out at:
|
||||||
📧 [jokob@duck.com](mailto:jokob@duck.com?subject=NetAlertX%20Contribution)
|
[jokob.sk@gmail.com](mailto:jokob.sk@gmail.com?subject=NetAlertX%20Contribution)
|
||||||
|
|
||||||
We appreciate every contribution, big or small! 💙
|
We appreciate every contribution, big or small! 💙
|
||||||
|
|||||||
157
Dockerfile
157
Dockerfile
@@ -1,16 +1,16 @@
|
|||||||
# The NetAlertX Dockerfile has 3 stages:
|
# The NetAlertX Dockerfile has 3 stages:
|
||||||
#
|
#
|
||||||
# Stage 1. Builder - NetAlertX Requires special tools and packages to build our virtual environment, but
|
# Stage 1. Builder - NetAlertX Requires special tools and packages to build our virtual environment, but
|
||||||
# which are not needed in future stages. We build the builder and extract the venv for runner to use as
|
# which are not needed in future stages. We build the builder and extract the venv for runner to use as
|
||||||
# a base.
|
# a base.
|
||||||
#
|
#
|
||||||
# Stage 2. Runner builds the bare minimum requirements to create an operational NetAlertX. The primary
|
# Stage 2. Runner builds the bare minimum requirements to create an operational NetAlertX. The primary
|
||||||
# reason for breaking at this stage is it leaves the system in a proper state for devcontainer operation
|
# reason for breaking at this stage is it leaves the system in a proper state for devcontainer operation
|
||||||
# This image also provides a break-out point for uses who wish to execute the anti-pattern of using a
|
# This image also provides a break-out point for uses who wish to execute the anti-pattern of using a
|
||||||
# docker container as a VM for experimentation and various development patterns.
|
# docker container as a VM for experimentation and various development patterns.
|
||||||
#
|
#
|
||||||
# Stage 3. Hardened removes root, sudoers, folders, permissions, and locks the system down into a read-only
|
# Stage 3. Hardened removes root, sudoers, folders, permissions, and locks the system down into a read-only
|
||||||
# compatible image. While NetAlertX does require some read-write operations, this image can guarantee the
|
# compatible image. While NetAlertX does require some read-write operations, this image can guarantee the
|
||||||
# code pushed out by the project is the only code which will run on the system after each container restart.
|
# code pushed out by the project is the only code which will run on the system after each container restart.
|
||||||
# It reduces the chance of system hijacking and operates with all modern security protocols in place as is
|
# It reduces the chance of system hijacking and operates with all modern security protocols in place as is
|
||||||
# expected from a security appliance.
|
# expected from a security appliance.
|
||||||
@@ -26,13 +26,26 @@ ENV PATH="/opt/venv/bin:$PATH"
|
|||||||
|
|
||||||
# Install build dependencies
|
# Install build dependencies
|
||||||
COPY requirements.txt /tmp/requirements.txt
|
COPY requirements.txt /tmp/requirements.txt
|
||||||
RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git \
|
# hadolint ignore=DL3018
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
bash \
|
||||||
|
shadow \
|
||||||
|
python3 \
|
||||||
|
python3-dev \
|
||||||
|
py3-psutil \
|
||||||
|
gcc \
|
||||||
|
musl-dev \
|
||||||
|
libffi-dev \
|
||||||
|
openssl-dev \
|
||||||
|
git \
|
||||||
|
rust \
|
||||||
|
cargo \
|
||||||
&& python -m venv /opt/venv
|
&& python -m venv /opt/venv
|
||||||
|
|
||||||
# Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy
|
# Upgrade pip/wheel/setuptools and install Python packages
|
||||||
# into hardened stage without worrying about permissions and keeps image size small. Keeping the commands
|
# hadolint ignore=DL3013, DL3042
|
||||||
# together makes for a slightly smaller image size.
|
RUN python -m pip install --upgrade pip setuptools wheel && \
|
||||||
RUN pip install -r /tmp/requirements.txt && \
|
pip install --prefer-binary --no-cache-dir -r /tmp/requirements.txt && \
|
||||||
chmod -R u-rwx,g-rwx /opt
|
chmod -R u-rwx,g-rwx /opt
|
||||||
|
|
||||||
# second stage is the main runtime stage with just the minimum required to run the application
|
# second stage is the main runtime stage with just the minimum required to run the application
|
||||||
@@ -40,17 +53,25 @@ RUN pip install -r /tmp/requirements.txt && \
|
|||||||
FROM alpine:3.22 AS runner
|
FROM alpine:3.22 AS runner
|
||||||
|
|
||||||
ARG INSTALL_DIR=/app
|
ARG INSTALL_DIR=/app
|
||||||
|
# Runtime service account (override at build; container user can still be overridden at run time)
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
# Read-only lock owner (separate from service account to avoid UID/GID collisions)
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
# NetAlertX app directories
|
# NetAlertX app directories
|
||||||
ENV NETALERTX_APP=${INSTALL_DIR}
|
ENV NETALERTX_APP=${INSTALL_DIR}
|
||||||
ENV NETALERTX_CONFIG=${NETALERTX_APP}/config
|
ENV NETALERTX_DATA=/data
|
||||||
|
ENV NETALERTX_CONFIG=${NETALERTX_DATA}/config
|
||||||
ENV NETALERTX_FRONT=${NETALERTX_APP}/front
|
ENV NETALERTX_FRONT=${NETALERTX_APP}/front
|
||||||
|
ENV NETALERTX_PLUGINS=${NETALERTX_FRONT}/plugins
|
||||||
ENV NETALERTX_SERVER=${NETALERTX_APP}/server
|
ENV NETALERTX_SERVER=${NETALERTX_APP}/server
|
||||||
ENV NETALERTX_API=${NETALERTX_APP}/api
|
ENV NETALERTX_API=/tmp/api
|
||||||
ENV NETALERTX_DB=${NETALERTX_APP}/db
|
ENV NETALERTX_DB=${NETALERTX_DATA}/db
|
||||||
ENV NETALERTX_DB_FILE=${NETALERTX_DB}/app.db
|
ENV NETALERTX_DB_FILE=${NETALERTX_DB}/app.db
|
||||||
ENV NETALERTX_BACK=${NETALERTX_APP}/back
|
ENV NETALERTX_BACK=${NETALERTX_APP}/back
|
||||||
ENV NETALERTX_LOG=${NETALERTX_APP}/log
|
ENV NETALERTX_LOG=/tmp/log
|
||||||
ENV NETALERTX_PLUGINS_LOG=${NETALERTX_LOG}/plugins
|
ENV NETALERTX_PLUGINS_LOG=${NETALERTX_LOG}/plugins
|
||||||
ENV NETALERTX_CONFIG_FILE=${NETALERTX_CONFIG}/app.conf
|
ENV NETALERTX_CONFIG_FILE=${NETALERTX_CONFIG}/app.conf
|
||||||
|
|
||||||
@@ -66,7 +87,8 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
|
|||||||
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
|
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
|
||||||
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
|
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
|
||||||
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
|
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
|
||||||
ENV LOG_CROND=${NETALERTX_LOG}/crond.log
|
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
|
||||||
|
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log
|
||||||
|
|
||||||
# System Services configuration files
|
# System Services configuration files
|
||||||
ENV ENTRYPOINT_CHECKS=/entrypoint.d
|
ENV ENTRYPOINT_CHECKS=/entrypoint.d
|
||||||
@@ -74,48 +96,50 @@ ENV SYSTEM_SERVICES=/services
|
|||||||
ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
|
ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
|
||||||
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
|
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
|
||||||
ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
|
ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
|
||||||
ENV SYSTEM_NGINX_CONFIG_FILE=${SYSTEM_NGINX_CONFIG}/nginx.conf
|
ENV SYSTEM_NGINX_CONFIG_TEMPLATE=${SYSTEM_NGINX_CONFIG}/netalertx.conf.template
|
||||||
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=${SYSTEM_NGINX_CONFIG}/conf.active
|
ENV SYSTEM_SERVICES_CONFIG_CRON=${SYSTEM_SERVICES_CONFIG}/cron
|
||||||
|
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
|
||||||
|
ENV SYSTEM_SERVICES_ACTIVE_CONFIG_FILE=${SYSTEM_SERVICES_ACTIVE_CONFIG}/nginx.conf
|
||||||
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
|
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
|
||||||
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
|
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
|
||||||
ENV SYSTEM_SERVICES_CROND=${SYSTEM_SERVICES_CONFIG}/crond
|
ENV SYSTEM_SERVICES_RUN=/tmp/run
|
||||||
ENV SYSTEM_SERVICES_RUN=${SYSTEM_SERVICES}/run
|
|
||||||
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
|
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
|
||||||
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
|
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
|
||||||
ENV PHP_FPM_CONFIG_FILE=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.conf
|
ENV PHP_FPM_CONFIG_FILE=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.conf
|
||||||
ENV READ_ONLY_FOLDERS="${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER} ${SYSTEM_SERVICES} \
|
ENV READ_ONLY_FOLDERS="${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER} ${SYSTEM_SERVICES} \
|
||||||
${SYSTEM_SERVICES_CONFIG} ${ENTRYPOINT_CHECKS}"
|
${SYSTEM_SERVICES_CONFIG} ${ENTRYPOINT_CHECKS}"
|
||||||
ENV READ_WRITE_FOLDERS="${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_API} ${NETALERTX_LOG} \
|
ENV READ_WRITE_FOLDERS="${NETALERTX_DATA} ${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_API} \
|
||||||
${NETALERTX_PLUGINS_LOG} ${SYSTEM_SERVICES_RUN} ${SYSTEM_SERVICES_RUN_TMP} \
|
${NETALERTX_LOG} ${NETALERTX_PLUGINS_LOG} ${SYSTEM_SERVICES_RUN} \
|
||||||
${SYSTEM_SERVICES_RUN_LOG} ${SYSTEM_NGINX_CONFIG}"
|
${SYSTEM_SERVICES_RUN_TMP} ${SYSTEM_SERVICES_RUN_LOG} \
|
||||||
|
${SYSTEM_SERVICES_ACTIVE_CONFIG}"
|
||||||
|
|
||||||
#Python environment
|
#Python environment
|
||||||
ENV PYTHONUNBUFFERED=1
|
ENV PYTHONUNBUFFERED=1
|
||||||
ENV VIRTUAL_ENV=/opt/venv
|
ENV VIRTUAL_ENV=/opt/venv
|
||||||
ENV VIRTUAL_ENV_BIN=/opt/venv/bin
|
ENV VIRTUAL_ENV_BIN=/opt/venv/bin
|
||||||
ENV PYTHONPATH=${NETALERTX_APP}:${NETALERTX_SERVER}:${VIRTUAL_ENV}/lib/python3.12/site-packages
|
ENV PYTHONPATH=${NETALERTX_APP}:${NETALERTX_SERVER}:${NETALERTX_PLUGINS}:${VIRTUAL_ENV}/lib/python3.12/site-packages
|
||||||
ENV PATH="${SYSTEM_SERVICES}:${VIRTUAL_ENV_BIN}:$PATH"
|
ENV PATH="${SYSTEM_SERVICES}:${VIRTUAL_ENV_BIN}:$PATH"
|
||||||
|
|
||||||
# App Environment
|
# App Environment
|
||||||
ENV LISTEN_ADDR=0.0.0.0
|
ENV LISTEN_ADDR=0.0.0.0
|
||||||
ENV PORT=20211
|
ENV PORT=20211
|
||||||
ENV NETALERTX_DEBUG=0
|
ENV NETALERTX_DEBUG=0
|
||||||
ENV VENDORSPATH=/app/back/ieee-oui.txt
|
ENV VENDORSPATH=/app/back/ieee-oui.txt
|
||||||
ENV VENDORSPATH_NEWEST=/services/run/tmp/ieee-oui.txt
|
ENV VENDORSPATH_NEWEST=${SYSTEM_SERVICES_RUN_TMP}/ieee-oui.txt
|
||||||
ENV ENVIRONMENT=alpine
|
ENV ENVIRONMENT=alpine
|
||||||
ENV READ_ONLY_USER=readonly READ_ONLY_GROUP=readonly
|
ENV READ_ONLY_USER=readonly READ_ONLY_GROUP=readonly
|
||||||
ENV NETALERTX_USER=netalertx NETALERTX_GROUP=netalertx
|
ENV NETALERTX_USER=netalertx NETALERTX_GROUP=netalertx
|
||||||
ENV LANG=C.UTF-8
|
ENV LANG=C.UTF-8
|
||||||
|
|
||||||
|
|
||||||
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \
|
RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap fping \
|
||||||
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates \
|
||||||
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst \
|
sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 py3-psutil envsubst \
|
||||||
nginx shadow && \
|
nginx supercronic shadow su-exec jq && \
|
||||||
rm -Rf /var/cache/apk/* && \
|
rm -Rf /var/cache/apk/* && \
|
||||||
rm -Rf /etc/nginx && \
|
rm -Rf /etc/nginx && \
|
||||||
addgroup -g 20211 ${NETALERTX_GROUP} && \
|
addgroup -g ${NETALERTX_GID} ${NETALERTX_GROUP} && \
|
||||||
adduser -u 20211 -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \
|
adduser -u ${NETALERTX_UID} -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \
|
||||||
apk del shadow
|
apk del shadow
|
||||||
|
|
||||||
|
|
||||||
@@ -125,77 +149,98 @@ COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} install/production-filesystem/
|
|||||||
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 back ${NETALERTX_BACK}
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 back ${NETALERTX_BACK}
|
||||||
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 front ${NETALERTX_FRONT}
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 front ${NETALERTX_FRONT}
|
||||||
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 server ${NETALERTX_SERVER}
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 server ${NETALERTX_SERVER}
|
||||||
RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 755 ${NETALERTX_API} \
|
|
||||||
${NETALERTX_LOG} ${SYSTEM_SERVICES_RUN_TMP} ${SYSTEM_SERVICES_RUN_LOG} && \
|
# Create required folders with correct ownership and permissions
|
||||||
|
RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \
|
||||||
sh -c "find ${NETALERTX_APP} -type f \( -name '*.sh' -o -name 'speedtest-cli' \) \
|
sh -c "find ${NETALERTX_APP} -type f \( -name '*.sh' -o -name 'speedtest-cli' \) \
|
||||||
-exec chmod 750 {} \;"
|
-exec chmod 750 {} \;"
|
||||||
|
|
||||||
# Copy the virtualenv from the builder stage
|
# Copy version information into the image
|
||||||
COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION
|
||||||
|
|
||||||
|
# Copy the virtualenv from the builder stage (owned by readonly lock owner)
|
||||||
|
COPY --from=builder --chown=${READONLY_UID}:${READONLY_GID} ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
||||||
|
|
||||||
|
|
||||||
# Initialize each service with the dockerfiles/init-*.sh scripts, once.
|
# Initialize each service with the dockerfiles/init-*.sh scripts, once.
|
||||||
# This is done after the copy of the venv to ensure the venv is in place
|
# This is done after the copy of the venv to ensure the venv is in place
|
||||||
# although it may be quicker to do it before the copy, it keeps the image
|
# although it may be quicker to do it before the copy, it keeps the image
|
||||||
# layers smaller to do it after.
|
# layers smaller to do it after.
|
||||||
RUN apk add libcap && \
|
# hadolint ignore=DL3018
|
||||||
setcap cap_net_raw+ep /bin/busybox && \
|
RUN for vfile in .VERSION; do \
|
||||||
|
if [ ! -f "${NETALERTX_APP}/${vfile}" ]; then \
|
||||||
|
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/${vfile}"; \
|
||||||
|
fi; \
|
||||||
|
chown ${READONLY_UID}:${READONLY_GID} "${NETALERTX_APP}/${vfile}"; \
|
||||||
|
done && \
|
||||||
|
apk add --no-cache libcap && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && \
|
||||||
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && \
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && \
|
||||||
setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
|
setcap cap_net_raw,cap_net_admin+eip "$(readlink -f ${VIRTUAL_ENV_BIN}/python)" && \
|
||||||
/bin/sh /build/init-nginx.sh && \
|
/bin/sh /build/init-nginx.sh && \
|
||||||
/bin/sh /build/init-php-fpm.sh && \
|
/bin/sh /build/init-php-fpm.sh && \
|
||||||
/bin/sh /build/init-crond.sh && \
|
/bin/sh /build/init-cron.sh && \
|
||||||
/bin/sh /build/init-backend.sh && \
|
/bin/sh /build/init-backend.sh && \
|
||||||
rm -rf /build && \
|
rm -rf /build && \
|
||||||
apk del libcap && \
|
apk del libcap && \
|
||||||
date +%s > ${NETALERTX_FRONT}/buildtimestamp.txt
|
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
|
||||||
|
|
||||||
|
|
||||||
ENTRYPOINT ["/bin/sh","/entrypoint.sh"]
|
ENTRYPOINT ["/bin/bash","/entrypoint.sh"]
|
||||||
|
|
||||||
# Final hardened stage to improve security by setting least possible permissions and removing sudo access.
|
# Final hardened stage to improve security by setting least possible permissions and removing sudo access.
|
||||||
# When complete, if the image is compromised, there's not much that can be done with it.
|
# When complete, if the image is compromised, there's not much that can be done with it.
|
||||||
# This stage is separate from Runner stage so that devcontainer can use the Runner stage.
|
# This stage is separate from Runner stage so that devcontainer can use the Runner stage.
|
||||||
FROM runner AS hardened
|
FROM runner AS hardened
|
||||||
|
|
||||||
|
# Re-declare UID/GID args for this stage
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
ENV UMASK=0077
|
ENV UMASK=0077
|
||||||
|
|
||||||
# Create readonly user and group with no shell access.
|
# Create readonly user and group with no shell access.
|
||||||
# Readonly user marks folders that are created by NetAlertX, but should not be modified.
|
# Readonly user marks folders that are created by NetAlertX, but should not be modified.
|
||||||
# AI may claim this is stupid, but it's actually least possible permissions as
|
# AI may claim this is stupid, but it's actually least possible permissions as
|
||||||
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
|
# read-only user cannot login, cannot sudo, has no write permission, and cannot even
|
||||||
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
|
# read the files it owns. The read-only user is ownership-as-a-lock hardening pattern.
|
||||||
RUN addgroup -g 20212 ${READ_ONLY_GROUP} && \
|
RUN addgroup -g ${READONLY_GID} "${READ_ONLY_GROUP}" && \
|
||||||
adduser -u 20212 -G ${READ_ONLY_GROUP} -D -h /app ${READ_ONLY_USER}
|
adduser -u ${READONLY_UID} -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}"
|
||||||
|
|
||||||
|
|
||||||
# reduce permissions to minimum necessary for all NetAlertX files and folders
|
# reduce permissions to minimum necessary for all NetAlertX files and folders
|
||||||
# Permissions 005 and 004 are not typos, they enable read-only. Everyone can
|
# Permissions 005 and 004 are not typos, they enable read-only. Everyone can
|
||||||
# read the read-only files, and nobody can write to them, even the readonly user.
|
# read the read-only files, and nobody can write to them, even the readonly user.
|
||||||
|
|
||||||
|
# hadolint ignore=SC2114
|
||||||
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
||||||
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
||||||
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
||||||
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \
|
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \
|
||||||
chown -R ${NETALERTX_USER}:${NETALERTX_GROUP} ${READ_WRITE_FOLDERS} && \
|
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /root-entrypoint.sh /opt /opt/venv && \
|
||||||
chmod -R 600 ${READ_WRITE_FOLDERS} && \
|
chmod 005 /entrypoint.sh /root-entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
||||||
find ${READ_WRITE_FOLDERS} -type d -exec chmod 700 {} + && \
|
# Do not bake first-run artifacts into the image. If present, Docker volume copy-up
|
||||||
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \
|
# will persist restrictive ownership/modes into fresh named volumes, breaking
|
||||||
chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
# arbitrary non-root UID/GID runs.
|
||||||
for dir in ${READ_WRITE_FOLDERS}; do \
|
rm -f \
|
||||||
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 "$dir"; \
|
"${NETALERTX_CONFIG}/app.conf" \
|
||||||
done && \
|
"${NETALERTX_DB_FILE}" \
|
||||||
|
"${NETALERTX_DB_FILE}-shm" \
|
||||||
|
"${NETALERTX_DB_FILE}-wal" || true && \
|
||||||
apk del apk-tools && \
|
apk del apk-tools && \
|
||||||
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
|
rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \
|
||||||
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \
|
/lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \
|
||||||
/srv /media && \
|
/srv /media && \
|
||||||
sed -i "/^\(${READ_ONLY_USER}\|${NETALERTX_USER}\):/!d" /etc/passwd && \
|
# Preserve root and system identities so hardened entrypoint never needs to patch /etc/passwd or /etc/group at runtime.
|
||||||
sed -i "/^\(${READ_ONLY_GROUP}\|${NETALERTX_GROUP}\):/!d" /etc/group && \
|
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
||||||
echo -ne '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
USER "0"
|
||||||
|
|
||||||
USER netalertx
|
# Call root-entrypoint.sh which drops priviliges to run entrypoint.sh.
|
||||||
|
ENTRYPOINT ["/root-entrypoint.sh"]
|
||||||
|
|
||||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
||||||
CMD /services/healthcheck.sh
|
CMD /services/healthcheck.sh
|
||||||
|
|||||||
@@ -1,65 +1,56 @@
|
|||||||
# Warning - use of this unhardened image is not recommended for production use.
|
# Stage 1: Builder
|
||||||
# This image is provided for backward compatibility, development and testing purposes only.
|
# Install build dependencies and create virtual environment
|
||||||
# For production use, please use the hardened image built with Alpine. This image attempts to
|
FROM debian:bookworm-slim AS builder
|
||||||
# treat a container as an operating system, which is an anti-pattern and a common source of
|
|
||||||
# security issues.
|
|
||||||
#
|
|
||||||
# The default Dockerfile/docker-compose image contains the following security improvements
|
|
||||||
# over the Debian image:
|
|
||||||
# - read-only filesystem
|
|
||||||
# - no sudo access
|
|
||||||
# - least possible permissions on all files and folders
|
|
||||||
# - Root user has all permissions revoked and is unused
|
|
||||||
# - Secure umask applied so files are owner-only by default
|
|
||||||
# - non-privileged user runs the application
|
|
||||||
# - no shell access for non-privileged users
|
|
||||||
# - no unnecessary packages or services
|
|
||||||
# - reduced capabilities
|
|
||||||
# - tmpfs for writable folders
|
|
||||||
# - healthcheck
|
|
||||||
# - no package managers
|
|
||||||
# - no compilers or build tools
|
|
||||||
# - no systemd, uses lightweight init system
|
|
||||||
# - no persistent storage except for config and db volumes
|
|
||||||
# - minimal image size due to segmented build stages
|
|
||||||
# - minimal base image (Alpine Linux)
|
|
||||||
# - minimal python environment (venv, no pip)
|
|
||||||
# - minimal stripped web server
|
|
||||||
# - minimal stripped php environment
|
|
||||||
# - minimal services (nginx, php-fpm, crond, no unnecessary services or service managers)
|
|
||||||
# - minimal users and groups (netalertx and readonly only, no others)
|
|
||||||
# - minimal permissions (read-only for most files and folders, write-only for necessary folders)
|
|
||||||
# - minimal capabilities (NET_ADMIN and NET_RAW only, no others)
|
|
||||||
# - minimal environment variables (only necessary ones, no others)
|
|
||||||
# - minimal entrypoint (only necessary commands, no others)
|
|
||||||
# - Uses the same base image as the development environmnment (Alpine Linux)
|
|
||||||
# - Uses the same services as the development environment (nginx, php-fpm, crond)
|
|
||||||
# - Uses the same environment variables as the development environment (only necessary ones, no others)
|
|
||||||
# - Uses the same file and folder structure as the development environment (only necessary ones, no others)
|
|
||||||
# NetAlertX is designed to be run as an unattended network security monitoring appliance, which means it
|
|
||||||
# should be able to operate without human intervention. Overall, the hardened image is designed to be as
|
|
||||||
# secure as possible while still being functional and is recommended because you cannot attack a surface
|
|
||||||
# that isn't there.
|
|
||||||
|
|
||||||
|
ENV PYTHONUNBUFFERED=1
|
||||||
|
ENV VIRTUAL_ENV=/opt/venv
|
||||||
|
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
|
||||||
|
|
||||||
FROM debian:bookworm-slim
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
python3 \
|
||||||
|
python3-dev \
|
||||||
|
python3-pip \
|
||||||
|
python3-psutil \
|
||||||
|
python3-venv \
|
||||||
|
gcc \
|
||||||
|
git \
|
||||||
|
libffi-dev \
|
||||||
|
libssl-dev \
|
||||||
|
rustc \
|
||||||
|
cargo \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
#TZ=Europe/London
|
RUN python3 -m venv ${VIRTUAL_ENV}
|
||||||
|
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
|
||||||
|
|
||||||
|
COPY requirements.txt /tmp/requirements.txt
|
||||||
|
RUN pip install --upgrade pip setuptools wheel && \
|
||||||
|
pip install --no-cache-dir -r /tmp/requirements.txt
|
||||||
|
|
||||||
|
# Stage 2: Runner
|
||||||
|
# Main runtime stage with minimum requirements
|
||||||
|
FROM debian:bookworm-slim AS runner
|
||||||
|
|
||||||
|
ARG INSTALL_DIR=/app
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
|
||||||
# NetAlertX app directories
|
|
||||||
ENV INSTALL_DIR=/app
|
|
||||||
ENV NETALERTX_APP=${INSTALL_DIR}
|
ENV NETALERTX_APP=${INSTALL_DIR}
|
||||||
ENV NETALERTX_CONFIG=${NETALERTX_APP}/config
|
ENV NETALERTX_DATA=/data
|
||||||
|
ENV NETALERTX_CONFIG=${NETALERTX_DATA}/config
|
||||||
ENV NETALERTX_FRONT=${NETALERTX_APP}/front
|
ENV NETALERTX_FRONT=${NETALERTX_APP}/front
|
||||||
|
ENV NETALERTX_PLUGINS=${NETALERTX_FRONT}/plugins
|
||||||
ENV NETALERTX_SERVER=${NETALERTX_APP}/server
|
ENV NETALERTX_SERVER=${NETALERTX_APP}/server
|
||||||
ENV NETALERTX_API=${NETALERTX_APP}/api
|
ENV NETALERTX_API=/tmp/api
|
||||||
ENV NETALERTX_DB=${NETALERTX_APP}/db
|
ENV NETALERTX_DB=${NETALERTX_DATA}/db
|
||||||
ENV NETALERTX_DB_FILE=${NETALERTX_DB}/app.db
|
ENV NETALERTX_DB_FILE=${NETALERTX_DB}/app.db
|
||||||
ENV NETALERTX_BACK=${NETALERTX_APP}/back
|
ENV NETALERTX_BACK=${NETALERTX_APP}/back
|
||||||
ENV NETALERTX_LOG=${NETALERTX_APP}/log
|
ENV NETALERTX_LOG=/tmp/log
|
||||||
ENV NETALERTX_PLUGINS_LOG=${NETALERTX_LOG}/plugins
|
ENV NETALERTX_PLUGINS_LOG=${NETALERTX_LOG}/plugins
|
||||||
|
ENV NETALERTX_CONFIG_FILE=${NETALERTX_CONFIG}/app.conf
|
||||||
|
|
||||||
# NetAlertX log files
|
|
||||||
ENV LOG_IP_CHANGES=${NETALERTX_LOG}/IP_changes.log
|
ENV LOG_IP_CHANGES=${NETALERTX_LOG}/IP_changes.log
|
||||||
ENV LOG_APP=${NETALERTX_LOG}/app.log
|
ENV LOG_APP=${NETALERTX_LOG}/app.log
|
||||||
ENV LOG_APP_FRONT=${NETALERTX_LOG}/app_front.log
|
ENV LOG_APP_FRONT=${NETALERTX_LOG}/app_front.log
|
||||||
@@ -71,99 +62,181 @@ ENV LOG_APP_PHP_ERRORS=${NETALERTX_LOG}/app.php_errors.log
|
|||||||
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
|
ENV LOG_EXECUTION_QUEUE=${NETALERTX_LOG}/execution_queue.log
|
||||||
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
|
ENV LOG_REPORT_OUTPUT_JSON=${NETALERTX_LOG}/report_output.json
|
||||||
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
|
ENV LOG_STDOUT=${NETALERTX_LOG}/stdout.log
|
||||||
ENV LOG_CROND=${NETALERTX_LOG}/crond.log
|
ENV LOG_CRON=${NETALERTX_LOG}/cron.log
|
||||||
|
ENV LOG_NGINX_ERROR=${NETALERTX_LOG}/nginx-error.log
|
||||||
|
|
||||||
# System Services configuration files
|
ENV ENTRYPOINT_CHECKS=/entrypoint.d
|
||||||
ENV SYSTEM_SERVICES=/services
|
ENV SYSTEM_SERVICES=/services
|
||||||
|
ENV SYSTEM_SERVICES_SCRIPTS=${SYSTEM_SERVICES}/scripts
|
||||||
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
|
ENV SYSTEM_SERVICES_CONFIG=${SYSTEM_SERVICES}/config
|
||||||
ENV SYSTEM_NGINIX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
|
ENV SYSTEM_NGINX_CONFIG=${SYSTEM_SERVICES_CONFIG}/nginx
|
||||||
ENV SYSTEM_NGINX_CONFIG_FILE=${SYSTEM_NGINIX_CONFIG}/nginx.conf
|
ENV SYSTEM_NGINX_CONFIG_TEMPLATE=${SYSTEM_NGINX_CONFIG}/netalertx.conf.template
|
||||||
ENV NETALERTX_CONFIG_FILE=${NETALERTX_CONFIG}/app.conf
|
ENV SYSTEM_SERVICES_CONFIG_CRON=${SYSTEM_SERVICES_CONFIG}/cron
|
||||||
|
ENV SYSTEM_SERVICES_ACTIVE_CONFIG=/tmp/nginx/active-config
|
||||||
|
ENV SYSTEM_SERVICES_ACTIVE_CONFIG_FILE=${SYSTEM_SERVICES_ACTIVE_CONFIG}/nginx.conf
|
||||||
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
|
ENV SYSTEM_SERVICES_PHP_FOLDER=${SYSTEM_SERVICES_CONFIG}/php
|
||||||
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
|
ENV SYSTEM_SERVICES_PHP_FPM_D=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.d
|
||||||
ENV SYSTEM_SERVICES_CROND=${SYSTEM_SERVICES_CONFIG}/crond
|
ENV SYSTEM_SERVICES_RUN=/tmp/run
|
||||||
ENV SYSTEM_SERVICES_RUN=${SYSTEM_SERVICES}/run
|
|
||||||
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
|
ENV SYSTEM_SERVICES_RUN_TMP=${SYSTEM_SERVICES_RUN}/tmp
|
||||||
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
|
ENV SYSTEM_SERVICES_RUN_LOG=${SYSTEM_SERVICES_RUN}/logs
|
||||||
ENV PHP_FPM_CONFIG_FILE=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.conf
|
ENV PHP_FPM_CONFIG_FILE=${SYSTEM_SERVICES_PHP_FOLDER}/php-fpm.conf
|
||||||
|
|
||||||
#Python environment
|
ENV READ_ONLY_FOLDERS="${NETALERTX_BACK} ${NETALERTX_FRONT} ${NETALERTX_SERVER} ${SYSTEM_SERVICES} \
|
||||||
ENV PYTHONPATH=${NETALERTX_SERVER}
|
${SYSTEM_SERVICES_CONFIG} ${ENTRYPOINT_CHECKS}"
|
||||||
ENV PYTHONUNBUFFERED=1
|
ENV READ_WRITE_FOLDERS="${NETALERTX_DATA} ${NETALERTX_CONFIG} ${NETALERTX_DB} ${NETALERTX_API} \
|
||||||
|
${NETALERTX_LOG} ${NETALERTX_PLUGINS_LOG} ${SYSTEM_SERVICES_RUN} \
|
||||||
|
${SYSTEM_SERVICES_RUN_TMP} ${SYSTEM_SERVICES_RUN_LOG} \
|
||||||
|
${SYSTEM_SERVICES_ACTIVE_CONFIG}"
|
||||||
|
|
||||||
|
ENV PYTHONUNBUFFERED=1
|
||||||
ENV VIRTUAL_ENV=/opt/venv
|
ENV VIRTUAL_ENV=/opt/venv
|
||||||
ENV VIRTUAL_ENV_BIN=/opt/venv/bin
|
ENV VIRTUAL_ENV_BIN=/opt/venv/bin
|
||||||
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}:/services"
|
ENV PYTHONPATH=${NETALERTX_APP}:${NETALERTX_SERVER}:${NETALERTX_PLUGINS}:${VIRTUAL_ENV}/lib/python3.11/site-packages
|
||||||
ENV VENDORSPATH=/app/back/ieee-oui.txt
|
ENV PATH="${SYSTEM_SERVICES}:${VIRTUAL_ENV_BIN}:$PATH"
|
||||||
ENV VENDORSPATH_NEWEST=/services/run/tmp/ieee-oui.txt
|
|
||||||
|
|
||||||
|
|
||||||
# App Environment
|
|
||||||
ENV LISTEN_ADDR=0.0.0.0
|
ENV LISTEN_ADDR=0.0.0.0
|
||||||
ENV PORT=20211
|
ENV PORT=20211
|
||||||
ENV NETALERTX_DEBUG=0
|
ENV NETALERTX_DEBUG=0
|
||||||
|
ENV VENDORSPATH=/app/back/ieee-oui.txt
|
||||||
#Container environment
|
ENV VENDORSPATH_NEWEST=${SYSTEM_SERVICES_RUN_TMP}/ieee-oui.txt
|
||||||
ENV ENVIRONMENT=debian
|
ENV ENVIRONMENT=debian
|
||||||
ENV USER=netalertx
|
ENV READ_ONLY_USER=readonly READ_ONLY_GROUP=readonly
|
||||||
ENV USER_ID=1000
|
ENV NETALERTX_USER=netalertx NETALERTX_GROUP=netalertx
|
||||||
ENV USER_GID=1000
|
ENV LANG=C.UTF-8
|
||||||
|
|
||||||
# Todo, figure out why using a workdir instead of full paths don't work
|
# Install dependencies
|
||||||
# Todo, do we still need all these packages? I can already see sudo which isn't needed
|
# Using sury.org for PHP 8.3 to match Alpine version
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
tini \
|
||||||
# create pi user and group
|
snmp \
|
||||||
# add root and www-data to pi group so they can r/w files and db
|
|
||||||
RUN groupadd --gid "${USER_GID}" "${USER}" && \
|
|
||||||
useradd \
|
|
||||||
--uid ${USER_ID} \
|
|
||||||
--gid ${USER_GID} \
|
|
||||||
--create-home \
|
|
||||||
--shell /bin/bash \
|
|
||||||
${USER} && \
|
|
||||||
usermod -a -G ${USER_GID} root && \
|
|
||||||
usermod -a -G ${USER_GID} www-data
|
|
||||||
|
|
||||||
COPY --chmod=775 --chown=${USER_ID}:${USER_GID} install/production-filesystem/ /
|
|
||||||
COPY --chmod=775 --chown=${USER_ID}:${USER_GID} . ${INSTALL_DIR}/
|
|
||||||
|
|
||||||
|
|
||||||
# ❗ IMPORTANT - if you modify this file modify the /install/install_dependecies.debian.sh file as well ❗
|
|
||||||
RUN apt update && apt-get install -y \
|
|
||||||
tini snmp ca-certificates curl libwww-perl arp-scan sudo gettext-base \
|
|
||||||
nginx-light php php-cgi php-fpm php-sqlite3 php-curl sqlite3 dnsutils net-tools \
|
|
||||||
python3 python3-dev iproute2 nmap python3-pip zip git systemctl usbutils traceroute nbtscan openrc \
|
|
||||||
busybox nginx nginx-core mtr python3-venv
|
|
||||||
|
|
||||||
# While php8.3 is in debian bookworm repos, php-fpm is not included so we need to add sury.org repo
|
|
||||||
# (Ondřej Surý maintains php packages for debian. This is temp until debian includes php-fpm in their
|
|
||||||
# repos. Likely it will be in Debian Trixie.). This keeps the image up-to-date with the alpine version.
|
|
||||||
RUN apt-get install -y --no-install-recommends \
|
|
||||||
apt-transport-https \
|
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
libwww-perl \
|
||||||
|
arp-scan \
|
||||||
|
sudo \
|
||||||
|
gettext-base \
|
||||||
|
nginx-light \
|
||||||
|
sqlite3 \
|
||||||
|
dnsutils \
|
||||||
|
net-tools \
|
||||||
|
python3 \
|
||||||
|
iproute2 \
|
||||||
|
nmap \
|
||||||
|
fping \
|
||||||
|
zip \
|
||||||
|
git \
|
||||||
|
usbutils \
|
||||||
|
traceroute \
|
||||||
|
nbtscan \
|
||||||
lsb-release \
|
lsb-release \
|
||||||
wget && \
|
wget \
|
||||||
wget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg && \
|
apt-transport-https \
|
||||||
echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list && \
|
gnupg2 \
|
||||||
apt-get update && \
|
mtr \
|
||||||
apt-get install -y php8.3-fpm php8.3-cli php8.3-sqlite3 php8.3-common php8.3-curl php8.3-cgi && \
|
procps \
|
||||||
ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 # make it compatible with alpine version
|
gosu \
|
||||||
|
jq \
|
||||||
|
ipcalc \
|
||||||
|
&& wget -qO /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg \
|
||||||
|
&& echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list \
|
||||||
|
&& apt-get update \
|
||||||
|
&& apt-get install -y --no-install-recommends \
|
||||||
|
php8.3-fpm \
|
||||||
|
php8.3-cli \
|
||||||
|
php8.3-sqlite3 \
|
||||||
|
php8.3-common \
|
||||||
|
php8.3-curl \
|
||||||
|
&& ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm \
|
||||||
|
&& ln -s /usr/sbin/php-fpm8.3 /usr/sbin/php-fpm83 \
|
||||||
|
&& ln -s /usr/sbin/gosu /usr/sbin/su-exec \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Setup virtual python environment and use pip3 to install packages
|
# Fix permissions for /tmp BEFORE copying anything that might overwrite it with bad perms
|
||||||
RUN python3 -m venv ${VIRTUAL_ENV} && \
|
RUN chmod 1777 /tmp
|
||||||
/bin/bash -c "source ${VIRTUAL_ENV_BIN}/activate && update-alternatives --install /usr/bin/python python /usr/bin/python3 10 && pip3 install -r ${INSTALL_DIR}/requirements.txt"
|
|
||||||
|
|
||||||
# Configure php-fpm
|
# User setup
|
||||||
RUN chmod -R 755 /services && \
|
RUN groupadd -g ${NETALERTX_GID} ${NETALERTX_GROUP} && \
|
||||||
chown -R ${USER}:${USER_GID} /services && \
|
useradd -u ${NETALERTX_UID} -g ${NETALERTX_GID} -d ${NETALERTX_APP} -s /bin/bash ${NETALERTX_USER}
|
||||||
sed -i 's/^;listen.mode = .*/listen.mode = 0666/' ${SYSTEM_SERVICES_PHP_FPM_D}/www.conf && \
|
|
||||||
printf "user = %s\ngroup = %s\n" "${USER}" "${USER_GID}" >> /services/config/php/php-fpm.d/www.conf
|
|
||||||
|
|
||||||
|
# Copy filesystem (excluding tmp if possible, or we just fix it after)
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} install/production-filesystem/ /
|
||||||
|
# Re-apply sticky bit to /tmp in case COPY overwrote it
|
||||||
|
RUN chmod 1777 /tmp
|
||||||
|
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 back ${NETALERTX_BACK}
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 front ${NETALERTX_FRONT}
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} --chmod=755 server ${NETALERTX_SERVER}
|
||||||
|
|
||||||
# Create a buildtimestamp.txt to later check if a new version was released
|
# Create required folders
|
||||||
RUN date +%s > ${INSTALL_DIR}/front/buildtimestamp.txt
|
RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \
|
||||||
USER netalertx:netalertx
|
chmod 750 /entrypoint.sh /root-entrypoint.sh
|
||||||
ENTRYPOINT ["/bin/bash","/entrypoint.sh"]
|
|
||||||
|
|
||||||
|
# Copy Version
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION
|
||||||
|
COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION_PREV
|
||||||
|
|
||||||
|
# Copy venv from builder
|
||||||
|
COPY --from=builder --chown=${READONLY_UID}:${READONLY_GID} ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
||||||
|
|
||||||
|
# Init process
|
||||||
|
RUN for vfile in .VERSION .VERSION_PREV; do \
|
||||||
|
if [ ! -f "${NETALERTX_APP}/${vfile}" ]; then \
|
||||||
|
echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/${vfile}"; \
|
||||||
|
fi; \
|
||||||
|
chown ${READONLY_UID}:${READONLY_GID} "${NETALERTX_APP}/${vfile}"; \
|
||||||
|
done && \
|
||||||
|
# Set capabilities for raw socket access
|
||||||
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \
|
||||||
|
setcap cap_net_raw,cap_net_admin+eip /usr/sbin/arp-scan && \
|
||||||
|
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && \
|
||||||
|
setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute.db && \
|
||||||
|
# Note: python path needs to be dynamic or verificed
|
||||||
|
# setcap cap_net_raw,cap_net_admin+eip $(readlink -f ${VIRTUAL_ENV_BIN}/python) && \
|
||||||
|
/bin/bash /build/init-nginx.sh && \
|
||||||
|
/bin/bash /build/init-php-fpm.sh && \
|
||||||
|
# /bin/bash /build/init-cron.sh && \
|
||||||
|
# Debian cron init might differ, skipping for now or need to check init-cron.sh content
|
||||||
|
# Checking init-backend.sh
|
||||||
|
/bin/bash /build/init-backend.sh && \
|
||||||
|
rm -rf /build && \
|
||||||
|
date +%s > "${NETALERTX_FRONT}/buildtimestamp.txt"
|
||||||
|
|
||||||
|
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
|
||||||
|
|
||||||
|
# Stage 3: Hardened
|
||||||
|
FROM runner AS hardened
|
||||||
|
|
||||||
|
ARG NETALERTX_UID=20211
|
||||||
|
ARG NETALERTX_GID=20211
|
||||||
|
ARG READONLY_UID=20212
|
||||||
|
ARG READONLY_GID=20212
|
||||||
|
ENV READ_ONLY_USER=readonly READ_ONLY_GROUP=readonly
|
||||||
|
|
||||||
|
# Create readonly user
|
||||||
|
RUN groupadd -g ${READONLY_GID} ${READ_ONLY_GROUP} && \
|
||||||
|
useradd -u ${READONLY_UID} -g ${READONLY_GID} -d /app -s /usr/sbin/nologin ${READ_ONLY_USER}
|
||||||
|
|
||||||
|
# Hardening: Remove package managers and set permissions
|
||||||
|
RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \
|
||||||
|
chmod -R 004 ${READ_ONLY_FOLDERS} && \
|
||||||
|
find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \
|
||||||
|
install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \
|
||||||
|
chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /root-entrypoint.sh /app /opt /opt/venv && \
|
||||||
|
# Permissions
|
||||||
|
chmod 005 /entrypoint.sh /root-entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \
|
||||||
|
# Cleanups
|
||||||
|
rm -f \
|
||||||
|
"${NETALERTX_CONFIG}/app.conf" \
|
||||||
|
"${NETALERTX_DB_FILE}" \
|
||||||
|
"${NETALERTX_DB_FILE}-shm" \
|
||||||
|
"${NETALERTX_DB_FILE}-wal" || true && \
|
||||||
|
# Remove apt and sensitive files
|
||||||
|
rm -rf /var/lib/apt /var/lib/dpkg /var/cache/apt /usr/bin/apt* /usr/bin/dpkg* \
|
||||||
|
/etc/shadow /etc/gshadow /etc/sudoers /root /home/root && \
|
||||||
|
# Dummy sudo
|
||||||
|
printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
||||||
|
|
||||||
|
USER 0
|
||||||
|
ENTRYPOINT ["/root-entrypoint.sh"]
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
|
||||||
|
CMD /services/healthcheck.sh
|
||||||
|
|||||||
220
README.md
220
README.md
@@ -1,70 +1,12 @@
|
|||||||
[](https://hub.docker.com/r/jokobsk/netalertx)
|
[](https://hub.docker.com/r/jokobsk/netalertx)
|
||||||
[](https://hub.docker.com/r/jokobsk/netalertx)
|
[](https://hub.docker.com/r/jokobsk/netalertx)
|
||||||
[](https://github.com/jokob-sk/NetAlertX/releases)
|
[](https://github.com/netalertx/NetAlertX/releases)
|
||||||
[](https://discord.gg/NczTUTWyRr)
|
[](https://discord.gg/NczTUTWyRr)
|
||||||
[](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
[](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
||||||
|
|
||||||
# NetAlertX - Network, presence scanner and alert framework
|
# NetAlertX - Network Visibility & Asset Intelligence Framework
|
||||||
|
|
||||||
Get visibility of what's going on on your WIFI/LAN network and enable presence detection of important devices. Schedule scans for devices, port changes and get alerts if unknown devices or changes are found. Write your own [Plugin](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md#readme) with auto-generated UI and in-build notification system. Build out and easily maintain your network source of truth (NSoT).
|
![main][main]
|
||||||
|
|
||||||
## 📋 Table of Contents
|
|
||||||
|
|
||||||
- [NetAlertX - Network, presence scanner and alert framework](#netalertx---network-presence-scanner-and-alert-framework)
|
|
||||||
- [📋 Table of Contents](#-table-of-contents)
|
|
||||||
- [🚀 Quick Start](#-quick-start)
|
|
||||||
- [📦 Features](#-features)
|
|
||||||
- [Scanners](#scanners)
|
|
||||||
- [Notification gateways](#notification-gateways)
|
|
||||||
- [Integrations and Plugins](#integrations-and-plugins)
|
|
||||||
- [Workflows](#workflows)
|
|
||||||
- [📚 Documentation](#-documentation)
|
|
||||||
- [🔐 Security \& Privacy](#-security--privacy)
|
|
||||||
- [❓ FAQ](#-faq)
|
|
||||||
- [🐞 Known Issues](#-known-issues)
|
|
||||||
- [📃 Everything else](#-everything-else)
|
|
||||||
- [📧 Get notified what's new](#-get-notified-whats-new)
|
|
||||||
- [🔀 Other Alternative Apps](#-other-alternative-apps)
|
|
||||||
- [💙 Donations](#-donations)
|
|
||||||
- [🏗 Contributors](#-contributors)
|
|
||||||
- [🌍 Translations](#-translations)
|
|
||||||
- [License](#license)
|
|
||||||
|
|
||||||
|
|
||||||
## 🚀 Quick Start
|
|
||||||
|
|
||||||
Start NetAlertX in seconds with Docker:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker run -d --rm --network=host \
|
|
||||||
-v local_path/config:/app/config \
|
|
||||||
-v local_path/db:/app/db \
|
|
||||||
--mount type=tmpfs,target=/app/api \
|
|
||||||
-e PUID=200 -e PGID=300 \
|
|
||||||
-e TZ=Europe/Berlin \
|
|
||||||
-e PORT=20211 \
|
|
||||||
ghcr.io/jokob-sk/netalertx:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
To deploy a containerized instance directly from the source repository, execute the following BASH sequence:
|
|
||||||
```bash
|
|
||||||
git clone https://github.com/jokob-sk/NetAlertX.git
|
|
||||||
cd NetAlertX
|
|
||||||
docker compose up --force-recreate --build
|
|
||||||
# To customize: edit docker-compose.yaml and run that last command again
|
|
||||||
```
|
|
||||||
|
|
||||||
Need help configuring it? Check the [usage guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/README.md) or [full documentation](https://jokob-sk.github.io/NetAlertX/).
|
|
||||||
|
|
||||||
For Home Assistant users: [Click here to add NetAlertX](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
|
||||||
|
|
||||||
For other install methods, check the [installation docs](#-documentation)
|
|
||||||
|
|
||||||
|
|
||||||
| [📑 Docker guide](https://github.com/jokob-sk/NetAlertX/blob/main/dockerfiles/README.md) | [🚀 Releases](https://github.com/jokob-sk/NetAlertX/releases) | [📚 Docs](https://jokob-sk.github.io/NetAlertX/) | [🔌 Plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS.md) | [🤖 Ask AI](https://gurubase.io/g/netalertx)
|
|
||||||
|----------------------| ----------------------| ----------------------| ----------------------| ----------------------|
|
|
||||||
|
|
||||||
![showcase][showcase]
|
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>📷 Click for more screenshots</summary>
|
<summary>📷 Click for more screenshots</summary>
|
||||||
@@ -78,108 +20,169 @@ For other install methods, check the [installation docs](#-documentation)
|
|||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
## 📦 Features
|
|
||||||
|
|
||||||
### Scanners
|
Centralized network visibility and continuous asset discovery.
|
||||||
|
|
||||||
The app scans your network for **New devices**, **New connections** (re-connections), **Disconnections**, **"Always Connected" devices down**, Devices **IP changes** and **Internet IP address changes**. Discovery & scan methods include: **arp-scan**, **Pi-hole - DB import**, **Pi-hole - DHCP leases import**, **Generic DHCP leases import**, **UNIFI controller import**, **SNMP-enabled router import**. Check the [Plugins](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md#readme) docs for a full list of avaliable plugins.
|
Monitor devices, detect change, and stay aware across distributed networks.
|
||||||
|
|
||||||
|
NetAlertX provides a centralized "Source of Truth" (NSoT) for network infrastructure. Maintain a real-time inventory of every connected device, identify Shadow IT and unauthorized hardware to maintain regulatory compliance, and automate compliance workflows across distributed sites.
|
||||||
|
|
||||||
|
NetAlertX is designed to bridge the gap between simple network scanning and complex SIEM tools, providing actionable insights without the overhead.
|
||||||
|
|
||||||
|
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
|
- [Quick Start](#quick-start)
|
||||||
|
- [Features](#features)
|
||||||
|
- [Documentation](#documentation)
|
||||||
|
- [Security \& Privacy](#security--privacy)
|
||||||
|
- [FAQ](#faq)
|
||||||
|
- [Troubleshooting Tips](#troubleshooting-tips)
|
||||||
|
- [Everything else](#everything-else)
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> ⚠️ **Important:** The docker-compose has recently changed. Carefully read the [Migration guide](https://docs.netalertx.com/MIGRATION/?h=migrat#12-migration-from-netalertx-v25524) for detailed instructions.
|
||||||
|
|
||||||
|
Start NetAlertX in seconds with Docker:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -d \
|
||||||
|
--network=host \
|
||||||
|
--restart unless-stopped \
|
||||||
|
-v /local_data_dir:/data \
|
||||||
|
-v /etc/localtime:/etc/localtime:ro \
|
||||||
|
--tmpfs /tmp:uid=20211,gid=20211,mode=1700 \
|
||||||
|
-e PORT=20211 \
|
||||||
|
-e APP_CONF_OVERRIDE='{"GRAPHQL_PORT":"20214"}' \
|
||||||
|
ghcr.io/netalertx/netalertx:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
Note: Your `/local_data_dir` should contain a `config` and `db` folder.
|
||||||
|
|
||||||
|
To deploy a containerized instance directly from the source repository, execute the following BASH sequence:
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/netalertx/NetAlertX.git
|
||||||
|
cd NetAlertX
|
||||||
|
docker compose up --force-recreate --build
|
||||||
|
# To customize: edit docker-compose.yaml and run that last command again
|
||||||
|
```
|
||||||
|
|
||||||
|
Need help configuring it? Check the [usage guide](https://docs.netalertx.com/README) or [full documentation](https://docs.netalertx.com/).
|
||||||
|
|
||||||
|
For Home Assistant users: [Click here to add NetAlertX](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
|
||||||
|
|
||||||
|
For other install methods, check the [installation docs](#documentation)
|
||||||
|
|
||||||
|
---
|
||||||
|
### || [Docker guide](https://docs.netalertx.com/DOCKER_INSTALLATION) || [Releases](https://github.com/netalertx/NetAlertX/releases) || [Docs](https://docs.netalertx.com/) || [Plugins](https://docs.netalertx.com/PLUGINS) || [Website](https://netalertx.com)
|
||||||
|
---
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
### Discovery & Asset Intelligence
|
||||||
|
|
||||||
|
Continuous monitoring for unauthorized asset discovery, connection state changes, and IP address management (IPAM) drift. Discovery & scan methods include: **arp-scan**, **Pi-hole - DB import**, **Pi-hole - DHCP leases import**, **Generic DHCP leases import**, **UNIFI controller import**, **SNMP-enabled router import**. Check the [Plugins](https://docs.netalertx.com/PLUGINS#readme) docs for a full list of avaliable plugins.
|
||||||
|
|
||||||
### Notification gateways
|
### Notification gateways
|
||||||
|
|
||||||
Send notifications to more than 80+ services, including Telegram via [Apprise](https://hub.docker.com/r/caronc/apprise), or use native [Pushsafer](https://www.pushsafer.com/), [Pushover](https://www.pushover.net/), or [NTFY](https://ntfy.sh/) publishers.
|
Send notifications to more than 80+ services, including Telegram via [Apprise](https://hub.docker.com/r/caronc/apprise), or use native [Pushsafer](https://www.pushsafer.com/), [Pushover](https://www.pushover.net/), or [NTFY](https://ntfy.sh/) publishers.
|
||||||
|
|
||||||
### Integrations and Plugins
|
### Integrations and Plugins
|
||||||
|
|
||||||
Feed your data and device changes into [Home Assistant](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HOME_ASSISTANT.md), read [API endpoints](https://github.com/jokob-sk/NetAlertX/blob/main/docs/API.md), or use [Webhooks](https://github.com/jokob-sk/NetAlertX/blob/main/docs/WEBHOOK_N8N.md) to setup custom automation flows. You can also
|
Feed your data and device changes into [Home Assistant](https://docs.netalertx.com/HOME_ASSISTANT), read [API endpoints](https://docs.netalertx.com/API), or use [Webhooks](https://docs.netalertx.com/WEBHOOK_N8N) to setup custom automation flows. You can also
|
||||||
build your own scanners with the [Plugin system](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md#readme) in as little as [15 minutes](https://www.youtube.com/watch?v=cdbxlwiWhv8).
|
build your own scanners with the [Plugin system](https://docs.netalertx.com/PLUGINS#readme) in as little as [15 minutes](https://www.youtube.com/watch?v=cdbxlwiWhv8).
|
||||||
|
|
||||||
### Workflows
|
### Workflows
|
||||||
|
|
||||||
The [workflows module](https://github.com/jokob-sk/NetAlertX/blob/main/docs/WORKFLOWS.md) allows to automate repetitive tasks, making network management more efficient. Whether you need to assign newly discovered devices to a specific Network Node, auto-group devices from a given vendor, unarchive a device if detected online, or automatically delete devices, this module provides the flexibility to tailor the automations to your needs.
|
The [workflows module](https://docs.netalertx.com/WORKFLOWS) automates IT governance by enforcing device categorization and cleanup policies. Whether you need to assign newly discovered devices to a specific Network Node, auto-group devices from a given vendor, unarchive a device if detected online, or automatically delete devices, this module provides the flexibility to tailor the automations to your needs.
|
||||||
|
|
||||||
|
|
||||||
## 📚 Documentation
|
## Documentation
|
||||||
<!--- --------------------------------------------------------------------- --->
|
<!--- --------------------------------------------------------------------- --->
|
||||||
|
|
||||||
|
Explore all the [documentation here](https://docs.netalertx.com/) or navigate to a specific installation option below.
|
||||||
|
|
||||||
Supported browsers: Chrome, Firefox
|
Supported browsers: Chrome, Firefox
|
||||||
|
|
||||||
- [[Installation] Docker](https://github.com/jokob-sk/NetAlertX/blob/main/dockerfiles/README.md)
|
- [[Installation] Docker](https://docs.netalertx.com/DOCKER_INSTALLATION)
|
||||||
- [[Installation] Home Assistant](https://github.com/alexbelgium/hassio-addons/tree/master/netalertx)
|
- [[Installation] Home Assistant](https://github.com/alexbelgium/hassio-addons/tree/master/netalertx)
|
||||||
- [[Installation] Bare metal](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HW_INSTALL.md)
|
- [[Installation] Bare metal](https://docs.netalertx.com/HW_INSTALL)
|
||||||
- [[Installation] Unraid App](https://unraid.net/community/apps)
|
- [[Installation] Unraid App](https://unraid.net/community/apps)
|
||||||
- [[Setup] Usage and Configuration](https://github.com/jokob-sk/NetAlertX/blob/main/docs/README.md)
|
- [[Setup] Usage and Configuration](https://docs.netalertx.com/README)
|
||||||
- [[Development] API docs](https://github.com/jokob-sk/NetAlertX/blob/main/docs/API.md)
|
- [[Development] API docs](https://docs.netalertx.com/API)
|
||||||
- [[Development] Custom Plugins](https://github.com/jokob-sk/NetAlertX/blob/main/docs/PLUGINS_DEV.md)
|
- [[Development] Custom Plugins](https://docs.netalertx.com/PLUGINS_DEV)
|
||||||
|
|
||||||
...or explore all the [documentation here](https://jokob-sk.github.io/NetAlertX/).
|
## Security & Privacy
|
||||||
|
|
||||||
## 🔐 Security & Privacy
|
|
||||||
|
|
||||||
NetAlertX scans your local network and can store metadata about connected devices. By default, all data is stored **locally**. No information is sent to external services unless you explicitly configure notifications or integrations.
|
NetAlertX scans your local network and can store metadata about connected devices. By default, all data is stored **locally**. No information is sent to external services unless you explicitly configure notifications or integrations.
|
||||||
|
|
||||||
To further secure your installation:
|
Compliance & Hardening:
|
||||||
- Run it behind a reverse proxy with authentication
|
- Run it behind a reverse proxy with authentication
|
||||||
- Use firewalls to restrict access to the web UI
|
- Use firewalls to restrict access to the web UI
|
||||||
- Regularly update to the latest version for security patches
|
- Regularly update to the latest version for security patches
|
||||||
|
- Role-Based Access Control (RBAC) via Reverse Proxy: Integrate with your existing SSO/Identity provider for secure dashboard access.
|
||||||
|
|
||||||
See [Security Best Practices](https://github.com/jokob-sk/NetAlertX/security) for more details.
|
See [Security Best Practices](https://github.com/netalertx/NetAlertX/security) for more details.
|
||||||
|
|
||||||
|
|
||||||
## ❓ FAQ
|
## FAQ
|
||||||
|
|
||||||
**Q: Why don’t I see any devices?**
|
**Q: How do I monitor VLANs or remote subnets?**
|
||||||
A: Ensure the container has proper network access (e.g., use `--network host` on Linux). Also check that your scan method is properly configured in the UI.
|
A: Ensure the container has proper network access (e.g., use `--network host` on Linux). Also check that your scan method is properly configured in the UI.
|
||||||
|
|
||||||
**Q: Does this work on Wi-Fi-only devices like Raspberry Pi?**
|
**Q: What is the recommended deployment for high-availability?**
|
||||||
A: Yes, but some scanners (e.g. ARP) work best on Ethernet. For Wi-Fi, try SNMP, DHCP, or Pi-hole import.
|
A: We recommend deploying via Docker with persistent volume mounts for database integrity and running behind a reverse proxy for secure access.
|
||||||
|
|
||||||
**Q: Will this send any data to the internet?**
|
**Q: Will this send any data to the internet?**
|
||||||
A: No. All scans and data remain local, unless you set up cloud-based notifications.
|
A: No. All scans and data remain local, unless you set up cloud-based notifications.
|
||||||
|
|
||||||
**Q: Can I use this without Docker?**
|
**Q: Can I use this without Docker?**
|
||||||
A: Yes! You can install it bare-metal. See the [bare metal installation guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/HW_INSTALL.md).
|
A: You can install the application directly on your own hardware by following the [bare metal installation guide](https://docs.netalertx.com/HW_INSTALL).
|
||||||
|
|
||||||
**Q: Where is the data stored?**
|
**Q: Where is the data stored?**
|
||||||
A: In the `/config` and `/db` folders, mapped in Docker. Back up these folders regularly.
|
A: In the `/data/config` and `/data/db` folders. Back up these folders regularly.
|
||||||
|
|
||||||
|
|
||||||
## 🐞 Known Issues
|
## Troubleshooting Tips
|
||||||
|
|
||||||
- Some scanners (e.g. ARP) may not detect devices on different subnets. See the [Remote networks guide](https://github.com/jokob-sk/NetAlertX/blob/main/docs/REMOTE_NETWORKS.md) for workarounds.
|
- Some scanners (e.g. ARP) may not detect devices on different subnets. See the [Remote networks guide](https://docs.netalertx.com/REMOTE_NETWORKS) for workarounds.
|
||||||
- Wi-Fi-only networks may require alternate scanners for accurate detection.
|
- Wi-Fi-only networks may require alternate scanners for accurate detection.
|
||||||
- Notification throttling may be needed for large networks to prevent spam.
|
- Notification throttling may be needed for large networks to prevent spam.
|
||||||
- On some systems, elevated permissions (like `CAP_NET_RAW`) may be needed for low-level scanning.
|
- On some systems, elevated permissions (like `CAP_NET_RAW`) may be needed for low-level scanning.
|
||||||
|
|
||||||
Check the [GitHub Issues](https://github.com/jokob-sk/NetAlertX/issues) for the latest bug reports and solutions and consult [the official documentation](https://jokob-sk.github.io/NetAlertX/).
|
Check the [GitHub Issues](https://github.com/netalertx/NetAlertX/issues) for the latest bug reports and solutions and consult [the official documentation](https://docs.netalertx.com/).
|
||||||
|
|
||||||
## 📃 Everything else
|
## Everything else
|
||||||
<!--- --------------------------------------------------------------------- --->
|
<!--- --------------------------------------------------------------------- --->
|
||||||
|
|
||||||
|
<a href="https://trendshift.io/repositories/12670" target="_blank"><img src="https://trendshift.io/api/badge/repositories/12670" alt="jokob-sk%2FNetAlertX | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||||
|
|
||||||
### 📧 Get notified what's new
|
### 📧 Get notified what's new
|
||||||
|
|
||||||
Get notified about a new release, what new functionality you can use and about breaking changes.
|
Get notified about a new release, what new functionality you can use and about breaking changes.
|
||||||
|
|
||||||
![Follow and star][follow_star]
|
![Follow and star][follow_star]
|
||||||
|
|
||||||
### 🔀 Other Alternative Apps
|
### 🔀 Other Alternative Apps
|
||||||
|
|
||||||
- [PiAlert by leiweibau](https://github.com/leiweibau/Pi.Alert/) (maintained, bare-metal install)
|
|
||||||
- [WatchYourLAN](https://github.com/aceberg/WatchYourLAN) - Lightweight network IP scanner with web GUI (Open source)
|
|
||||||
- [Fing](https://www.fing.com/) - Network scanner app for your Internet security (Commercial, Phone App, Proprietary hardware)
|
- [Fing](https://www.fing.com/) - Network scanner app for your Internet security (Commercial, Phone App, Proprietary hardware)
|
||||||
- [NetBox](https://netboxlabs.com/) - Network management software (Commercial)
|
- [NetBox](https://netboxlabs.com/) - The gold standard for Network Source of Truth (NSoT) and IPAM.
|
||||||
|
- [Zabbix](https://www.zabbix.com/) or [Nagios](https://www.nagios.org/) - Strong focus on infrastructure monitoring.
|
||||||
|
- [NetAlertX](https://netalertx.com) - The streamlined, discovery-focused choice for real-time asset intelligence and noise-free alerting.
|
||||||
|
|
||||||
### 💙 Donations
|
### 💙 Donations
|
||||||
|
|
||||||
Thank you to everyone who appreciates this tool and donates.
|
Thank you to everyone who appreciates this tool and donates.
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>Click for more ways to donate</summary>
|
<summary>Click for more ways to donate</summary>
|
||||||
|
|
||||||
<hr>
|
<hr>
|
||||||
|
|
||||||
| [](https://github.com/sponsors/jokob-sk) | [](https://www.buymeacoffee.com/jokobsk) | [](https://www.patreon.com/user?u=84385063) |
|
| [](https://github.com/sponsors/jokob-sk) | [](https://www.buymeacoffee.com/jokobsk) |
|
||||||
| --- | --- | --- |
|
| --- | --- |
|
||||||
|
|
||||||
- Bitcoin: `1N8tupjeCK12qRVU2XrV17WvKK7LCawyZM`
|
- Bitcoin: `1N8tupjeCK12qRVU2XrV17WvKK7LCawyZM`
|
||||||
- Ethereum: `0x6e2749Cb42F4411bc98501406BdcD82244e3f9C7`
|
- Ethereum: `0x6e2749Cb42F4411bc98501406BdcD82244e3f9C7`
|
||||||
|
|
||||||
@@ -189,11 +192,11 @@ Thank you to everyone who appreciates this tool and donates.
|
|||||||
|
|
||||||
### 🏗 Contributors
|
### 🏗 Contributors
|
||||||
|
|
||||||
This project would be nothing without the amazing work of the community, with special thanks to:
|
This project would be nothing without the amazing work of the community, with special thanks to:
|
||||||
|
|
||||||
> [pucherot/Pi.Alert](https://github.com/pucherot/Pi.Alert) (the original creator of PiAlert), [leiweibau](https://github.com/leiweibau/Pi.Alert): Dark mode (and much more), [Macleykun](https://github.com/Macleykun) (Help with Dockerfile clean-up), [vladaurosh](https://github.com/vladaurosh) for Alpine re-base help, [Final-Hawk](https://github.com/Final-Hawk) (Help with NTFY, styling and other fixes), [TeroRERO](https://github.com/terorero) (Spanish translations), [Data-Monkey](https://github.com/Data-Monkey), (Split-up of the python.py file and more), [cvc90](https://github.com/cvc90) (Spanish translation and various UI work) to name a few. Check out all the [amazing contributors](https://github.com/jokob-sk/NetAlertX/graphs/contributors).
|
> [pucherot/Pi.Alert](https://github.com/pucherot/Pi.Alert) (the original creator of PiAlert), [leiweibau](https://github.com/leiweibau/Pi.Alert): Dark mode (and much more), [Macleykun](https://github.com/Macleykun) (Help with Dockerfile clean-up), [vladaurosh](https://github.com/vladaurosh) for Alpine re-base help, [Final-Hawk](https://github.com/Final-Hawk) (Help with NTFY, styling and other fixes), [TeroRERO](https://github.com/terorero) (Spanish translations), [Data-Monkey](https://github.com/Data-Monkey), (Split-up of the python.py file and more), [cvc90](https://github.com/cvc90) (Spanish translation and various UI work) to name a few. Check out all the [amazing contributors](https://github.com/netalertx/NetAlertX/graphs/contributors).
|
||||||
|
|
||||||
### 🌍 Translations
|
### 🌍 Translations
|
||||||
|
|
||||||
Proudly using [Weblate](https://hosted.weblate.org/projects/pialert/). Help out and suggest languages in the [online portal of Weblate](https://hosted.weblate.org/projects/pialert/core/).
|
Proudly using [Weblate](https://hosted.weblate.org/projects/pialert/). Help out and suggest languages in the [online portal of Weblate](https://hosted.weblate.org/projects/pialert/core/).
|
||||||
|
|
||||||
@@ -204,6 +207,7 @@ Proudly using [Weblate](https://hosted.weblate.org/projects/pialert/). Help out
|
|||||||
### License
|
### License
|
||||||
> GPL 3.0 | [Read more here](LICENSE.txt) | Source of the [animated GIF (Loading Animation)](https://commons.wikimedia.org/wiki/File:Loading_Animation.gif) | Source of the [selfhosted Fonts](https://github.com/adobe-fonts/source-sans)
|
> GPL 3.0 | [Read more here](LICENSE.txt) | Source of the [animated GIF (Loading Animation)](https://commons.wikimedia.org/wiki/File:Loading_Animation.gif) | Source of the [selfhosted Fonts](https://github.com/adobe-fonts/source-sans)
|
||||||
|
|
||||||
|
_All product names, logos, and brands are property of their respective owners. All company, product and service names used in this website are for identification purposes only. Use of these names, logos, and brands does not imply endorsement._
|
||||||
|
|
||||||
<!--- --------------------------------------------------------------------- --->
|
<!--- --------------------------------------------------------------------- --->
|
||||||
[main]: ./docs/img/devices_split.png "Main screen"
|
[main]: ./docs/img/devices_split.png "Main screen"
|
||||||
@@ -217,7 +221,7 @@ Proudly using [Weblate](https://hosted.weblate.org/projects/pialert/). Help out
|
|||||||
[sync_hub]: ./docs/img/sync_hub.png "Screen 8"
|
[sync_hub]: ./docs/img/sync_hub.png "Screen 8"
|
||||||
[notification_center]: ./docs/img/notification_center.png "Screen 8"
|
[notification_center]: ./docs/img/notification_center.png "Screen 8"
|
||||||
[sent_reports_text]: ./docs/img/sent_reports_text.png "Screen 8"
|
[sent_reports_text]: ./docs/img/sent_reports_text.png "Screen 8"
|
||||||
[device_nmap]: ./docs/img/device_nmap.png "Screen 9"
|
[device_nmap]: ./docs/img/device_tools.png "Screen 9"
|
||||||
[report1]: ./docs/img/report_sample.png "Report sample 1"
|
[report1]: ./docs/img/report_sample.png "Report sample 1"
|
||||||
[main_dark]: /docs/img/1_devices_dark.jpg "Main screen dark"
|
[main_dark]: /docs/img/1_devices_dark.jpg "Main screen dark"
|
||||||
[maintain_dark]: /docs/img/5_maintain.jpg "Maintain screen dark"
|
[maintain_dark]: /docs/img/5_maintain.jpg "Maintain screen dark"
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
# Generated: 2022-12-30_22-19-40 #
|
# Generated: 2022-12-30_22-19-40 #
|
||||||
# #
|
# #
|
||||||
# Config file for the LAN intruder detection app: #
|
# Config file for the LAN intruder detection app: #
|
||||||
# https://github.com/jokob-sk/NetAlertX #
|
# https://github.com/netalertx/NetAlertX #
|
||||||
# #
|
# #
|
||||||
#-----------------AUTOGENERATED FILE-----------------#
|
#-----------------AUTOGENERATED FILE-----------------#
|
||||||
|
|
||||||
@@ -16,7 +16,7 @@
|
|||||||
#
|
#
|
||||||
# Scan multiple interfaces (eth1 and eth0):
|
# Scan multiple interfaces (eth1 and eth0):
|
||||||
# SCAN_SUBNETS = [ '192.168.1.0/24 --interface=eth1', '192.168.1.0/24 --interface=eth0' ]
|
# SCAN_SUBNETS = [ '192.168.1.0/24 --interface=eth1', '192.168.1.0/24 --interface=eth0' ]
|
||||||
|
BACKEND_API_URL='/server'
|
||||||
DISCOVER_PLUGINS=True
|
DISCOVER_PLUGINS=True
|
||||||
SCAN_SUBNETS=['--localnet']
|
SCAN_SUBNETS=['--localnet']
|
||||||
TIMEZONE='Europe/Berlin'
|
TIMEZONE='Europe/Berlin'
|
||||||
@@ -33,7 +33,7 @@ NSLOOKUP_RUN='before_name_updates'
|
|||||||
AVAHISCAN_RUN='before_name_updates'
|
AVAHISCAN_RUN='before_name_updates'
|
||||||
NBTSCAN_RUN='before_name_updates'
|
NBTSCAN_RUN='before_name_updates'
|
||||||
|
|
||||||
# Email
|
# Email
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
# (add SMTP to LOADED_PLUGINS to load)
|
# (add SMTP to LOADED_PLUGINS to load)
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
@@ -48,20 +48,19 @@ SMTP_PASS='password'
|
|||||||
SMTP_SKIP_TLS=False
|
SMTP_SKIP_TLS=False
|
||||||
|
|
||||||
|
|
||||||
# Webhook
|
# Webhook
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
# (add WEBHOOK to LOADED_PLUGINS to load)
|
# (add WEBHOOK to LOADED_PLUGINS to load)
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
WEBHOOK_RUN='disabled' # use 'on_notification' to enable
|
WEBHOOK_RUN='disabled' # use 'on_notification' to enable
|
||||||
WEBHOOK_URL='http://n8n.local:5555/webhook-test/aaaaaaaa-aaaa-aaaa-aaaaa-aaaaaaaaaaaa'
|
WEBHOOK_URL='http://n8n.local:5555/webhook-test/aaaaaaaa-aaaa-aaaa-aaaaa-aaaaaaaaaaaa'
|
||||||
WEBHOOK_PAYLOAD='json' # webhook payload data format for the "body > attachements > text" attribute
|
WEBHOOK_PAYLOAD='json' # webhook payload data format for the "body > attachements > text" attribute
|
||||||
# in https://github.com/jokob-sk/NetAlertX/blob/main/docs/webhook_json_sample.json
|
|
||||||
# supported values: 'json', 'html' or 'text'
|
# supported values: 'json', 'html' or 'text'
|
||||||
# e.g.: for discord use 'html'
|
# e.g.: for discord use 'html'
|
||||||
WEBHOOK_REQUEST_METHOD='GET'
|
WEBHOOK_REQUEST_METHOD='GET'
|
||||||
|
|
||||||
|
|
||||||
# Apprise
|
# Apprise
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
# (add APPRISE to LOADED_PLUGINS to load)
|
# (add APPRISE to LOADED_PLUGINS to load)
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
@@ -71,7 +70,7 @@ APPRISE_URL='mailto://smtp-relay.sendinblue.com:587?from=user@gmail.com&name=app
|
|||||||
|
|
||||||
|
|
||||||
# NTFY
|
# NTFY
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
# (add NTFY to LOADED_PLUGINS to load)
|
# (add NTFY to LOADED_PLUGINS to load)
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
NTFY_RUN='disabled' # use 'on_notification' to enable
|
NTFY_RUN='disabled' # use 'on_notification' to enable
|
||||||
@@ -81,7 +80,7 @@ NTFY_USER='user'
|
|||||||
NTFY_PASSWORD='passw0rd'
|
NTFY_PASSWORD='passw0rd'
|
||||||
|
|
||||||
|
|
||||||
# PUSHSAFER
|
# PUSHSAFER
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
# (add PUSHSAFER to LOADED_PLUGINS to load)
|
# (add PUSHSAFER to LOADED_PLUGINS to load)
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
@@ -89,7 +88,7 @@ PUSHSAFER_RUN='disabled' # use 'on_notification' to enable
|
|||||||
PUSHSAFER_TOKEN='ApiKey'
|
PUSHSAFER_TOKEN='ApiKey'
|
||||||
|
|
||||||
|
|
||||||
# MQTT
|
# MQTT
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
# (add MQTT to LOADED_PLUGINS to load)
|
# (add MQTT to LOADED_PLUGINS to load)
|
||||||
#-------------------------------------
|
#-------------------------------------
|
||||||
@@ -101,6 +100,8 @@ MQTT_PASSWORD='passw0rd'
|
|||||||
MQTT_QOS=0
|
MQTT_QOS=0
|
||||||
MQTT_DELAY_SEC=2
|
MQTT_DELAY_SEC=2
|
||||||
|
|
||||||
|
GRAPHQL_PORT=20212
|
||||||
|
|
||||||
|
|
||||||
#-------------------IMPORTANT INFO-------------------#
|
#-------------------IMPORTANT INFO-------------------#
|
||||||
# This file is ingested by a python script, so if #
|
# This file is ingested by a python script, so if #
|
||||||
|
|||||||
411
back/app.sql
411
back/app.sql
@@ -1,411 +0,0 @@
|
|||||||
CREATE TABLE sqlite_stat1(tbl,idx,stat);
|
|
||||||
CREATE TABLE Events (eve_MAC STRING (50) NOT NULL COLLATE NOCASE, eve_IP STRING (50) NOT NULL COLLATE NOCASE, eve_DateTime DATETIME NOT NULL, eve_EventType STRING (30) NOT NULL COLLATE NOCASE, eve_AdditionalInfo STRING (250) DEFAULT (''), eve_PendingAlertEmail BOOLEAN NOT NULL CHECK (eve_PendingAlertEmail IN (0, 1)) DEFAULT (1), eve_PairEventRowid INTEGER);
|
|
||||||
CREATE TABLE Sessions (ses_MAC STRING (50) COLLATE NOCASE, ses_IP STRING (50) COLLATE NOCASE, ses_EventTypeConnection STRING (30) COLLATE NOCASE, ses_DateTimeConnection DATETIME, ses_EventTypeDisconnection STRING (30) COLLATE NOCASE, ses_DateTimeDisconnection DATETIME, ses_StillConnected BOOLEAN, ses_AdditionalInfo STRING (250));
|
|
||||||
CREATE TABLE IF NOT EXISTS "Online_History" (
|
|
||||||
"Index" INTEGER,
|
|
||||||
"Scan_Date" TEXT,
|
|
||||||
"Online_Devices" INTEGER,
|
|
||||||
"Down_Devices" INTEGER,
|
|
||||||
"All_Devices" INTEGER,
|
|
||||||
"Archived_Devices" INTEGER,
|
|
||||||
"Offline_Devices" INTEGER,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE sqlite_sequence(name,seq);
|
|
||||||
CREATE TABLE Devices (
|
|
||||||
devMac STRING (50) PRIMARY KEY NOT NULL COLLATE NOCASE,
|
|
||||||
devName STRING (50) NOT NULL DEFAULT "(unknown)",
|
|
||||||
devOwner STRING (30) DEFAULT "(unknown)" NOT NULL,
|
|
||||||
devType STRING (30),
|
|
||||||
devVendor STRING (250),
|
|
||||||
devFavorite BOOLEAN CHECK (devFavorite IN (0, 1)) DEFAULT (0) NOT NULL,
|
|
||||||
devGroup STRING (10),
|
|
||||||
devComments TEXT,
|
|
||||||
devFirstConnection DATETIME NOT NULL,
|
|
||||||
devLastConnection DATETIME NOT NULL,
|
|
||||||
devLastIP STRING (50) NOT NULL COLLATE NOCASE,
|
|
||||||
devStaticIP BOOLEAN DEFAULT (0) NOT NULL CHECK (devStaticIP IN (0, 1)),
|
|
||||||
devScan INTEGER DEFAULT (1) NOT NULL,
|
|
||||||
devLogEvents BOOLEAN NOT NULL DEFAULT (1) CHECK (devLogEvents IN (0, 1)),
|
|
||||||
devAlertEvents BOOLEAN NOT NULL DEFAULT (1) CHECK (devAlertEvents IN (0, 1)),
|
|
||||||
devAlertDown BOOLEAN NOT NULL DEFAULT (0) CHECK (devAlertDown IN (0, 1)),
|
|
||||||
devSkipRepeated INTEGER DEFAULT 0 NOT NULL,
|
|
||||||
devLastNotification DATETIME,
|
|
||||||
devPresentLastScan BOOLEAN NOT NULL DEFAULT (0) CHECK (devPresentLastScan IN (0, 1)),
|
|
||||||
devIsNew BOOLEAN NOT NULL DEFAULT (1) CHECK (devIsNew IN (0, 1)),
|
|
||||||
devLocation STRING (250) COLLATE NOCASE,
|
|
||||||
devIsArchived BOOLEAN NOT NULL DEFAULT (0) CHECK (devIsArchived IN (0, 1)),
|
|
||||||
devParentMAC TEXT,
|
|
||||||
devParentPort INTEGER,
|
|
||||||
devIcon TEXT,
|
|
||||||
devGUID TEXT,
|
|
||||||
devSite TEXT,
|
|
||||||
devSSID TEXT,
|
|
||||||
devSyncHubNode TEXT,
|
|
||||||
devSourcePlugin TEXT
|
|
||||||
, "devCustomProps" TEXT);
|
|
||||||
CREATE TABLE IF NOT EXISTS "Settings" (
|
|
||||||
"setKey" TEXT,
|
|
||||||
"setName" TEXT,
|
|
||||||
"setDescription" TEXT,
|
|
||||||
"setType" TEXT,
|
|
||||||
"setOptions" TEXT,
|
|
||||||
"setGroup" TEXT,
|
|
||||||
"setValue" TEXT,
|
|
||||||
"setEvents" TEXT,
|
|
||||||
"setOverriddenByEnv" INTEGER
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS "Parameters" (
|
|
||||||
"par_ID" TEXT PRIMARY KEY,
|
|
||||||
"par_Value" TEXT
|
|
||||||
);
|
|
||||||
CREATE TABLE Plugins_Objects(
|
|
||||||
"Index" INTEGER,
|
|
||||||
Plugin TEXT NOT NULL,
|
|
||||||
Object_PrimaryID TEXT NOT NULL,
|
|
||||||
Object_SecondaryID TEXT NOT NULL,
|
|
||||||
DateTimeCreated TEXT NOT NULL,
|
|
||||||
DateTimeChanged TEXT NOT NULL,
|
|
||||||
Watched_Value1 TEXT NOT NULL,
|
|
||||||
Watched_Value2 TEXT NOT NULL,
|
|
||||||
Watched_Value3 TEXT NOT NULL,
|
|
||||||
Watched_Value4 TEXT NOT NULL,
|
|
||||||
Status TEXT NOT NULL,
|
|
||||||
Extra TEXT NOT NULL,
|
|
||||||
UserData TEXT NOT NULL,
|
|
||||||
ForeignKey TEXT NOT NULL,
|
|
||||||
SyncHubNodeName TEXT,
|
|
||||||
"HelpVal1" TEXT,
|
|
||||||
"HelpVal2" TEXT,
|
|
||||||
"HelpVal3" TEXT,
|
|
||||||
"HelpVal4" TEXT,
|
|
||||||
ObjectGUID TEXT,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE Plugins_Events(
|
|
||||||
"Index" INTEGER,
|
|
||||||
Plugin TEXT NOT NULL,
|
|
||||||
Object_PrimaryID TEXT NOT NULL,
|
|
||||||
Object_SecondaryID TEXT NOT NULL,
|
|
||||||
DateTimeCreated TEXT NOT NULL,
|
|
||||||
DateTimeChanged TEXT NOT NULL,
|
|
||||||
Watched_Value1 TEXT NOT NULL,
|
|
||||||
Watched_Value2 TEXT NOT NULL,
|
|
||||||
Watched_Value3 TEXT NOT NULL,
|
|
||||||
Watched_Value4 TEXT NOT NULL,
|
|
||||||
Status TEXT NOT NULL,
|
|
||||||
Extra TEXT NOT NULL,
|
|
||||||
UserData TEXT NOT NULL,
|
|
||||||
ForeignKey TEXT NOT NULL,
|
|
||||||
SyncHubNodeName TEXT,
|
|
||||||
"HelpVal1" TEXT,
|
|
||||||
"HelpVal2" TEXT,
|
|
||||||
"HelpVal3" TEXT,
|
|
||||||
"HelpVal4" TEXT, "ObjectGUID" TEXT,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE Plugins_History(
|
|
||||||
"Index" INTEGER,
|
|
||||||
Plugin TEXT NOT NULL,
|
|
||||||
Object_PrimaryID TEXT NOT NULL,
|
|
||||||
Object_SecondaryID TEXT NOT NULL,
|
|
||||||
DateTimeCreated TEXT NOT NULL,
|
|
||||||
DateTimeChanged TEXT NOT NULL,
|
|
||||||
Watched_Value1 TEXT NOT NULL,
|
|
||||||
Watched_Value2 TEXT NOT NULL,
|
|
||||||
Watched_Value3 TEXT NOT NULL,
|
|
||||||
Watched_Value4 TEXT NOT NULL,
|
|
||||||
Status TEXT NOT NULL,
|
|
||||||
Extra TEXT NOT NULL,
|
|
||||||
UserData TEXT NOT NULL,
|
|
||||||
ForeignKey TEXT NOT NULL,
|
|
||||||
SyncHubNodeName TEXT,
|
|
||||||
"HelpVal1" TEXT,
|
|
||||||
"HelpVal2" TEXT,
|
|
||||||
"HelpVal3" TEXT,
|
|
||||||
"HelpVal4" TEXT, "ObjectGUID" TEXT,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE Plugins_Language_Strings(
|
|
||||||
"Index" INTEGER,
|
|
||||||
Language_Code TEXT NOT NULL,
|
|
||||||
String_Key TEXT NOT NULL,
|
|
||||||
String_Value TEXT NOT NULL,
|
|
||||||
Extra TEXT NOT NULL,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE TABLE CurrentScan (
|
|
||||||
cur_MAC STRING(50) NOT NULL COLLATE NOCASE,
|
|
||||||
cur_IP STRING(50) NOT NULL COLLATE NOCASE,
|
|
||||||
cur_Vendor STRING(250),
|
|
||||||
cur_ScanMethod STRING(10),
|
|
||||||
cur_Name STRING(250),
|
|
||||||
cur_LastQuery STRING(250),
|
|
||||||
cur_DateTime STRING(250),
|
|
||||||
cur_SyncHubNodeName STRING(50),
|
|
||||||
cur_NetworkSite STRING(250),
|
|
||||||
cur_SSID STRING(250),
|
|
||||||
cur_NetworkNodeMAC STRING(250),
|
|
||||||
cur_PORT STRING(250),
|
|
||||||
cur_Type STRING(250),
|
|
||||||
UNIQUE(cur_MAC)
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS "AppEvents" (
|
|
||||||
"Index" INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
"GUID" TEXT UNIQUE,
|
|
||||||
"AppEventProcessed" BOOLEAN,
|
|
||||||
"DateTimeCreated" TEXT,
|
|
||||||
"ObjectType" TEXT,
|
|
||||||
"ObjectGUID" TEXT,
|
|
||||||
"ObjectPlugin" TEXT,
|
|
||||||
"ObjectPrimaryID" TEXT,
|
|
||||||
"ObjectSecondaryID" TEXT,
|
|
||||||
"ObjectForeignKey" TEXT,
|
|
||||||
"ObjectIndex" TEXT,
|
|
||||||
"ObjectIsNew" BOOLEAN,
|
|
||||||
"ObjectIsArchived" BOOLEAN,
|
|
||||||
"ObjectStatusColumn" TEXT,
|
|
||||||
"ObjectStatus" TEXT,
|
|
||||||
"AppEventType" TEXT,
|
|
||||||
"Helper1" TEXT,
|
|
||||||
"Helper2" TEXT,
|
|
||||||
"Helper3" TEXT,
|
|
||||||
"Extra" TEXT
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS "Notifications" (
|
|
||||||
"Index" INTEGER,
|
|
||||||
"GUID" TEXT UNIQUE,
|
|
||||||
"DateTimeCreated" TEXT,
|
|
||||||
"DateTimePushed" TEXT,
|
|
||||||
"Status" TEXT,
|
|
||||||
"JSON" TEXT,
|
|
||||||
"Text" TEXT,
|
|
||||||
"HTML" TEXT,
|
|
||||||
"PublishedVia" TEXT,
|
|
||||||
"Extra" TEXT,
|
|
||||||
PRIMARY KEY("Index" AUTOINCREMENT)
|
|
||||||
);
|
|
||||||
CREATE INDEX IDX_eve_DateTime ON Events (eve_DateTime);
|
|
||||||
CREATE INDEX IDX_eve_EventType ON Events (eve_EventType COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_eve_MAC ON Events (eve_MAC COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_eve_PairEventRowid ON Events (eve_PairEventRowid);
|
|
||||||
CREATE INDEX IDX_ses_EventTypeDisconnection ON Sessions (ses_EventTypeDisconnection COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_ses_EventTypeConnection ON Sessions (ses_EventTypeConnection COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_ses_DateTimeDisconnection ON Sessions (ses_DateTimeDisconnection);
|
|
||||||
CREATE INDEX IDX_ses_MAC ON Sessions (ses_MAC COLLATE NOCASE);
|
|
||||||
CREATE INDEX IDX_ses_DateTimeConnection ON Sessions (ses_DateTimeConnection);
|
|
||||||
CREATE INDEX IDX_dev_PresentLastScan ON Devices (devPresentLastScan);
|
|
||||||
CREATE INDEX IDX_dev_FirstConnection ON Devices (devFirstConnection);
|
|
||||||
CREATE INDEX IDX_dev_AlertDeviceDown ON Devices (devAlertDown);
|
|
||||||
CREATE INDEX IDX_dev_StaticIP ON Devices (devStaticIP);
|
|
||||||
CREATE INDEX IDX_dev_ScanCycle ON Devices (devScan);
|
|
||||||
CREATE INDEX IDX_dev_Favorite ON Devices (devFavorite);
|
|
||||||
CREATE INDEX IDX_dev_LastIP ON Devices (devLastIP);
|
|
||||||
CREATE INDEX IDX_dev_NewDevice ON Devices (devIsNew);
|
|
||||||
CREATE INDEX IDX_dev_Archived ON Devices (devIsArchived);
|
|
||||||
CREATE VIEW Events_Devices AS
|
|
||||||
SELECT *
|
|
||||||
FROM Events
|
|
||||||
LEFT JOIN Devices ON eve_MAC = devMac
|
|
||||||
/* Events_Devices(eve_MAC,eve_IP,eve_DateTime,eve_EventType,eve_AdditionalInfo,eve_PendingAlertEmail,eve_PairEventRowid,devMac,devName,devOwner,devType,devVendor,devFavorite,devGroup,devComments,devFirstConnection,devLastConnection,devLastIP,devStaticIP,devScan,devLogEvents,devAlertEvents,devAlertDown,devSkipRepeated,devLastNotification,devPresentLastScan,devIsNew,devLocation,devIsArchived,devParentMAC,devParentPort,devIcon,devGUID,devSite,devSSID,devSyncHubNode,devSourcePlugin,devCustomProps) */;
|
|
||||||
CREATE VIEW LatestEventsPerMAC AS
|
|
||||||
WITH RankedEvents AS (
|
|
||||||
SELECT
|
|
||||||
e.*,
|
|
||||||
ROW_NUMBER() OVER (PARTITION BY e.eve_MAC ORDER BY e.eve_DateTime DESC) AS row_num
|
|
||||||
FROM Events AS e
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
e.*,
|
|
||||||
d.*,
|
|
||||||
c.*
|
|
||||||
FROM RankedEvents AS e
|
|
||||||
LEFT JOIN Devices AS d ON e.eve_MAC = d.devMac
|
|
||||||
INNER JOIN CurrentScan AS c ON e.eve_MAC = c.cur_MAC
|
|
||||||
WHERE e.row_num = 1
|
|
||||||
/* LatestEventsPerMAC(eve_MAC,eve_IP,eve_DateTime,eve_EventType,eve_AdditionalInfo,eve_PendingAlertEmail,eve_PairEventRowid,row_num,devMac,devName,devOwner,devType,devVendor,devFavorite,devGroup,devComments,devFirstConnection,devLastConnection,devLastIP,devStaticIP,devScan,devLogEvents,devAlertEvents,devAlertDown,devSkipRepeated,devLastNotification,devPresentLastScan,devIsNew,devLocation,devIsArchived,devParentMAC,devParentPort,devIcon,devGUID,devSite,devSSID,devSyncHubNode,devSourcePlugin,devCustomProps,cur_MAC,cur_IP,cur_Vendor,cur_ScanMethod,cur_Name,cur_LastQuery,cur_DateTime,cur_SyncHubNodeName,cur_NetworkSite,cur_SSID,cur_NetworkNodeMAC,cur_PORT,cur_Type) */;
|
|
||||||
CREATE VIEW Sessions_Devices AS SELECT * FROM Sessions LEFT JOIN "Devices" ON ses_MAC = devMac
|
|
||||||
/* Sessions_Devices(ses_MAC,ses_IP,ses_EventTypeConnection,ses_DateTimeConnection,ses_EventTypeDisconnection,ses_DateTimeDisconnection,ses_StillConnected,ses_AdditionalInfo,devMac,devName,devOwner,devType,devVendor,devFavorite,devGroup,devComments,devFirstConnection,devLastConnection,devLastIP,devStaticIP,devScan,devLogEvents,devAlertEvents,devAlertDown,devSkipRepeated,devLastNotification,devPresentLastScan,devIsNew,devLocation,devIsArchived,devParentMAC,devParentPort,devIcon,devGUID,devSite,devSSID,devSyncHubNode,devSourcePlugin,devCustomProps) */;
|
|
||||||
CREATE VIEW Convert_Events_to_Sessions AS SELECT EVE1.eve_MAC,
|
|
||||||
EVE1.eve_IP,
|
|
||||||
EVE1.eve_EventType AS eve_EventTypeConnection,
|
|
||||||
EVE1.eve_DateTime AS eve_DateTimeConnection,
|
|
||||||
CASE WHEN EVE2.eve_EventType IN ('Disconnected', 'Device Down') OR
|
|
||||||
EVE2.eve_EventType IS NULL THEN EVE2.eve_EventType ELSE '<missing event>' END AS eve_EventTypeDisconnection,
|
|
||||||
CASE WHEN EVE2.eve_EventType IN ('Disconnected', 'Device Down') THEN EVE2.eve_DateTime ELSE NULL END AS eve_DateTimeDisconnection,
|
|
||||||
CASE WHEN EVE2.eve_EventType IS NULL THEN 1 ELSE 0 END AS eve_StillConnected,
|
|
||||||
EVE1.eve_AdditionalInfo
|
|
||||||
FROM Events AS EVE1
|
|
||||||
LEFT JOIN
|
|
||||||
Events AS EVE2 ON EVE1.eve_PairEventRowID = EVE2.RowID
|
|
||||||
WHERE EVE1.eve_EventType IN ('New Device', 'Connected','Down Reconnected')
|
|
||||||
UNION
|
|
||||||
SELECT eve_MAC,
|
|
||||||
eve_IP,
|
|
||||||
'<missing event>' AS eve_EventTypeConnection,
|
|
||||||
NULL AS eve_DateTimeConnection,
|
|
||||||
eve_EventType AS eve_EventTypeDisconnection,
|
|
||||||
eve_DateTime AS eve_DateTimeDisconnection,
|
|
||||||
0 AS eve_StillConnected,
|
|
||||||
eve_AdditionalInfo
|
|
||||||
FROM Events AS EVE1
|
|
||||||
WHERE (eve_EventType = 'Device Down' OR
|
|
||||||
eve_EventType = 'Disconnected') AND
|
|
||||||
EVE1.eve_PairEventRowID IS NULL
|
|
||||||
/* Convert_Events_to_Sessions(eve_MAC,eve_IP,eve_EventTypeConnection,eve_DateTimeConnection,eve_EventTypeDisconnection,eve_DateTimeDisconnection,eve_StillConnected,eve_AdditionalInfo) */;
|
|
||||||
CREATE TRIGGER "trg_insert_devices"
|
|
||||||
AFTER INSERT ON "Devices"
|
|
||||||
WHEN NOT EXISTS (
|
|
||||||
SELECT 1 FROM AppEvents
|
|
||||||
WHERE AppEventProcessed = 0
|
|
||||||
AND ObjectType = 'Devices'
|
|
||||||
AND ObjectGUID = NEW.devGUID
|
|
||||||
AND ObjectStatus = CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END
|
|
||||||
AND AppEventType = 'insert'
|
|
||||||
)
|
|
||||||
BEGIN
|
|
||||||
INSERT INTO "AppEvents" (
|
|
||||||
"GUID",
|
|
||||||
"DateTimeCreated",
|
|
||||||
"AppEventProcessed",
|
|
||||||
"ObjectType",
|
|
||||||
"ObjectGUID",
|
|
||||||
"ObjectPrimaryID",
|
|
||||||
"ObjectSecondaryID",
|
|
||||||
"ObjectStatus",
|
|
||||||
"ObjectStatusColumn",
|
|
||||||
"ObjectIsNew",
|
|
||||||
"ObjectIsArchived",
|
|
||||||
"ObjectForeignKey",
|
|
||||||
"ObjectPlugin",
|
|
||||||
"AppEventType"
|
|
||||||
)
|
|
||||||
VALUES (
|
|
||||||
|
|
||||||
lower(
|
|
||||||
hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || '4' ||
|
|
||||||
substr(hex( randomblob(2)), 2) || '-' ||
|
|
||||||
substr('AB89', 1 + (abs(random()) % 4) , 1) ||
|
|
||||||
substr(hex(randomblob(2)), 2) || '-' ||
|
|
||||||
hex(randomblob(6))
|
|
||||||
)
|
|
||||||
,
|
|
||||||
DATETIME('now'),
|
|
||||||
FALSE,
|
|
||||||
'Devices',
|
|
||||||
NEW.devGUID, -- ObjectGUID
|
|
||||||
NEW.devMac, -- ObjectPrimaryID
|
|
||||||
NEW.devLastIP, -- ObjectSecondaryID
|
|
||||||
CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END, -- ObjectStatus
|
|
||||||
'devPresentLastScan', -- ObjectStatusColumn
|
|
||||||
NEW.devIsNew, -- ObjectIsNew
|
|
||||||
NEW.devIsArchived, -- ObjectIsArchived
|
|
||||||
NEW.devGUID, -- ObjectForeignKey
|
|
||||||
'DEVICES', -- ObjectForeignKey
|
|
||||||
'insert'
|
|
||||||
);
|
|
||||||
END;
|
|
||||||
CREATE TRIGGER "trg_update_devices"
|
|
||||||
AFTER UPDATE ON "Devices"
|
|
||||||
WHEN NOT EXISTS (
|
|
||||||
SELECT 1 FROM AppEvents
|
|
||||||
WHERE AppEventProcessed = 0
|
|
||||||
AND ObjectType = 'Devices'
|
|
||||||
AND ObjectGUID = NEW.devGUID
|
|
||||||
AND ObjectStatus = CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END
|
|
||||||
AND AppEventType = 'update'
|
|
||||||
)
|
|
||||||
BEGIN
|
|
||||||
INSERT INTO "AppEvents" (
|
|
||||||
"GUID",
|
|
||||||
"DateTimeCreated",
|
|
||||||
"AppEventProcessed",
|
|
||||||
"ObjectType",
|
|
||||||
"ObjectGUID",
|
|
||||||
"ObjectPrimaryID",
|
|
||||||
"ObjectSecondaryID",
|
|
||||||
"ObjectStatus",
|
|
||||||
"ObjectStatusColumn",
|
|
||||||
"ObjectIsNew",
|
|
||||||
"ObjectIsArchived",
|
|
||||||
"ObjectForeignKey",
|
|
||||||
"ObjectPlugin",
|
|
||||||
"AppEventType"
|
|
||||||
)
|
|
||||||
VALUES (
|
|
||||||
|
|
||||||
lower(
|
|
||||||
hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || '4' ||
|
|
||||||
substr(hex( randomblob(2)), 2) || '-' ||
|
|
||||||
substr('AB89', 1 + (abs(random()) % 4) , 1) ||
|
|
||||||
substr(hex(randomblob(2)), 2) || '-' ||
|
|
||||||
hex(randomblob(6))
|
|
||||||
)
|
|
||||||
,
|
|
||||||
DATETIME('now'),
|
|
||||||
FALSE,
|
|
||||||
'Devices',
|
|
||||||
NEW.devGUID, -- ObjectGUID
|
|
||||||
NEW.devMac, -- ObjectPrimaryID
|
|
||||||
NEW.devLastIP, -- ObjectSecondaryID
|
|
||||||
CASE WHEN NEW.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END, -- ObjectStatus
|
|
||||||
'devPresentLastScan', -- ObjectStatusColumn
|
|
||||||
NEW.devIsNew, -- ObjectIsNew
|
|
||||||
NEW.devIsArchived, -- ObjectIsArchived
|
|
||||||
NEW.devGUID, -- ObjectForeignKey
|
|
||||||
'DEVICES', -- ObjectForeignKey
|
|
||||||
'update'
|
|
||||||
);
|
|
||||||
END;
|
|
||||||
CREATE TRIGGER "trg_delete_devices"
|
|
||||||
AFTER DELETE ON "Devices"
|
|
||||||
WHEN NOT EXISTS (
|
|
||||||
SELECT 1 FROM AppEvents
|
|
||||||
WHERE AppEventProcessed = 0
|
|
||||||
AND ObjectType = 'Devices'
|
|
||||||
AND ObjectGUID = OLD.devGUID
|
|
||||||
AND ObjectStatus = CASE WHEN OLD.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END
|
|
||||||
AND AppEventType = 'delete'
|
|
||||||
)
|
|
||||||
BEGIN
|
|
||||||
INSERT INTO "AppEvents" (
|
|
||||||
"GUID",
|
|
||||||
"DateTimeCreated",
|
|
||||||
"AppEventProcessed",
|
|
||||||
"ObjectType",
|
|
||||||
"ObjectGUID",
|
|
||||||
"ObjectPrimaryID",
|
|
||||||
"ObjectSecondaryID",
|
|
||||||
"ObjectStatus",
|
|
||||||
"ObjectStatusColumn",
|
|
||||||
"ObjectIsNew",
|
|
||||||
"ObjectIsArchived",
|
|
||||||
"ObjectForeignKey",
|
|
||||||
"ObjectPlugin",
|
|
||||||
"AppEventType"
|
|
||||||
)
|
|
||||||
VALUES (
|
|
||||||
|
|
||||||
lower(
|
|
||||||
hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || '4' ||
|
|
||||||
substr(hex( randomblob(2)), 2) || '-' ||
|
|
||||||
substr('AB89', 1 + (abs(random()) % 4) , 1) ||
|
|
||||||
substr(hex(randomblob(2)), 2) || '-' ||
|
|
||||||
hex(randomblob(6))
|
|
||||||
)
|
|
||||||
,
|
|
||||||
DATETIME('now'),
|
|
||||||
FALSE,
|
|
||||||
'Devices',
|
|
||||||
OLD.devGUID, -- ObjectGUID
|
|
||||||
OLD.devMac, -- ObjectPrimaryID
|
|
||||||
OLD.devLastIP, -- ObjectSecondaryID
|
|
||||||
CASE WHEN OLD.devPresentLastScan = 1 THEN 'online' ELSE 'offline' END, -- ObjectStatus
|
|
||||||
'devPresentLastScan', -- ObjectStatusColumn
|
|
||||||
OLD.devIsNew, -- ObjectIsNew
|
|
||||||
OLD.devIsArchived, -- ObjectIsArchived
|
|
||||||
OLD.devGUID, -- ObjectForeignKey
|
|
||||||
'DEVICES', -- ObjectForeignKey
|
|
||||||
'delete'
|
|
||||||
);
|
|
||||||
END;
|
|
||||||
@@ -1,14 +1,17 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
export INSTALL_DIR=/app
|
export INSTALL_DIR=/app
|
||||||
|
|
||||||
LOG_FILE="${INSTALL_DIR}/log/execution_queue.log"
|
if [ -f "${LOG_EXECUTION_QUEUE}" ] && grep -q "cron_restart_backend" "${LOG_EXECUTION_QUEUE}"; then
|
||||||
|
echo "$(date): Restarting backend triggered by cron_restart_backend"
|
||||||
# Check if there are any entries with cron_restart_backend
|
killall python3 || echo "killall python3 failed or no process found"
|
||||||
if grep -q "cron_restart_backend" "$LOG_FILE"; then
|
sleep 2
|
||||||
# Restart python application using s6
|
/services/start-backend.sh &
|
||||||
s6-svc -r /var/run/s6-rc/servicedirs/netalertx
|
|
||||||
echo 'done'
|
|
||||||
|
|
||||||
# Remove all lines containing cron_restart_backend from the log file
|
# Remove all lines containing cron_restart_backend from the log file
|
||||||
sed -i '/cron_restart_backend/d' "$LOG_FILE"
|
# Atomic replacement with temp file. grep returns 1 if no lines selected (file becomes empty), which is valid here.
|
||||||
|
grep -v "cron_restart_backend" "${LOG_EXECUTION_QUEUE}" > "${LOG_EXECUTION_QUEUE}.tmp"
|
||||||
|
RC=$?
|
||||||
|
if [ $RC -eq 0 ] || [ $RC -eq 1 ]; then
|
||||||
|
mv "${LOG_EXECUTION_QUEUE}.tmp" "${LOG_EXECUTION_QUEUE}"
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -5,7 +5,64 @@
|
|||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
{ "mac_prefix": "INTERNET", "vendor": "" }
|
{ "mac_prefix": "INTERNET", "vendor": "" }
|
||||||
],
|
],
|
||||||
"name_pattern": []
|
"name_pattern": [],
|
||||||
|
"ip_pattern": [
|
||||||
|
"^192\\.168\\.1\\.1$",
|
||||||
|
"^192\\.168\\.0\\.1$",
|
||||||
|
"^10\\.0\\.0\\.1$"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Switch",
|
||||||
|
"icon_html": "<i class=\"fa-solid fa-toggle-on\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "003192", "vendor": "TP-Link" },
|
||||||
|
{ "mac_prefix": "50C7BF", "vendor": "TP-Link" },
|
||||||
|
{ "mac_prefix": "B04E26", "vendor": "TP-Link" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["hs200", "hs210", "hs220", "ks230", "smart switch", "light switch", "wall switch"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Plug",
|
||||||
|
"icon_html": "<i class=\"fa-solid fa-plug\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "2887BA", "vendor": "TP-Link" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["kp115", "hs100", "hs103", "hs105", "smart plug", "outlet", "plug"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Speaker",
|
||||||
|
"icon_html": "<i class=\"fa fa-volume-up\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "14C14E", "vendor": "Google" },
|
||||||
|
{ "mac_prefix": "44650D", "vendor": "Amazon" },
|
||||||
|
{ "mac_prefix": "74ACB9", "vendor": "Google" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["echo", "alexa", "dot", "nest-audio", "nest-mini", "google-home"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Appliance",
|
||||||
|
"icon_html": "<i class=\"fa-solid fa-wind\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "446FF8", "vendor": "Dyson" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["dyson", "purifier", "humidifier", "fan"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Smart Home",
|
||||||
|
"icon_html": "<i class=\"fa fa-house\"></i>",
|
||||||
|
"matching_pattern": [],
|
||||||
|
"name_pattern": ["google", "chromecast", "nest", "hub"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dev_type": "Phone",
|
||||||
|
"icon_html": "<i class=\"fa-solid fa-mobile\"></i>",
|
||||||
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "001A79", "vendor": "Apple" },
|
||||||
|
{ "mac_prefix": "B0BE83", "vendor": "Samsung" },
|
||||||
|
{ "mac_prefix": "BC926B", "vendor": "Motorola" }
|
||||||
|
],
|
||||||
|
"name_pattern": ["iphone", "ipad", "pixel", "galaxy", "redmi", "android", "samsung"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Access Point",
|
"dev_type": "Access Point",
|
||||||
@@ -16,24 +73,7 @@
|
|||||||
{ "mac_prefix": "F4F5D8", "vendor": "TP-Link" },
|
{ "mac_prefix": "F4F5D8", "vendor": "TP-Link" },
|
||||||
{ "mac_prefix": "F88E85", "vendor": "Netgear" }
|
{ "mac_prefix": "F88E85", "vendor": "Netgear" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["router", "gateway", "ap", "access point", "access-point", "switch"]
|
"name_pattern": ["router", "gateway", "ap", "access point", "access-point", "switch", "sg105", "sg108", "managed switch", "unmanaged switch", "poe switch", "ethernet switch"]
|
||||||
},
|
|
||||||
{
|
|
||||||
"dev_type": "Phone",
|
|
||||||
"icon_html": "<i class=\"fa-brands fa-apple\"></i>",
|
|
||||||
"matching_pattern": [
|
|
||||||
{ "mac_prefix": "001A79", "vendor": "Apple" },
|
|
||||||
{ "mac_prefix": "B0BE83", "vendor": "Samsung" },
|
|
||||||
{ "mac_prefix": "BC926B", "vendor": "Motorola" }
|
|
||||||
],
|
|
||||||
"name_pattern": ["iphone", "ipad", "pixel", "galaxy", "redmi"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"dev_type": "Phone",
|
|
||||||
"icon_html": "<i class=\"fa-solid fa-mobile\"></i>",
|
|
||||||
"matching_pattern": [
|
|
||||||
],
|
|
||||||
"name_pattern": ["android","samsung"]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Tablet",
|
"dev_type": "Tablet",
|
||||||
@@ -43,25 +83,19 @@
|
|||||||
{ "mac_prefix": "BC4C4C", "vendor": "Samsung" }
|
{ "mac_prefix": "BC4C4C", "vendor": "Samsung" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["tablet", "pad"]
|
"name_pattern": ["tablet", "pad"]
|
||||||
},
|
|
||||||
{
|
|
||||||
"dev_type": "IoT",
|
|
||||||
"icon_html": "<i class=\"fa-brands fa-raspberry-pi\"></i>",
|
|
||||||
"matching_pattern": [
|
|
||||||
{ "mac_prefix": "B827EB", "vendor": "Raspberry Pi" },
|
|
||||||
{ "mac_prefix": "DCA632", "vendor": "Raspberry Pi" }
|
|
||||||
],
|
|
||||||
"name_pattern": ["raspberry", "pi"]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "IoT",
|
"dev_type": "IoT",
|
||||||
"icon_html": "<i class=\"fa-solid fa-microchip\"></i>",
|
"icon_html": "<i class=\"fa-solid fa-microchip\"></i>",
|
||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "B827EB", "vendor": "Raspberry Pi" },
|
||||||
|
{ "mac_prefix": "DCA632", "vendor": "Raspberry Pi" },
|
||||||
{ "mac_prefix": "840D8E", "vendor": "Espressif" },
|
{ "mac_prefix": "840D8E", "vendor": "Espressif" },
|
||||||
{ "mac_prefix": "ECFABC", "vendor": "Espressif" },
|
{ "mac_prefix": "ECFABC", "vendor": "Espressif" },
|
||||||
{ "mac_prefix": "7C9EBD", "vendor": "Espressif" }
|
{ "mac_prefix": "7C9EBD", "vendor": "Espressif" },
|
||||||
|
{ "mac_prefix": "286DCD", "vendor": "Beijing Winner Microelectronics" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["raspberry", "pi"]
|
"name_pattern": ["raspberry", "pi", "thingsturn", "w600", "w601"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Desktop",
|
"dev_type": "Desktop",
|
||||||
@@ -69,9 +103,11 @@
|
|||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
{ "mac_prefix": "001422", "vendor": "Dell" },
|
{ "mac_prefix": "001422", "vendor": "Dell" },
|
||||||
{ "mac_prefix": "001874", "vendor": "Lenovo" },
|
{ "mac_prefix": "001874", "vendor": "Lenovo" },
|
||||||
{ "mac_prefix": "00E04C", "vendor": "Hewlett Packard" }
|
{ "mac_prefix": "00E04C", "vendor": "Hewlett Packard" },
|
||||||
|
{ "mac_prefix": "F44D30", "vendor": "Elitegroup Computer Systems" },
|
||||||
|
{ "mac_prefix": "1C697A", "vendor": "Elitegroup Computer Systems" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["desktop", "pc", "computer"]
|
"name_pattern": ["desktop", "pc", "computer", "liva", "ecs"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Laptop",
|
"dev_type": "Laptop",
|
||||||
@@ -80,9 +116,10 @@
|
|||||||
{ "mac_prefix": "3C0754", "vendor": "HP" },
|
{ "mac_prefix": "3C0754", "vendor": "HP" },
|
||||||
{ "mac_prefix": "0017A4", "vendor": "Dell" },
|
{ "mac_prefix": "0017A4", "vendor": "Dell" },
|
||||||
{ "mac_prefix": "F4CE46", "vendor": "Lenovo" },
|
{ "mac_prefix": "F4CE46", "vendor": "Lenovo" },
|
||||||
{ "mac_prefix": "409F38", "vendor": "Acer" }
|
{ "mac_prefix": "409F38", "vendor": "Acer" },
|
||||||
|
{ "mac_prefix": "9CB6D0", "vendor": "Rivet Networks" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["macbook", "imac", "laptop", "notebook"]
|
"name_pattern": ["macbook", "imac", "laptop", "notebook", "alienware", "razer", "msi"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Server",
|
"dev_type": "Server",
|
||||||
@@ -123,9 +160,10 @@
|
|||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
{ "mac_prefix": "001FA7", "vendor": "Sony" },
|
{ "mac_prefix": "001FA7", "vendor": "Sony" },
|
||||||
{ "mac_prefix": "7C04D0", "vendor": "Nintendo" },
|
{ "mac_prefix": "7C04D0", "vendor": "Nintendo" },
|
||||||
{ "mac_prefix": "EC26CA", "vendor": "Sony" }
|
{ "mac_prefix": "EC26CA", "vendor": "Sony" },
|
||||||
|
{ "mac_prefix": "48B02D", "vendor": "NVIDIA" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["playstation", "xbox"]
|
"name_pattern": ["playstation", "xbox", "shield", "nvidia"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Camera",
|
"dev_type": "Camera",
|
||||||
@@ -138,15 +176,6 @@
|
|||||||
],
|
],
|
||||||
"name_pattern": ["camera", "cam", "webcam"]
|
"name_pattern": ["camera", "cam", "webcam"]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"dev_type": "Smart Speaker",
|
|
||||||
"icon_html": "<i class=\"fa fa-volume-up\"></i>",
|
|
||||||
"matching_pattern": [
|
|
||||||
{ "mac_prefix": "44650D", "vendor": "Amazon" },
|
|
||||||
{ "mac_prefix": "74ACB9", "vendor": "Google" }
|
|
||||||
],
|
|
||||||
"name_pattern": ["echo", "alexa", "dot"]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"dev_type": "Router",
|
"dev_type": "Router",
|
||||||
"icon_html": "<i class=\"fa fa-random\"></i>",
|
"icon_html": "<i class=\"fa fa-random\"></i>",
|
||||||
@@ -154,23 +183,13 @@
|
|||||||
{ "mac_prefix": "000C29", "vendor": "Cisco" },
|
{ "mac_prefix": "000C29", "vendor": "Cisco" },
|
||||||
{ "mac_prefix": "00155D", "vendor": "MikroTik" }
|
{ "mac_prefix": "00155D", "vendor": "MikroTik" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["router", "gateway", "ap", "access point", "access-point"],
|
"name_pattern": ["router", "gateway", "ap", "access point"]
|
||||||
"ip_pattern": [
|
|
||||||
"^192\\.168\\.[0-1]\\.1$",
|
|
||||||
"^10\\.0\\.0\\.1$"
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Smart Light",
|
"dev_type": "Smart Light",
|
||||||
"icon_html": "<i class=\"fa fa-lightbulb\"></i>",
|
"icon_html": "<i class=\"fa fa-lightbulb\"></i>",
|
||||||
"matching_pattern": [],
|
"matching_pattern": [],
|
||||||
"name_pattern": ["hue", "lifx", "bulb"]
|
"name_pattern": ["hue", "lifx", "bulb", "light"]
|
||||||
},
|
|
||||||
{
|
|
||||||
"dev_type": "Smart Home",
|
|
||||||
"icon_html": "<i class=\"fa fa-house\"></i>",
|
|
||||||
"matching_pattern": [],
|
|
||||||
"name_pattern": ["google", "chromecast", "nest"]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"dev_type": "Smartwatch",
|
"dev_type": "Smartwatch",
|
||||||
@@ -187,14 +206,9 @@
|
|||||||
{
|
{
|
||||||
"dev_type": "Security Device",
|
"dev_type": "Security Device",
|
||||||
"icon_html": "<i class=\"fa fa-shield-alt\"></i>",
|
"icon_html": "<i class=\"fa fa-shield-alt\"></i>",
|
||||||
"matching_pattern": [],
|
|
||||||
"name_pattern": ["doorbell", "lock", "security"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"dev_type": "Smart Light",
|
|
||||||
"icon_html": "<i class=\"fa-solid fa-lightbulb\"></i>",
|
|
||||||
"matching_pattern": [
|
"matching_pattern": [
|
||||||
|
{ "mac_prefix": "047BCB", "vendor": "Universal Global Scientific" }
|
||||||
],
|
],
|
||||||
"name_pattern": ["light","bulb"]
|
"name_pattern": ["doorbell", "lock", "security", "mmd-", "ring"]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
2
db/.gitignore
vendored
2
db/.gitignore
vendored
@@ -1,2 +0,0 @@
|
|||||||
*
|
|
||||||
!.gitignore
|
|
||||||
@@ -1,71 +1,64 @@
|
|||||||
services:
|
services:
|
||||||
netalertx:
|
netalertx:
|
||||||
#use an environmental variable to set host networking mode if needed
|
network_mode: host # Use host networking for ARP scanning and other services
|
||||||
network_mode: ${NETALERTX_NETWORK_MODE:-host} # Use host networking for ARP scanning and other services
|
|
||||||
build:
|
build:
|
||||||
context: . # Build context is the current directory
|
context: . # Build context is the current directory
|
||||||
dockerfile: Dockerfile # Specify the Dockerfile to use
|
dockerfile: Dockerfile # Specify the Dockerfile to use
|
||||||
image: netalertx:latest
|
image: netalertx:latest
|
||||||
container_name: netalertx # The name when you docker contiainer ls
|
container_name: netalertx # The name when you docker contiainer ls
|
||||||
read_only: true # Make the container filesystem read-only
|
read_only: true # Make the container filesystem read-only
|
||||||
|
|
||||||
|
# It is most secure to start with user 20211, but then we lose provisioning capabilities.
|
||||||
|
# user: "${NETALERTX_UID:-20211}:${NETALERTX_GID:-20211}"
|
||||||
cap_drop: # Drop all capabilities for enhanced security
|
cap_drop: # Drop all capabilities for enhanced security
|
||||||
- ALL
|
- ALL
|
||||||
cap_add: # Add only the necessary capabilities
|
cap_add: # Add only the necessary capabilities
|
||||||
- NET_ADMIN # Required for ARP scanning
|
- NET_ADMIN # Required for scanning with arp-scan, nmap, nbtscan, traceroute, and zero-conf
|
||||||
- NET_RAW # Required for raw socket operations
|
- NET_RAW # Required for raw socket operations with arp-scan, nmap, nbtscan, traceroute and zero-conf
|
||||||
- NET_BIND_SERVICE # Required to bind to privileged ports (nbtscan)
|
- NET_BIND_SERVICE # Required to bind to privileged ports with nbtscan
|
||||||
|
- CHOWN # Required for root-entrypoint to chown /data + /tmp before dropping privileges
|
||||||
|
- SETUID # Required for root-entrypoint to switch to non-root user
|
||||||
|
- SETGID # Required for root-entrypoint to switch to non-root group
|
||||||
|
sysctls: # ARP flux mitigation for host networking accuracy
|
||||||
|
net.ipv4.conf.all.arp_ignore: 1
|
||||||
|
net.ipv4.conf.all.arp_announce: 2
|
||||||
volumes:
|
volumes:
|
||||||
|
|
||||||
- type: volume # Persistent Docker-managed Named Volume for storage of config files
|
- type: volume # Persistent Docker-managed Named Volume for storage
|
||||||
source: netalertx_config # the default name of the volume is netalertx_config
|
source: netalertx_data # the default name of the volume is netalertx_data
|
||||||
target: /app/config # inside the container mounted to /app/config
|
target: /data # consolidated configuration and database storage
|
||||||
read_only: false # writable volume
|
read_only: false # writable volume
|
||||||
|
|
||||||
# Example custom local folder called /home/user/netalertx_config
|
# Example custom local folder called /home/user/netalertx_data
|
||||||
# - type: bind
|
# - type: bind
|
||||||
# source: /home/user/netalertx_config
|
# source: /home/user/netalertx_data
|
||||||
# target: /app/config
|
# target: /data
|
||||||
# read_only: false
|
# read_only: false
|
||||||
# ... or use the alternative format
|
# ... or use the alternative format
|
||||||
# - /home/user/netalertx_config:/app/config:rw
|
# - /home/user/netalertx_data:/data:rw
|
||||||
|
|
||||||
- type: volume
|
|
||||||
source: netalertx_db
|
|
||||||
target: /app/db
|
|
||||||
read_only: false
|
|
||||||
|
|
||||||
- type: bind # Bind mount for timezone consistency
|
- type: bind # Bind mount for timezone consistency
|
||||||
source: /etc/localtime
|
source: /etc/localtime
|
||||||
target: /etc/localtime
|
target: /etc/localtime
|
||||||
read_only: true
|
read_only: true
|
||||||
|
|
||||||
# Use a custom Enterprise-configured nginx config for ldap or other settings
|
# Use a custom Enterprise-configured nginx config for ldap or other settings
|
||||||
# - /custom-enterprise.conf:/services/config/nginx/conf.active/netalertx.conf:ro
|
# - /custom-enterprise.conf:/tmp/nginx/active-config/netalertx.conf:ro
|
||||||
|
|
||||||
# Test your plugin on the production container
|
# Test your plugin on the production container
|
||||||
# - /path/on/host:/app/front/plugins/custom
|
# - /path/on/host:/app/front/plugins/custom
|
||||||
|
|
||||||
# Retain logs - comment out tmpfs /app/log if you want to retain logs between container restarts
|
# Retain logs - comment out tmpfs /tmp/log if you want to retain logs between container restarts
|
||||||
# - /path/on/host/log:/app/log
|
# - /path/on/host/log:/tmp/log
|
||||||
|
|
||||||
# Tempfs mounts for writable directories in a read-only container and improve system performance
|
# tmpfs mounts for writable directories in a read-only container and improve system performance
|
||||||
# All mounts have noexec,nosuid,nodev for security purposes no devices, no suid/sgid and no execution of binaries
|
# All writes now live under /tmp/* subdirectories which are created dynamically by entrypoint.d scripts
|
||||||
# async where possible for performance, sync where required for correctness
|
# mode=1700 gives rwx------ permissions; ownership is set by /root-entrypoint.sh
|
||||||
# uid=20211 and gid=20211 is the netalertx user inside the container
|
|
||||||
# mode=1700 gives rwx------ permissions to the netalertx user only
|
|
||||||
tmpfs:
|
tmpfs:
|
||||||
# Speed up logging. This can be commented out to retain logs between container restarts
|
- "/tmp:mode=1700,uid=0,gid=0,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
||||||
- "/app/log:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
|
||||||
# Speed up API access as frontend/backend API is very chatty
|
|
||||||
- "/app/api:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,sync,noatime,nodiratime"
|
|
||||||
# Required for customization of the nginx listen addr/port without rebuilding the container
|
|
||||||
- "/services/config/nginx/conf.active:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
|
||||||
# /services/config/nginx/conf.d is required for nginx and php to start
|
|
||||||
- "/services/run:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
|
||||||
# /tmp is required by php for session save this should be reworked to /services/run/tmp
|
|
||||||
- "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime"
|
|
||||||
environment:
|
environment:
|
||||||
|
PUID: ${NETALERTX_UID:-20211} # Runtime UID after priming (Synology/no-copy-up safe)
|
||||||
|
PGID: ${NETALERTX_GID:-20211} # Runtime GID after priming (Synology/no-copy-up safe)
|
||||||
LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces
|
LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces
|
||||||
PORT: ${PORT:-20211} # Application port
|
PORT: ${PORT:-20211} # Application port
|
||||||
GRAPHQL_PORT: ${GRAPHQL_PORT:-20212} # GraphQL API port
|
GRAPHQL_PORT: ${GRAPHQL_PORT:-20212} # GraphQL API port
|
||||||
@@ -78,7 +71,6 @@ services:
|
|||||||
cpu_shares: 512 # Relative CPU weight for CPU contention scenarios
|
cpu_shares: 512 # Relative CPU weight for CPU contention scenarios
|
||||||
pids_limit: 512 # Limit the number of processes/threads to prevent fork bombs
|
pids_limit: 512 # Limit the number of processes/threads to prevent fork bombs
|
||||||
logging:
|
logging:
|
||||||
driver: "json-file" # Use JSON file logging driver
|
|
||||||
options:
|
options:
|
||||||
max-size: "10m" # Rotate log files after they reach 10MB
|
max-size: "10m" # Rotate log files after they reach 10MB
|
||||||
max-file: "3" # Keep a maximum of 3 log files
|
max-file: "3" # Keep a maximum of 3 log files
|
||||||
@@ -86,6 +78,5 @@ services:
|
|||||||
# Always restart the container unless explicitly stopped
|
# Always restart the container unless explicitly stopped
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
|
||||||
volumes: # Persistent volumes for configuration and database storage
|
volumes: # Persistent volume for configuration and database storage
|
||||||
netalertx_config: # Configuration files
|
netalertx_data:
|
||||||
netalertx_db: # Database files
|
|
||||||
|
|||||||
558
docker_build.log
558
docker_build.log
@@ -1,534 +1,74 @@
|
|||||||
#0 building with "default" instance using docker driver
|
#0 building with "default" instance using docker driver
|
||||||
|
|
||||||
#1 [internal] load build definition from Dockerfile
|
#1 [internal] load build definition from Dockerfile
|
||||||
#1 transferring dockerfile: 5.29kB done
|
#1 DONE 0.0s
|
||||||
|
|
||||||
|
#1 [internal] load build definition from Dockerfile
|
||||||
|
#1 transferring dockerfile: 11.45kB done
|
||||||
#1 DONE 0.1s
|
#1 DONE 0.1s
|
||||||
|
|
||||||
#2 [auth] library/alpine:pull token for registry-1.docker.io
|
#2 [internal] load metadata for docker.io/library/alpine:3.22
|
||||||
#2 DONE 0.0s
|
#2 DONE 0.0s
|
||||||
|
|
||||||
#3 [internal] load metadata for docker.io/library/alpine:3.22
|
#3 [internal] load .dockerignore
|
||||||
#3 DONE 0.4s
|
#3 transferring context:
|
||||||
|
#3 transferring context: 222B done
|
||||||
|
#3 DONE 0.1s
|
||||||
|
|
||||||
#4 [internal] load .dockerignore
|
#4 [builder 1/4] FROM docker.io/library/alpine:3.22
|
||||||
#4 transferring context: 216B done
|
#4 DONE 0.0s
|
||||||
#4 DONE 0.1s
|
|
||||||
|
|
||||||
#5 [builder 1/15] FROM docker.io/library/alpine:3.22@sha256:4bcff63911fcb4448bd4fdacec207030997caf25e9bea4045fa6c8c44de311d1
|
#5 [internal] load build context
|
||||||
#5 CACHED
|
#5 transferring context: 46.63kB 0.1s done
|
||||||
|
#5 DONE 0.2s
|
||||||
|
|
||||||
#6 [internal] load build context
|
#6 [builder 3/4] RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git rust cargo && python -m venv /opt/venv
|
||||||
#6 transferring context: 36.76kB 0.0s done
|
#6 CACHED
|
||||||
#6 DONE 0.1s
|
|
||||||
|
|
||||||
#7 [builder 2/15] RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git && python -m venv /opt/venv
|
#7 [runner 6/11] COPY --chown=netalertx:netalertx --chmod=755 server /app/server
|
||||||
#7 0.443 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
#7 CACHED
|
||||||
#7 0.688 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#7 1.107 (1/52) Upgrading libcrypto3 (3.5.1-r0 -> 3.5.3-r0)
|
|
||||||
#7 1.358 (2/52) Upgrading libssl3 (3.5.1-r0 -> 3.5.3-r0)
|
|
||||||
#7 1.400 (3/52) Installing ncurses-terminfo-base (6.5_p20250503-r0)
|
|
||||||
#7 1.413 (4/52) Installing libncursesw (6.5_p20250503-r0)
|
|
||||||
#7 1.444 (5/52) Installing readline (8.2.13-r1)
|
|
||||||
#7 1.471 (6/52) Installing bash (5.2.37-r0)
|
|
||||||
#7 1.570 Executing bash-5.2.37-r0.post-install
|
|
||||||
#7 1.593 (7/52) Installing libgcc (14.2.0-r6)
|
|
||||||
#7 1.605 (8/52) Installing jansson (2.14.1-r0)
|
|
||||||
#7 1.613 (9/52) Installing libstdc++ (14.2.0-r6)
|
|
||||||
#7 1.705 (10/52) Installing zstd-libs (1.5.7-r0)
|
|
||||||
#7 1.751 (11/52) Installing binutils (2.44-r3)
|
|
||||||
#7 2.041 (12/52) Installing libgomp (14.2.0-r6)
|
|
||||||
#7 2.064 (13/52) Installing libatomic (14.2.0-r6)
|
|
||||||
#7 2.071 (14/52) Installing gmp (6.3.0-r3)
|
|
||||||
#7 2.097 (15/52) Installing isl26 (0.26-r1)
|
|
||||||
#7 2.183 (16/52) Installing mpfr4 (4.2.1_p1-r0)
|
|
||||||
#7 2.219 (17/52) Installing mpc1 (1.3.1-r1)
|
|
||||||
#7 2.231 (18/52) Installing gcc (14.2.0-r6)
|
|
||||||
#7 6.782 (19/52) Installing brotli-libs (1.1.0-r2)
|
|
||||||
#7 6.828 (20/52) Installing c-ares (1.34.5-r0)
|
|
||||||
#7 6.846 (21/52) Installing libunistring (1.3-r0)
|
|
||||||
#7 6.919 (22/52) Installing libidn2 (2.3.7-r0)
|
|
||||||
#7 6.937 (23/52) Installing nghttp2-libs (1.65.0-r0)
|
|
||||||
#7 6.950 (24/52) Installing libpsl (0.21.5-r3)
|
|
||||||
#7 6.960 (25/52) Installing libcurl (8.14.1-r1)
|
|
||||||
#7 7.015 (26/52) Installing libexpat (2.7.2-r0)
|
|
||||||
#7 7.029 (27/52) Installing pcre2 (10.43-r1)
|
|
||||||
#7 7.069 (28/52) Installing git (2.49.1-r0)
|
|
||||||
#7 7.397 (29/52) Installing git-init-template (2.49.1-r0)
|
|
||||||
#7 7.404 (30/52) Installing linux-headers (6.14.2-r0)
|
|
||||||
#7 7.572 (31/52) Installing libffi (3.4.8-r0)
|
|
||||||
#7 7.578 (32/52) Installing pkgconf (2.4.3-r0)
|
|
||||||
#7 7.593 (33/52) Installing libffi-dev (3.4.8-r0)
|
|
||||||
#7 7.607 (34/52) Installing musl-dev (1.2.5-r10)
|
|
||||||
#7 7.961 (35/52) Installing openssl-dev (3.5.3-r0)
|
|
||||||
#7 8.021 (36/52) Installing libbz2 (1.0.8-r6)
|
|
||||||
#7 8.045 (37/52) Installing gdbm (1.24-r0)
|
|
||||||
#7 8.055 (38/52) Installing xz-libs (5.8.1-r0)
|
|
||||||
#7 8.071 (39/52) Installing mpdecimal (4.0.1-r0)
|
|
||||||
#7 8.090 (40/52) Installing libpanelw (6.5_p20250503-r0)
|
|
||||||
#7 8.098 (41/52) Installing sqlite-libs (3.49.2-r1)
|
|
||||||
#7 8.185 (42/52) Installing python3 (3.12.11-r0)
|
|
||||||
#7 8.904 (43/52) Installing python3-pycache-pyc0 (3.12.11-r0)
|
|
||||||
#7 9.292 (44/52) Installing pyc (3.12.11-r0)
|
|
||||||
#7 9.292 (45/52) Installing python3-pyc (3.12.11-r0)
|
|
||||||
#7 9.292 (46/52) Installing python3-dev (3.12.11-r0)
|
|
||||||
#7 10.71 (47/52) Installing libmd (1.1.0-r0)
|
|
||||||
#7 10.72 (48/52) Installing libbsd (0.12.2-r0)
|
|
||||||
#7 10.73 (49/52) Installing skalibs-libs (2.14.4.0-r0)
|
|
||||||
#7 10.75 (50/52) Installing utmps-libs (0.1.3.1-r0)
|
|
||||||
#7 10.76 (51/52) Installing linux-pam (1.7.0-r4)
|
|
||||||
#7 10.82 (52/52) Installing shadow (4.17.3-r0)
|
|
||||||
#7 10.88 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#7 10.90 OK: 274 MiB in 66 packages
|
|
||||||
#7 DONE 14.4s
|
|
||||||
|
|
||||||
#8 [builder 3/15] RUN mkdir -p /app
|
#8 [runner 5/11] COPY --chown=netalertx:netalertx --chmod=755 front /app/front
|
||||||
#8 DONE 0.5s
|
#8 CACHED
|
||||||
|
|
||||||
#9 [builder 4/15] COPY api /app/api
|
#9 [runner 2/11] RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap nmap-scripts traceroute nbtscan net-tools net-snmp-tools bind-tools awake ca-certificates sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session python3 envsubst nginx supercronic shadow su-exec && rm -Rf /var/cache/apk/* && rm -Rf /etc/nginx && addgroup -g 20211 netalertx && adduser -u 20211 -D -h /app -G netalertx netalertx && apk del shadow
|
||||||
#9 DONE 0.3s
|
#9 CACHED
|
||||||
|
|
||||||
#10 [builder 5/15] COPY back /app/back
|
#10 [runner 4/11] COPY --chown=netalertx:netalertx --chmod=755 back /app/back
|
||||||
#10 DONE 0.3s
|
#10 CACHED
|
||||||
|
|
||||||
#11 [builder 6/15] COPY config /app/config
|
#11 [builder 2/4] COPY requirements.txt /tmp/requirements.txt
|
||||||
#11 DONE 0.3s
|
#11 CACHED
|
||||||
|
|
||||||
#12 [builder 7/15] COPY db /app/db
|
#12 [runner 7/11] RUN install -d -o netalertx -g netalertx -m 700 /data /data/config /data/db /tmp/api /tmp/log /tmp/log/plugins /tmp/run /tmp/run/tmp /tmp/run/logs /tmp/nginx/active-config && sh -c "find /app -type f \( -name '*.sh' -o -name 'speedtest-cli' \) -exec chmod 750 {} \;"
|
||||||
#12 DONE 0.3s
|
#12 CACHED
|
||||||
|
|
||||||
#13 [builder 8/15] COPY dockerfiles /app/dockerfiles
|
#13 [hardened 1/2] RUN addgroup -g 20212 "readonly" && adduser -u 20212 -G "readonly" -D -h /app "readonly"
|
||||||
#13 DONE 0.3s
|
#13 CACHED
|
||||||
|
|
||||||
#14 [builder 9/15] COPY front /app/front
|
#14 [runner 8/11] COPY --chown=netalertx:netalertx .[V]ERSION /app/.VERSION
|
||||||
#14 DONE 0.4s
|
#14 CACHED
|
||||||
|
|
||||||
#15 [builder 10/15] COPY server /app/server
|
#15 [runner 9/11] COPY --chown=netalertx:netalertx .[V]ERSION /app/.VERSION_PREV
|
||||||
#15 DONE 0.3s
|
#15 CACHED
|
||||||
|
|
||||||
#16 [builder 11/15] COPY install/crontab /etc/crontabs/root
|
#16 [runner 11/11] RUN for vfile in .VERSION .VERSION_PREV; do if [ ! -f "/app/${vfile}" ]; then echo "DEVELOPMENT 00000000" > "/app/${vfile}"; fi; chown 20212:20212 "/app/${vfile}"; done && apk add --no-cache libcap && setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && setcap cap_net_raw,cap_net_admin+eip /usr/bin/arp-scan && setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nbtscan && setcap cap_net_raw,cap_net_admin+eip /usr/bin/traceroute && setcap cap_net_raw,cap_net_admin+eip "$(readlink -f /opt/venv/bin/python)" && /bin/sh /build/init-nginx.sh && /bin/sh /build/init-php-fpm.sh && /bin/sh /build/init-cron.sh && /bin/sh /build/init-backend.sh && rm -rf /build && apk del libcap && date +%s > "/app/front/buildtimestamp.txt"
|
||||||
#16 DONE 0.3s
|
#16 CACHED
|
||||||
|
|
||||||
#17 [builder 12/15] COPY dockerfiles/start* /start*.sh
|
#17 [builder 4/4] RUN python -m pip install --no-cache-dir --upgrade pip setuptools wheel && pip install --prefer-binary --no-cache-dir -r /tmp/requirements.txt && chmod -R u-rwx,g-rwx /opt
|
||||||
#17 DONE 0.3s
|
#17 CACHED
|
||||||
|
|
||||||
#18 [builder 13/15] RUN pip install openwrt-luci-rpc asusrouter asyncio aiohttp graphene flask flask-cors unifi-sm-api tplink-omada-client wakeonlan pycryptodome requests paho-mqtt scapy cron-converter pytz json2table dhcp-leases pyunifi speedtest-cli chardet python-nmap dnspython librouteros yattag git+https://github.com/foreign-sub/aiofreepybox.git
|
#18 [runner 10/11] COPY --from=builder --chown=20212:20212 /opt/venv /opt/venv
|
||||||
#18 0.737 Collecting git+https://github.com/foreign-sub/aiofreepybox.git
|
#18 CACHED
|
||||||
#18 0.737 Cloning https://github.com/foreign-sub/aiofreepybox.git to /tmp/pip-req-build-waf5_npl
|
|
||||||
#18 0.738 Running command git clone --filter=blob:none --quiet https://github.com/foreign-sub/aiofreepybox.git /tmp/pip-req-build-waf5_npl
|
|
||||||
#18 1.617 Resolved https://github.com/foreign-sub/aiofreepybox.git to commit 4ee18ea0f3e76edc839c48eb8df1da59c1baee3d
|
|
||||||
#18 1.620 Installing build dependencies: started
|
|
||||||
#18 3.337 Installing build dependencies: finished with status 'done'
|
|
||||||
#18 3.337 Getting requirements to build wheel: started
|
|
||||||
#18 3.491 Getting requirements to build wheel: finished with status 'done'
|
|
||||||
#18 3.492 Preparing metadata (pyproject.toml): started
|
|
||||||
#18 3.650 Preparing metadata (pyproject.toml): finished with status 'done'
|
|
||||||
#18 3.724 Collecting openwrt-luci-rpc
|
|
||||||
#18 3.753 Downloading openwrt_luci_rpc-1.1.17-py2.py3-none-any.whl.metadata (4.9 kB)
|
|
||||||
#18 3.892 Collecting asusrouter
|
|
||||||
#18 3.900 Downloading asusrouter-1.21.0-py3-none-any.whl.metadata (33 kB)
|
|
||||||
#18 3.999 Collecting asyncio
|
|
||||||
#18 4.007 Downloading asyncio-4.0.0-py3-none-any.whl.metadata (994 bytes)
|
|
||||||
#18 4.576 Collecting aiohttp
|
|
||||||
#18 4.582 Downloading aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (7.7 kB)
|
|
||||||
#18 4.729 Collecting graphene
|
|
||||||
#18 4.735 Downloading graphene-3.4.3-py2.py3-none-any.whl.metadata (6.9 kB)
|
|
||||||
#18 4.858 Collecting flask
|
|
||||||
#18 4.866 Downloading flask-3.1.2-py3-none-any.whl.metadata (3.2 kB)
|
|
||||||
#18 4.963 Collecting flask-cors
|
|
||||||
#18 4.972 Downloading flask_cors-6.0.1-py3-none-any.whl.metadata (5.3 kB)
|
|
||||||
#18 5.055 Collecting unifi-sm-api
|
|
||||||
#18 5.065 Downloading unifi_sm_api-0.2.1-py3-none-any.whl.metadata (2.3 kB)
|
|
||||||
#18 5.155 Collecting tplink-omada-client
|
|
||||||
#18 5.166 Downloading tplink_omada_client-1.4.4-py3-none-any.whl.metadata (3.5 kB)
|
|
||||||
#18 5.262 Collecting wakeonlan
|
|
||||||
#18 5.274 Downloading wakeonlan-3.1.0-py3-none-any.whl.metadata (4.3 kB)
|
|
||||||
#18 5.500 Collecting pycryptodome
|
|
||||||
#18 5.505 Downloading pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl.metadata (3.4 kB)
|
|
||||||
#18 5.653 Collecting requests
|
|
||||||
#18 5.660 Downloading requests-2.32.5-py3-none-any.whl.metadata (4.9 kB)
|
|
||||||
#18 5.764 Collecting paho-mqtt
|
|
||||||
#18 5.775 Downloading paho_mqtt-2.1.0-py3-none-any.whl.metadata (23 kB)
|
|
||||||
#18 5.890 Collecting scapy
|
|
||||||
#18 5.902 Downloading scapy-2.6.1-py3-none-any.whl.metadata (5.6 kB)
|
|
||||||
#18 6.002 Collecting cron-converter
|
|
||||||
#18 6.013 Downloading cron_converter-1.2.2-py3-none-any.whl.metadata (8.1 kB)
|
|
||||||
#18 6.187 Collecting pytz
|
|
||||||
#18 6.193 Downloading pytz-2025.2-py2.py3-none-any.whl.metadata (22 kB)
|
|
||||||
#18 6.285 Collecting json2table
|
|
||||||
#18 6.294 Downloading json2table-1.1.5-py2.py3-none-any.whl.metadata (6.0 kB)
|
|
||||||
#18 6.381 Collecting dhcp-leases
|
|
||||||
#18 6.387 Downloading dhcp_leases-0.1.6-py3-none-any.whl.metadata (5.9 kB)
|
|
||||||
#18 6.461 Collecting pyunifi
|
|
||||||
#18 6.471 Downloading pyunifi-2.21-py3-none-any.whl.metadata (274 bytes)
|
|
||||||
#18 6.582 Collecting speedtest-cli
|
|
||||||
#18 6.596 Downloading speedtest_cli-2.1.3-py2.py3-none-any.whl.metadata (6.8 kB)
|
|
||||||
#18 6.767 Collecting chardet
|
|
||||||
#18 6.780 Downloading chardet-5.2.0-py3-none-any.whl.metadata (3.4 kB)
|
|
||||||
#18 6.878 Collecting python-nmap
|
|
||||||
#18 6.886 Downloading python-nmap-0.7.1.tar.gz (44 kB)
|
|
||||||
#18 6.937 Installing build dependencies: started
|
|
||||||
#18 8.245 Installing build dependencies: finished with status 'done'
|
|
||||||
#18 8.246 Getting requirements to build wheel: started
|
|
||||||
#18 8.411 Getting requirements to build wheel: finished with status 'done'
|
|
||||||
#18 8.412 Preparing metadata (pyproject.toml): started
|
|
||||||
#18 8.575 Preparing metadata (pyproject.toml): finished with status 'done'
|
|
||||||
#18 8.648 Collecting dnspython
|
|
||||||
#18 8.654 Downloading dnspython-2.8.0-py3-none-any.whl.metadata (5.7 kB)
|
|
||||||
#18 8.741 Collecting librouteros
|
|
||||||
#18 8.752 Downloading librouteros-3.4.1-py3-none-any.whl.metadata (1.6 kB)
|
|
||||||
#18 8.869 Collecting yattag
|
|
||||||
#18 8.881 Downloading yattag-1.16.1.tar.gz (29 kB)
|
|
||||||
#18 8.925 Installing build dependencies: started
|
|
||||||
#18 10.23 Installing build dependencies: finished with status 'done'
|
|
||||||
#18 10.23 Getting requirements to build wheel: started
|
|
||||||
#18 10.38 Getting requirements to build wheel: finished with status 'done'
|
|
||||||
#18 10.39 Preparing metadata (pyproject.toml): started
|
|
||||||
#18 10.55 Preparing metadata (pyproject.toml): finished with status 'done'
|
|
||||||
#18 10.60 Collecting Click>=6.0 (from openwrt-luci-rpc)
|
|
||||||
#18 10.60 Downloading click-8.3.0-py3-none-any.whl.metadata (2.6 kB)
|
|
||||||
#18 10.70 Collecting packaging>=19.1 (from openwrt-luci-rpc)
|
|
||||||
#18 10.71 Downloading packaging-25.0-py3-none-any.whl.metadata (3.3 kB)
|
|
||||||
#18 10.87 Collecting urllib3>=1.26.14 (from asusrouter)
|
|
||||||
#18 10.88 Downloading urllib3-2.5.0-py3-none-any.whl.metadata (6.5 kB)
|
|
||||||
#18 10.98 Collecting xmltodict>=0.12.0 (from asusrouter)
|
|
||||||
#18 10.98 Downloading xmltodict-1.0.2-py3-none-any.whl.metadata (15 kB)
|
|
||||||
#18 11.09 Collecting aiohappyeyeballs>=2.5.0 (from aiohttp)
|
|
||||||
#18 11.10 Downloading aiohappyeyeballs-2.6.1-py3-none-any.whl.metadata (5.9 kB)
|
|
||||||
#18 11.19 Collecting aiosignal>=1.4.0 (from aiohttp)
|
|
||||||
#18 11.20 Downloading aiosignal-1.4.0-py3-none-any.whl.metadata (3.7 kB)
|
|
||||||
#18 11.32 Collecting attrs>=17.3.0 (from aiohttp)
|
|
||||||
#18 11.33 Downloading attrs-25.3.0-py3-none-any.whl.metadata (10 kB)
|
|
||||||
#18 11.47 Collecting frozenlist>=1.1.1 (from aiohttp)
|
|
||||||
#18 11.47 Downloading frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (18 kB)
|
|
||||||
#18 11.76 Collecting multidict<7.0,>=4.5 (from aiohttp)
|
|
||||||
#18 11.77 Downloading multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (5.3 kB)
|
|
||||||
#18 11.87 Collecting propcache>=0.2.0 (from aiohttp)
|
|
||||||
#18 11.88 Downloading propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (12 kB)
|
|
||||||
#18 12.19 Collecting yarl<2.0,>=1.17.0 (from aiohttp)
|
|
||||||
#18 12.20 Downloading yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (73 kB)
|
|
||||||
#18 12.31 Collecting graphql-core<3.3,>=3.1 (from graphene)
|
|
||||||
#18 12.32 Downloading graphql_core-3.2.6-py3-none-any.whl.metadata (11 kB)
|
|
||||||
#18 12.41 Collecting graphql-relay<3.3,>=3.1 (from graphene)
|
|
||||||
#18 12.42 Downloading graphql_relay-3.2.0-py3-none-any.whl.metadata (12 kB)
|
|
||||||
#18 12.50 Collecting python-dateutil<3,>=2.7.0 (from graphene)
|
|
||||||
#18 12.51 Downloading python_dateutil-2.9.0.post0-py2.py3-none-any.whl.metadata (8.4 kB)
|
|
||||||
#18 12.61 Collecting typing-extensions<5,>=4.7.1 (from graphene)
|
|
||||||
#18 12.61 Downloading typing_extensions-4.15.0-py3-none-any.whl.metadata (3.3 kB)
|
|
||||||
#18 12.71 Collecting blinker>=1.9.0 (from flask)
|
|
||||||
#18 12.72 Downloading blinker-1.9.0-py3-none-any.whl.metadata (1.6 kB)
|
|
||||||
#18 12.84 Collecting itsdangerous>=2.2.0 (from flask)
|
|
||||||
#18 12.85 Downloading itsdangerous-2.2.0-py3-none-any.whl.metadata (1.9 kB)
|
|
||||||
#18 12.97 Collecting jinja2>=3.1.2 (from flask)
|
|
||||||
#18 12.98 Downloading jinja2-3.1.6-py3-none-any.whl.metadata (2.9 kB)
|
|
||||||
#18 13.15 Collecting markupsafe>=2.1.1 (from flask)
|
|
||||||
#18 13.15 Downloading MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (4.0 kB)
|
|
||||||
#18 13.28 Collecting werkzeug>=3.1.0 (from flask)
|
|
||||||
#18 13.29 Downloading werkzeug-3.1.3-py3-none-any.whl.metadata (3.7 kB)
|
|
||||||
#18 13.42 Collecting awesomeversion>=22.9.0 (from tplink-omada-client)
|
|
||||||
#18 13.42 Downloading awesomeversion-25.8.0-py3-none-any.whl.metadata (9.8 kB)
|
|
||||||
#18 13.59 Collecting charset_normalizer<4,>=2 (from requests)
|
|
||||||
#18 13.59 Downloading charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl.metadata (36 kB)
|
|
||||||
#18 13.77 Collecting idna<4,>=2.5 (from requests)
|
|
||||||
#18 13.78 Downloading idna-3.10-py3-none-any.whl.metadata (10 kB)
|
|
||||||
#18 13.94 Collecting certifi>=2017.4.17 (from requests)
|
|
||||||
#18 13.94 Downloading certifi-2025.8.3-py3-none-any.whl.metadata (2.4 kB)
|
|
||||||
#18 14.06 Collecting toml<0.11.0,>=0.10.2 (from librouteros)
|
|
||||||
#18 14.07 Downloading toml-0.10.2-py2.py3-none-any.whl.metadata (7.1 kB)
|
|
||||||
#18 14.25 Collecting six>=1.5 (from python-dateutil<3,>=2.7.0->graphene)
|
|
||||||
#18 14.26 Downloading six-1.17.0-py2.py3-none-any.whl.metadata (1.7 kB)
|
|
||||||
#18 14.33 Downloading openwrt_luci_rpc-1.1.17-py2.py3-none-any.whl (9.5 kB)
|
|
||||||
#18 14.37 Downloading asusrouter-1.21.0-py3-none-any.whl (131 kB)
|
|
||||||
#18 14.43 Downloading asyncio-4.0.0-py3-none-any.whl (5.6 kB)
|
|
||||||
#18 14.47 Downloading aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl (1.7 MB)
|
|
||||||
#18 14.67 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.7/1.7 MB 8.3 MB/s eta 0:00:00
|
|
||||||
#18 14.68 Downloading graphene-3.4.3-py2.py3-none-any.whl (114 kB)
|
|
||||||
#18 14.73 Downloading flask-3.1.2-py3-none-any.whl (103 kB)
|
|
||||||
#18 14.78 Downloading flask_cors-6.0.1-py3-none-any.whl (13 kB)
|
|
||||||
#18 14.84 Downloading unifi_sm_api-0.2.1-py3-none-any.whl (16 kB)
|
|
||||||
#18 14.88 Downloading tplink_omada_client-1.4.4-py3-none-any.whl (46 kB)
|
|
||||||
#18 14.93 Downloading wakeonlan-3.1.0-py3-none-any.whl (5.0 kB)
|
|
||||||
#18 14.99 Downloading pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl (2.3 MB)
|
|
||||||
#18 15.23 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.3/2.3 MB 8.9 MB/s eta 0:00:00
|
|
||||||
#18 15.24 Downloading requests-2.32.5-py3-none-any.whl (64 kB)
|
|
||||||
#18 15.30 Downloading paho_mqtt-2.1.0-py3-none-any.whl (67 kB)
|
|
||||||
#18 15.34 Downloading scapy-2.6.1-py3-none-any.whl (2.4 MB)
|
|
||||||
#18 15.62 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.4/2.4 MB 8.5 MB/s eta 0:00:00
|
|
||||||
#18 15.63 Downloading cron_converter-1.2.2-py3-none-any.whl (13 kB)
|
|
||||||
#18 15.67 Downloading pytz-2025.2-py2.py3-none-any.whl (509 kB)
|
|
||||||
#18 15.76 Downloading json2table-1.1.5-py2.py3-none-any.whl (8.7 kB)
|
|
||||||
#18 15.81 Downloading dhcp_leases-0.1.6-py3-none-any.whl (11 kB)
|
|
||||||
#18 15.86 Downloading pyunifi-2.21-py3-none-any.whl (11 kB)
|
|
||||||
#18 15.90 Downloading speedtest_cli-2.1.3-py2.py3-none-any.whl (23 kB)
|
|
||||||
#18 15.95 Downloading chardet-5.2.0-py3-none-any.whl (199 kB)
|
|
||||||
#18 16.01 Downloading dnspython-2.8.0-py3-none-any.whl (331 kB)
|
|
||||||
#18 16.10 Downloading librouteros-3.4.1-py3-none-any.whl (16 kB)
|
|
||||||
#18 16.14 Downloading aiohappyeyeballs-2.6.1-py3-none-any.whl (15 kB)
|
|
||||||
#18 16.20 Downloading aiosignal-1.4.0-py3-none-any.whl (7.5 kB)
|
|
||||||
#18 16.24 Downloading attrs-25.3.0-py3-none-any.whl (63 kB)
|
|
||||||
#18 16.30 Downloading awesomeversion-25.8.0-py3-none-any.whl (15 kB)
|
|
||||||
#18 16.34 Downloading blinker-1.9.0-py3-none-any.whl (8.5 kB)
|
|
||||||
#18 16.39 Downloading certifi-2025.8.3-py3-none-any.whl (161 kB)
|
|
||||||
#18 16.45 Downloading charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl (153 kB)
|
|
||||||
#18 16.50 Downloading click-8.3.0-py3-none-any.whl (107 kB)
|
|
||||||
#18 16.55 Downloading frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl (237 kB)
|
|
||||||
#18 16.62 Downloading graphql_core-3.2.6-py3-none-any.whl (203 kB)
|
|
||||||
#18 16.69 Downloading graphql_relay-3.2.0-py3-none-any.whl (16 kB)
|
|
||||||
#18 16.73 Downloading idna-3.10-py3-none-any.whl (70 kB)
|
|
||||||
#18 16.79 Downloading itsdangerous-2.2.0-py3-none-any.whl (16 kB)
|
|
||||||
#18 16.84 Downloading jinja2-3.1.6-py3-none-any.whl (134 kB)
|
|
||||||
#18 16.96 Downloading MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl (23 kB)
|
|
||||||
#18 17.02 Downloading multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl (251 kB)
|
|
||||||
#18 17.09 Downloading packaging-25.0-py3-none-any.whl (66 kB)
|
|
||||||
#18 17.14 Downloading propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl (222 kB)
|
|
||||||
#18 17.21 Downloading python_dateutil-2.9.0.post0-py2.py3-none-any.whl (229 kB)
|
|
||||||
#18 17.28 Downloading toml-0.10.2-py2.py3-none-any.whl (16 kB)
|
|
||||||
#18 17.33 Downloading typing_extensions-4.15.0-py3-none-any.whl (44 kB)
|
|
||||||
#18 17.39 Downloading urllib3-2.5.0-py3-none-any.whl (129 kB)
|
|
||||||
#18 17.44 Downloading werkzeug-3.1.3-py3-none-any.whl (224 kB)
|
|
||||||
#18 17.51 Downloading xmltodict-1.0.2-py3-none-any.whl (13 kB)
|
|
||||||
#18 17.56 Downloading yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl (374 kB)
|
|
||||||
#18 17.65 Downloading six-1.17.0-py2.py3-none-any.whl (11 kB)
|
|
||||||
#18 17.77 Building wheels for collected packages: python-nmap, yattag, aiofreepybox
|
|
||||||
#18 17.77 Building wheel for python-nmap (pyproject.toml): started
|
|
||||||
#18 17.95 Building wheel for python-nmap (pyproject.toml): finished with status 'done'
|
|
||||||
#18 17.96 Created wheel for python-nmap: filename=python_nmap-0.7.1-py2.py3-none-any.whl size=20679 sha256=ecd9b14109651cfaa5bf035f90076b9442985cc254fa5f8a49868fc896e86edb
|
|
||||||
#18 17.96 Stored in directory: /root/.cache/pip/wheels/06/fc/d4/0957e1d9942e696188208772ea0abf909fe6eb3d9dff6e5a9e
|
|
||||||
#18 17.96 Building wheel for yattag (pyproject.toml): started
|
|
||||||
#18 18.14 Building wheel for yattag (pyproject.toml): finished with status 'done'
|
|
||||||
#18 18.14 Created wheel for yattag: filename=yattag-1.16.1-py3-none-any.whl size=15930 sha256=2135fc2034a3847c81eb6a0d7b85608e8272339fa5c1961f87b02dfe6d74d0ad
|
|
||||||
#18 18.14 Stored in directory: /root/.cache/pip/wheels/d2/2f/52/049ff4f7c8c9c932b2ece7ec800d7facf2a141ac5ab0ce7e51
|
|
||||||
#18 18.15 Building wheel for aiofreepybox (pyproject.toml): started
|
|
||||||
#18 18.36 Building wheel for aiofreepybox (pyproject.toml): finished with status 'done'
|
|
||||||
#18 18.36 Created wheel for aiofreepybox: filename=aiofreepybox-6.0.0-py3-none-any.whl size=60051 sha256=dbdee5350b10b6550ede50bc779381b7f39f1e5d5da889f2ee98cb5a869d3425
|
|
||||||
#18 18.36 Stored in directory: /tmp/pip-ephem-wheel-cache-93bgc4e2/wheels/3c/d3/ae/fb97a84a29a5fbe8517de58d67e66586505440af35981e0dd3
|
|
||||||
#18 18.36 Successfully built python-nmap yattag aiofreepybox
|
|
||||||
#18 18.45 Installing collected packages: yattag, speedtest-cli, pytz, python-nmap, json2table, dhcp-leases, xmltodict, wakeonlan, urllib3, typing-extensions, toml, six, scapy, pycryptodome, propcache, paho-mqtt, packaging, multidict, markupsafe, itsdangerous, idna, graphql-core, frozenlist, dnspython, Click, charset_normalizer, chardet, certifi, blinker, awesomeversion, attrs, asyncio, aiohappyeyeballs, yarl, werkzeug, requests, python-dateutil, librouteros, jinja2, graphql-relay, aiosignal, unifi-sm-api, pyunifi, openwrt-luci-rpc, graphene, flask, cron-converter, aiohttp, tplink-omada-client, flask-cors, asusrouter, aiofreepybox
|
|
||||||
#18 24.35 Successfully installed Click-8.3.0 aiofreepybox-6.0.0 aiohappyeyeballs-2.6.1 aiohttp-3.12.15 aiosignal-1.4.0 asusrouter-1.21.0 asyncio-4.0.0 attrs-25.3.0 awesomeversion-25.8.0 blinker-1.9.0 certifi-2025.8.3 chardet-5.2.0 charset_normalizer-3.4.3 cron-converter-1.2.2 dhcp-leases-0.1.6 dnspython-2.8.0 flask-3.1.2 flask-cors-6.0.1 frozenlist-1.7.0 graphene-3.4.3 graphql-core-3.2.6 graphql-relay-3.2.0 idna-3.10 itsdangerous-2.2.0 jinja2-3.1.6 json2table-1.1.5 librouteros-3.4.1 markupsafe-3.0.2 multidict-6.6.4 openwrt-luci-rpc-1.1.17 packaging-25.0 paho-mqtt-2.1.0 propcache-0.3.2 pycryptodome-3.23.0 python-dateutil-2.9.0.post0 python-nmap-0.7.1 pytz-2025.2 pyunifi-2.21 requests-2.32.5 scapy-2.6.1 six-1.17.0 speedtest-cli-2.1.3 toml-0.10.2 tplink-omada-client-1.4.4 typing-extensions-4.15.0 unifi-sm-api-0.2.1 urllib3-2.5.0 wakeonlan-3.1.0 werkzeug-3.1.3 xmltodict-1.0.2 yarl-1.20.1 yattag-1.16.1
|
|
||||||
#18 24.47
|
|
||||||
#18 24.47 [notice] A new release of pip is available: 25.0.1 -> 25.2
|
|
||||||
#18 24.47 [notice] To update, run: pip install --upgrade pip
|
|
||||||
#18 DONE 25.1s
|
|
||||||
|
|
||||||
#19 [builder 14/15] RUN bash -c "find /app -type d -exec chmod 750 {} \;" && bash -c "find /app -type f -exec chmod 640 {} \;" && bash -c "find /app -type f \( -name '*.sh' -o -name '*.py' -o -name 'speedtest-cli' \) -exec chmod 750 {} \;"
|
#19 [runner 3/11] COPY --chown=netalertx:netalertx install/production-filesystem/ /
|
||||||
#19 DONE 11.9s
|
#19 CACHED
|
||||||
|
|
||||||
#20 [builder 15/15] COPY install/freebox_certificate.pem /opt/venv/lib/python3.12/site-packages/aiofreepybox/freebox_certificates.pem
|
#20 [hardened 2/2] RUN chown -R readonly:readonly /app/back /app/front /app/server /services /services/config /entrypoint.d && chmod -R 004 /app/back /app/front /app/server /services /services/config /entrypoint.d && find /app/back /app/front /app/server /services /services/config /entrypoint.d -type d -exec chmod 005 {} + && install -d -o netalertx -g netalertx -m 0777 /data /data/config /data/db /tmp/api /tmp/log /tmp/log/plugins /tmp/run /tmp/run/tmp /tmp/run/logs /tmp/nginx/active-config && chown readonly:readonly /entrypoint.sh /root-entrypoint.sh /opt /opt/venv && chmod 005 /entrypoint.sh /root-entrypoint.sh /services/*.sh /services/scripts/* /entrypoint.d/* /app /opt /opt/venv && rm -f "/data/config/app.conf" "/data/db/app.db" "/data/db/app.db-shm" "/data/db/app.db-wal" || true && apk del apk-tools && rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers /lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root /srv /media && printf '#!/bin/sh\n"$@"\n' > /usr/bin/sudo && chmod +x /usr/bin/sudo
|
||||||
#20 DONE 0.4s
|
#20 CACHED
|
||||||
|
|
||||||
#21 [runner 2/14] COPY --from=builder /opt/venv /opt/venv
|
#21 exporting to image
|
||||||
#21 DONE 0.8s
|
#21 exporting layers done
|
||||||
|
#21 writing image sha256:7aac94268b770de42da767c06b8e9fecaeabf7ce1277cec1c83092484debd4c3 0.0s done
|
||||||
#22 [runner 3/14] COPY --from=builder /usr/sbin/usermod /usr/sbin/groupmod /usr/sbin/
|
#21 naming to docker.io/library/netalertx-test 0.0s done
|
||||||
#22 DONE 0.4s
|
#21 DONE 0.1s
|
||||||
|
|
||||||
#23 [runner 4/14] RUN apk update --no-cache && apk add --no-cache bash libbsd zip lsblk gettext-envsubst sudo mtr tzdata s6-overlay && apk add --no-cache curl arp-scan iproute2 iproute2-ss nmap nmap-scripts traceroute nbtscan avahi avahi-tools openrc dbus net-tools net-snmp-tools bind-tools awake ca-certificates && apk add --no-cache sqlite php83 php83-fpm php83-cgi php83-curl php83-sqlite3 php83-session && apk add --no-cache python3 nginx && ln -s /usr/bin/awake /usr/bin/wakeonlan && bash -c "install -d -m 750 -o nginx -g www-data /app /app" && rm -f /etc/nginx/http.d/default.conf
|
|
||||||
#23 0.487 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 0.696 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 1.156 v3.22.1-472-ga67443520d6 [https://dl-cdn.alpinelinux.org/alpine/v3.22/main]
|
|
||||||
#23 1.156 v3.22.1-473-gcd551a4e006 [https://dl-cdn.alpinelinux.org/alpine/v3.22/community]
|
|
||||||
#23 1.156 OK: 26326 distinct packages available
|
|
||||||
#23 1.195 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 1.276 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 1.568 (1/38) Installing ncurses-terminfo-base (6.5_p20250503-r0)
|
|
||||||
#23 1.580 (2/38) Installing libncursesw (6.5_p20250503-r0)
|
|
||||||
#23 1.629 (3/38) Installing readline (8.2.13-r1)
|
|
||||||
#23 1.659 (4/38) Installing bash (5.2.37-r0)
|
|
||||||
#23 1.723 Executing bash-5.2.37-r0.post-install
|
|
||||||
#23 1.740 (5/38) Installing libintl (0.24.1-r0)
|
|
||||||
#23 1.749 (6/38) Installing gettext-envsubst (0.24.1-r0)
|
|
||||||
#23 1.775 (7/38) Installing libmd (1.1.0-r0)
|
|
||||||
#23 1.782 (8/38) Installing libbsd (0.12.2-r0)
|
|
||||||
#23 1.807 (9/38) Installing libeconf (0.6.3-r0)
|
|
||||||
#23 1.812 (10/38) Installing libblkid (2.41-r9)
|
|
||||||
#23 1.831 (11/38) Installing libmount (2.41-r9)
|
|
||||||
#23 1.857 (12/38) Installing libsmartcols (2.41-r9)
|
|
||||||
#23 1.872 (13/38) Installing lsblk (2.41-r9)
|
|
||||||
#23 1.886 (14/38) Installing libcap2 (2.76-r0)
|
|
||||||
#23 1.897 (15/38) Installing jansson (2.14.1-r0)
|
|
||||||
#23 1.910 (16/38) Installing mtr (0.96-r0)
|
|
||||||
#23 1.948 (17/38) Installing skalibs-libs (2.14.4.0-r0)
|
|
||||||
#23 1.966 (18/38) Installing execline-libs (2.9.7.0-r0)
|
|
||||||
#23 1.974 (19/38) Installing execline (2.9.7.0-r0)
|
|
||||||
#23 1.996 Executing execline-2.9.7.0-r0.post-install
|
|
||||||
#23 2.004 (20/38) Installing s6-ipcserver (2.13.2.0-r0)
|
|
||||||
#23 2.010 (21/38) Installing s6-libs (2.13.2.0-r0)
|
|
||||||
#23 2.016 (22/38) Installing s6 (2.13.2.0-r0)
|
|
||||||
#23 2.033 Executing s6-2.13.2.0-r0.pre-install
|
|
||||||
#23 2.159 (23/38) Installing s6-rc-libs (0.5.6.0-r0)
|
|
||||||
#23 2.164 (24/38) Installing s6-rc (0.5.6.0-r0)
|
|
||||||
#23 2.175 (25/38) Installing s6-linux-init (1.1.3.0-r0)
|
|
||||||
#23 2.185 (26/38) Installing s6-portable-utils (2.3.1.0-r0)
|
|
||||||
#23 2.193 (27/38) Installing s6-linux-utils (2.6.3.0-r0)
|
|
||||||
#23 2.200 (28/38) Installing s6-dns-libs (2.4.1.0-r0)
|
|
||||||
#23 2.208 (29/38) Installing s6-dns (2.4.1.0-r0)
|
|
||||||
#23 2.222 (30/38) Installing bearssl-libs (0.6_git20241009-r0)
|
|
||||||
#23 2.254 (31/38) Installing s6-networking-libs (2.7.1.0-r0)
|
|
||||||
#23 2.264 (32/38) Installing s6-networking (2.7.1.0-r0)
|
|
||||||
#23 2.286 (33/38) Installing s6-overlay-helpers (0.1.2.0-r0)
|
|
||||||
#23 2.355 (34/38) Installing s6-overlay (3.2.0.3-r0)
|
|
||||||
#23 2.380 (35/38) Installing sudo (1.9.17_p2-r0)
|
|
||||||
#23 2.511 (36/38) Installing tzdata (2025b-r0)
|
|
||||||
#23 2.641 (37/38) Installing unzip (6.0-r15)
|
|
||||||
#23 2.659 (38/38) Installing zip (3.0-r13)
|
|
||||||
#23 2.694 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#23 2.725 OK: 16 MiB in 54 packages
|
|
||||||
#23 2.778 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 2.918 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 3.218 (1/77) Installing libpcap (1.10.5-r1)
|
|
||||||
#23 3.234 (2/77) Installing arp-scan (1.10.0-r2)
|
|
||||||
#23 3.289 (3/77) Installing dbus-libs (1.16.2-r1)
|
|
||||||
#23 3.307 (4/77) Installing avahi-libs (0.8-r21)
|
|
||||||
#23 3.315 (5/77) Installing libdaemon (0.14-r6)
|
|
||||||
#23 3.322 (6/77) Installing libevent (2.1.12-r8)
|
|
||||||
#23 3.355 (7/77) Installing libexpat (2.7.2-r0)
|
|
||||||
#23 3.368 (8/77) Installing avahi (0.8-r21)
|
|
||||||
#23 3.387 Executing avahi-0.8-r21.pre-install
|
|
||||||
#23 3.465 (9/77) Installing gdbm (1.24-r0)
|
|
||||||
#23 3.477 (10/77) Installing avahi-tools (0.8-r21)
|
|
||||||
#23 3.483 (11/77) Installing libbz2 (1.0.8-r6)
|
|
||||||
#23 3.490 (12/77) Installing libffi (3.4.8-r0)
|
|
||||||
#23 3.496 (13/77) Installing xz-libs (5.8.1-r0)
|
|
||||||
#23 3.517 (14/77) Installing libgcc (14.2.0-r6)
|
|
||||||
#23 3.529 (15/77) Installing libstdc++ (14.2.0-r6)
|
|
||||||
#23 3.613 (16/77) Installing mpdecimal (4.0.1-r0)
|
|
||||||
#23 3.628 (17/77) Installing libpanelw (6.5_p20250503-r0)
|
|
||||||
#23 3.634 (18/77) Installing sqlite-libs (3.49.2-r1)
|
|
||||||
#23 3.783 (19/77) Installing python3 (3.12.11-r0)
|
|
||||||
#23 4.494 (20/77) Installing python3-pycache-pyc0 (3.12.11-r0)
|
|
||||||
#23 4.915 (21/77) Installing pyc (3.12.11-r0)
|
|
||||||
#23 4.915 (22/77) Installing py3-awake-pyc (1.0-r12)
|
|
||||||
#23 4.922 (23/77) Installing python3-pyc (3.12.11-r0)
|
|
||||||
#23 4.922 (24/77) Installing py3-awake (1.0-r12)
|
|
||||||
#23 4.928 (25/77) Installing awake (1.0-r12)
|
|
||||||
#23 4.932 (26/77) Installing fstrm (0.6.1-r4)
|
|
||||||
#23 4.940 (27/77) Installing krb5-conf (1.0-r2)
|
|
||||||
#23 5.017 (28/77) Installing libcom_err (1.47.2-r2)
|
|
||||||
#23 5.026 (29/77) Installing keyutils-libs (1.6.3-r4)
|
|
||||||
#23 5.033 (30/77) Installing libverto (0.3.2-r2)
|
|
||||||
#23 5.039 (31/77) Installing krb5-libs (1.21.3-r0)
|
|
||||||
#23 5.115 (32/77) Installing json-c (0.18-r1)
|
|
||||||
#23 5.123 (33/77) Installing nghttp2-libs (1.65.0-r0)
|
|
||||||
#23 5.136 (34/77) Installing protobuf-c (1.5.2-r0)
|
|
||||||
#23 5.142 (35/77) Installing userspace-rcu (0.15.2-r0)
|
|
||||||
#23 5.161 (36/77) Installing libuv (1.51.0-r0)
|
|
||||||
#23 5.178 (37/77) Installing libxml2 (2.13.8-r0)
|
|
||||||
#23 5.232 (38/77) Installing bind-libs (9.20.13-r0)
|
|
||||||
#23 5.355 (39/77) Installing bind-tools (9.20.13-r0)
|
|
||||||
#23 5.395 (40/77) Installing ca-certificates (20250619-r0)
|
|
||||||
#23 5.518 (41/77) Installing brotli-libs (1.1.0-r2)
|
|
||||||
#23 5.559 (42/77) Installing c-ares (1.34.5-r0)
|
|
||||||
#23 5.573 (43/77) Installing libunistring (1.3-r0)
|
|
||||||
#23 5.645 (44/77) Installing libidn2 (2.3.7-r0)
|
|
||||||
#23 5.664 (45/77) Installing libpsl (0.21.5-r3)
|
|
||||||
#23 5.676 (46/77) Installing zstd-libs (1.5.7-r0)
|
|
||||||
#23 5.720 (47/77) Installing libcurl (8.14.1-r1)
|
|
||||||
#23 5.753 (48/77) Installing curl (8.14.1-r1)
|
|
||||||
#23 5.778 (49/77) Installing dbus (1.16.2-r1)
|
|
||||||
#23 5.796 Executing dbus-1.16.2-r1.pre-install
|
|
||||||
#23 5.869 Executing dbus-1.16.2-r1.post-install
|
|
||||||
#23 5.887 (50/77) Installing dbus-daemon-launch-helper (1.16.2-r1)
|
|
||||||
#23 5.896 (51/77) Installing libelf (0.193-r0)
|
|
||||||
#23 5.908 (52/77) Installing libmnl (1.0.5-r2)
|
|
||||||
#23 5.915 (53/77) Installing iproute2-minimal (6.15.0-r0)
|
|
||||||
#23 5.954 (54/77) Installing libxtables (1.8.11-r1)
|
|
||||||
#23 5.963 (55/77) Installing iproute2-tc (6.15.0-r0)
|
|
||||||
#23 6.001 (56/77) Installing iproute2-ss (6.15.0-r0)
|
|
||||||
#23 6.014 (57/77) Installing iproute2 (6.15.0-r0)
|
|
||||||
#23 6.042 Executing iproute2-6.15.0-r0.post-install
|
|
||||||
#23 6.047 (58/77) Installing nbtscan (1.7.2-r0)
|
|
||||||
#23 6.053 (59/77) Installing net-snmp-libs (5.9.4-r1)
|
|
||||||
#23 6.112 (60/77) Installing net-snmp-agent-libs (5.9.4-r1)
|
|
||||||
#23 6.179 (61/77) Installing net-snmp-tools (5.9.4-r1)
|
|
||||||
#23 6.205 (62/77) Installing mii-tool (2.10-r3)
|
|
||||||
#23 6.211 (63/77) Installing net-tools (2.10-r3)
|
|
||||||
#23 6.235 (64/77) Installing lua5.4-libs (5.4.7-r0)
|
|
||||||
#23 6.258 (65/77) Installing libssh2 (1.11.1-r0)
|
|
||||||
#23 6.279 (66/77) Installing nmap (7.97-r0)
|
|
||||||
#23 6.524 (67/77) Installing nmap-nselibs (7.97-r0)
|
|
||||||
#23 6.729 (68/77) Installing nmap-scripts (7.97-r0)
|
|
||||||
#23 6.842 (69/77) Installing bridge (1.5-r5)
|
|
||||||
#23 6.904 (70/77) Installing ifupdown-ng (0.12.1-r7)
|
|
||||||
#23 6.915 (71/77) Installing ifupdown-ng-iproute2 (0.12.1-r7)
|
|
||||||
#23 6.920 (72/77) Installing openrc-user (0.62.6-r0)
|
|
||||||
#23 6.924 (73/77) Installing openrc (0.62.6-r0)
|
|
||||||
#23 7.013 Executing openrc-0.62.6-r0.post-install
|
|
||||||
#23 7.016 (74/77) Installing avahi-openrc (0.8-r21)
|
|
||||||
#23 7.021 (75/77) Installing dbus-openrc (1.16.2-r1)
|
|
||||||
#23 7.026 (76/77) Installing s6-openrc (2.13.2.0-r0)
|
|
||||||
#23 7.032 (77/77) Installing traceroute (2.1.6-r0)
|
|
||||||
#23 7.040 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#23 7.042 Executing ca-certificates-20250619-r0.trigger
|
|
||||||
#23 7.101 Executing dbus-1.16.2-r1.trigger
|
|
||||||
#23 7.104 OK: 102 MiB in 131 packages
|
|
||||||
#23 7.156 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 7.243 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 7.543 (1/12) Installing php83-common (8.3.24-r0)
|
|
||||||
#23 7.551 (2/12) Installing argon2-libs (20190702-r5)
|
|
||||||
#23 7.557 (3/12) Installing libedit (20250104.3.1-r1)
|
|
||||||
#23 7.568 (4/12) Installing pcre2 (10.43-r1)
|
|
||||||
#23 7.600 (5/12) Installing php83 (8.3.24-r0)
|
|
||||||
#23 7.777 (6/12) Installing php83-cgi (8.3.24-r0)
|
|
||||||
#23 7.953 (7/12) Installing php83-curl (8.3.24-r0)
|
|
||||||
#23 7.968 (8/12) Installing acl-libs (2.3.2-r1)
|
|
||||||
#23 7.975 (9/12) Installing php83-fpm (8.3.24-r0)
|
|
||||||
#23 8.193 (10/12) Installing php83-session (8.3.24-r0)
|
|
||||||
#23 8.204 (11/12) Installing php83-sqlite3 (8.3.24-r0)
|
|
||||||
#23 8.213 (12/12) Installing sqlite (3.49.2-r1)
|
|
||||||
#23 8.309 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#23 8.317 OK: 129 MiB in 143 packages
|
|
||||||
#23 8.369 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/main/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 8.449 fetch https://dl-cdn.alpinelinux.org/alpine/v3.22/community/x86_64/APKINDEX.tar.gz
|
|
||||||
#23 8.747 (1/2) Installing nginx (1.28.0-r3)
|
|
||||||
#23 8.766 Executing nginx-1.28.0-r3.pre-install
|
|
||||||
#23 8.863 Executing nginx-1.28.0-r3.post-install
|
|
||||||
#23 8.865 (2/2) Installing nginx-openrc (1.28.0-r3)
|
|
||||||
#23 8.870 Executing busybox-1.37.0-r18.trigger
|
|
||||||
#23 8.873 OK: 130 MiB in 145 packages
|
|
||||||
#23 DONE 9.5s
|
|
||||||
|
|
||||||
#24 [runner 5/14] COPY --from=builder --chown=nginx:www-data /app/ /app/
|
|
||||||
#24 DONE 0.5s
|
|
||||||
|
|
||||||
#25 [runner 6/14] RUN mkdir -p /app/config /app/db /app/log/plugins
|
|
||||||
#25 DONE 0.5s
|
|
||||||
|
|
||||||
#26 [runner 7/14] COPY --chmod=600 --chown=root:root install/crontab /etc/crontabs/root
|
|
||||||
#26 DONE 0.3s
|
|
||||||
|
|
||||||
#27 [runner 8/14] COPY --chmod=755 dockerfiles/healthcheck.sh /usr/local/bin/healthcheck.sh
|
|
||||||
#27 DONE 0.3s
|
|
||||||
|
|
||||||
#28 [runner 9/14] RUN touch /app/log/app.log && touch /app/log/execution_queue.log && touch /app/log/app_front.log && touch /app/log/app.php_errors.log && touch /app/log/stderr.log && touch /app/log/stdout.log && touch /app/log/db_is_locked.log && touch /app/log/IP_changes.log && touch /app/log/report_output.txt && touch /app/log/report_output.html && touch /app/log/report_output.json && touch /app/api/user_notifications.json
|
|
||||||
#28 DONE 0.6s
|
|
||||||
|
|
||||||
#29 [runner 10/14] COPY dockerfiles /app/dockerfiles
|
|
||||||
#29 DONE 0.3s
|
|
||||||
|
|
||||||
#30 [runner 11/14] RUN chmod +x /app/dockerfiles/*.sh
|
|
||||||
#30 DONE 0.8s
|
|
||||||
|
|
||||||
#31 [runner 12/14] RUN /app/dockerfiles/init-nginx.sh && /app/dockerfiles/init-php-fpm.sh && /app/dockerfiles/init-crond.sh && /app/dockerfiles/init-backend.sh
|
|
||||||
#31 0.417 Initializing nginx...
|
|
||||||
#31 0.417 Setting webserver to address (0.0.0.0) and port (20211)
|
|
||||||
#31 0.418 /app/dockerfiles/init-nginx.sh: line 5: /app/install/netalertx.template.conf: No such file or directory
|
|
||||||
#31 0.611 nginx initialized.
|
|
||||||
#31 0.612 Initializing php-fpm...
|
|
||||||
#31 0.654 php-fpm initialized.
|
|
||||||
#31 0.655 Initializing crond...
|
|
||||||
#31 0.689 crond initialized.
|
|
||||||
#31 0.690 Initializing backend...
|
|
||||||
#31 12.19 Backend initialized.
|
|
||||||
#31 DONE 12.3s
|
|
||||||
|
|
||||||
#32 [runner 13/14] RUN rm -rf /app/dockerfiles
|
|
||||||
#32 DONE 0.6s
|
|
||||||
|
|
||||||
#33 [runner 14/14] RUN date +%s > /app/front/buildtimestamp.txt
|
|
||||||
#33 DONE 0.6s
|
|
||||||
|
|
||||||
#34 exporting to image
|
|
||||||
#34 exporting layers
|
|
||||||
#34 exporting layers 2.4s done
|
|
||||||
#34 writing image sha256:0afcbc41473de559eff0dd93250595494fe4d8ea620861e9e90d50a248fcefda 0.0s done
|
|
||||||
#34 naming to docker.io/library/netalertx 0.0s done
|
|
||||||
#34 DONE 2.5s
|
|
||||||
|
|||||||
56
docs/ADVISORY_EYES_ON_GLASS.md
Normal file
56
docs/ADVISORY_EYES_ON_GLASS.md
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
### Build an MSP Wallboard for Network Monitoring
|
||||||
|
|
||||||
|
For Managed Service Providers (MSPs) and Network Operations Centers (NOC), "Eyes on Glass" monitoring requires a UI that is both self-healing (auto-refreshing) and focused only on critical data. By leveraging the **UI Settings Plugin**, you can transform NetAlertX from a management tool into a dedicated live monitor.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 1. Configure Auto-Refresh for Live Monitoring
|
||||||
|
|
||||||
|
Static dashboards are the enemy of real-time response. NetAlertX allows you to force the UI to pull fresh data without manual page reloads.
|
||||||
|
|
||||||
|
* **Setting:** Locate the `UI_REFRESH` (or similar "Auto-refresh UI") setting within the **UI Settings plugin**.
|
||||||
|
* **Optimal Interval:** Set this between **60 to 120 seconds**.
|
||||||
|
* *Note:* Refreshing too frequently (e.g., <30s) on large networks can lead to high browser and server CPU usage.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### 2. Streamlining the Dashboard (MSP Mode)
|
||||||
|
|
||||||
|
An MSP's focus is on what is *broken*, not what is working. Hide the noise to increase reaction speed.
|
||||||
|
|
||||||
|
* **Hide Unnecessary Blocks:** Under UI Settings, disable dashboard blocks that don't provide immediate utility, such as **Online presence** or **Tiles**.
|
||||||
|
* **Hide virtual connections:** You can specify which relationships should be hidden from the main view, removing any non-essential virtual devices from your views.
|
||||||
|
* **Browser Full-Screen:** Use the built-in "Full Screen" toggle in the top bar to remove browser chrome (URL bars/tabs) for a cleaner "Wallboard" look.
|
||||||
|
|
||||||
|
### 3. Creating Custom NOC Views
|
||||||
|
|
||||||
|
Use the UI Filters in tandem with UI Settings to create custom views.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
| Feature | NOC/MSP Application |
|
||||||
|
| --- | --- |
|
||||||
|
| **Site-Specific Nodes** | Filter the view by a specific "Sync Node" or "Location" filter to monitor a single client site. |
|
||||||
|
| **Filter by Criticality** | Filter devices where `Group == "Infrastructure"` or `"Server"`. (depending on your predefined values) |
|
||||||
|
| **Predefined "Down" View** | Bookmark the URL with the `/devices.php#down` path to ensure the dashboard always loads into an "Alert Only" mode. |
|
||||||
|
|
||||||
|
### 4. Browser & Cache Stability
|
||||||
|
|
||||||
|
Because the UI is a web application, long-running sessions can occasionally experience cache drift.
|
||||||
|
|
||||||
|
* **Cache Refresh:** If you notice the "Show # Entries" resetting or icons failing to load after days of uptime, use the **Reload** icon in the application header (not the browser refresh) to clear the internal app cache.
|
||||||
|
* **Dedicated Hardware:** For 24/7 monitoring, use a dedicated thin client or Raspberry Pi running in "Kiosk Mode" to prevent OS-level popups from obscuring the dashboard.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> [NetAlertX - Detailed Dashboard Guide](https://www.youtube.com/watch?v=umh1c_40HW8)
|
||||||
|
> This video provides a visual walkthrough of the NetAlertX dashboard features, including how to map and visualize devices which is crucial for setting up a clear "Eyes on Glass" monitoring environment.
|
||||||
|
|
||||||
|
### Summary Checklist
|
||||||
|
|
||||||
|
* [ ] **Automate Refresh:** Set `UI_REFRESH` to **60-120s** in UI Settings to ensure the dashboard stays current without manual intervention.
|
||||||
|
* [ ] **Filter for Criticality:** Bookmark the **`/devices.php#down`** view to instantly focus on offline assets rather than the entire inventory.
|
||||||
|
* [ ] **Remove UI Noise:** Use UI Settings to hide non-essential dashboard blocks (e.g., **Tiles** or remove **Virtual Connections** devices) to maximize screen real estate for alerts.
|
||||||
|
* [ ] **Segment by Site:** Use **Location** or **Sync Node** filters to create dedicated views for specific client networks or physical branches.
|
||||||
|
* [ ] **Ensure Stability:** Run on a dedicated "Kiosk" browser and use the internal **Reload icon** occasionally to maintain a clean application cache.
|
||||||
121
docs/ADVISORY_MULTI_NETWORK.md
Normal file
121
docs/ADVISORY_MULTI_NETWORK.md
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
## ADVISORY: Best Practices for Monitoring Multiple Networks with NetAlertX
|
||||||
|
|
||||||
|
### 1. Define Monitoring Scope & Architecture
|
||||||
|
|
||||||
|
Effective multi-network monitoring starts with understanding how NetAlertX "sees" your traffic.
|
||||||
|
|
||||||
|
* **A. Understand Network Accessibility:** Local ARP-based scanning (**ARPSCAN**) only discovers devices on directly accessible subnets due to Layer 2 limitations. It cannot traverse VPNs or routed borders without specific configuration.
|
||||||
|
* **B. Plan Subnet & Scan Interfaces:** Explicitly configure each accessible segment in `SCAN_SUBNETS` with the corresponding interfaces.
|
||||||
|
* **C. Remote & Inaccessible Networks:** For networks unreachable via ARP, use these strategies:
|
||||||
|
* **Alternate Plugins:** Supplement discovery with [SNMPDSC](SNMPDSC) or [DHCP lease imports](https://docs.netalertx.com/PLUGINS/?h=DHCPLSS#available-plugins).
|
||||||
|
* **Centralized Multi-Tenant Management using Sync Nodes:** Run secondary NetAlertX instances on isolated networks and aggregate data using the **SYNC plugin**.
|
||||||
|
* **Manual Entry:** For static assets where only ICMP (ping) status is needed.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Explore the [remote networks](./REMOTE_NETWORKS.md) documentation for more details on how to set up the approaches mentioned above.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. Automating IT Asset Inventory with Workflows
|
||||||
|
|
||||||
|
[Workflows](./WORKFLOWS.md) are the "engine" of NetAlertX, reducing manual overhead as your device list grows.
|
||||||
|
|
||||||
|
* **A. Logical Ownership & VLAN Tagging:** Create a workflow triggered on **Device Creation** to:
|
||||||
|
1. Inspect the IP/Subnet.
|
||||||
|
2. Set `devVlan` or `devOwner` custom fields automatically.
|
||||||
|
|
||||||
|
|
||||||
|
* **B. Auto-Grouping:** Use conditional logic to categorize devices.
|
||||||
|
* *Example:* If `devLastIP == 10.10.20.*`, then `Set devLocation = "BranchOffice"`.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "Assign Location - BranchOffice",
|
||||||
|
"trigger": {
|
||||||
|
"object_type": "Devices",
|
||||||
|
"event_type": "update"
|
||||||
|
},
|
||||||
|
"conditions": [
|
||||||
|
{
|
||||||
|
"logic": "AND",
|
||||||
|
"conditions": [
|
||||||
|
{
|
||||||
|
"field": "devLastIP",
|
||||||
|
"operator": "contains",
|
||||||
|
"value": "10.10.20."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"actions": [
|
||||||
|
{
|
||||||
|
"type": "update_field",
|
||||||
|
"field": "devLocation",
|
||||||
|
"value": "BranchOffice"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
* **C. Sync Node Tracking:** When using multiple instances, ensure all sync hub nodes have a descriptive `SYNC_node_name` to distinguish between sites.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Always test new workflows in a "Staging" instance. A misconfigured workflow can trigger thousands of unintended updates across your database.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. Notification Strategy: Low Noise, High Signal
|
||||||
|
|
||||||
|
A multi-network environment can generate significant "alert fatigue." Use a layered filtering approach.
|
||||||
|
|
||||||
|
| Level | Strategy | Recommended Action |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| **Device** | Silence Flapping | Use "Skip repeated notifications" for unstable IoT devices. |
|
||||||
|
| **Plugin** | Tune Watchers | Only enable `_WATCH` on reliable plugins (e.g., ICMP/SNMP). |
|
||||||
|
| **Global** | Filter Sections | Limit `NTFPRCS_INCLUDED_SECTIONS` to `new_devices` and `down_devices`. |
|
||||||
|
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> **Ignore Rules:** Maintain strict **Ignored MAC** (`NEWDEV_ignored_MACs`) and **Ignored IP** (`NEWDEV_ignored_IPs`) lists for guest networks or broadcast scanners to keep your logs clean.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4. UI Filters for Multi-Network Clarity
|
||||||
|
|
||||||
|
Don't let a massive device list overwhelm you. Use the [Multi-edit features](./DEVICES_BULK_EDITING.md) to categorize devices and create focused views:
|
||||||
|
|
||||||
|
* **By Zone:** Filter by "Location", "Site" or "Sync Node" you set up in Section 2.
|
||||||
|
* **By Criticality:** Use the custom device Type field to separate "Core Infrastructure" from "Ephemeral Clients."
|
||||||
|
* **By Status:** Use predefined views specifically for "Devices currently Down" to act as a Network Operations Center (NOC) dashboard.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> If you are providing services as a Managed Service Provider (MSP), customize your default UI to be exactly how you need it, by hiding parts of the UI that you are not interested in, or by configuring an auto-refreshed screen monitoring your most important clients. See the [Eyes on glass](./ADVISORY_EYES_ON_GLASS.md) advisory for more details.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 5. Operational Stability & Sync Health
|
||||||
|
|
||||||
|
* **Health Checks:** Regularly monitor the [Logs](https://docs.netalertx.com/LOGGING/?h=logs) to ensure remote nodes are reporting in.
|
||||||
|
* **Backups:** Use the **CSV Devices Backup** plugin. Standardize your workflow templates and [back up](./BACKUPS.md) your `/config` folders so that if a node fails, you can redeploy it with the same logic instantly.
|
||||||
|
|
||||||
|
|
||||||
|
### 6. Optimize Performance
|
||||||
|
|
||||||
|
As your environment grows, tuning the underlying engine is vital to maintain a snappy UI and reliable discovery cycles.
|
||||||
|
|
||||||
|
* **Plugin Scheduling:** Avoid "Scan Storms" by staggering plugin execution. Running intensive tasks like `NMAP` or `MASS_DNS` simultaneously can spike CPU and cause database locks.
|
||||||
|
* **Database Health:** Large-scale monitoring generates massive event logs. Use the **[DBCLNP (Database Cleanup)](https://docs.netalertx.com/PLUGINS/#dbclnp)** plugin to prune old records and keep the SQLite database performant.
|
||||||
|
* **Resource Management:** For high-device counts, consider increasing the memory limit for the container and utilizing `tmpfs` for temporary files to reduce SD card/disk I/O bottlenecks.
|
||||||
|
|
||||||
|
> [!IMPORTANT]
|
||||||
|
> For a deep dive into hardware requirements, database vacuuming, and specific environment variables for high-load instances, refer to the full **[Performance Optimization Guide](https://docs.netalertx.com/PERFORMANCE/)**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Summary Checklist
|
||||||
|
|
||||||
|
* [ ] **Discovery:** Are all subnets explicitly defined?
|
||||||
|
* [ ] **Automation:** Do new devices get auto-assigned to a VLAN/Owner?
|
||||||
|
* [ ] **Noise Control:** Are transient "Down" alerts delayed via `NTFPRCS_alert_down_time`?
|
||||||
|
* [ ] **Remote Sites:** Is the SYNC plugin authenticated and heartbeat-active?
|
||||||
34
docs/API.md
34
docs/API.md
@@ -1,4 +1,4 @@
|
|||||||
# NetAlertX API Documentation
|
# API Documentation
|
||||||
|
|
||||||
This API provides programmatic access to **devices, events, sessions, metrics, network tools, and sync** in NetAlertX. It is implemented as a **REST and GraphQL server**. All requests require authentication via **API Token** (`API_TOKEN` setting) unless explicitly noted. For example, to authorize a GraphQL request, you need to use an `Authorization: Bearer API_TOKEN` header as per the example below:
|
This API provides programmatic access to **devices, events, sessions, metrics, network tools, and sync** in NetAlertX. It is implemented as a **REST and GraphQL server**. All requests require authentication via **API Token** (`API_TOKEN` setting) unless explicitly noted. For example, to authorize a GraphQL request, you need to use an `Authorization: Bearer API_TOKEN` header as per the example below:
|
||||||
|
|
||||||
@@ -23,6 +23,8 @@ curl 'http://host:GRAPHQL_PORT/graphql' \
|
|||||||
|
|
||||||
The API server runs on `0.0.0.0:<graphql_port>` with **CORS enabled** for all main endpoints.
|
The API server runs on `0.0.0.0:<graphql_port>` with **CORS enabled** for all main endpoints.
|
||||||
|
|
||||||
|
CORS configuration: You can limit allowed CORS origins with the `CORS_ORIGINS` environment variable. Set it to a comma-separated list of origins (for example: `CORS_ORIGINS="https://example.com,http://localhost:3000"`). The server parses this list at startup and only allows origins that begin with `http://` or `https://`. If `CORS_ORIGINS` is unset or parses to an empty list, the API falls back to a safe development default list (localhosts) and will include `*` as a last-resort permissive origin.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Authentication
|
## Authentication
|
||||||
@@ -36,9 +38,15 @@ Authorization: Bearer <API_TOKEN>
|
|||||||
If the token is missing or invalid, the server will return:
|
If the token is missing or invalid, the server will return:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{ "error": "Forbidden" }
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "ERROR: Not authorized",
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
HTTP Status: **403 Forbidden**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Base URL
|
## Base URL
|
||||||
@@ -51,9 +59,15 @@ http://<server>:<GRAPHQL_PORT>/
|
|||||||
|
|
||||||
## Endpoints
|
## Endpoints
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> You can explore the API endpoints by using the interactive API docs at `http://<server>:<GRAPHQL_PORT>/docs`.
|
||||||
|
> 
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> When retrieving devices or settings try using the GraphQL API endpoint first as it is read-optimized.
|
> When retrieving devices or settings try using the GraphQL API endpoint first as it is read-optimized.
|
||||||
|
|
||||||
|
### Standard REST Endpoints
|
||||||
|
|
||||||
* [Device API Endpoints](API_DEVICE.md) – Manage individual devices
|
* [Device API Endpoints](API_DEVICE.md) – Manage individual devices
|
||||||
* [Devices Collection](API_DEVICES.md) – Bulk operations on multiple devices
|
* [Devices Collection](API_DEVICES.md) – Bulk operations on multiple devices
|
||||||
* [Events](API_EVENTS.md) – Device event logging and management
|
* [Events](API_EVENTS.md) – Device event logging and management
|
||||||
@@ -64,9 +78,23 @@ http://<server>:<GRAPHQL_PORT>/
|
|||||||
* [Metrics](API_METRICS.md) – Prometheus metrics and per-device status
|
* [Metrics](API_METRICS.md) – Prometheus metrics and per-device status
|
||||||
* [Network Tools](API_NETTOOLS.md) – Utilities like Wake-on-LAN, traceroute, nslookup, nmap, and internet info
|
* [Network Tools](API_NETTOOLS.md) – Utilities like Wake-on-LAN, traceroute, nslookup, nmap, and internet info
|
||||||
* [Online History](API_ONLINEHISTORY.md) – Online/offline device records
|
* [Online History](API_ONLINEHISTORY.md) – Online/offline device records
|
||||||
* [GraphQL](API_GRAPHQL.md) – Advanced queries and filtering
|
* [GraphQL](API_GRAPHQL.md) – Advanced queries and filtering for Devices, Settings and Language Strings
|
||||||
* [Sync](API_SYNC.md) – Synchronization between multiple NetAlertX instances
|
* [Sync](API_SYNC.md) – Synchronization between multiple NetAlertX instances
|
||||||
|
* [Logs](API_LOGS.md) – Purging of logs and adding to the event execution queue for user triggered events
|
||||||
* [DB query](API_DBQUERY.md) (⚠ Internal) - Low level database access - use other endpoints if possible
|
* [DB query](API_DBQUERY.md) (⚠ Internal) - Low level database access - use other endpoints if possible
|
||||||
|
* `/server` (⚠ Internal) - Backend server endpoint for internal communication only - **do not use directly**
|
||||||
|
|
||||||
|
### MCP Server Bridge
|
||||||
|
|
||||||
|
NetAlertX includes an **MCP (Model Context Protocol) Server Bridge** that provides AI assistants access to NetAlertX functionality through standardized tools. MCP endpoints are available at `/mcp/sse/*` paths and mirror the functionality of standard REST endpoints:
|
||||||
|
|
||||||
|
* `/mcp/sse` - Server-Sent Events endpoint for MCP client connections
|
||||||
|
* `/mcp/sse/openapi.json` - OpenAPI specification for available MCP tools
|
||||||
|
* `/mcp/sse/device/*`, `/mcp/sse/devices/*`, `/mcp/sse/nettools/*`, `/mcp/sse/events/*` - MCP-enabled versions of REST endpoints
|
||||||
|
|
||||||
|
MCP endpoints require the same Bearer token authentication as REST endpoints.
|
||||||
|
|
||||||
|
**📖 See [MCP Server Bridge API](API_MCP.md) for complete documentation, tool specifications, and integration examples.**
|
||||||
|
|
||||||
See [Testing](API_TESTS.md) for example requests and usage.
|
See [Testing](API_TESTS.md) for example requests and usage.
|
||||||
|
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
The **Database Query API** provides direct, low-level access to the NetAlertX database. It allows **read, write, update, and delete** operations against tables, using **base64-encoded** SQL or structured parameters.
|
The **Database Query API** provides direct, low-level access to the NetAlertX database. It allows **read, write, update, and delete** operations against tables, using **base64-encoded** SQL or structured parameters.
|
||||||
|
|
||||||
> [!Warning]
|
> [!Warning]
|
||||||
> This API is primarily used internally to generate and render the application UI. These endpoints are low-level and powerful, and should be used with caution. Wherever possible, prefer the [standard API endpoints](API.md). Invalid or unsafe queries can corrupt data.
|
> This API is primarily used internally to generate and render the application UI. These endpoints are low-level and powerful, and should be used with caution. Wherever possible, prefer the [standard API endpoints](API.md). Invalid or unsafe queries can corrupt data.
|
||||||
> If you need data in a specific format that is not already provided, please open an issue or pull request with a clear, broadly useful use case. This helps ensure new endpoints benefit the wider community rather than relying on raw database queries.
|
> If you need data in a specific format that is not already provided, please open an issue or pull request with a clear, broadly useful use case. This helps ensure new endpoints benefit the wider community rather than relying on raw database queries.
|
||||||
|
|
||||||
@@ -16,10 +16,14 @@ All `/dbquery/*` endpoints require an API token in the HTTP headers:
|
|||||||
Authorization: Bearer <API_TOKEN>
|
Authorization: Bearer <API_TOKEN>
|
||||||
```
|
```
|
||||||
|
|
||||||
If the token is missing or invalid:
|
If the token is missing or invalid (HTTP 403):
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{ "error": "Forbidden" }
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "ERROR: Not authorized",
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|||||||
@@ -41,6 +41,8 @@ Manage a **single device** by its MAC address. Operations include retrieval, upd
|
|||||||
* Device not found → HTTP 404
|
* Device not found → HTTP 404
|
||||||
* Unauthorized → HTTP 403
|
* Unauthorized → HTTP 403
|
||||||
|
|
||||||
|
**MCP Integration**: Available as `get_device_info` and `set_device_alias` tools. See [MCP Server Bridge API](API_MCP.md).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 2. Update Device Fields
|
## 2. Update Device Fields
|
||||||
|
|||||||
@@ -170,7 +170,7 @@ The Devices Collection API provides operations to **retrieve, manage, import/exp
|
|||||||
**Response**:
|
**Response**:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
[
|
[
|
||||||
120, // Total devices
|
120, // Total devices
|
||||||
85, // Connected
|
85, // Connected
|
||||||
5, // Favorites
|
5, // Favorites
|
||||||
@@ -207,6 +207,93 @@ The Devices Collection API provides operations to **retrieve, manage, import/exp
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
### 9. Search Devices
|
||||||
|
|
||||||
|
* **POST** `/devices/search`
|
||||||
|
Search for devices by MAC, name, or IP address.
|
||||||
|
|
||||||
|
**Request Body** (JSON):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query": ".50"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"devices": [
|
||||||
|
{
|
||||||
|
"devName": "Test Device",
|
||||||
|
"devMac": "AA:BB:CC:DD:EE:FF",
|
||||||
|
"devLastIP": "192.168.1.50"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 10. Get Latest Device
|
||||||
|
|
||||||
|
* **GET** `/devices/latest`
|
||||||
|
Get the most recently connected device.
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"devName": "Latest Device",
|
||||||
|
"devMac": "AA:BB:CC:DD:EE:FF",
|
||||||
|
"devLastIP": "192.168.1.100",
|
||||||
|
"devFirstConnection": "2025-12-07 10:30:00"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 11. Get Network Topology
|
||||||
|
|
||||||
|
* **GET** `/devices/network/topology`
|
||||||
|
Get network topology showing device relationships.
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "AA:AA:AA:AA:AA:AA",
|
||||||
|
"name": "Router",
|
||||||
|
"vendor": "VendorA"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"links": [
|
||||||
|
{
|
||||||
|
"source": "AA:AA:AA:AA:AA:AA",
|
||||||
|
"target": "BB:BB:BB:BB:BB:BB",
|
||||||
|
"port": "eth1"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MCP Tools
|
||||||
|
|
||||||
|
These endpoints are also available as **MCP Tools** for AI assistant integration:
|
||||||
|
- `list_devices`, `search_devices`, `get_latest_device`, `get_network_topology`, `set_device_alias`
|
||||||
|
|
||||||
|
📖 See [MCP Server Bridge API](API_MCP.md) for AI integration details.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Example `curl` Requests
|
## Example `curl` Requests
|
||||||
|
|
||||||
**Get All Devices**:
|
**Get All Devices**:
|
||||||
@@ -247,3 +334,26 @@ curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/devices/by-status?status=online"
|
|||||||
-H "Authorization: Bearer <API_TOKEN>"
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Search Devices**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/devices/search" \
|
||||||
|
-H "Authorization: Bearer <API_TOKEN>" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
--data '{"query": "192.168.1"}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Get Latest Device**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/devices/latest" \
|
||||||
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Get Network Topology**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/devices/network/topology" \
|
||||||
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
|
```
|
||||||
|
|
||||||
|
|||||||
157
docs/API_DEVICE_FIELD_LOCK.md
Normal file
157
docs/API_DEVICE_FIELD_LOCK.md
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
# Device Field Lock/Unlock API
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Device Field Lock/Unlock feature allows users to lock specific device fields to prevent plugin overwrites. This is part of the authoritative device field update system that ensures data integrity while maintaining flexibility for user customization.
|
||||||
|
|
||||||
|
## Concepts
|
||||||
|
|
||||||
|
### Tracked Fields
|
||||||
|
|
||||||
|
Only certain device fields support locking. These are the fields that can be modified by both plugins and users:
|
||||||
|
|
||||||
|
- `devName` - Device name/hostname
|
||||||
|
- `devVendor` - Device vendor/manufacturer
|
||||||
|
- `devFQDN` - Fully qualified domain name
|
||||||
|
- `devSSID` - Network SSID
|
||||||
|
- `devParentMAC` - Parent device MAC address
|
||||||
|
- `devParentPort` - Parent device port
|
||||||
|
- `devParentRelType` - Parent device relationship type
|
||||||
|
- `devVlan` - VLAN identifier
|
||||||
|
|
||||||
|
### Field Source Tracking
|
||||||
|
|
||||||
|
Every tracked field has an associated `*Source` field that indicates where the current value originated:
|
||||||
|
|
||||||
|
- `NEWDEV` - Created via the UI as a new device
|
||||||
|
- `USER` - Manually edited by a user
|
||||||
|
- `LOCKED` - Field is locked; prevents any plugin overwrites
|
||||||
|
- Plugin name (e.g., `UNIFIAPI`, `PIHOLE`) - Last updated by this plugin
|
||||||
|
|
||||||
|
### Locking Mechanism
|
||||||
|
|
||||||
|
When a field is **locked**, its source is set to `LOCKED`. This prevents plugin overwrites based on the authorization logic:
|
||||||
|
|
||||||
|
1. Plugin wants to update field
|
||||||
|
2. Authoritative handler checks field's `*Source` value
|
||||||
|
3. If `*Source` == `LOCKED`, plugin update is rejected
|
||||||
|
4. User can still manually unlock the field
|
||||||
|
|
||||||
|
When a field is **unlocked**, its source is set to `NEWDEV`, allowing plugins to resume updates.
|
||||||
|
|
||||||
|
## Endpoints
|
||||||
|
|
||||||
|
### Lock or Unlock a Field
|
||||||
|
|
||||||
|
```
|
||||||
|
POST /device/{mac}/field/lock
|
||||||
|
Authorization: Bearer {API_TOKEN}
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
|
{
|
||||||
|
"fieldName": "devName",
|
||||||
|
"lock": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
- `mac` (path, required): Device MAC address (e.g., `AA:BB:CC:DD:EE:FF`)
|
||||||
|
- `fieldName` (body, required): Name of the field to lock/unlock. Must be one of the tracked fields listed above.
|
||||||
|
- `lock` (body, required): Boolean. `true` to lock, `false` to unlock.
|
||||||
|
|
||||||
|
#### Responses
|
||||||
|
|
||||||
|
**Success (200)**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"message": "Field devName locked",
|
||||||
|
"fieldName": "devName",
|
||||||
|
"locked": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Bad Request (400)**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "fieldName is required"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "Field 'devInvalidField' cannot be locked"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Unauthorized (403)**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "Unauthorized"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Not Found (404)**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "Device not found"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Lock a Device Name
|
||||||
|
Prevent the device name from being overwritten by plugins:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -X POST https://your-netalertx.local/api/device/AA:BB:CC:DD:EE:FF/field/lock \
|
||||||
|
-H "Authorization: Bearer your-api-token" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"fieldName": "devName",
|
||||||
|
"lock": true
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Unlock a Field
|
||||||
|
Allow plugins to resume updating a field:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -X POST https://your-netalertx.local/api/device/AA:BB:CC:DD:EE:FF/field/lock \
|
||||||
|
-H "Authorization: Bearer your-api-token" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"fieldName": "devName",
|
||||||
|
"lock": false
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
## UI Integration
|
||||||
|
|
||||||
|
The Device Edit form displays lock/unlock buttons for all tracked fields:
|
||||||
|
|
||||||
|
1. **Lock Button** (🔒): Click to prevent plugin overwrites
|
||||||
|
2. **Unlock Button** (🔓): Click to allow plugin overwrites again
|
||||||
|
3. **Source Indicator**: Shows current field source (USER, LOCKED, NEWDEV, or plugin name)
|
||||||
|
|
||||||
|
|
||||||
|
### Authorization Handler
|
||||||
|
|
||||||
|
The authoritative field update logic prevents plugin overwrites:
|
||||||
|
|
||||||
|
1. Plugin provides new value for field via plugin config `SET_ALWAYS`/`SET_EMPTY`
|
||||||
|
2. Authoritative handler (in DeviceInstance) checks `{field}Source` value
|
||||||
|
3. If source is `LOCKED` or `USER`, plugin update is rejected
|
||||||
|
4. If source is `NEWDEV` or plugin name, plugin update is accepted
|
||||||
|
|
||||||
|
## See Also
|
||||||
|
|
||||||
|
- [Device locking](./DEVICE_FIELD_LOCK.md)
|
||||||
|
- [Device source fields](./DEVICE_SOURCE_FIELDS.md)
|
||||||
|
- [API Device Endpoints Documentation](./API_DEVICE.md)
|
||||||
|
- [Authoritative Field Updates System](./PLUGINS_DEV.md#authoritative-fields)
|
||||||
|
- [Plugin Configuration Reference](./PLUGINS_DEV_CONFIG.md)
|
||||||
@@ -58,12 +58,12 @@ The Events API provides access to **device event logs**, allowing creation, retr
|
|||||||
"success": true,
|
"success": true,
|
||||||
"events": [
|
"events": [
|
||||||
{
|
{
|
||||||
"eve_MAC": "00:11:22:33:44:55",
|
"eveMac": "00:11:22:33:44:55",
|
||||||
"eve_IP": "192.168.1.10",
|
"eveIp": "192.168.1.10",
|
||||||
"eve_DateTime": "2025-08-24T12:00:00Z",
|
"eveDateTime": "2025-08-24T12:00:00Z",
|
||||||
"eve_EventType": "Device Down",
|
"eveEventType": "Device Down",
|
||||||
"eve_AdditionalInfo": "",
|
"eveAdditionalInfo": "",
|
||||||
"eve_PendingAlertEmail": 1
|
"evePendingAlertEmail": 1
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -88,7 +88,56 @@ The Events API provides access to **device event logs**, allowing creation, retr
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 4. Event Totals Over a Period
|
### 4. Get Recent Events
|
||||||
|
|
||||||
|
* **GET** `/events/recent` → Get events from the last 24 hours
|
||||||
|
* **GET** `/events/<hours>` → Get events from the last N hours
|
||||||
|
|
||||||
|
**Response** (JSON):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"hours": 24,
|
||||||
|
"count": 5,
|
||||||
|
"events": [
|
||||||
|
{
|
||||||
|
"eveDateTime": "2025-12-07 12:00:00",
|
||||||
|
"eveEventType": "New Device",
|
||||||
|
"eveMac": "AA:BB:CC:DD:EE:FF",
|
||||||
|
"eveIp": "192.168.1.100",
|
||||||
|
"eveAdditionalInfo": "Device detected"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 5. Get Latest Events
|
||||||
|
|
||||||
|
* **GET** `/events/last`
|
||||||
|
Get the 10 most recent events.
|
||||||
|
|
||||||
|
**Response** (JSON):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"count": 10,
|
||||||
|
"events": [
|
||||||
|
{
|
||||||
|
"eveDateTime": "2025-12-07 12:00:00",
|
||||||
|
"eveEventType": "Device Down",
|
||||||
|
"eveMac": "AA:BB:CC:DD:EE:FF"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 6. Event Totals Over a Period
|
||||||
|
|
||||||
* **GET** `/sessions/totals?period=<period>`
|
* **GET** `/sessions/totals?period=<period>`
|
||||||
Return event and session totals over a given period.
|
Return event and session totals over a given period.
|
||||||
@@ -110,22 +159,35 @@ The Events API provides access to **device event logs**, allowing creation, retr
|
|||||||
1. Total events in the period
|
1. Total events in the period
|
||||||
2. Total sessions
|
2. Total sessions
|
||||||
3. Missing sessions
|
3. Missing sessions
|
||||||
4. Voided events (`eve_EventType LIKE 'VOIDED%'`)
|
4. Voided events (`eveEventType LIKE 'VOIDED%'`)
|
||||||
5. New device events (`eve_EventType LIKE 'New Device'`)
|
5. New device events (`eveEventType LIKE 'New Device'`)
|
||||||
6. Device down events (`eve_EventType LIKE 'Device Down'`)
|
6. Device down events (`eveEventType LIKE 'Device Down'`)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MCP Tools
|
||||||
|
|
||||||
|
Event endpoints are available as **MCP Tools** for AI assistant integration:
|
||||||
|
- `get_recent_alerts`, `get_last_events`
|
||||||
|
|
||||||
|
📖 See [MCP Server Bridge API](API_MCP.md) for AI integration details.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
|
|
||||||
* All endpoints require **authorization** (Bearer token). Unauthorized requests return:
|
* All endpoints require **authorization** (Bearer token). Unauthorized requests return HTTP 403:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{ "error": "Forbidden" }
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "ERROR: Not authorized",
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
* Events are stored in the **Events table** with the following fields:
|
* Events are stored in the **Events table** with the following fields:
|
||||||
`eve_MAC`, `eve_IP`, `eve_DateTime`, `eve_EventType`, `eve_AdditionalInfo`, `eve_PendingAlertEmail`.
|
`eveMac`, `eveIp`, `eveDateTime`, `eveEventType`, `eveAdditionalInfo`, `evePendingAlertEmail`.
|
||||||
|
|
||||||
* Event creation automatically logs activity for debugging.
|
* Event creation automatically logs activity for debugging.
|
||||||
|
|
||||||
|
|||||||
@@ -1,9 +1,14 @@
|
|||||||
# GraphQL API Endpoint
|
# GraphQL API Endpoint
|
||||||
|
|
||||||
GraphQL queries are **read-optimized for speed**. Data may be slightly out of date until the file system cache refreshes. The GraphQL endpoints allows you to access the following objects:
|
GraphQL queries are **read-optimized for speed**. Data may be slightly out of date until the file system cache refreshes. The GraphQL endpoints allow you to access the following objects:
|
||||||
|
|
||||||
- Devices
|
* Devices
|
||||||
- Settings
|
* Settings
|
||||||
|
* Events
|
||||||
|
* PluginsObjects
|
||||||
|
* PluginsHistory
|
||||||
|
* PluginsEvents
|
||||||
|
* Language Strings (LangStrings)
|
||||||
|
|
||||||
## Endpoints
|
## Endpoints
|
||||||
|
|
||||||
@@ -190,11 +195,223 @@ curl 'http://host:GRAPHQL_PORT/graphql' \
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## LangStrings Query
|
||||||
|
|
||||||
|
The **LangStrings query** provides access to localized strings. Supports filtering by `langCode` and `langStringKey`. If the requested string is missing or empty, you can optionally fall back to `en_us`.
|
||||||
|
|
||||||
|
### Sample Query
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query GetLangStrings {
|
||||||
|
langStrings(langCode: "de_de", langStringKey: "settings_other_scanners") {
|
||||||
|
langStrings {
|
||||||
|
langCode
|
||||||
|
langStringKey
|
||||||
|
langStringText
|
||||||
|
}
|
||||||
|
count
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Query Parameters
|
||||||
|
|
||||||
|
| Parameter | Type | Description |
|
||||||
|
| ---------------- | ------- | ---------------------------------------------------------------------------------------- |
|
||||||
|
| `langCode` | String | Optional language code (e.g., `en_us`, `de_de`). If omitted, all languages are returned. |
|
||||||
|
| `langStringKey` | String | Optional string key to retrieve a specific entry. |
|
||||||
|
| `fallback_to_en` | Boolean | Optional (default `true`). If `true`, empty or missing strings fallback to `en_us`. |
|
||||||
|
|
||||||
|
### `curl` Example
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl 'http://host:GRAPHQL_PORT/graphql' \
|
||||||
|
-X POST \
|
||||||
|
-H 'Authorization: Bearer API_TOKEN' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
--data '{
|
||||||
|
"query": "query GetLangStrings { langStrings(langCode: \"de_de\", langStringKey: \"settings_other_scanners\") { langStrings { langCode langStringKey langStringText } count } }"
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Sample Response
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"langStrings": {
|
||||||
|
"count": 1,
|
||||||
|
"langStrings": [
|
||||||
|
{
|
||||||
|
"langCode": "de_de",
|
||||||
|
"langStringKey": "settings_other_scanners",
|
||||||
|
"langStringText": "Other, non-device scanner plugins that are currently enabled." // falls back to en_us if empty
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Plugin Tables (Objects, Events, History)
|
||||||
|
|
||||||
|
Three queries expose the plugin database tables with server-side pagination, filtering, and search:
|
||||||
|
|
||||||
|
* `pluginsObjects` — current plugin object state
|
||||||
|
* `pluginsEvents` — unprocessed plugin events
|
||||||
|
* `pluginsHistory` — historical plugin event log
|
||||||
|
|
||||||
|
All three share the same `PluginQueryOptionsInput` and return the same `PluginEntry` shape.
|
||||||
|
|
||||||
|
### Sample Query
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query GetPluginObjects($options: PluginQueryOptionsInput) {
|
||||||
|
pluginsObjects(options: $options) {
|
||||||
|
dbCount
|
||||||
|
count
|
||||||
|
entries {
|
||||||
|
index plugin objectPrimaryId objectSecondaryId
|
||||||
|
dateTimeCreated dateTimeChanged
|
||||||
|
watchedValue1 watchedValue2 watchedValue3 watchedValue4
|
||||||
|
status extra userData foreignKey
|
||||||
|
syncHubNodeName helpVal1 helpVal2 helpVal3 helpVal4 objectGuid
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Query Parameters (`PluginQueryOptionsInput`)
|
||||||
|
|
||||||
|
| Parameter | Type | Description |
|
||||||
|
| ------------ | ----------------- | ------------------------------------------------------ |
|
||||||
|
| `page` | Int | Page number (1-based). |
|
||||||
|
| `limit` | Int | Rows per page (max 1000). |
|
||||||
|
| `sort` | [SortOptionsInput] | Sorting options (`field`, `order`). |
|
||||||
|
| `search` | String | Free-text search across key columns. |
|
||||||
|
| `filters` | [FilterOptionsInput] | Column-value exact-match filters. |
|
||||||
|
| `plugin` | String | Plugin prefix to scope results (e.g. `"ARPSCAN"`). |
|
||||||
|
| `foreignKey` | String | Foreign key filter (e.g. device MAC). |
|
||||||
|
| `dateFrom` | String | Start of date range filter on `dateTimeCreated`. |
|
||||||
|
| `dateTo` | String | End of date range filter on `dateTimeCreated`. |
|
||||||
|
|
||||||
|
### Response Fields
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
| --------- | ------------- | ------------------------------------------------------------- |
|
||||||
|
| `dbCount` | Int | Total rows for the requested plugin (before search/filters). |
|
||||||
|
| `count` | Int | Total rows after all filters (before pagination). |
|
||||||
|
| `entries` | [PluginEntry] | Paginated list of plugin entries. |
|
||||||
|
|
||||||
|
### `curl` Example
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl 'http://host:GRAPHQL_PORT/graphql' \
|
||||||
|
-X POST \
|
||||||
|
-H 'Authorization: Bearer API_TOKEN' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
--data '{
|
||||||
|
"query": "query GetPluginObjects($options: PluginQueryOptionsInput) { pluginsObjects(options: $options) { dbCount count entries { index plugin objectPrimaryId status foreignKey } } }",
|
||||||
|
"variables": {
|
||||||
|
"options": {
|
||||||
|
"plugin": "ARPSCAN",
|
||||||
|
"page": 1,
|
||||||
|
"limit": 25
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Badge Prefetch (Batched Counts)
|
||||||
|
|
||||||
|
Use GraphQL aliases to fetch counts for all plugins in a single request:
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query BadgeCounts {
|
||||||
|
ARPSCAN: pluginsObjects(options: {plugin: "ARPSCAN", page: 1, limit: 1}) { dbCount }
|
||||||
|
INTRNT: pluginsObjects(options: {plugin: "INTRNT", page: 1, limit: 1}) { dbCount }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Events Query
|
||||||
|
|
||||||
|
Access the Events table with server-side pagination, filtering, and search.
|
||||||
|
|
||||||
|
### Sample Query
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query GetEvents($options: EventQueryOptionsInput) {
|
||||||
|
events(options: $options) {
|
||||||
|
dbCount
|
||||||
|
count
|
||||||
|
entries {
|
||||||
|
eveMac
|
||||||
|
eveIp
|
||||||
|
eveDateTime
|
||||||
|
eveEventType
|
||||||
|
eveAdditionalInfo
|
||||||
|
evePendingAlertEmail
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Query Parameters (`EventQueryOptionsInput`)
|
||||||
|
|
||||||
|
| Parameter | Type | Description |
|
||||||
|
| ----------- | ------------------ | ------------------------------------------------ |
|
||||||
|
| `page` | Int | Page number (1-based). |
|
||||||
|
| `limit` | Int | Rows per page (max 1000). |
|
||||||
|
| `sort` | [SortOptionsInput] | Sorting options (`field`, `order`). |
|
||||||
|
| `search` | String | Free-text search across key columns. |
|
||||||
|
| `filters` | [FilterOptionsInput] | Column-value exact-match filters. |
|
||||||
|
| `eveMac` | String | Filter by device MAC address. |
|
||||||
|
| `eventType` | String | Filter by event type (e.g. `"New Device"`). |
|
||||||
|
| `dateFrom` | String | Start of date range filter on `eveDateTime`. |
|
||||||
|
| `dateTo` | String | End of date range filter on `eveDateTime`. |
|
||||||
|
|
||||||
|
### Response Fields
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
| --------- | ------------ | ------------------------------------------------------------ |
|
||||||
|
| `dbCount` | Int | Total rows in the Events table (before any filters). |
|
||||||
|
| `count` | Int | Total rows after all filters (before pagination). |
|
||||||
|
| `entries` | [EventEntry] | Paginated list of event entries. |
|
||||||
|
|
||||||
|
### `curl` Example
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl 'http://host:GRAPHQL_PORT/graphql' \
|
||||||
|
-X POST \
|
||||||
|
-H 'Authorization: Bearer API_TOKEN' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
--data '{
|
||||||
|
"query": "query GetEvents($options: EventQueryOptionsInput) { events(options: $options) { dbCount count entries { eveMac eveIp eveDateTime eveEventType } } }",
|
||||||
|
"variables": {
|
||||||
|
"options": {
|
||||||
|
"eveMac": "00:11:22:33:44:55",
|
||||||
|
"page": 1,
|
||||||
|
"limit": 50
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
|
|
||||||
* Device and settings queries can be combined in one request since GraphQL supports batching.
|
* Device, settings, LangStrings, plugin, and event queries can be combined in **one request** since GraphQL supports batching.
|
||||||
|
* The `fallback_to_en` feature ensures the UI always has a value even if a translation is missing.
|
||||||
|
* Data is **cached in memory** per JSON file; changes to language or plugin files will only refresh after the cache detects a file modification.
|
||||||
* The `setOverriddenByEnv` flag helps identify setting values that are locked at container runtime.
|
* The `setOverriddenByEnv` flag helps identify setting values that are locked at container runtime.
|
||||||
* The schema is **read-only** — updates must be performed through other APIs or configuration management. See the other [API](API.md) endpoints for details.
|
* Plugin queries scope `dbCount` to the requested `plugin`/`foreignKey` so badge counts reflect per-plugin totals.
|
||||||
|
* The schema is **read-only** — updates must be performed through other APIs or configuration management. See the other [API](API.md) endpoints for details.
|
||||||
|
|
||||||
|
|||||||
178
docs/API_LOGS.md
Normal file
178
docs/API_LOGS.md
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
# Logs API Endpoints
|
||||||
|
|
||||||
|
Manage or purge application log files stored under `/app/log` and manage the execution queue. These endpoints are primarily used for maintenance tasks such as clearing accumulated logs or adding system actions without restarting the container.
|
||||||
|
|
||||||
|
Only specific, pre-approved log files can be purged for security and stability reasons.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Delete (Purge) a Log File
|
||||||
|
|
||||||
|
* **DELETE** `/logs?file=<log_file>` → Purge the contents of an allowed log file.
|
||||||
|
|
||||||
|
**Query Parameter:**
|
||||||
|
|
||||||
|
* `file` → The name of the log file to purge (e.g., `app.log`, `stdout.log`)
|
||||||
|
|
||||||
|
**Allowed Files:**
|
||||||
|
|
||||||
|
```
|
||||||
|
app.log
|
||||||
|
IP_changes.log
|
||||||
|
stdout.log
|
||||||
|
stderr.log
|
||||||
|
app.php_errors.log
|
||||||
|
execution_queue.log
|
||||||
|
db_is_locked.log
|
||||||
|
```
|
||||||
|
|
||||||
|
**Authorization:**
|
||||||
|
Requires a valid API token in the `Authorization` header.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### `curl` Example (Success)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X DELETE 'http://<server_ip>:<GRAPHQL_PORT>/logs?file=app.log' \
|
||||||
|
-H 'Authorization: Bearer <API_TOKEN>' \
|
||||||
|
-H 'Accept: application/json'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"message": "[clean_log] File app.log purged successfully"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### `curl` Example (Not Allowed)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X DELETE 'http://<server_ip>:<GRAPHQL_PORT>/logs?file=not_allowed.log' \
|
||||||
|
-H 'Authorization: Bearer <API_TOKEN>' \
|
||||||
|
-H 'Accept: application/json'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "[clean_log] File not_allowed.log is not allowed to be purged"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### `curl` Example (Unauthorized)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X DELETE 'http://<server_ip>:<GRAPHQL_PORT>/logs?file=app.log' \
|
||||||
|
-H 'Accept: application/json'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Add an Action to the Execution Queue
|
||||||
|
|
||||||
|
* **POST** `/logs/add-to-execution-queue` → Add a system action to the execution queue.
|
||||||
|
|
||||||
|
**Request Body (JSON):**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"action": "update_api|devices"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Authorization:**
|
||||||
|
Requires a valid API token in the `Authorization` header.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### `curl` Example (Success)
|
||||||
|
|
||||||
|
The request below updates the API cache for Devices.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X POST 'http://<server_ip>:<GRAPHQL_PORT>/logs/add-to-execution-queue' \
|
||||||
|
-H 'Authorization: Bearer <API_TOKEN>' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
--data '{"action": "update_api|devices"}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"message": "[UserEventsQueueInstance] Action \"update_api|devices\" added to the execution queue."
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### `curl` Example (Missing Parameter)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X POST 'http://<server_ip>:<GRAPHQL_PORT>/logs/add-to-execution-queue' \
|
||||||
|
-H 'Authorization: Bearer <API_TOKEN>' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
--data '{}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "Missing parameters",
|
||||||
|
"error": "Missing required 'action' field in JSON body"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### `curl` Example (Unauthorized)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -X POST 'http://<server_ip>:<GRAPHQL_PORT>/logs/add-to-execution-queue' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
--data '{"action": "update_api|devices"}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
* Only predefined files in `/app/log` can be purged — arbitrary paths are **not permitted**.
|
||||||
|
* When a log file is purged:
|
||||||
|
|
||||||
|
* Its content is replaced with a short marker text: `"File manually purged"`.
|
||||||
|
* A backend log entry is created via `mylog()`.
|
||||||
|
* A frontend notification is generated via `write_notification()`.
|
||||||
|
* Execution queue actions are appended to `execution_queue.log` and can be processed asynchronously by background tasks or workflows.
|
||||||
|
* Unauthorized or invalid attempts are safely logged and rejected.
|
||||||
|
* For advanced log retrieval, analysis, or structured querying, use the frontend log viewer.
|
||||||
|
* Always ensure that sensitive or production logs are handled carefully — purging cannot be undone.
|
||||||
405
docs/API_MCP.md
Normal file
405
docs/API_MCP.md
Normal file
@@ -0,0 +1,405 @@
|
|||||||
|
# MCP Server Bridge API
|
||||||
|
|
||||||
|
The **MCP (Model Context Protocol) Server Bridge** provides AI assistants with standardized access to NetAlertX functionality through tools and server-sent events. This enables AI systems to interact with your network monitoring data in real-time.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The MCP Server Bridge exposes NetAlertX functionality as **MCP Tools** that AI assistants can call to:
|
||||||
|
|
||||||
|
- Search and retrieve device information
|
||||||
|
- Trigger network scans
|
||||||
|
- Get network topology and events
|
||||||
|
- Wake devices via Wake-on-LAN
|
||||||
|
- Access open port information
|
||||||
|
- Set device aliases
|
||||||
|
|
||||||
|
All MCP endpoints mirror the functionality of standard REST endpoints but are optimized for AI assistant integration.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Architecture Overview
|
||||||
|
|
||||||
|
### MCP Connection Flow
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph TB
|
||||||
|
A[AI Assistant<br/>Claude Desktop] -->|SSE Connection| B[NetAlertX MCP Server<br/>:20212/mcp/sse]
|
||||||
|
B -->|JSON-RPC Messages| C[MCP Bridge<br/>api_server_start.py]
|
||||||
|
C -->|Tool Calls| D[NetAlertX Tools<br/>Device/Network APIs]
|
||||||
|
D -->|Response Data| C
|
||||||
|
C -->|JSON Response| B
|
||||||
|
B -->|Stream Events| A
|
||||||
|
```
|
||||||
|
|
||||||
|
### MCP Tool Integration
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant AI as AI Assistant
|
||||||
|
participant MCP as MCP Server (:20212)
|
||||||
|
participant API as NetAlertX API (:20211)
|
||||||
|
participant DB as SQLite Database
|
||||||
|
|
||||||
|
AI->>MCP: 1. Connect via SSE
|
||||||
|
MCP-->>AI: 2. Session established
|
||||||
|
AI->>MCP: 3. tools/list request
|
||||||
|
MCP->>API: 4. GET /mcp/sse/openapi.json
|
||||||
|
API-->>MCP: 5. Available tools spec
|
||||||
|
MCP-->>AI: 6. Tool definitions
|
||||||
|
AI->>MCP: 7. tools/call: search_devices
|
||||||
|
MCP->>API: 8. POST /devices/search
|
||||||
|
API->>DB: 9. Query devices
|
||||||
|
DB-->>API: 10. Device data
|
||||||
|
API-->>MCP: 11. JSON response
|
||||||
|
MCP-->>AI: 12. Tool result
|
||||||
|
```
|
||||||
|
|
||||||
|
### Component Architecture
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph LR
|
||||||
|
subgraph "AI Client"
|
||||||
|
A[Claude Desktop]
|
||||||
|
B[Custom MCP Client]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "NetAlertX MCP Server (:20212)"
|
||||||
|
C[SSE Endpoint<br/>/mcp/sse]
|
||||||
|
D[Message Handler<br/>/mcp/messages]
|
||||||
|
E[OpenAPI Spec<br/>/mcp/sse/openapi.json]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "NetAlertX API Server (:20211)"
|
||||||
|
F[Device APIs<br/>/devices/*]
|
||||||
|
G[Network Tools<br/>/nettools/*]
|
||||||
|
H[Events API<br/>/events/*]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph "Backend"
|
||||||
|
I[SQLite Database]
|
||||||
|
J[Network Scanners]
|
||||||
|
K[Plugin System]
|
||||||
|
end
|
||||||
|
|
||||||
|
A -.->|Bearer Auth| C
|
||||||
|
B -.->|Bearer Auth| C
|
||||||
|
C --> D
|
||||||
|
C --> E
|
||||||
|
D --> F
|
||||||
|
D --> G
|
||||||
|
D --> H
|
||||||
|
F --> I
|
||||||
|
G --> J
|
||||||
|
H --> I
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Authentication
|
||||||
|
|
||||||
|
MCP endpoints use the same **Bearer token authentication** as REST endpoints:
|
||||||
|
|
||||||
|
```http
|
||||||
|
Authorization: Bearer <API_TOKEN>
|
||||||
|
```
|
||||||
|
|
||||||
|
Unauthorized requests return HTTP 403:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"message": "ERROR: Not authorized",
|
||||||
|
"error": "Forbidden"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MCP Connection Endpoint
|
||||||
|
|
||||||
|
### Server-Sent Events (SSE)
|
||||||
|
|
||||||
|
* **GET/POST** `/mcp/sse`
|
||||||
|
|
||||||
|
Main MCP connection endpoint for AI clients. Establishes a persistent connection using Server-Sent Events for real-time communication between AI assistants and NetAlertX.
|
||||||
|
|
||||||
|
**Connection Example**:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const eventSource = new EventSource('/mcp/sse', {
|
||||||
|
headers: {
|
||||||
|
'Authorization': 'Bearer <API_TOKEN>'
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
eventSource.onmessage = function(event) {
|
||||||
|
const response = JSON.parse(event.data);
|
||||||
|
console.log('MCP Response:', response);
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## OpenAPI Specification
|
||||||
|
|
||||||
|
### Get MCP Tools Specification
|
||||||
|
|
||||||
|
* **GET** `/mcp/sse/openapi.json`
|
||||||
|
|
||||||
|
Returns the OpenAPI specification for all available MCP tools, describing the parameters and schemas for each tool.
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"openapi": "3.0.0",
|
||||||
|
"info": {
|
||||||
|
"title": "NetAlertX Tools",
|
||||||
|
"version": "1.1.0"
|
||||||
|
},
|
||||||
|
"servers": [{"url": "/"}],
|
||||||
|
"paths": {
|
||||||
|
"/devices/by-status": {
|
||||||
|
"post": {"operationId": "list_devices"}
|
||||||
|
},
|
||||||
|
"/device/{mac}": {
|
||||||
|
"post": {"operationId": "get_device_info"}
|
||||||
|
},
|
||||||
|
"/devices/search": {
|
||||||
|
"post": {"operationId": "search_devices"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Available MCP Tools
|
||||||
|
|
||||||
|
### Device Management Tools
|
||||||
|
|
||||||
|
| Tool | Endpoint | Description |
|
||||||
|
|------|----------|-------------|
|
||||||
|
| `list_devices` | `/devices/by-status` | List devices by online status |
|
||||||
|
| `get_device_info` | `/device/{mac}` | Get detailed device information |
|
||||||
|
| `search_devices` | `/devices/search` | Search devices by MAC, name, or IP |
|
||||||
|
| `get_latest_device` | `/devices/latest` | Get most recently connected device |
|
||||||
|
| `set_device_alias` | `/device/{mac}/set-alias` | Set device friendly name |
|
||||||
|
|
||||||
|
### Network Tools
|
||||||
|
|
||||||
|
| Tool | Endpoint | Description |
|
||||||
|
|------|----------|-------------|
|
||||||
|
| `trigger_scan` | `/nettools/trigger-scan` | Trigger network discovery scan to find new devices. |
|
||||||
|
| `run_nmap_scan` | `/nettools/nmap` | Perform NMAP scan on a target to identify open ports. |
|
||||||
|
| `get_open_ports` | `/device/open_ports` | Get stored NMAP open ports. Use `run_nmap_scan` first if empty. |
|
||||||
|
| `wol_wake_device` | `/nettools/wakeonlan` | Wake device using Wake-on-LAN |
|
||||||
|
| `get_network_topology` | `/devices/network/topology` | Get network topology map |
|
||||||
|
|
||||||
|
### Event & Monitoring Tools
|
||||||
|
|
||||||
|
| Tool | Endpoint | Description |
|
||||||
|
|------|----------|-------------|
|
||||||
|
| `get_recent_alerts` | `/events/recent` | Get events from last 24 hours |
|
||||||
|
| `get_last_events` | `/events/last` | Get 10 most recent events |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Tool Usage Examples
|
||||||
|
|
||||||
|
### Search Devices Tool
|
||||||
|
|
||||||
|
**Tool Call**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "1",
|
||||||
|
"method": "tools/call",
|
||||||
|
"params": {
|
||||||
|
"name": "search_devices",
|
||||||
|
"arguments": {
|
||||||
|
"query": "192.168.1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "1",
|
||||||
|
"result": {
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "{\n \"success\": true,\n \"devices\": [\n {\n \"devName\": \"Router\",\n \"devMac\": \"AA:BB:CC:DD:EE:FF\",\n \"devLastIP\": \"192.168.1.1\"\n }\n ]\n}"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"isError": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Trigger Network Scan Tool
|
||||||
|
|
||||||
|
**Tool Call**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "2",
|
||||||
|
"method": "tools/call",
|
||||||
|
"params": {
|
||||||
|
"name": "trigger_scan",
|
||||||
|
"arguments": {
|
||||||
|
"type": "ARPSCAN"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "2",
|
||||||
|
"result": {
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "{\n \"success\": true,\n \"message\": \"Scan triggered for type: ARPSCAN\"\n}"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"isError": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Wake-on-LAN Tool
|
||||||
|
|
||||||
|
**Tool Call**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "3",
|
||||||
|
"method": "tools/call",
|
||||||
|
"params": {
|
||||||
|
"name": "wol_wake_device",
|
||||||
|
"arguments": {
|
||||||
|
"devMac": "AA:BB:CC:DD:EE:FF"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Integration with AI Assistants
|
||||||
|
|
||||||
|
### Claude Desktop Integration
|
||||||
|
|
||||||
|
Add to your Claude Desktop `mcp.json` configuration:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcp": {
|
||||||
|
"servers": {
|
||||||
|
"netalertx": {
|
||||||
|
"command": "node",
|
||||||
|
"args": ["/path/to/mcp-client.js"],
|
||||||
|
"env": {
|
||||||
|
"NETALERTX_URL": "http://your-server:<GRAPHQL_PORT>",
|
||||||
|
"NETALERTX_TOKEN": "your-api-token"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Generic MCP Client
|
||||||
|
|
||||||
|
```python
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
from mcp import ClientSession, StdioServerParameters
|
||||||
|
from mcp.client.stdio import stdio_client
|
||||||
|
|
||||||
|
async def main():
|
||||||
|
# Connect to NetAlertX MCP server
|
||||||
|
server_params = StdioServerParameters(
|
||||||
|
command="curl",
|
||||||
|
args=[
|
||||||
|
"-N", "-H", "Authorization: Bearer <API_TOKEN>",
|
||||||
|
"http://your-server:<GRAPHQL_PORT>/mcp/sse"
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
async with stdio_client(server_params) as (read, write):
|
||||||
|
async with ClientSession(read, write) as session:
|
||||||
|
# Initialize connection
|
||||||
|
await session.initialize()
|
||||||
|
|
||||||
|
# List available tools
|
||||||
|
tools = await session.list_tools()
|
||||||
|
print(f"Available tools: {[t.name for t in tools.tools]}")
|
||||||
|
|
||||||
|
# Call a tool
|
||||||
|
result = await session.call_tool("search_devices", {"query": "router"})
|
||||||
|
print(f"Search result: {result}")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
asyncio.run(main())
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
MCP tool calls return structured error information:
|
||||||
|
|
||||||
|
**Error Response**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": "1",
|
||||||
|
"result": {
|
||||||
|
"content": [
|
||||||
|
{
|
||||||
|
"type": "text",
|
||||||
|
"text": "Error calling tool: Device not found"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"isError": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Common Error Types**:
|
||||||
|
- `401/403` - Authentication failure
|
||||||
|
- `400` - Invalid parameters or missing required fields
|
||||||
|
- `404` - Resource not found (device, scan results, etc.)
|
||||||
|
- `500` - Internal server error
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
* MCP endpoints require the same API token authentication as REST endpoints
|
||||||
|
* All MCP tools return JSON responses wrapped in MCP protocol format
|
||||||
|
* Server-Sent Events maintain persistent connections for real-time updates
|
||||||
|
* Tool parameters match their REST endpoint equivalents
|
||||||
|
* Error responses include both HTTP status codes and descriptive messages
|
||||||
|
* MCP bridge automatically handles request/response serialization
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
* [Main API Overview](API.md) - Core REST API documentation
|
||||||
|
* [Device API](API_DEVICE.md) - Individual device management
|
||||||
|
* [Devices Collection API](API_DEVICES.md) - Bulk device operations
|
||||||
|
* [Network Tools API](API_NETTOOLS.md) - Wake-on-LAN, scans, network utilities
|
||||||
|
* [Events API](API_EVENTS.md) - Event logging and monitoring
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
# Net Tools API Endpoints
|
# Net Tools API Endpoints
|
||||||
|
|
||||||
The Net Tools API provides **network diagnostic utilities**, including Wake-on-LAN, traceroute, speed testing, DNS resolution, nmap scanning, and internet connection information.
|
The Net Tools API provides **network diagnostic utilities**, including Wake-on-LAN, traceroute, speed testing, DNS resolution, nmap scanning, internet connection information, and network interface info.
|
||||||
|
|
||||||
All endpoints require **authorization** via Bearer token.
|
All endpoints require **authorization** via Bearer token.
|
||||||
|
|
||||||
@@ -190,6 +190,51 @@ All endpoints require **authorization** via Bearer token.
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
### 7. Network Interfaces
|
||||||
|
|
||||||
|
* **GET** `/nettools/interfaces`
|
||||||
|
Fetches the list of network interfaces on the system, including IPv4/IPv6 addresses, MAC, MTU, state (up/down), and RX/TX byte counters.
|
||||||
|
|
||||||
|
**Response** (success):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"interfaces": {
|
||||||
|
"eth0": {
|
||||||
|
"name": "eth0",
|
||||||
|
"short": "eth0",
|
||||||
|
"type": "ethernet",
|
||||||
|
"state": "up",
|
||||||
|
"mtu": 1500,
|
||||||
|
"mac": "00:11:32:EF:A5:6B",
|
||||||
|
"ipv4": ["192.168.1.82/24"],
|
||||||
|
"ipv6": ["fe80::211:32ff:feef:a56c/64"],
|
||||||
|
"rx_bytes": 18488221,
|
||||||
|
"tx_bytes": 1443944
|
||||||
|
},
|
||||||
|
"lo": {
|
||||||
|
"name": "lo",
|
||||||
|
"short": "lo",
|
||||||
|
"type": "loopback",
|
||||||
|
"state": "up",
|
||||||
|
"mtu": 65536,
|
||||||
|
"mac": null,
|
||||||
|
"ipv4": ["127.0.0.1/8"],
|
||||||
|
"ipv6": ["::1/128"],
|
||||||
|
"rx_bytes": 123456,
|
||||||
|
"tx_bytes": 123456
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Error Responses**:
|
||||||
|
|
||||||
|
* Command failure or parsing error → HTTP 500
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Example `curl` Requests
|
## Example `curl` Requests
|
||||||
|
|
||||||
**Wake-on-LAN**:
|
**Wake-on-LAN**:
|
||||||
@@ -241,3 +286,21 @@ curl -X POST "http://<server_ip>:<GRAPHQL_PORT>/nettools/nmap" \
|
|||||||
curl "http://<server_ip>:<GRAPHQL_PORT>/nettools/internetinfo" \
|
curl "http://<server_ip>:<GRAPHQL_PORT>/nettools/internetinfo" \
|
||||||
-H "Authorization: Bearer <API_TOKEN>"
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Network Interfaces**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl "http://<server_ip>:<GRAPHQL_PORT>/nettools/interfaces" \
|
||||||
|
-H "Authorization: Bearer <API_TOKEN>"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## MCP Tools
|
||||||
|
|
||||||
|
Network tools are available as **MCP Tools** for AI assistant integration:
|
||||||
|
|
||||||
|
* `wol_wake_device`, `trigger_scan`, `get_open_ports`
|
||||||
|
|
||||||
|
📖 See [MCP Server Bridge API](API_MCP.md) for AI integration details.
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
# [Deprecated] API endpoints
|
# [Deprecated] API endpoints
|
||||||
|
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
> Some of these endpoints will be deprecated soon. Please refer to the new [API](API.md) endpoints docs for details on the new API layer.
|
> Some of these endpoints will be deprecated soon. Please refer to the new [API](API.md) endpoints docs for details on the new API layer.
|
||||||
|
|
||||||
NetAlertX comes with a couple of API endpoints. All requests need to be authorized (executed in a logged in browser session) or you have to pass the value of the `API_TOKEN` settings as authorization bearer, for example:
|
NetAlertX comes with a couple of API endpoints. All requests need to be authorized (executed in a logged in browser session) or you have to pass the value of the `API_TOKEN` settings as authorization bearer, for example:
|
||||||
|
|
||||||
@@ -52,11 +52,11 @@ query GetDevices($options: PageQueryOptionsInput) {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
See also: [Debugging GraphQL issues](./DEBUG_GRAPHQL.md)
|
See also: [Debugging GraphQL issues](./DEBUG_API_SERVER.md)
|
||||||
|
|
||||||
### `curl` Command
|
### `curl` Command
|
||||||
|
|
||||||
You can use the following `curl` command to execute the query.
|
You can use the following `curl` command to execute the query.
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
curl 'http://host:GRAPHQL_PORT/graphql' -X POST -H 'Authorization: Bearer API_TOKEN' -H 'Content-Type: application/json' --data '{
|
curl 'http://host:GRAPHQL_PORT/graphql' -X POST -H 'Authorization: Bearer API_TOKEN' -H 'Content-Type: application/json' --data '{
|
||||||
@@ -127,9 +127,9 @@ The response will be in JSON format, similar to the following:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## API Endpoint: JSON files
|
## API Endpoint: JSON files
|
||||||
|
|
||||||
This API endpoint retrieves static files, that are periodically updated.
|
This API endpoint retrieves static files, that are periodically updated.
|
||||||
|
|
||||||
- Endpoint URL: `php/server/query_json.php?file=<file name>`
|
- Endpoint URL: `php/server/query_json.php?file=<file name>`
|
||||||
- Host: `same as front end (web ui)`
|
- Host: `same as front end (web ui)`
|
||||||
@@ -141,24 +141,24 @@ The endpoints are updated when objects in the API endpoints are changed.
|
|||||||
|
|
||||||
### Location of the endpoints
|
### Location of the endpoints
|
||||||
|
|
||||||
In the container, these files are located under the `/app/api/` folder. You can access them via the `/php/server/query_json.php?file=user_notifications.json` endpoint.
|
In the container, these files are located under the API directory (default: `/tmp/api/`, configurable via `NETALERTX_API` environment variable). You can access them via the `/php/server/query_json.php?file=user_notifications.json` endpoint.
|
||||||
|
|
||||||
### Available endpoints
|
### Available endpoints
|
||||||
|
|
||||||
You can access the following files:
|
You can access the following files:
|
||||||
|
|
||||||
| File name | Description |
|
| File name | Description |
|
||||||
|----------------------|----------------------|
|
|----------------------|----------------------|
|
||||||
| `notification_json_final.json` | The json version of the last notification (e.g. used for webhooks - [sample JSON](https://github.com/jokob-sk/NetAlertX/blob/main/front/report_templates/webhook_json_sample.json)). |
|
| `notification_json_final.json` | The json version of the last notification (e.g. used for webhooks - [sample JSON](https://github.com/netalertx/NetAlertX/blob/main/front/report_templates/webhook_json_sample.json)). |
|
||||||
| `table_devices.json` | All of the available Devices detected by the app. |
|
| `table_devices.json` | All of the available Devices detected by the app. |
|
||||||
| `table_plugins_events.json` | The list of the unprocessed (pending) notification events (plugins_events DB table). |
|
| `table_plugins_events.json` | The list of the unprocessed (pending) notification events (plugins_events DB table). |
|
||||||
| `table_plugins_history.json` | The list of notification events history. |
|
| `table_plugins_history.json` | The list of notification events history. |
|
||||||
| `table_plugins_objects.json` | The content of the plugins_objects table. Find more info on the [Plugin system here](https://github.com/jokob-sk/NetAlertX/tree/main/docs/PLUGINS.md)|
|
| `table_plugins_objects.json` | The content of the plugins_objects table. Find more info on the [Plugin system here](https://docs.netalertx.com/PLUGINS)|
|
||||||
| `language_strings.json` | The content of the language_strings table, which in turn is loaded from the plugins `config.json` definitions. |
|
| `language_strings.json` | The content of the language_strings table, which in turn is loaded from the plugins `config.json` definitions. |
|
||||||
| `table_custom_endpoint.json` | A custom endpoint generated by the SQL query specified by the `API_CUSTOM_SQL` setting. |
|
| `table_custom_endpoint.json` | A custom endpoint generated by the SQL query specified by the `API_CUSTOM_SQL` setting. |
|
||||||
| `table_settings.json` | The content of the settings table. |
|
| `table_settings.json` | The content of the settings table. |
|
||||||
| `app_state.json` | Contains the current application state. |
|
| `app_state.json` | Contains the current application state. |
|
||||||
|
|
||||||
|
|
||||||
### JSON Data format
|
### JSON Data format
|
||||||
|
|
||||||
@@ -169,11 +169,11 @@ The endpoints starting with the `table_` prefix contain most, if not all, data c
|
|||||||
"data": [
|
"data": [
|
||||||
{
|
{
|
||||||
"db_column_name": "data",
|
"db_column_name": "data",
|
||||||
"db_column_name2": "data2"
|
"db_column_name2": "data2"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"db_column_name": "data3",
|
"db_column_name": "data3",
|
||||||
"db_column_name2": "data4"
|
"db_column_name2": "data4"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -201,7 +201,7 @@ Example JSON of the `table_devices.json` endpoint with two Devices (database row
|
|||||||
"devParentMAC": "",
|
"devParentMAC": "",
|
||||||
"devParentPort": "",
|
"devParentPort": "",
|
||||||
"devIcon": "globe"
|
"devIcon": "globe"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"devMac": "a4:8f:ff:aa:ba:1f",
|
"devMac": "a4:8f:ff:aa:ba:1f",
|
||||||
"devName": "Net - USG",
|
"devName": "Net - USG",
|
||||||
@@ -332,7 +332,7 @@ Grafana template sample: [Download json](./samples/API/Grafana_Dashboard.json)
|
|||||||
|
|
||||||
## API Endpoint: /log files
|
## API Endpoint: /log files
|
||||||
|
|
||||||
This API endpoint retrieves files from the `/app/log` folder.
|
This API endpoint retrieves files from the `/tmp/log` folder.
|
||||||
|
|
||||||
- Endpoint URL: `php/server/query_logs.php?file=<file name>`
|
- Endpoint URL: `php/server/query_logs.php?file=<file name>`
|
||||||
- Host: `same as front end (web ui)`
|
- Host: `same as front end (web ui)`
|
||||||
@@ -357,7 +357,7 @@ This API endpoint retrieves files from the `/app/log` folder.
|
|||||||
|
|
||||||
## API Endpoint: /config files
|
## API Endpoint: /config files
|
||||||
|
|
||||||
To retrieve files from the `/app/config` folder.
|
To retrieve files from the `/data/config` folder.
|
||||||
|
|
||||||
- Endpoint URL: `php/server/query_config.php?file=<file name>`
|
- Endpoint URL: `php/server/query_config.php?file=<file name>`
|
||||||
- Host: `same as front end (web ui)`
|
- Host: `same as front end (web ui)`
|
||||||
|
|||||||
@@ -106,23 +106,26 @@ curl -X DELETE "http://<server_ip>:<GRAPHQL_PORT>/sessions/delete" \
|
|||||||
"success": true,
|
"success": true,
|
||||||
"sessions": [
|
"sessions": [
|
||||||
{
|
{
|
||||||
"ses_MAC": "AA:BB:CC:DD:EE:FF",
|
"sesMac": "AA:BB:CC:DD:EE:FF",
|
||||||
"ses_Connection": "2025-08-01 10:00",
|
"sesDateTimeConnection": "2025-08-01 10:00",
|
||||||
"ses_Disconnection": "2025-08-01 12:00",
|
"sesDateTimeDisconnection": "2025-08-01 12:00",
|
||||||
"ses_Duration": "2h 0m",
|
"sesDuration": "2h 0m",
|
||||||
"ses_IP": "192.168.1.10",
|
"sesIp": "192.168.1.10",
|
||||||
"ses_Info": ""
|
"sesAdditionalInfo": ""
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
#### `curl` Example
|
#### `curl` Example
|
||||||
|
|
||||||
|
**get sessions for mac**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/list?mac=AA:BB:CC:DD:EE:FF&start_date=2025-08-01&end_date=2025-08-21" \
|
curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/list?mac=AA:BB:CC:DD:EE:FF&start_date=2025-08-01&end_date=2025-08-21" \
|
||||||
-H "Authorization: Bearer <API_TOKEN>" \
|
-H "Authorization: Bearer <API_TOKEN>" \
|
||||||
-H "Accept: application/json"
|
-H "Accept: application/json"
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### Calendar View of Sessions
|
### Calendar View of Sessions
|
||||||
@@ -191,12 +194,12 @@ curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/calendar?start=2025-08-0
|
|||||||
"success": true,
|
"success": true,
|
||||||
"sessions": [
|
"sessions": [
|
||||||
{
|
{
|
||||||
"ses_MAC": "AA:BB:CC:DD:EE:FF",
|
"sesMac": "AA:BB:CC:DD:EE:FF",
|
||||||
"ses_Connection": "2025-08-01 10:00",
|
"sesDateTimeConnection": "2025-08-01 10:00",
|
||||||
"ses_Disconnection": "2025-08-01 12:00",
|
"sesDateTimeDisconnection": "2025-08-01 12:00",
|
||||||
"ses_Duration": "2h 0m",
|
"sesDuration": "2h 0m",
|
||||||
"ses_IP": "192.168.1.10",
|
"sesIp": "192.168.1.10",
|
||||||
"ses_Info": ""
|
"sesAdditionalInfo": ""
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -221,15 +224,33 @@ curl -X GET "http://<server_ip>:<GRAPHQL_PORT>/sessions/AA:BB:CC:DD:EE:FF?period
|
|||||||
* `type` → Event type (`all`, `sessions`, `missing`, `voided`, `new`, `down`)
|
* `type` → Event type (`all`, `sessions`, `missing`, `voided`, `new`, `down`)
|
||||||
Default: `all`
|
Default: `all`
|
||||||
* `period` → Period to retrieve events (`7 days`, `1 month`, etc.)
|
* `period` → Period to retrieve events (`7 days`, `1 month`, etc.)
|
||||||
|
* `page` → Page number, 1-based (default: `1`)
|
||||||
|
* `limit` → Rows per page, max 1000 (default: `100`)
|
||||||
|
* `search` → Free-text search filter across all columns
|
||||||
|
* `sortCol` → Column index to sort by, 0-based (default: `0`)
|
||||||
|
* `sortDir` → Sort direction: `asc` or `desc` (default: `desc`)
|
||||||
|
|
||||||
**Example:**
|
**Example:**
|
||||||
|
|
||||||
```
|
```
|
||||||
/sessions/session-events?type=all&period=7 days
|
/sessions/session-events?type=all&period=7 days&page=1&limit=25&sortCol=3&sortDir=desc
|
||||||
```
|
```
|
||||||
|
|
||||||
**Response:**
|
**Response:**
|
||||||
Returns a list of events or sessions with formatted connection, disconnection, duration, and IP information.
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"data": [...],
|
||||||
|
"total": 150,
|
||||||
|
"recordsFiltered": 150
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
| ----------------- | ---- | ------------------------------------------------- |
|
||||||
|
| `data` | list | Paginated rows (each row is a list of values). |
|
||||||
|
| `total` | int | Total rows before search filter. |
|
||||||
|
| `recordsFiltered` | int | Total rows after search filter (before paging). |
|
||||||
|
|
||||||
#### `curl` Example
|
#### `curl` Example
|
||||||
|
|
||||||
|
|||||||
78
docs/API_SSE.md
Normal file
78
docs/API_SSE.md
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
# SSE (Server-Sent Events)
|
||||||
|
|
||||||
|
Real-time app state updates via Server-Sent Events. Reduces server load ~95% vs polling.
|
||||||
|
|
||||||
|
## Endpoints
|
||||||
|
|
||||||
|
| Endpoint | Method | Purpose |
|
||||||
|
|----------|--------|---------|
|
||||||
|
| `/sse/state` | GET | Stream state updates (requires Bearer token) |
|
||||||
|
| `/sse/stats` | GET | Debug: connected clients, queued events |
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Connect to SSE Stream
|
||||||
|
```bash
|
||||||
|
curl -H "Authorization: Bearer YOUR_API_TOKEN" \
|
||||||
|
http://localhost:5000/sse/state
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Connection Stats
|
||||||
|
```bash
|
||||||
|
curl -H "Authorization: Bearer YOUR_API_TOKEN" \
|
||||||
|
http://localhost:5000/sse/stats
|
||||||
|
```
|
||||||
|
|
||||||
|
## Event Types
|
||||||
|
|
||||||
|
- `state_update` - App state changed (e.g., "Scanning", "Processing")
|
||||||
|
- `unread_notifications_count_update` - Number of unread notifications changed (count: int)
|
||||||
|
|
||||||
|
## Backend Integration
|
||||||
|
|
||||||
|
Broadcasts automatically triggered in `app_state.py` via `broadcast_state_update()`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from api_server.sse_broadcast import broadcast_state_update
|
||||||
|
|
||||||
|
# Called on every state change - no additional code needed
|
||||||
|
broadcast_state_update(current_state="Scanning", settings_imported=time.time())
|
||||||
|
```
|
||||||
|
|
||||||
|
## Frontend Integration
|
||||||
|
|
||||||
|
Auto-enabled via `sse_manager.js`:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// In browser console:
|
||||||
|
netAlertXStateManager.getStats().then(stats => {
|
||||||
|
console.log("Connected clients:", stats.connected_clients);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Fallback Behavior
|
||||||
|
|
||||||
|
- If SSE fails after 3 attempts, automatically switches to polling
|
||||||
|
- Polling starts at 1s, backs off to 30s max
|
||||||
|
- No user-visible difference in functionality
|
||||||
|
|
||||||
|
## Files
|
||||||
|
|
||||||
|
| File | Purpose |
|
||||||
|
|------|---------|
|
||||||
|
| `server/api_server/sse_endpoint.py` | SSE endpoints & event queue |
|
||||||
|
| `server/api_server/sse_broadcast.py` | Broadcast helper functions |
|
||||||
|
| `front/js/sse_manager.js` | Client-side SSE connection manager |
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
| Issue | Solution |
|
||||||
|
|-------|----------|
|
||||||
|
| Connection refused | Check backend running, API token correct |
|
||||||
|
| No events received | Verify `broadcast_state_update()` is called on state changes |
|
||||||
|
| High memory | Events not processed fast enough, check client logs |
|
||||||
|
| Using polling instead of SSE | Normal fallback - check browser console for errors |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
## Authelia support
|
## Authelia support
|
||||||
|
|
||||||
> [!WARNING]
|
> [!NOTE]
|
||||||
>
|
> This is community-contributed. Due to environment, setup, or networking differences, results may vary. Please open a PR to improve it instead of creating an issue, as the maintainer is not actively maintaining it.
|
||||||
> This is community contributed content and work in progress. Contributions are welcome.
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
theme: dark
|
theme: dark
|
||||||
@@ -274,4 +274,4 @@ notifier:
|
|||||||
subject: "[Authelia] {title}"
|
subject: "[Authelia] {title}"
|
||||||
startup_check_address: postmaster@MYOTHERDOMAIN.LTD
|
startup_check_address: postmaster@MYOTHERDOMAIN.LTD
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
# Backing Things Up
|
# Backing Things Up
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> To back up 99% of your configuration, back up at least the `/app/config` folder.
|
> To back up 99% of your configuration, back up at least the `/data/config` folder.
|
||||||
> Database definitions can change between releases, so the safest method is to restore backups using the **same app version** they were taken from, then upgrade incrementally.
|
> Database definitions can change between releases, so the safest method is to restore backups using the **same app version** they were taken from, then upgrade incrementally by following the [Migration documentation](./MIGRATION.md).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -13,7 +13,7 @@ There are four key artifacts you can use to back up your NetAlertX configuration
|
|||||||
| File | Description | Limitations |
|
| File | Description | Limitations |
|
||||||
| ------------------------ | ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| ------------------------ | ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| `/db/app.db` | The application database | Might be in an uncommitted state or corrupted |
|
| `/db/app.db` | The application database | Might be in an uncommitted state or corrupted |
|
||||||
| `/config/app.conf` | Configuration file | Can be overridden using the [`APP_CONF_OVERRIDE`](https://github.com/jokob-sk/NetAlertX/tree/main/dockerfiles#docker-environment-variables) variable |
|
| `/config/app.conf` | Configuration file | Can be overridden using the [`APP_CONF_OVERRIDE`](https://github.com/netalertx/NetAlertX/tree/main/dockerfiles#docker-environment-variables) variable |
|
||||||
| `/config/devices.csv` | CSV file containing device data | Does not include historical data |
|
| `/config/devices.csv` | CSV file containing device data | Does not include historical data |
|
||||||
| `/config/workflows.json` | JSON file containing your workflows | N/A |
|
| `/config/workflows.json` | JSON file containing your workflows | N/A |
|
||||||
|
|
||||||
@@ -25,7 +25,7 @@ Understanding where your data is stored helps you plan your backup strategy.
|
|||||||
|
|
||||||
### Core Configuration
|
### Core Configuration
|
||||||
|
|
||||||
Stored in `/app/config/app.conf`.
|
Stored in `/data/config/app.conf`.
|
||||||
This includes settings for:
|
This includes settings for:
|
||||||
|
|
||||||
* Notifications
|
* Notifications
|
||||||
@@ -37,7 +37,7 @@ This includes settings for:
|
|||||||
|
|
||||||
### Device Data
|
### Device Data
|
||||||
|
|
||||||
Stored in `/app/config/devices_<timestamp>.csv` or `/app/config/devices.csv`, created by the [CSV Backup `CSVBCKP` Plugin](https://github.com/jokob-sk/NetAlertX/tree/main/front/plugins/csv_backup).
|
Stored in `/data/config/devices_<timestamp>.csv` or `/data/config/devices.csv`, created by the [CSV Backup `CSVBCKP` Plugin](https://github.com/netalertx/NetAlertX/tree/main/front/plugins/csv_backup).
|
||||||
Contains:
|
Contains:
|
||||||
|
|
||||||
* Device names, icons, and categories
|
* Device names, icons, and categories
|
||||||
@@ -46,7 +46,7 @@ Contains:
|
|||||||
|
|
||||||
### Historical Data
|
### Historical Data
|
||||||
|
|
||||||
Stored in `/app/db/app.db` (see [Database Overview](./DATABASE.md)).
|
Stored in `/data/db/app.db` (see [Database Overview](./DATABASE.md)).
|
||||||
Contains:
|
Contains:
|
||||||
|
|
||||||
* Plugin data and historical entries
|
* Plugin data and historical entries
|
||||||
@@ -77,13 +77,13 @@ You can also download the `app.conf` and `devices.csv` files from the **Maintena
|
|||||||
|
|
||||||
### 💾 What to Back Up
|
### 💾 What to Back Up
|
||||||
|
|
||||||
* `/app/db/app.db` (uncorrupted)
|
* `/data/db/app.db` (uncorrupted)
|
||||||
* `/app/config/app.conf`
|
* `/data/config/app.conf`
|
||||||
* `/app/config/workflows.json`
|
* `/data/config/workflows.json`
|
||||||
|
|
||||||
### 📥 How to Restore
|
### 📥 How to Restore
|
||||||
|
|
||||||
Map these files into your container as described in the [Setup documentation](https://github.com/jokob-sk/NetAlertX/blob/main/dockerfiles/README.md#docker-paths).
|
Map these files into your container as described in the [Setup documentation](./DOCKER_INSTALLATION.md).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -93,14 +93,14 @@ Map these files into your container as described in the [Setup documentation](ht
|
|||||||
|
|
||||||
### 💾 What to Back Up
|
### 💾 What to Back Up
|
||||||
|
|
||||||
* `/app/config/app.conf`
|
* `/data/config/app.conf`
|
||||||
* `/app/config/workflows.json`
|
* `/data/config/workflows.json`
|
||||||
* `/app/config/devices_<timestamp>.csv` (rename to `devices.csv` during restore)
|
* `/data/config/devices_<timestamp>.csv` (rename to `devices.csv` during restore)
|
||||||
|
|
||||||
### 📥 How to Restore
|
### 📥 How to Restore
|
||||||
|
|
||||||
1. Copy `app.conf` and `workflows.json` into `/app/config/`
|
1. Copy `app.conf` and `workflows.json` into `/data/config/`
|
||||||
2. Rename and place `devices_<timestamp>.csv` → `/app/config/devices.csv`
|
2. Rename and place `devices_<timestamp>.csv` → `/data/config/devices.csv`
|
||||||
3. Restore via the **Maintenance** section under *Devices → Bulk Editing*
|
3. Restore via the **Maintenance** section under *Devices → Bulk Editing*
|
||||||
|
|
||||||
This recovers nearly all configuration, workflows, and device metadata.
|
This recovers nearly all configuration, workflows, and device metadata.
|
||||||
@@ -157,6 +157,6 @@ For users running NetAlertX via Docker, you can back up or restore directly from
|
|||||||
|
|
||||||
## Summary
|
## Summary
|
||||||
|
|
||||||
* Back up `/app/config` for configuration and devices; `/app/db` for history
|
* Back up `/data/config` for configuration and devices; `/data/db` for history
|
||||||
* Keep regular backups, especially before upgrades
|
* Keep regular backups, especially before upgrades
|
||||||
* For Docker setups, use the lightweight `alpine`-based backup method for consistency and portability
|
* For Docker setups, use the lightweight `alpine`-based backup method for consistency and portability
|
||||||
|
|||||||
1
docs/CNAME
Normal file
1
docs/CNAME
Normal file
@@ -0,0 +1 @@
|
|||||||
|
docs.netalertx.com
|
||||||
@@ -1,57 +1,142 @@
|
|||||||
### Loading...
|
# Troubleshooting Common Issues
|
||||||
|
|
||||||
Often if the application is misconfigured the `Loading...` dialog is continuously displayed. This is most likely caused by the backend failing to start. The **Maintenance -> Logs** section should give you more details on what's happening. If there is no exception, check the Portainer log, or start the container in the foreground (without the `-d` parameter) to observe any exceptions. It's advisable to enable `trace` or `debug`. Check the [Debug tips](./DEBUG_TIPS.md) for detailed instructions.
|
> [!TIP]
|
||||||
|
> Before troubleshooting, ensure you have set the correct [Debugging and LOG_LEVEL](./DEBUG_TIPS.md).
|
||||||
|
|
||||||
### Incorrect SCAN_SUBNETS
|
---
|
||||||
|
|
||||||
One of the most common issues is not configuring `SCAN_SUBNETS` correctly. If this setting is misconfigured you will only see one or two devices in your devices list after a scan. Please read the [subnets docs](./SUBNETS.md) carefully to resolve this.
|
## Docker Container Doesn't Start
|
||||||
|
|
||||||
### Duplicate devices and notifications
|
Initial setup issues are often caused by **missing permissions** or **incorrectly mapped volumes**. Always double-check your `docker run` or `docker-compose.yml` against the [official setup guide](./DOCKER_INSTALLATION.md) before proceeding.
|
||||||
|
|
||||||
The app uses the MAC address as an unique identifier for devices. If a new MAC is detected a new device is added to the application and corresponding notifications are triggered. This means that if the MAC of an existing device changes, the device will be logged as a new device. You can usually prevent this from happening by changing the device configuration (in Android, iOS, or Windows) for your network. See the [Random Macs](./RANDOM_MAC.md) guide for details.
|
|
||||||
|
|
||||||
### Permissions
|
### Permissions
|
||||||
|
|
||||||
Make sure your [File permissions](./FILE_PERMISSIONS.md) are set correctly.
|
Make sure your [file permissions](./FILE_PERMISSIONS.md) are correctly set:
|
||||||
|
|
||||||
* If facing issues (AJAX errors, can't write to DB, empty screen, etc.) make sure permissions are set correctly, and check the logs under `/app/log`.
|
* If you encounter AJAX errors, cannot write to the database, or see an empty screen, check that permissions are correct and review the logs under `/tmp/log`.
|
||||||
* To solve permission issues you can try setting the owner and group of the `app.db` by executing the following on the host system: `docker exec netalertx chown -R www-data:www-data /app/db/app.db`.
|
* To fix permission issues with the database, update the owner and group of `app.db` as described in the [File Permissions guide](./FILE_PERMISSIONS.md).
|
||||||
* If still facing issues, try to map the app.db file (⚠ not folder) to `:/app/db/app.db` (see [docker-compose Examples](https://github.com/jokob-sk/NetAlertX/blob/main/dockerfiles/README.md#-docker-composeyml-examples) for details)
|
|
||||||
|
|
||||||
### Container restarts / crashes
|
### Container Restarts / Crashes
|
||||||
|
|
||||||
* Check the logs for details. Often a required setting for a notification method is missing.
|
* Check the logs for details. Often, required settings are missing.
|
||||||
|
* For more detailed troubleshooting, see [Debug and Troubleshooting Tips](./DEBUG_TIPS.md).
|
||||||
|
* To observe errors directly, run the container in the foreground instead of `-d`:
|
||||||
|
|
||||||
### unable to resolve host
|
```bash
|
||||||
|
docker run --rm -it <your_image>
|
||||||
|
```
|
||||||
|
|
||||||
* Check that your `SCAN_SUBNETS` variable is using the correct mask and `--interface`. See the [subnets docs for details](./SUBNETS.md).
|
---
|
||||||
|
|
||||||
### Invalid JSON
|
## Docker Container Starts, But the Application Misbehaves
|
||||||
|
|
||||||
Check the [Invalid JSON errors debug help](./DEBUG_INVALID_JSON.md) docs on how to proceed.
|
If the container starts but the app shows unexpected behavior, the cause is often **data corruption**, **incorrect configuration**, or **unexpected input data**.
|
||||||
|
|
||||||
### sudo execution failing (e.g.: on arpscan) on a Raspberry Pi 4
|
### Continuous "Loading..." Screen
|
||||||
|
|
||||||
> sudo: unexpected child termination condition: 0
|
A misconfigured application may display a persistent `Loading...` dialog. This is usually caused by the backend failing to start.
|
||||||
|
|
||||||
Resolution based on [this issue](https://github.com/linuxserver/docker-papermerge/issues/4#issuecomment-1003657581)
|
**Steps to troubleshoot:**
|
||||||
|
|
||||||
|
1. Check **Maintenance → Logs** for exceptions.
|
||||||
|
2. If no exception is visible, check the Portainer logs.
|
||||||
|
3. Start the container in the foreground to observe exceptions.
|
||||||
|
4. Enable `trace` or `debug` logging for detailed output (see [Debug Tips](./DEBUG_TIPS.md)).
|
||||||
|
5. Verify that `GRAPHQL_PORT` is correctly configured.
|
||||||
|
6. Check browser logs (press `F12`):
|
||||||
|
|
||||||
|
* **Console tab** → refresh the page
|
||||||
|
* **Network tab** → refresh the page
|
||||||
|
|
||||||
|
If you are unsure how to resolve errors, provide screenshots or log excerpts in your issue report or Discord discussion.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Common Configuration Issues
|
||||||
|
|
||||||
|
#### Incorrect `SCAN_SUBNETS`
|
||||||
|
|
||||||
|
If `SCAN_SUBNETS` is misconfigured, you may see only a few devices in your device list after a scan. See the [Subnets Documentation](./SUBNETS.md) for proper configuration.
|
||||||
|
|
||||||
|
#### Duplicate Devices and Notifications
|
||||||
|
|
||||||
|
* Devices are identified by their **MAC address**.
|
||||||
|
* If a device's MAC changes, it will be treated as a new device, triggering notifications.
|
||||||
|
* Prevent this by adjusting your device configuration for Android, iOS, or Windows. See the [Random MACs Guide](./RANDOM_MAC.md).
|
||||||
|
|
||||||
|
#### Unable to Resolve Host
|
||||||
|
|
||||||
|
* Ensure `SCAN_SUBNETS` uses the correct mask and `--interface`.
|
||||||
|
* Refer to the [Subnets Documentation](./SUBNETS.md) for detailed guidance.
|
||||||
|
|
||||||
|
#### Invalid JSON Errors
|
||||||
|
|
||||||
|
* Follow the steps in [Invalid JSON Errors Debug Help](./DEBUG_INVALID_JSON.md).
|
||||||
|
|
||||||
|
#### Sudo Execution Fails (e.g., on arpscan on Raspberry Pi 4)
|
||||||
|
|
||||||
|
Error:
|
||||||
|
|
||||||
```
|
```
|
||||||
|
sudo: unexpected child termination condition: 0
|
||||||
|
```
|
||||||
|
|
||||||
|
**Resolution**:
|
||||||
|
|
||||||
|
```bash
|
||||||
wget ftp.us.debian.org/debian/pool/main/libs/libseccomp/libseccomp2_2.5.3-2_armhf.deb
|
wget ftp.us.debian.org/debian/pool/main/libs/libseccomp/libseccomp2_2.5.3-2_armhf.deb
|
||||||
sudo dpkg -i libseccomp2_2.5.3-2_armhf.deb
|
sudo dpkg -i libseccomp2_2.5.3-2_armhf.deb
|
||||||
```
|
```
|
||||||
|
|
||||||
The link above will probably break in time too. Go to https://packages.debian.org/sid/armhf/libseccomp2/download to find the new version number and put that in the url.
|
> ⚠️ The link may break over time. Check [Debian Packages](https://packages.debian.org/sid/armhf/libseccomp2/download) for the latest version.
|
||||||
|
|
||||||
### Only Router and own device show up
|
#### Only Router and Own Device Show Up
|
||||||
|
|
||||||
Make sure that the subnet and interface in `SCAN_SUBNETS` are correct. If your device/NAS has multiple ethernet ports, you probably need to change `eth0` to something else.
|
* Verify the subnet and interface in `SCAN_SUBNETS`.
|
||||||
|
* On devices with multiple Ethernet ports, you may need to change `eth0` to the correct interface.
|
||||||
|
|
||||||
### Losing my settings and devices after an update
|
#### Losing Settings or Devices After Update
|
||||||
|
|
||||||
If you lose your devices and/or settings after an update that means you don't have the `/app/db` and `/app/config` folders mapped to a permanent storage. That means every time you update these folders are re-created. Make sure you have the [volumes specified correctly](./DOCKER_COMPOSE.md) in your `docker-compose.yml` or run command.
|
* Ensure `/data/db` and `/data/config` are mapped to persistent storage.
|
||||||
|
* Without persistent volumes, these folders are recreated on every update.
|
||||||
|
* See [Docker Volumes Setup](./DOCKER_COMPOSE.md) for proper configuration.
|
||||||
|
|
||||||
|
#### Application Performance Issues
|
||||||
|
|
||||||
|
Slowness can be caused by:
|
||||||
|
|
||||||
|
* Incorrect settings (causing app restarts) → check `app.log`.
|
||||||
|
* Too many background processes → disable unnecessary scanners.
|
||||||
|
* Long scans → limit the number of scanned devices.
|
||||||
|
* Excessive disk operations or failing maintenance plugins.
|
||||||
|
|
||||||
|
> See [Performance Tips](./PERFORMANCE.md) for detailed optimization steps.
|
||||||
|
|
||||||
|
|
||||||
### The application is slow
|
#### IP flipping
|
||||||
|
|
||||||
|
With `ARPSCAN` scans, some devices might flip IP addresses after each scan, triggering false notifications. This is because some devices respond to broadcast calls, and thus different IPs are logged after each scan.
|
||||||
|
|
||||||
|
See how to prevent IP flipping in the [ARPSCAN plugin guide](/front/plugins/arp_scan/README.md).
|
||||||
|
|
||||||
|
Alternatively adjust your [notification settings](./NOTIFICATIONS.md) to prevent false positives by filtering out events or devices.
|
||||||
|
|
||||||
|
#### Multiple NICs on Same Host Reporting Same IP
|
||||||
|
|
||||||
|
On systems with multiple NICs (like a Proxmox server), each NIC has its own MAC address. Sometimes NetAlertX can incorrectly assign the same IP to all NICs, causing false device mappings. This is due to the way ARP responses are handled by the OS and cannot be overridden directly in NetAlertX.
|
||||||
|
|
||||||
|
**Resolution (Linux-based systems, e.g., Proxmox):**
|
||||||
|
|
||||||
|
Run the following commands on the host to fix ARP behavior:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo sysctl -w net.ipv4.conf.all.arp_ignore=1
|
||||||
|
sudo sysctl -w net.ipv4.conf.all.arp_announce=2
|
||||||
|
```
|
||||||
|
|
||||||
|
This ensures each NIC responds correctly to ARP requests and prevents NetAlertX from misassigning IPs.
|
||||||
|
|
||||||
|
> For setups with multiple interfaces on the same switch, consider [workflows](./WORKFLOWS.md), [device exclusions](./NOTIFICATIONS.md), or [dummy devices](./DEVICE_MANAGEMENT.md) as additional workarounds.
|
||||||
|
> See [Feature Requests](https://github.com/netalertx/netalertx/issues) for reporting edge cases.
|
||||||
|
|
||||||
|
|
||||||
Slowness is usually caused by incorrect settings (the app might restart, so check the `app.log`), too many background processes (disable unnecessary scanners), too long scans (limit the number of scanned devices), too many disk operations, or some maintenance plugins might have failed. See the [Performance tips](./PERFORMANCE.md) docs for details.
|
|
||||||
@@ -1,15 +1,21 @@
|
|||||||
# Community Guides
|
# Community Guides
|
||||||
|
|
||||||
Use the official installation guides at first and use community content as supplementary material. Open an issue or PR if you'd like to add your link to the list 🙏 (Ordered by last update time)
|
> [!NOTE]
|
||||||
|
> This is community-contributed. Due to environment, setup, or networking differences, results may vary. Please open a PR to improve it instead of creating an issue, as the maintainer is not actively maintaining it.
|
||||||
|
|
||||||
|
|
||||||
|
Use the official installation guides at first and use community content as supplementary material. (Ordered by last update time)
|
||||||
|
|
||||||
- ▶ [Discover & Monitor Your Network with This Self-Hosted Open Source Tool - Lawrence Systems](https://www.youtube.com/watch?v=R3b5cxLZMpo) (June 2025)
|
- ▶ [Discover & Monitor Your Network with This Self-Hosted Open Source Tool - Lawrence Systems](https://www.youtube.com/watch?v=R3b5cxLZMpo) (June 2025)
|
||||||
- ▶ [Home Lab Network Monitoring - Scotti-BYTE Enterprise Consulting Services](https://www.youtube.com/watch?v=0DryhzrQSJA) (July 2024)
|
- ▶ [Home Lab Network Monitoring - Scotti-BYTE Enterprise Consulting Services](https://www.youtube.com/watch?v=0DryhzrQSJA) (July 2024)
|
||||||
- 📄 [How to Install NetAlertX on Your Synology NAS - Marius hosting](https://mariushosting.com/how-to-install-pi-alert-on-your-synology-nas/) (Updated frequently)
|
- 📄 [How to Install NetAlertX on Your Synology NAS - Marius hosting](https://mariushosting.com/how-to-install-pi-alert-on-your-synology-nas/) (Updated frequently)
|
||||||
- 📄 [Using the PiAlert Network Security Scanner on a Raspberry Pi - PiMyLifeUp](https://pimylifeup.com/raspberry-pi-pialert/)
|
- 📄 [Using the PiAlert Network Security Scanner on a Raspberry Pi - PiMyLifeUp](https://pimylifeup.com/raspberry-pi-pialert/)
|
||||||
- ▶ [How to Setup Pi.Alert on Your Synology NAS - Digital Aloha](https://www.youtube.com/watch?v=M4YhpuRFaUg)
|
- ▶ [How to Setup Pi.Alert on Your Synology NAS - Digital Aloha](https://www.youtube.com/watch?v=M4YhpuRFaUg)
|
||||||
- 📄 [防蹭网神器,网络安全助手 | 极空间部署网络扫描和通知系统『NetAlertX』](https://blog.csdn.net/qq_63499861/article/details/141105273)
|
- 📄 [防蹭网神器,网络安全助手 | 极空间部署网络扫描和通知系统『NetAlertX』](https://blog.csdn.net/qq_63499861/article/details/141105273)
|
||||||
- 📄 [시놀/헤놀에서 네트워크 스캐너 Pi.Alert Docker로 설치 및 사용하기](https://blog.dalso.org/article/%EC%8B%9C%EB%86%80-%ED%97%A4%EB%86%80%EC%97%90%EC%84%9C-%EB%84%A4%ED%8A%B8%EC%9B%8C%ED%81%AC-%EC%8A%A4%EC%BA%90%EB%84%88-pi-alert-docker%EB%A1%9C-%EC%84%A4%EC%B9%98-%EB%B0%8F-%EC%82%AC%EC%9A%A9) (July 2023)
|
- 📄 [시놀/헤놀에서 네트워크 스캐너 Pi.Alert Docker로 설치 및 사용하기](https://blog.dalso.org/article/%EC%8B%9C%EB%86%80-%ED%97%A4%EB%86%80%EC%97%90%EC%84%9C-%EB%84%A4%ED%8A%B8%EC%9B%8C%ED%81%AC-%EC%8A%A4%EC%BA%90%EB%84%88-pi-alert-docker%EB%A1%9C-%EC%84%A4%EC%B9%98-%EB%B0%8F-%EC%82%AC%EC%9A%A9) (July 2023)
|
||||||
- 📄 [网络入侵探测器Pi.Alert (Chinese)](https://codeantenna.com/a/VgUvIAjZ7J) (May 2023)
|
- 📄 [网络入侵探测器Pi.Alert (Chinese)](https://codeantenna.com/a/VgUvIAjZ7J) (May 2023)
|
||||||
- ▶ [Pi.Alert auf Synology & Docker by - Jürgen Barth](https://www.youtube.com/watch?v=-ouvA2UNu-A) (March 2023)
|
- ▶ [Pi.Alert auf Synology & Docker by - Jürgen Barth](https://www.youtube.com/watch?v=-ouvA2UNu-A) (March 2023)
|
||||||
- ▶ [Top Docker Container for Home Server Security - VirtualizationHowto](https://www.youtube.com/watch?v=tY-w-enLF6Q) (March 2023)
|
- ▶ [Top Docker Container for Home Server Security - VirtualizationHowto](https://www.youtube.com/watch?v=tY-w-enLF6Q) (March 2023)
|
||||||
- ▶ [Pi.Alert or WatchYourLAN can alert you to unknown devices appearing on your WiFi or LAN network - Danie van der Merwe](https://www.youtube.com/watch?v=v6an9QG2xF0) (November 2022)
|
- ▶ [Pi.Alert or WatchYourLAN can alert you to unknown devices appearing on your WiFi or LAN network - Danie van der Merwe](https://www.youtube.com/watch?v=v6an9QG2xF0) (November 2022)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -13,31 +13,6 @@ This functionality allows you to define **custom properties** for devices, which
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Defining Custom Properties
|
|
||||||
|
|
||||||
Custom properties are structured as a list of objects, where each property includes the following fields:
|
|
||||||
|
|
||||||
| Field | Description |
|
|
||||||
|--------------------|-----------------------------------------------------------------------------|
|
|
||||||
| `CUSTPROP_icon` | The icon (Base64-encoded HTML) displayed for the property. |
|
|
||||||
| `CUSTPROP_type` | The action type (e.g., `show_notes`, `link`, `delete_dev`). |
|
|
||||||
| `CUSTPROP_name` | A short name or title for the property. |
|
|
||||||
| `CUSTPROP_args` | Arguments for the action (e.g., URL or modal text). |
|
|
||||||
| `CUSTPROP_notes` | Additional notes or details displayed when applicable. |
|
|
||||||
| `CUSTPROP_show` | A boolean to control visibility (`true` to show on the listing page). |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Available Action Types
|
|
||||||
|
|
||||||
- **Show Notes**: Displays a modal with a title and additional notes.
|
|
||||||
- **Example**: Show firmware details or custom messages.
|
|
||||||
- **Link**: Redirects to a specified URL in the current browser tab. (**Arguments** Needs to contain the full URL.)
|
|
||||||
- **Link (New Tab)**: Opens a specified URL in a new browser tab. (**Arguments** Needs to contain the full URL.)
|
|
||||||
- **Delete Device**: Deletes the device using its MAC address.
|
|
||||||
- **Run Plugin**: Placeholder for executing custom plugins (not implemented yet).
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Usage on the Device Listing Page
|
## Usage on the Device Listing Page
|
||||||
|
|
||||||
@@ -74,12 +49,39 @@ Visible properties (`CUSTPROP_show: true`) are displayed as interactive icons in
|
|||||||
3. **Device Removal**:
|
3. **Device Removal**:
|
||||||
- Enable device removal functionality using `CUSTPROP_type: delete_dev`.
|
- Enable device removal functionality using `CUSTPROP_type: delete_dev`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Defining Custom Properties
|
||||||
|
|
||||||
|
Custom properties are structured as a list of objects, where each property includes the following fields:
|
||||||
|
|
||||||
|
| Field | Description |
|
||||||
|
|--------------------|-----------------------------------------------------------------------------|
|
||||||
|
| `CUSTPROP_icon` | The icon (Base64-encoded HTML) displayed for the property. |
|
||||||
|
| `CUSTPROP_type` | The action type (e.g., `show_notes`, `link`, `delete_dev`). |
|
||||||
|
| `CUSTPROP_name` | A short name or title for the property. |
|
||||||
|
| `CUSTPROP_args` | Arguments for the action (e.g., URL or modal text). |
|
||||||
|
| `CUSTPROP_notes` | Additional notes or details displayed when applicable. |
|
||||||
|
| `CUSTPROP_show` | A boolean to control visibility (`true` to show on the listing page). |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Available Action Types
|
||||||
|
|
||||||
|
- **Show Notes**: Displays a modal with a title and additional notes.
|
||||||
|
- **Example**: Show firmware details or custom messages.
|
||||||
|
- **Link**: Redirects to a specified URL in the current browser tab. (**Arguments** Needs to contain the full URL.)
|
||||||
|
- **Link (New Tab)**: Opens a specified URL in a new browser tab. (**Arguments** Needs to contain the full URL.)
|
||||||
|
- **Delete Device**: Deletes the device using its MAC address.
|
||||||
|
- **Run Plugin**: Placeholder for executing custom plugins (not implemented yet).
|
||||||
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
|
|
||||||
- **Plugin Functionality**: The `run_plugin` action type is currently not implemented and will show an alert if used.
|
- **Plugin Functionality**: The `run_plugin` action type is currently not implemented and will show an alert if used.
|
||||||
- **Custom Icons (Experimental 🧪)**: Use Base64-encoded HTML to provide custom icons for each property. You can add your icons in Settings via the `CUSTPROP_icon` settings
|
- **Custom Icons (Experimental 🧪)**: Use Base64-encoded HTML to provide custom icons for each property. You can add your icons in Settings via the `CUSTPROP_icon` settings
|
||||||
- **Visibility Control**: Only properties with `CUSTPROP_show: true` will appear on the listing page.
|
- **Visibility Control**: Only properties with `CUSTPROP_show: true` will appear on the listing page.
|
||||||
|
|
||||||
This feature provides a flexible way to enhance device management and display with interactive elements tailored to your needs.
|
This feature provides a flexible way to enhance device management and display with interactive elements tailored to your needs.
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
|
|
||||||
# A high-level description of the database structure
|
# A high-level description of the database structure
|
||||||
|
|
||||||
An overview of the most important database tables as well as a detailed overview of the Devices table. The MAC address is used as a foreign key in most cases.
|
An overview of the most important database tables as well as a detailed overview of the Devices table. The MAC address is used as a foreign key in most cases.
|
||||||
|
|
||||||
## Devices database table
|
## Devices database table
|
||||||
|
|
||||||
@@ -23,6 +23,7 @@
|
|||||||
| `devLogEvents` | Whether events related to the device should be logged. | `0` |
|
| `devLogEvents` | Whether events related to the device should be logged. | `0` |
|
||||||
| `devAlertEvents` | Whether alerts should be generated for events. | `1` |
|
| `devAlertEvents` | Whether alerts should be generated for events. | `1` |
|
||||||
| `devAlertDown` | Whether an alert should be sent when the device goes down. | `0` |
|
| `devAlertDown` | Whether an alert should be sent when the device goes down. | `0` |
|
||||||
|
| `devCanSleep` | Whether the device can enter a sleep window. When `1`, offline periods within the `NTFPRCS_sleep_time` window are shown as **Sleeping** instead of **Down** and no down alert is fired. | `0` |
|
||||||
| `devSkipRepeated` | Whether to skip repeated alerts for this device. | `1` |
|
| `devSkipRepeated` | Whether to skip repeated alerts for this device. | `1` |
|
||||||
| `devLastNotification` | Timestamp of the last notification sent for this device. | `2025-03-22 12:07:26+11:00` |
|
| `devLastNotification` | Timestamp of the last notification sent for this device. | `2025-03-22 12:07:26+11:00` |
|
||||||
| `devPresentLastScan` | Whether the device was present during the last scan. | `1` |
|
| `devPresentLastScan` | Whether the device was present during the last scan. | `1` |
|
||||||
@@ -42,8 +43,14 @@
|
|||||||
| `devParentRelType` | The type of relationship between the current device and its parent node. By default, selecting `nic` will hide it from lists. | `nic` |
|
| `devParentRelType` | The type of relationship between the current device and its parent node. By default, selecting `nic` will hide it from lists. | `nic` |
|
||||||
| `devReqNicsOnline` | If all NICs are required to be online to mark the current device online. | `0` |
|
| `devReqNicsOnline` | If all NICs are required to be online to mark the current device online. | `0` |
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> `DevicesView` extends the `Devices` table with three computed fields that are never persisted:
|
||||||
|
> - `devIsSleeping` (`1` when `devCanSleep=1`, device is offline, and `devLastConnection` is within the `NTFPRCS_sleep_time` window).
|
||||||
|
> - `devFlapping` (`1` when the device has changed state more than the flap threshold times in the trailing window).
|
||||||
|
> - `devStatus` — derived string: `On-line`, `Sleeping`, `Down`, or `Off-line`.
|
||||||
|
|
||||||
To understand how values of these fields influence application behavior, such as Notifications or Network topology, see also:
|
|
||||||
|
To understand how values of these fields influence application behavior, such as Notifications or Network topology, see also:
|
||||||
|
|
||||||
- [Device Management](./DEVICE_MANAGEMENT.md)
|
- [Device Management](./DEVICE_MANAGEMENT.md)
|
||||||
- [Network Tree Topology Setup](./NETWORK_TREE.md)
|
- [Network Tree Topology Setup](./NETWORK_TREE.md)
|
||||||
@@ -51,32 +58,32 @@ To understand how values of these fields influuence application behavior, such a
|
|||||||
|
|
||||||
|
|
||||||
## Other Tables overview
|
## Other Tables overview
|
||||||
|
|
||||||
| Table name | Description | Sample data |
|
| Table name | Description | Sample data |
|
||||||
|----------------------|----------------------| ----------------------|
|
|----------------------|----------------------| ----------------------|
|
||||||
| CurrentScan | Result of the current scan | ![Screen1][screen1] |
|
| CurrentScan | Result of the current scan | ![Screen1][screen1] |
|
||||||
| Devices | The main devices database that also contains the Network tree mappings. If `ScanCycle` is set to `0` device is not scanned. | ![Screen2][screen2] |
|
| Devices | The main devices database that also contains the Network tree mappings. If `ScanCycle` is set to `0` device is not scanned. | ![Screen2][screen2] |
|
||||||
| Events | Used to collect connection/disconnection events. | ![Screen4][screen4] |
|
| Events | Used to collect connection/disconnection events. | ![Screen4][screen4] |
|
||||||
| Online_History | Used to display the `Device presence` chart | ![Screen6][screen6] |
|
| Online_History | Used to display the `Device presence` chart | ![Screen6][screen6] |
|
||||||
| Parameters | Used to pass values between the frontend and backend. | ![Screen7][screen7] |
|
| Parameters | Used to pass values between the frontend and backend. | ![Screen7][screen7] |
|
||||||
| Plugins_Events | For capturing events exposed by a plugin via the `last_result.log` file. If unique then saved into the `Plugins_Objects` table. Entries are deleted once processed and stored in the `Plugins_History` and/or `Plugins_Objects` tables. | ![Screen10][screen10] |
|
| Plugins_Events | For capturing events exposed by a plugin via the `last_result.log` file. If unique then saved into the `Plugins_Objects` table. Entries are deleted once processed and stored in the `Plugins_History` and/or `Plugins_Objects` tables. | ![Screen10][screen10] |
|
||||||
| Plugins_History | History of all entries from the `Plugins_Events` table | ![Screen11][screen11] |
|
| Plugins_History | History of all entries from the `Plugins_Events` table | ![Screen11][screen11] |
|
||||||
| Plugins_Language_Strings | Language strings collected from the plugin `config.json` files used for string resolution in the frontend. | ![Screen12][screen12] |
|
| Plugins_Language_Strings | Language strings collected from the plugin `config.json` files used for string resolution in the frontend. | ![Screen12][screen12] |
|
||||||
| Plugins_Objects | Unique objects detected by individual plugins. | ![Screen13][screen13] |
|
| Plugins_Objects | Unique objects detected by individual plugins. | ![Screen13][screen13] |
|
||||||
| Sessions | Used to display sessions in the charts | ![Screen15][screen15] |
|
| Sessions | Used to display sessions in the charts | ![Screen15][screen15] |
|
||||||
| Settings | Database representation of the sum of all settings from `app.conf` and plugins coming from `config.json` files. | ![Screen16][screen16] |
|
| Settings | Database representation of the sum of all settings from `app.conf` and plugins coming from `config.json` files. | ![Screen16][screen16] |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
[screen1]: ./img/DATABASE/CurrentScan.png
|
[screen1]: ./img/DATABASE/CurrentScan.png
|
||||||
[screen2]: ./img/DATABASE/Devices.png
|
[screen2]: ./img/DATABASE/Devices.png
|
||||||
[screen4]: ./img/DATABASE/Events.png
|
[screen4]: ./img/DATABASE/Events.png
|
||||||
[screen6]: ./img/DATABASE/Online_History.png
|
[screen6]: ./img/DATABASE/Online_History.png
|
||||||
[screen7]: ./img/DATABASE/Parameters.png
|
[screen7]: ./img/DATABASE/Parameters.png
|
||||||
[screen10]: ./img/DATABASE/Plugins_Events.png
|
[screen10]: ./img/DATABASE/Plugins_Events.png
|
||||||
[screen11]: ./img/DATABASE/Plugins_History.png
|
[screen11]: ./img/DATABASE/Plugins_History.png
|
||||||
[screen12]: ./img/DATABASE/Plugins_Language_Strings.png
|
[screen12]: ./img/DATABASE/Plugins_Language_Strings.png
|
||||||
[screen13]: ./img/DATABASE/Plugins_Objects.png
|
[screen13]: ./img/DATABASE/Plugins_Objects.png
|
||||||
[screen15]: ./img/DATABASE/Sessions.png
|
[screen15]: ./img/DATABASE/Sessions.png
|
||||||
[screen16]: ./img/DATABASE/Settings.png
|
[screen16]: ./img/DATABASE/Settings.png
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user