Process list
mysql \
--host=dns.of.ip \
--user=root \
--password='zabbix' \
--database=zabbix \
--execute="
SHOW FULL PROCESSLIST;
" > /tmp/mysql.process.list.$(date +%Y%m%d.%H%M).txt
Backup directories which can be related to Zabbix
cd /usr/share/zabbix && mkdir -p ~/backup${PWD} && cp -a * ~/backup${PWD}
cd /etc/zabbix && mkdir -p ~/backup${PWD} && cp -a * ~/backup${PWD}
cd /usr/lib/zabbix && mkdir -p ~/backup${PWD} && cp -a * ~/backup${PWD}
cd /etc/cron.d && mkdir -p ~/backup${PWD} && cp -a * ~/backup${PWD}
cd /usr/local/bin && mkdir -p ~/backup${PWD} && cp -a * ~/backup${PWD}
cd /etc/nginx/conf.d && mkdir -p ~/backup${PWD} && cp -a * ~/backup${PWD}
cd /etc/php-fpm.d && mkdir -p ~/backup${PWD} && cp -a * ~/backup${PWD}
Create a backup user
mysql -e "
CREATE USER 'zbx_backup'@'127.0.0.1' IDENTIFIED BY 'zabbix';
GRANT LOCK TABLES, SELECT ON zabbix.* TO 'zbx_backup'@'127.0.0.1';
GRANT RELOAD ON *.* TO 'zbx_backup'@'127.0.0.1';
FLUSH PRIVILEGES;
"
Install credentials file for read-only backup
cat << 'EOF' > /etc/zabbix/zabbix_backup.cnf
[mysqldump]
host=127.0.0.1
user=zbx_backup
password=zabbix
EOF
30 days configuration backup if MySQL 8
BACKUP_DIR=/backup
DBNAME=zabbix
DATE=$(date +%Y%m%d.%H%M)
mkdir -p "$BACKUP_DIR"
echo "schema"
mysqldump \
--defaults-file=/etc/zabbix/zabbix_backup.cnf \
--set-gtid-purged=OFF \
--flush-logs \
--single-transaction \
--create-options \
--no-data \
$DBNAME | gzip --fast > "$BACKUP_DIR/schema.sql.gz" && \
echo "data" && mysqldump \
--defaults-file=/etc/zabbix/zabbix_backup.cnf \
--flush-logs \
--single-transaction \
--no-tablespaces \
--set-gtid-purged=OFF \
--ignore-table=$DBNAME.history \
--ignore-table=$DBNAME.history_log \
--ignore-table=$DBNAME.history_str \
--ignore-table=$DBNAME.history_text \
--ignore-table=$DBNAME.history_uint \
--ignore-table=$DBNAME.trends \
--ignore-table=$DBNAME.trends_uint \
$DBNAME > "$BACKUP_DIR/data.sql" && \
echo "compressing data" && gzip --fast "$BACKUP_DIR/data.sql" && \
echo "quick restore" && mysqldump \
--defaults-file=/etc/zabbix/zabbix_backup.cnf \
--flush-logs \
--single-transaction \
--ignore-table=$DBNAME.history \
--ignore-table=$DBNAME.history_log \
--ignore-table=$DBNAME.history_str \
--ignore-table=$DBNAME.history_text \
--ignore-table=$DBNAME.history_uint \
--ignore-table=$DBNAME.trends \
--ignore-table=$DBNAME.trends_uint \
$DBNAME > "$BACKUP_DIR/quick.restore.sql" && \
echo "compressing quick restore" && gzip --fast "$BACKUP_DIR/quick.restore.sql"
mv "$BACKUP_DIR/schema.sql.gz" "$BACKUP_DIR/schema.$DATE.sql.gz"
mv "$BACKUP_DIR/data.sql.gz" "$BACKUP_DIR/data.$DATE.sql.gz"
mv "$BACKUP_DIR/quick.restore.sql.gz" "$BACKUP_DIR/quick.restore.$DATE.sql.gz"
find /backup -type f -mtime +30
find /backup -type f -mtime +30 -delete
Mariadb backup
BACKUP_DIR=/backup
KEEP_DAYS=90
DBNAME=zabbix
DATE=$(date +%Y%m%d.%H%M)
mkdir -p "$BACKUP_DIR"
echo "schema"
mysqldump \
--defaults-file=~/zabbix_backup.cnf \
--flush-logs \
--single-transaction \
--create-options \
--no-data \
$DBNAME | gzip --fast > "$BACKUP_DIR/schema.sql.gz" && \
echo "data" && \
mysqldump \
--defaults-file=~/zabbix_backup.cnf \
--flush-logs \
--single-transaction \
--no-tablespaces \
--ignore-table=$DBNAME.history \
--ignore-table=$DBNAME.history_log \
--ignore-table=$DBNAME.history_str \
--ignore-table=$DBNAME.history_text \
--ignore-table=$DBNAME.history_uint \
--ignore-table=$DBNAME.trends \
--ignore-table=$DBNAME.trends_uint \
$DBNAME > "$BACKUP_DIR/data.sql" && \
echo "compressing data" && gzip --fast "$BACKUP_DIR/data.sql" && \
echo "snapshot" && mysqldump \
--defaults-file=~/zabbix_backup.cnf \
--flush-logs \
--single-transaction \
--ignore-table=$DBNAME.history \
--ignore-table=$DBNAME.history_log \
--ignore-table=$DBNAME.history_str \
--ignore-table=$DBNAME.history_text \
--ignore-table=$DBNAME.history_uint \
--ignore-table=$DBNAME.trends \
--ignore-table=$DBNAME.trends_uint \
$DBNAME > "$BACKUP_DIR/snapshot.sql" && \
echo "compressing snapshot" && gzip --fast "$BACKUP_DIR/snapshot.sql" && \
mv "$BACKUP_DIR/schema.sql.gz" "$BACKUP_DIR/schema.$DATE.sql.gz" && \
mv "$BACKUP_DIR/data.sql.gz" "$BACKUP_DIR/data.$DATE.sql.gz" && \
mv "$BACKUP_DIR/snapshot.sql.gz" "$BACKUP_DIR/snapshot.$DATE.sql.gz"
if [ -d "$BACKUP_DIR" ]; then
echo "deleting files older than $KEEP_DAYS days:"
find "$BACKUP_DIR" -maxdepth 1 -type f -name '*.gz' -mtime +$KEEP_DAYS
find "$BACKUP_DIR" -maxdepth 1 -type f -name '*.gz' -mtime +$KEEP_DAYS -delete
fi
Rotate backups for 30 days
find /backup -type f -mtime +30
find /backup -type f -mtime +30 -delete
Insert to a temporary table
zcat trends_uint.sql.gz | sed 's|trends_uint|trends_uint_old|' | mysql zabbix
zcat history_uint.sql.gz | sed 's|history_uint|history_uint_old|' | mysql zabbix
zcat trends.sql.gz | sed 's|trends|trends_old|' | mysql zabbix
zcat history.sql.gz | sed 's|history|history_old|' | mysql zabbix
Backup to a remote system
mysqldump \
--defaults-file=/root/.my.cnf \
--single-transaction \
--no-create-info \
zabbix history_text \
--where="clock BETWEEN 1690257300 AND 1690279380" | \
gzip --stdout | ssh root@192.168.88.15 "cat > /dev/shm/history_text.sql.gz"
Backup and compress zabbix server config with a purpose to restore it on other machine
tar --create --verbose --use-compress-program='gzip -9' /etc/zabbix/zabbix_server.conf | base64 -w0 | sed 's|^|cd / \&\& echo "|' | sed 's%$%" | base64 --decode | gunzip | tar -xv%' && echo
Backup and compress Zabbix agent 2 config with a purpose to restore it on other machine
tar --create --verbose --use-compress-program='gzip -9' /etc/zabbix/zabbix_agent2.conf | base64 -w0 | sed 's|^|cd / \&\& echo "|' | sed 's%$%" | base64 --decode | gunzip | tar -xv%' && echo
Backup and compress partitioning script and configuration files
tar --create --verbose --use-compress-program='gzip -9' /etc/zabbix/zabbix_partitioning.conf /usr/local/bin/zabbix_partitioning.py /etc/cron.d/zabbix_partitioning | base64 -w0 | sed 's|^|cd / \&\& echo "|' | sed 's%$%" | base64 --decode | gunzip | tar -xv%' && echo
Compress frontend config and logs and split into 15 MB parts
tar --create --verbose --use-compress-program='xz' /etc/zabbix /etc/nginx /etc/httpd /etc/apache2 /etc/php-fpm.d /var/log/nginx /var/log/httpd /var/log/apache2 /var/log/php-fpm  | split -b 15MiB - /tmp/frontend.config.and.logs.tar.xz_
Backup all frontend modules with 'xz' compression
tar --create --verbose --use-compress-program='xz -9' /usr/share/zabbix/modules | base64 -w0 | sed 's|^|cd / \&\& echo "|' | sed 's%$%" | base64 --decode | unxz | tar -xv%' && echo

Download this section: https://aigarskadikis.github.io/src/backup.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/backup.sh

Hog processes, hog
ps -eo pcpu,pmem,pid,ppid,user,stat,args | sort -k 1 -r | head -6 | sed 's/$/\n/'
Rotate between values
echo "
one
two
" | grep -v "^$" | while IFS= read -r LINE
do {
echo $LINE
} done
Debug snmp
snmpwalk -v'2c' -c'public' -Dsnmp IP OID
Debug snmp transport
snmpwalk -v'2c' -c'public' -Dtransport IP OID
Debug in hex format
snmpwalk -v'2c' -c'public' -dd IP OID
Poller busy
watch -n1 'ps auxww | grep -Eo "[:] poller #.*"'
Received traffic which originated only from remote host
tcpdump -e -i any -c 1000 -nn -tt -q -l 'ip and inbound and tcp[tcpflags] & tcp-syn != 0 and tcp[tcpflags] & tcp-ack == 0'
Convert to readable date
tail -99f /var/log/zabbix/zabbix_proxy.log | sed 's/\([0-9]\+\):\(....\)\(..\)\(..\):\(..\)\(..\)\(..\)/\2-\3-\4 \5:\6:\7/'
Convert to readable date including PID
tail -99f /var/log/zabbix/zabbix_proxy.log | sed 's/\([0-9]\+\):\(....\)\(..\)\(..\):\(..\)\(..\)\(......\)/\2-\3-\4 \5:\6:\7 PID:\1/'
Look for 2nd match between "start pattern" and "end pattern"
cat /var/log/zabbix/zabbix_proxy.log |\
awk '
/start pattern/ {
found++
}
found == 2
/end pattern/ && found == 2 {
exit
}'
Prometheus pattern capture
# Capture the 2nd Prometheus raw data block from the proxy log and strip the marker lines.
# NOTE: the exclusion grep must use an ASCII hyphen "-v"; a Unicode dash would be parsed as a pattern.
cat /var/log/zabbix/zabbix_proxy.log |\
awk '
/Prometheus raw data start/ {
found++
}
found == 2
/Prometheus raw data end/ && found == 2 {
exit
}' | grep -v 'Prometheus raw data' > /tmp/prom.input.txt
Backup whole etc
DATE=`date '+%Y.%m.%d.%H.%M'` && cd /etc && mkdir -p ~/backup${DATE}${PWD} && cp -a * ~/backup${DATE}${PWD}
Extract creation of tables. Remove line breaks, show printable characters, add _new at the end
cat schema.sql | \
tr -d '\n' | \
sed 's|;|;\n|g' | \
grep -E "^CREATE TABLE (history|trends).*" | \
sed -E '
s|\s+| |g;
s|CREATE TABLE (history[^ (,]*)|CREATE TABLE \1_new|;
s|CREATE TABLE (trends[^ (,]*)|CREATE TABLE \1_new|' > /tmp/create_new.sql
Create fork of timescaledb
# Rewrite the TimescaleDB schema so create_hypertable() targets the *_new tables
# (same transformation as the "Remaster timescaledb" snippet below, printed to stdout).
# The sed program must be a single unbroken string — a line break inside it is a syntax error.
cat schema.sql | \
sed 's|^.UPDATE config.*||' | \
sed "s|', 'clock', chunk_time_interval|_new', 'clock', chunk_time_interval|"
Remaster timescaledb create script to work with _new tables
cat schema.sql | \
sed 's|^.UPDATE config.*||' | \
sed "s|', 'clock', chunk_time_interval|_new', 'clock', chunk_time_interval|" > /tmp/enable.for.new.sql
Prepare temporary tables
DB=sample6023
OLD=_old
NEW=_new
echo "
history
history_uint
history_str
history_log
history_text
trends
trends_uint
" | \
grep -v "^$" | \
while IFS= read -r TABLE
do {
psql $DB --command="
ALTER TABLE $TABLE RENAME TO $TABLE$OLD;
ALTER TABLE $TABLE$NEW RENAME TO $TABLE;
ALTER TABLE $TABLE OWNER TO zabbix;
CREATE TABLE $TABLE$NEW (LIKE $TABLE INCLUDING ALL);
ALTER TABLE $TABLE$NEW OWNER TO zabbix;
"
} done
Convert the biggest data into hyper tables. This process will take multiple hours/days
DB=sample6023
OLD=_old
NEW=_new
echo "
trends
trends_uint
history
history_uint
history_str
history_log
history_text
" | \
grep -v "^$" | \
while IFS= read -r TABLE
do {
echo "converting $TABLE$OLD into hyper tables started at $(date)"
psql $DB --command="
INSERT INTO $TABLE$NEW SELECT * FROM $TABLE$OLD ON CONFLICT DO NOTHING;
"
} done
Merge data together
DB=sample6023
OLD=_old
NEW=_new
TMP=_tmp
echo "
trends
trends_uint
history
history_uint
history_str
history_log
history_text
" | \
grep -v "^$" | \
while IFS= read -r TABLE
do {
echo "merging all data together for $TABLE started at $(date)"
psql $DB --command="
ALTER TABLE $TABLE RENAME TO $TABLE$TMP;
ALTER TABLE $TABLE$NEW RENAME TO $TABLE;
INSERT INTO $TABLE SELECT * FROM $TABLE$TMP ON CONFLICT DO NOTHING;
"
} done
Drop unnecessary tables. Print sensitive statements
OLD=_old
TMP=_tmp
echo "
trends
trends_uint
history
history_uint
history_str
history_log
history_text
" | \
grep -v "^$" | \
while IFS= read -r TABLE
do {
echo "DROP TABLE $TABLE$TMP;"
echo "DROP TABLE $TABLE$OLD;"
} done
Oracle connection test
cd /opt/oracle/instantclient_23_7 && ./sqlplus "system/oracle@(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(Host=10.10.6.26)(Port=49161))(CONNECT_DATA=(SID=xe)))"
Postgresql connection test
PGHOST=10.10.6.23 PGPORT=5432 PGUSER=zbx_monitor PGPASSWORD='passwordGoesHere' psql
MSSQL connection test. '-C' means Trust Server Certificate
sqlcmd --server '10.10.4.21' --user-name 'zbx_monitor' --password 'passwordGoesHere' -C
Memory situation in megabytes
ps -eo rss,pid,user,command --sort -size | awk '{ hr=$1/1024 ; printf("%13.2f Mb ",hr) } { for ( x=4 ; x<=NF ; x++ ) { printf("%s ",$x) } print "" }' | cut -d "" -f2 | cut -d "-" -f1
Interface unavailable
grep :20250124:01 /var/log/zabbix/zabbix_server.log | grep -Eo "\S+ interface unavailable" | sort | uniq -c
grep :20250124:03 /var/log/zabbix/zabbix_server.log | grep -Eo "\S+ interface unavailable" | sort | uniq -c
Target second appearance of a block
awk '/Prometheus raw data start/{found++} found==2; /Prometheus raw data end/ && found==2{exit}' /var/log/zabbix/zabbix_proxy.log
Merge back
cat frontend.config.and.logs.tar.xz_* > together.tar.xz
Yt-dlp best audio and video
yt-dlp -f "bv[ext!=webm]+ba/b[ext!=webm]" --download-archive archive.log
Delete line with sed
sed -i '/^Hostname=Zabbix proxy$/d' /etc/zabbix/zabbix_proxy.conf
sed -i '/^Hostname=Zabbix server$/d' /etc/zabbix/zabbix_agentd.conf
sed -i '/^Hostname=Zabbix server$/d' /etc/zabbix/zabbix_agent2.conf
Use rdesktop with 150 dpi scaling
rdesktop -u 'Administrator' -p 'Passw0rd' -g 1920x1080@150 -a 32 -x 1 -b ip.address.of.client -x 0x80
List files inside package rpm
rpm -ql package_name
Convert short IPv6 format to long
python3 -c "import ipaddress; print(ipaddress.IPv6Address('2001:db8::1').exploded)"
Test port
nc -zv ip 10050
Crop video from 1920x1200 to 1920x1080p
ffmpeg -i input.mkv -vf "crop=1920:1080:0:60" -c:v libx264 -crf 18 -preset veryfast -c:a copy output.mkv
Screen with logging enabled
screen -L -Logfile /tmp/screen.log
Avoid asking for service restarts on Ubuntu 22/24
echo "\$nrconf{restart} = 'a';" | sudo tee /etc/needrestart/conf.d/no-prompt.conf
Check what exactly perform write
iotop --kilobytes --delay=3 --iter=5
History syncer running
watch -n1 "ps auxww | grep -Eo '[:] history syncer.*'"
See the origin of data collection
strace -s 2048 -f -v -o /tmp/sar.log sar 1 1
cat /tmp/sar.log | grep '/proc/stat'
Volume configuration
lvm lvs -o+lv_layout,stripes > /tmp/volume.conf.txt
CPU information
lscpu > /tmp/lscpu.txt
2 snapshots of busiest CPU processes
# Two batch-mode snapshots sorted by CPU. Redirect stdout first, then duplicate stderr
# into it, so both streams land in the file (the reversed order left stderr on the terminal).
top -b -o +%CPU -n 2 > /tmp/top.cpu.txt 2>&1
Install iperf3, iostat fio
dnf install sysstat fio iperf3
Test disk speed
cd /var/lib/mysql
fio --name TEST --eta-newline=5s --filename=test.img --rw=randwrite --size=500m --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting
QD1 4k Random Write - IO performance/IOPS: >= 3000 IOPS
fio --name TEST --eta-newline=5s --filename=test.img --rw=randwrite --size=500m --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=1 --direct=1 --numjobs=1 --runtime=60 --group_reporting
QD32 4k Random Write - "multithread" IO performance/IOPS: >= 10000 IOPS
fio --name TEST --eta-newline=5s --filename=test.img --rw=randwrite --size=500m --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting
QD32 2048kb - bandwidth test/Throughput bound: > 250-300 MByte/s
fio --name TEST --eta-newline=5s --filename=test.img --rw=randwrite --size=500m --io_size=10g --blocksize=2048kb --ioengine=libaio --fsync=1 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting
Process list for 120 (24x5) seconds
for i in `seq 1 24`; do echo $(date) >> /tmp/process.list.txt && ps auxww >> /tmp/process.list.txt && echo "=======" >> /tmp/process.list.txt && sleep 5; done
Size of table partitions
du -lah /var/lib/mysql | awk '{ print $2,"",$1 }' | grep "#" | sort
TCP state statistics for 2 minutes
for i in `seq 1 120`; do echo $(date) | tee -a /tmp/tcp.state.txt && awk '{print $4}' /proc/net/tcp /proc/net/tcp6 | grep -v st | sort | uniq -c | tee -a /tmp/tcp.state.txt && wc -l /proc/net/tcp | tee -a /tmp/tcp.state.txt && wc -l /proc/net/tcp6 | tee -a /tmp/tcp.state.txt && echo "=======" | tee -a /tmp/tcp.state.txt && sleep 1; done
Discoverer processes
watch -n1 "ps auxww | grep -Eo '[:] discoverer #.*'"
Set iperf3 on listening port
iperf3 -s -p 10050
Send data
iperf3 -c address.of.agent.server -p 10050 -t 10
Disk utilisation
iostat -x 1
Remove leading and trailing space with sed
sed 's/^[\t ]*//g;s/[\t ]*$//g'
Endless loop to deliver metric
while true; do zabbix_sender -z 127.0.0.1 -s $(hostname) -k agent.ping -o 1; sleep 30; done
Statistics per history write cache
while true; do echo "$(date '+%Y-%m-%d %H:%M:%S') $(zabbix_server -R diaginfo=historycache | grep -A1 "Top.values" | grep "[i]temid")" >> /tmp/top.itemid.log; sleep 1; done
Test disk throughput
cd /var/lib/mysql
dd if=/dev/urandom of=512M bs=1M count=512 oflag=direct
rm 512M
dd if=/dev/urandom of=5GB bs=1M count=5120 oflag=direct
rm 5GB
dd if=/dev/urandom of=50GB bs=1M count=51200 oflag=direct
rm 50GB
Simulate javascript code without placing file in filesystem
zabbix_js --script <(echo 'return 1;') --param '' --loglevel 4 --timeout 60
Feed the output of file into javascript program
zabbix_js --script <(echo 'return value;') --loglevel 3 --timeout 60 --input <(grep -v "^$\|#" /etc/zabbix/zabbix_agentd.conf | sort)
Erase duplicate data in table 'history_text'. This does NOT work like discard unchanged
mysql \
--database='zabbix' \
--silent \
--skip-column-names \
--batch \
--execute="
SELECT items.itemid
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status IN (0,1)
AND items.value_type=4
AND items.flags IN (0,4)
" | \
while IFS= read -r ITEMID
do {
echo $ITEMID
sleep 0.01
echo "
DELETE t1 FROM history_text t1
INNER JOIN history_text t2
WHERE t1.clock < t2.clock
AND t1.value=t2.value
AND t1.itemid=t2.itemid
AND t1.itemid=$ITEMID
" | mysql zabbix
} done
Print metrics on screen
while true; do
echo -n 'total: ' && zabbix_get -s 127.0.0.1 -k system.cpu.util[];
echo -n 'user: ' && zabbix_get -s 127.0.0.1 -k system.cpu.util[,user];
echo -n 'system: '&& zabbix_get -s 127.0.0.1 -k system.cpu.util[,system];
echo -n 'guest: ' && zabbix_get -s 127.0.0.1 -k system.cpu.util[,guest];
echo -n 'guest_nice: ' && zabbix_get -s 127.0.0.1 -k system.cpu.util[,guest_nice];
echo '------------'
sleep 1; done;
Compress /var/lib/mysql by using gzip
tar --create --verbose --use-compress-program='gzip --fast' --file=/tmp/var.lib.mysql.tar.gz /var/lib/mysql
Suggestions based on OS
cat /etc/*release* > /tmp/$(hostname).os.txt
df -m > /tmp/$(hostname).disk.txt
free -m > /tmp/$(hostname).memory.usage.txt
cat /proc/meminfo > /tmp/$(hostname).meminfo.txt
cat /proc/cpuinfo > /tmp/$(hostname).cpuinfo.txt
mysql -e "show variables" > /tmp/$(hostname).mysql.variables.txt
sysctl -a > /tmp/$(hostname).kernel.settings.txt
ps -xafuww > /tmp/$(hostname).process.list.tree.txt
Stats about MySQL data directory
du -lah /var/lib/mysql | awk '{ print $2,"",$1 }' | sort > /tmp/mysql.files.human.readable.txt
du -lab /var/lib/mysql | awk '{ print $2,"",$1 }' | sort > /tmp/mysql.files.size.in.bytes.txt
du -lab /var/lib/mysql | sort -n > /tmp/biggest.mysql.files.txt
Follow unreachable poller with 2 digits. Print IP address
tail -999f /var/log/zabbix/zabbix_proxy.log | grep $(ps auxww|grep "[u]nreachable poller #99" | awk '{ print $2 }'): | grep -E "\[[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\]"
Screenshot
google-chrome --no-sandbox --headless --print-to-pdf=/tmp/zabbix-https.pdf yourfrontendlink
chrome --no-sandbox --headless --print-to-pdf=/tmp/zabbix-https.pdf yourfrontendlink
Restore
rm /var/lib/mysql/* -rf
tar xvf /tmp/var.lib.mysql.tar.gz --directory=/var/lib/mysql
Backup /etc
tar --create --verbose --use-compress-program='gzip --best' --file=/tmp/etc.tar.gz /etc
Backup /etc with maximum compression
tar --create --verbose --use-compress-program='xz' --file=/tmp/etc.tar.xz /etc
Show printable characters
cat file.html | tr -d '\n' | tr -cd '[:print:]'
Show printable characters on windows
cat file.html | tr -cd '[:print:]'
"sid" in the web server log is half (the end of) of the "sessionid" in the database. Zabbix 6.0, NGINX
tail -9999f /var/log/nginx/access.log | grep -Eo "sid.*dashboardid=\S+"
Sum together 5th column
ls -lb /var/lib/mysql/zabbix | grep '#p2024_10_08' | awk '{sum += $5} END {print sum}'
Show how much data generate in a specific date
ls -lb /var/lib/mysql/zabbix | grep '#p2024_10_08' | awk '{sum += $5} END {print sum / (1024^3) " GB"}'
Backup etc to home directory in readable format
cd /etc && mkdir -p ~/backup${PWD} && cp -a * ~/backup${PWD}
List biggest partitions
ls --sort=size -lh | grep '#' | tac
Send all UDP 162 traffic into a human readable log
tcpdump -l -i any udp dst port 162 -x -vv 1>&2 >/tmp/udp162.log
Transport data. On the destination server, go to the directory where files must be placed. Set to listen on port 1234
nc -l 1234 | tar -xvf -
Transport data. On the source server, start sending. Replace x.y.z.w with the real IP address
tar cf - . | nc x.y.z.w 1234
Compress /var/lib/mysql by using lz4
dnf install lz4
tar --create --verbose --use-compress-program='lz4' --file=/tmp/var.lib.mysql.tar.lz4 /var/lib/mysql
Restore
rm /var/lib/mysql/* -rf
cd /
unlz4 /tmp/var.lib.mysql.tar.lz4 | tar xvf -
Track when trends are completed
watch -n.2 'mysql -e "show full processlist;" | grep insert'
Take poller #11, increase log level to 5 and stream live situation of what kind of items are fetched and how fast. Zabbix 6.0, 6.2, 6.4
zabbix_proxy -R log_level_increase="poller",11
zabbix_proxy -R log_level_increase="poller",11
tail -999f /var/log/zabbix/zabbix_proxy.log | grep "$(ps auxww | grep ":[ ]poller #11 " | awk '{ print $2 }')" | grep -E 'interfaceid:\S+ itemid:\S+ type:\S+|zbx_setproctitle.*idle'
Take poller #11, increase log level to 5 and stream live situation of what kind of items are fetched and how fast. Zabbix 5.0
zabbix_proxy -R log_level_increase="poller",1
zabbix_proxy -R log_level_increase="poller",1
tail -999f /var/log/zabbix/zabbix_proxy.log | grep "$(ps auxww | grep ":[ ]poller #1 " | awk '{ print $2 }')" | grep -E 'hostid:\S+ itemid:\S+ type:\S+|zbx_setproctitle.*idle'
tail -99f /var/log/zabbix/zabbix_proxy.log | grep "$(ps auxww | grep ":[ ]poller #1 " | awk '{ print $2 }')" | grep -Eo 'hostid:\S+ itemid:\S+ type:\S+|got.*sec.*idle'
Active checks failing
grep -Eo "cannot send list of active checks.*" /var/log/zabbix/zabbix_proxy.log | sort | uniq -c
Outgoing ports, persistent connection
ss --tcp --numeric --processes | grep zabbix_server
Process list with parents
ps -xafuww > /tmp/process.list.$(hostname).txt
Top memory processes
ps -eo time,start_time,pcpu,pmem,user,args --sort pmem > /tmp/top.mem.txt
Hungry CPU processes
ps -eo time,start_time,pcpu,pmem,user,args --sort time > /tmp/top.cpu.txt
All socket statistics
ss --all --numeric --processes > /tmp/$(hostname).socket.statistics.txt
Only listening ports and explanation
ss --tcp --numeric --listen --processes > /tmp/$(hostname).listening.ports.txt
All installed package names
rpm -qa > /tmp/$(hostname).installed.rpms.txt
Generate random password by using bash tools
< /dev/urandom tr -dc A-Za-z0-9 | head -c${1:-20};echo;
< /dev/urandom tr -dc A-Za-z0-9 | head -c${1:-24};echo;
< /dev/urandom tr -dc A-Za-z0-9 | head -c${1:-32};echo;
< /dev/urandom tr -dc 'A-Za-z0-9~!@#$%^&*()-+<>.,/\"' | head -c${1:-20};echo;
Php official memory setting
find /etc -name zabbix.conf -exec grep --with-filename memory {} \;
A sum per column
ls -lb | grep "history_str#" | awk '{ print $5 }' | python -c "import sys; print(sum(int(l) for l in sys.stdin))"
Test item key
zabbix_agent2 -t 'web.certificate.get[www.linkedin.com,443,]'
su zabbix --shell /bin/bash --command "zabbix_agent2 -t 'web.certificate.get[www.linkedin.com,443,]'"
Backup
psql z50 -c "COPY (SELECT * FROM trends) TO stdout DELIMITER ',' CSV" | lz4 > /tmp/z50.trends.csv.lz4
psql z50 -c "COPY (SELECT * FROM trends_uint) TO stdout DELIMITER ',' CSV" | lz4 > /tmp/z50.trends_uint.csv.lz4
Restore
psql z50 -c "\COPY trends FROM PROGRAM 'lz4cat /tmp/z50.trends.csv.lz4' DELIMITER ',' CSV"
psql z50 -c "\COPY trends_uint FROM PROGRAM 'lz4cat /tmp/z50.trends_uint.csv.lz4' DELIMITER ',' CSV"
Erase duplicate data in table 'history_str'. This does NOT work like discard unchanged
mysql \
--database='zabbix' \
--silent \
--skip-column-names \
--batch \
--execute="
SELECT items.itemid
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status IN (0,1)
AND items.value_type=1
AND items.flags IN (0,4)
" | \
while IFS= read -r ITEMID
do {
echo $ITEMID
sleep 0.01
echo "
DELETE t1 FROM history_str t1
INNER JOIN history_str t2
WHERE t1.clock < t2.clock
AND t1.value=t2.value
AND t1.itemid=t2.itemid
AND t1.itemid=$ITEMID
" | mysql zabbix
} done
Discard unchanged 'history_text' for all item IDs
mysql \
--database='zabbix' \
--silent \
--skip-column-names \
--batch \
--execute="
SELECT items.itemid
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status IN (0,1)
AND items.value_type=4
AND items.flags IN (0,4)
" | \
while IFS= read -r ITEMID
do {
echo $ITEMID
sleep 0.01
echo "
DELETE FROM history_text WHERE itemid=$ITEMID AND clock IN (
SELECT clock from (
SELECT clock, value, r, v2 FROM (
SELECT clock, value, LEAD(value,1) OVER (order by clock) AS v2,
CASE
WHEN value <> LEAD(value,1) OVER (order by clock)
THEN value
ELSE 'zero'
END AS r
FROM history_text WHERE itemid=$ITEMID
) x2
where r = 'zero'
) x3
WHERE v2 IS NOT NULL
)
" | mysql zabbix
} done
Discard unchanged 'history_str' for all item IDs
mysql \
--database='zabbix' \
--silent \
--skip-column-names \
--batch \
--execute="
SELECT items.itemid
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status IN (0,1)
AND items.value_type=1
AND items.flags IN (0,4)
" | \
while IFS= read -r ITEMID
do {
echo $ITEMID
sleep 0.01
echo "
DELETE FROM history_str WHERE itemid=$ITEMID AND clock IN (
SELECT clock from (
SELECT clock, value, r, v2 FROM (
SELECT clock, value, LEAD(value,1) OVER (order by clock) AS v2,
CASE
WHEN value <> LEAD(value,1) OVER (order by clock)
THEN value
ELSE 'zero'
END AS r
FROM history_str WHERE itemid=$ITEMID
) x2
where r = 'zero'
) x3
WHERE v2 IS NOT NULL
)
" | mysql zabbix
} done
Before starting service 'zabbix-proxy' truncate all data tables
DB=zabbix_proxy
CREDENTIALS=/root/.my.cnf
mysql \
--defaults-file=$CREDENTIALS \
--database=$DB \
--silent \
--skip-column-names \
--batch \
--execute="SELECT COUNT(*) FROM hosts WHERE status=3;" | \
grep -E "^0$" && mysql \
--defaults-file=$CREDENTIALS \
--database=$DB \
--silent \
--skip-column-names \
--batch \
--execute="SELECT table_name FROM ids" | while IFS= read -r TABLE
do {
echo mysql --defaults-file=$CREDENTIALS --database=$DB --execute="TRUNCATE TABLE $TABLE"
} done || echo "this seems like a central zabbix server, because there are template objects in database"
History_text discard unchanged (value_type=4)
echo "polishing history_text (value_type=4)"
PGHOST=pg16 PGPORT=7416 psql z70 --tuples-only --no-align --command="
SELECT items.itemid
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status IN (0,1)
AND items.value_type=4
AND items.flags IN (0,4)
" | \
while IFS= read -r ITEMID
do {
echo -n "$ITEMID "
echo "
DELETE FROM history_text WHERE itemid=$ITEMID AND clock IN (
SELECT clock from (
SELECT clock, value, r, v2 FROM (
SELECT clock, value, LEAD(value,1) OVER (order by clock) AS v2,
CASE
WHEN value <> LEAD(value,1) OVER (order by clock)
THEN value
ELSE 'zero'
END AS r
FROM history_text WHERE itemid=$ITEMID
) x2
where r = 'zero'
) x3
WHERE v2 IS NOT NULL
)" | PGHOST=pg16 PGPORT=7416 psql z70
} done
History_str discard unchanged
echo "polishing history_str (value_type=1)"
PGHOST=pg16 PGPORT=7416 psql z70 --tuples-only --no-align --command="
SELECT items.itemid
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status IN (0,1)
AND items.value_type=1
AND items.flags IN (0,4)
" | \
while IFS= read -r ITEMID
do {
echo -n "$ITEMID "
echo "
DELETE FROM history_str WHERE itemid=$ITEMID AND clock IN (
SELECT clock from (
SELECT clock, value, r, v2 FROM (
SELECT clock, value, LEAD(value,1) OVER (order by clock) AS v2,
CASE
WHEN value <> LEAD(value,1) OVER (order by clock)
THEN value
ELSE 'zero'
END AS r
FROM history_str WHERE itemid=$ITEMID
) x2
where r = 'zero'
) x3
WHERE v2 IS NOT NULL
)" | PGHOST=pg16 PGPORT=7416 psql z70
} done
Maintain timescaledb partitions
PGHOST=pg16 PGPORT=7416 psql z70 -c "
SELECT drop_chunks(relation=>'history_log', older_than=>extract(epoch from now()::DATE - 5)::integer);
SELECT drop_chunks(relation=>'history_uint', older_than=>extract(epoch from now()::DATE - 9)::integer);
SELECT drop_chunks(relation=>'history', older_than=>extract(epoch from now()::DATE - 9)::integer);
SELECT drop_chunks(relation=>'history_bin', older_than=>extract(epoch from now()::DATE - 3)::integer);
"

Download this section: https://aigarskadikis.github.io/src/bash.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/bash.sh

Zabbix 6.0 LTS repository
rpm -Uvh https://repo.zabbix.com/zabbix/6.0/rhel/8/x86_64/zabbix-release-6.0-2.el8.noarch.rpm
Familiar utilities
dnf -y install strace vim tmux jq zabbix-get zabbix-sender zabbix-js
SNMP trap dependencies. Snmptrap (net-snmp-utils), snmpwalk (net-snmp-utils), snmptrapd (net-snmp), dependencies for zabbix_trap_receiver.pl (net-snmp-perl)
dnf -y install net-snmp-utils net-snmp-perl net-snmp
Python 3 with YAML and MySQL connector support
dnf -y install python3 python3-PyMySQL python3-PyYAML
Install MySQL server on Oracle Linux 8, RHEL 8, Rocky Linux 8
dnf -y install mysql-server
Work with Zabbix source
dnf -y install git automake gcc

Download this section: https://aigarskadikis.github.io/src/el8.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/el8.sh

Convert test string to LLD macro
="{#"&UPPER(A2)&"}"
Substitute
=SUBSTITUTE(A2, "entPhysical", "")
Extract "ENTITY-MIB" from "ENTITY-MIB::entPhysicalDescr"
=LEFT(A2,FIND("::",A2)-1)
Extract "entPhysicalDescr" from "ENTITY-MIB::entPhysicalDescr"
=RIGHT(A2,LEN(A2)-FIND("::",A2)-1)

Download this section: https://aigarskadikis.github.io/src/excel.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/excel.sh

MySQL monitoring through Zabbix agent 2
zabbix_get -s 127.0.0.1 -k 'mysql.get_status_variables["tcp://127.0.0.1:3306","zbx_monitor","zabbix"]'
PostgreSQL monitoring through Zabbix agent 2
zabbix_get -s 127.0.0.1 -k 'pgsql.dbstat["tcp://127.0.0.1:5432","zbx_monitor","zabbix"]'
Oracle 19c monitoring through Zabbix agent 2
zabbix_get -s 127.0.0.1 -k 'oracle.instance.info["tcp://127.0.0.1:1521","zabbix_mon","zabbix","ORCLPDB1"]'

Download this section: https://aigarskadikis.github.io/src/get.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/get.sh

Check if the master process is running for server, proxy or agent
ps auxww | grep "[z]abbix.*conf"
How long one configuration synchronization cycle takes. Command suitable for "zabbix_server" and "zabbix_proxy"
ps auxww | grep "[c]onfiguration.*synced"
Zabbix server configuration file
grep -v "^$\|#" /etc/zabbix/zabbix_server.conf | sort
Observe credentials how zabbix-server-mysql connects to database
grep ^DB /etc/zabbix/zabbix_server.conf
grep "DB\|Include" /etc/zabbix/zabbix_server.conf | grep -v '#'
"trapper" processes of zabbix_server or zabbix_proxy
ps auxww | grep '[:] trapper'
"history syncer" of zabbix_server or zabbix_proxy
ps auxww | grep '[:] history syncer'
All restarts. Graceful restarts
grep "Starting Zabbix Server\|Zabbix Server stopped" /var/log/zabbix/zabbix_server.log
zcat /var/log/zabbix/zabbix_server*gz | grep "Starting Zabbix Server\|Zabbix Server stopped"
Check slow queries
grep "slow query" /var/log/zabbix/zabbix_server.log
Differences between backend nodes
grep -r '=' /etc/zabbix/zabbix_server.d/*
How much time it takes for the housekeeper process to complete
grep housekeeper /var/log/zabbix/zabbix_server.log
Most busy item for preprocessing worker
# The first grep needs a file operand — without it the pipeline waits on stdin forever.
# Numeric sort orders the uniq -c counts correctly regardless of digit width.
grep -Eo "In preprocessor_enqueue_dependent.*" /var/log/zabbix/zabbix_server.log | sort | uniq -c | sort -n

Download this section: https://aigarskadikis.github.io/src/grep.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/grep.sh

Check that the JMX agent requires SSL for RMI registry
curl -vvv -k https://127.0.0.1:9990
openssl s_client -showcerts -connect 127.0.0.1:9990

Download this section: https://aigarskadikis.github.io/src/jmx.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/jmx.sh

List of MySQL tables/partitions by size
du -ab /var/lib/mysql > /tmp/size.of.tables.txt
du -ah /var/lib/mysql > /tmp/size.of.tables.human.readable.txt
Backup data directory. Fastest way time-wise
systemctl stop mysqld
tar --create --verbose --use-compress-program='gzip --fast' --file=/tmp/var.lib.mysql.tar.gz /var/lib/mysql
Check "Max open files" for MySQL daemon. This is important if a lot of DB table partitions are used
cat /proc/$(ps auxww | grep "[m]ysqld" | awk '{print $2}')/limits | grep "Max open files"
Authorize in MySQL
mysql --host=127.0.0.1 --database=zabbixDB --user=zbx_srv --password='zabbix' --port=3306
Track how DB table partitions grow
cd /var/lib/mysql/zabbix && watch -n1 'ls -Rltr | tail -10'
List of open files per MySQL server. If tables have a lot of partitions there will be a lot of files
lsof -p $(pidof mysqld) > /tmp/mysqld.list.open.files.txt
How much data is generated in 24h
du -lah /var/lib/mysql/zabbix | grep "$(date --date='2 days ago' '+p%Y_%m_%d')"
du -lah /var/lib/mysql/zabbix | grep "$(date --date='2 days ago' '+p%Y%m%d0000')"
Extract legacy SNMP agent items in use
mysql -sN --batch --host=127.0.0.1 --user=root --password='zabbix' --database=zabbix --execute="
SELECT DISTINCT templates.name, items.flags, items.name, items.snmp_oid
FROM hosts AS templates
INNER JOIN items ON items.hostid = templates.hostid
INNER JOIN hosts_templates ON hosts_templates.templateid = templates.hostid
WHERE hosts_templates.hostid IN (
SELECT hostid FROM hosts WHERE status=0
)
AND items.type=20
AND templates.status=3
ORDER BY templates.name, items.flags, items.name;
" > /tmp/all.snmp.templates.in.use.with.legacy.snmp.agent.item.tsv
Track progress of "OPTIMIZE TABLE" in MySQL
watch -n1 "ls -Rltr /var/lib/mysql/zabbix | grep '#sql'"
Schema backup for MySQL 8. Useful for scripts
mysqldump \
--defaults-file=/root/.my.cnf \
--flush-logs \
--single-transaction \
--set-gtid-purged=OFF \
--create-options \
--no-data \
zabbix | gzip --fast > schema.sql.gz
Create backup with a purpose to set up master to master replication
mysqldump --source-data --all-databases \
--set-gtid-purged=OFF \
--ignore-table=zabbix.history \
--ignore-table=zabbix.history_uint \
--ignore-table=zabbix.history_str \
--ignore-table=zabbix.history_text \
--ignore-table=zabbix.history_log \
--ignore-table=zabbix.trends \
--ignore-table=zabbix.trends_uint | gzip --fast > /tmp/backup.sql.gz
chmod 777 /tmp/backup.sql.gz
Schema dump for historical tables only. Backup schema for 7 historical tables. Useful if the replication needs to be repaired as fast as possible
mysqldump --set-gtid-purged=OFF --no-data zabbix history history_uint history_str history_log history_text trends trends_uint > empty.data.tables.with.partitions.sql
Data backup with gz compression
mysqldump \
--defaults-file=/root/.my.cnf \
--set-gtid-purged=OFF \
--flush-logs \
--single-transaction \
--no-create-info \
--ignore-table=zabbix.history \
--ignore-table=zabbix.history_log \
--ignore-table=zabbix.history_str \
--ignore-table=zabbix.history_text \
--ignore-table=zabbix.history_uint \
--ignore-table=zabbix.trends \
--ignore-table=zabbix.trends_uint \
zabbix > data.sql && \
gzip data.sql
Data backup with xz compression
mysqldump \
--defaults-file=/root/.my.cnf \
--set-gtid-purged=OFF \
--flush-logs \
--single-transaction \
--no-create-info \
--ignore-table=zabbix.history \
--ignore-table=zabbix.history_log \
--ignore-table=zabbix.history_str \
--ignore-table=zabbix.history_text \
--ignore-table=zabbix.history_uint \
--ignore-table=zabbix.trends \
--ignore-table=zabbix.trends_uint \
zabbix > data.sql && \
xz data.sql
Passwordless access for read-only user
cd && cat << 'EOF' > .my.cnf
[client]
host=192.168.88.101
user=zbx_ro
password=passwd_ro_zbx
EOF
Extend open_files_limit for service mysqld. Could not increase number of max_open_files to more than
mkdir -p /etc/systemd/system/mysqld.service.d && cd /etc/systemd/system/mysqld.service.d && cat << 'EOF' > override.conf
[Service]
LimitNOFILE=65535
EOF
systemctl --system daemon-reload
mysql_pid=$(ps aux | grep "[m]ysql" | head -n 1 | awk '{print $2}')
cat /proc/$mysql_pid/limits
On-the-fly configuration backup. Check that this is not the node holding the Virtual IP address. Do the backup on the slave
ip a | grep "192.168.88.55" || mysqldump \
--defaults-file=/root/.my.cnf \
--set-gtid-purged=OFF \
--flush-logs \
--single-transaction \
--ignore-table=zabbix.history \
--ignore-table=zabbix.history_log \
--ignore-table=zabbix.history_str \
--ignore-table=zabbix.history_text \
--ignore-table=zabbix.history_uint \
--ignore-table=zabbix.trends \
--ignore-table=zabbix.trends_uint \
zabbix | gzip > quick.restore.sql.gz
MySQL 8.0 schema backup. Credentials embedded in the command
mysqldump \
--host=127.0.0.1 \
--user=root \
--password='zabbix' \
--set-gtid-purged=OFF \
--flush-logs \
--single-transaction \
--create-options \
--no-data \
zabbix | gzip --fast > schema.sql.gz
Backup "_old" tables with fastest compression possible
DB=zabbix
DEST=/mnt/zabbixtemp
mkdir -p "$DEST"
mysqldump $DB trends_uint_old | lz4 > "$DEST/trends_uint_old.sql.lz4" &
mysqldump $DB trends_old | lz4 > "$DEST/trends_old.sql.lz4" &
mysqldump $DB history_uint_old | lz4 > "$DEST/history_uint_old.sql.lz4" &
mysqldump $DB history_old | lz4 > "$DEST/history_old.sql.lz4" &
mysqldump $DB history_str_old | lz4 > "$DEST/history_str_old.sql.lz4" &
mysqldump $DB history_log_old | lz4 > "$DEST/history_log_old.sql.lz4" &
mysqldump $DB history_text_old | lz4 > "$DEST/history_text_old.sql.lz4" &
MariaDB 10.3 schema backup
mysqldump \
--defaults-file=/root/.my.cnf \
--flush-logs \
--single-transaction \
--create-options \
--no-data \
zabbix > /root/schema52.sql
MariaDB 10.3 data backup without history
mysqldump \
--defaults-file=/root/.my.cnf \
--flush-logs \
--single-transaction \
--no-create-info \
--skip-triggers \
--ignore-table=zabbix.history \
--ignore-table=zabbix.history_log \
--ignore-table=zabbix.history_str \
--ignore-table=zabbix.history_text \
--ignore-table=zabbix.history_uint \
--ignore-table=zabbix.trends \
--ignore-table=zabbix.trends_uint \
zabbix | gzip --fast > /root/data52.sql.gz
Backup historical data individually. Compress with gzip
tmux
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix trends_uint | gzip --fast > /root/trends_uint.sql.gz
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix trends | gzip --fast > /root/trends.sql.gz
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history_uint | gzip --fast > /root/history_uint.sql.gz
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history | gzip --fast > /root/history.sql.gz
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history_str | gzip --fast > /root/history_str.sql.gz
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history_text | gzip --fast > /root/history_text.sql.gz
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history_log | gzip --fast > /root/history_log.sql.gz
Backup historical data individually. Compress with lz4
tmux
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix trends_uint | lz4 > /root/trends_uint.sql.lz4
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix trends | lz4 > /root/trends.sql.lz4
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history_uint | lz4 > /root/history_uint.sql.lz4
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history | lz4 > /root/history.sql.lz4
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history_str | lz4 > /root/history_str.sql.lz4
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history_text | lz4 > /root/history_text.sql.lz4
mysqldump --defaults-file=/root/.my.cnf --flush-logs --single-transaction --no-create-info --skip-triggers zabbix history_log | lz4 > /root/history_log.sql.lz4
Backup current data tables with fastest compression possible
DB=zabbix
DEST=/mnt/zabbixtemp
mkdir -p "$DEST"
mysqldump $DB trends_uint | lz4 > "$DEST/trends_uint.sql.lz4" &
mysqldump $DB trends | lz4 > "$DEST/trends.sql.lz4" &
mysqldump $DB history_uint | lz4 > "$DEST/history_uint.sql.lz4" &
mysqldump $DB history | lz4 > "$DEST/history.sql.lz4" &
mysqldump $DB history_str | lz4 > "$DEST/history_str.sql.lz4" &
mysqldump $DB history_log | lz4 > "$DEST/history_log.sql.lz4" &
mysqldump $DB history_text | lz4 > "$DEST/history_text.sql.lz4" &
Restore from sql.gz
time zcat /root/trends_uint.sql.gz | sed 's|trends_uint|trends_uint_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time zcat /root/history_uint.sql.gz | sed 's|history_uint|history_uint_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time zcat /root/trends.sql.gz | sed 's|trends|trends_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time zcat /root/history.sql.gz | sed 's|history|history_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time zcat /root/history_str.sql.gz | sed 's|history_str|history_str_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time zcat /root/history_text.sql.gz | sed 's|history_text|history_text_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time zcat /root/history_log.sql.gz | sed 's|history_log|history_log_old|' | mysql --defaults-file=/root/.my.cnf zabbix
Restore from sql.lz4
time unlz4 /root/trends_uint.sql.lz4 | sed 's|trends_uint|trends_uint_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time unlz4 /root/history_uint.sql.lz4 | sed 's|history_uint|history_uint_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time unlz4 /root/trends.sql.lz4 | sed 's|trends|trends_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time unlz4 /root/history.sql.lz4 | sed 's|history|history_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time unlz4 /root/history_str.sql.lz4 | sed 's|history_str|history_str_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time unlz4 /root/history_text.sql.lz4 | sed 's|history_text|history_text_old|' | mysql --defaults-file=/root/.my.cnf zabbix
time unlz4 /root/history_log.sql.lz4 | sed 's|history_log|history_log_old|' | mysql --defaults-file=/root/.my.cnf zabbix

Download this section: https://aigarskadikis.github.io/src/my.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/my.sh

Switch to SQL mode
\sql
Change global variable in order to allow MySQL work with external directories:
set global local_infile = 'on';
Switch to JavaScript mode
\js
Set DB name
DBname="zabbix"
Set destination directory
DBdumpath="/tmp"
Download schema
util.dumpSchemas([DBname],DBdumpath+"/zabbix_schema.dump", {ddlOnly: true, threads: 8, showProgress: true});
Dump all
DBname="zabbix"
DBdumpath="/tmp"
util.dumpTables(DBname, [], DBdumpath + DBname, {"all": true,"threads": 4,"bytesPerChunk": "256M"});
Download configuration
util.dumpSchemas(
[DBname],
DBdumpath+"/zabbix_configuration.dump", {
excludeTables:[
DBname+".history",
DBname+".history_uint",
DBname+".history_log",
DBname+".history_text",
DBname+".history_str",
DBname+".trends",
DBname+".trends_uint"
], threads: 8, showProgress: true}
);
Download configuration with zstd compression
util.dumpSchemas(
[DBname],
DBdumpath+"/zabbix_configuration.dump.zstd", {
excludeTables:[
DBname+".history",
DBname+".history_uint",
DBname+".history_log",
DBname+".history_text",
DBname+".history_str",
DBname+".trends",
DBname+".trends_uint"
], threads: 8, compression: "zstd", showProgress: true}
);
Download configuration with gzip compression
util.dumpSchemas(
[DBname],
DBdumpath+"/zabbix_configuration.dump.gzip", {
excludeTables:[
DBname+".history",
DBname+".history_uint",
DBname+".history_log",
DBname+".history_text",
DBname+".history_str",
DBname+".trends",
DBname+".trends_uint"
], threads: 8, compression: "gzip", showProgress: true}
);

Download this section: https://aigarskadikis.github.io/src/mysqlsh.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/mysqlsh.sh

Extract configuration dump from old server
pg_dump \
--dbname=DATABASE \
--host=HOST \
--username=USER \
--file=zabbix.dump \
--format=custom \
--blobs \
--verbose \
--exclude-table-data '*.history*' \
--exclude-table-data '*.trends*'
Restore historical data in background
psql zabbix -c "\COPY trends_old FROM PROGRAM 'lz4cat /tmp/z64.trends_old.tsv.lz4' DELIMITER E'\t' CSV"
psql zabbix -c "\COPY trends_uint_old FROM PROGRAM 'lz4cat /tmp/z64.trends_uint_old.tsv.lz4' DELIMITER E'\t' CSV"
psql zabbix -c "\COPY history_old FROM PROGRAM 'lz4cat /tmp/z64.history_old.tsv.lz4' DELIMITER E'\t' CSV"
psql zabbix -c "\COPY history_uint_old FROM PROGRAM 'lz4cat /tmp/z64.history_uint_old.tsv.lz4' DELIMITER E'\t' CSV"
psql zabbix -c "\COPY history_str_old FROM PROGRAM 'lz4cat /tmp/z64.history_str_old.tsv.lz4' DELIMITER E'\t' CSV"
psql zabbix -c "\COPY history_text_old FROM PROGRAM 'lz4cat /tmp/z64.history_text_old.tsv.lz4' DELIMITER E'\t' CSV"
psql zabbix -c "\COPY history_log_old FROM PROGRAM 'lz4cat /tmp/z64.history_log_old.tsv.lz4' DELIMITER E'\t' CSV"
PostgreSQL rename operations
ALTER TABLE trends_uint RENAME TO trends_uint_tmp; ALTER TABLE trends_uint_old RENAME TO trends_uint; INSERT INTO trends_uint SELECT * FROM trends_uint_tmp ON CONFLICT DO NOTHING;
ALTER TABLE trends RENAME TO trends_tmp; ALTER TABLE trends_old RENAME TO trends; INSERT INTO trends SELECT * FROM trends_tmp ON CONFLICT DO NOTHING;
ALTER TABLE history_uint RENAME TO history_uint_tmp; ALTER TABLE history_uint_old RENAME TO history_uint; INSERT INTO history_uint SELECT * FROM  history_uint_tmp ON CONFLICT DO NOTHING;
ALTER TABLE history RENAME TO history_tmp; ALTER TABLE history_old RENAME TO history; INSERT INTO history SELECT * FROM history_tmp ON CONFLICT DO NOTHING;
ALTER TABLE history_str RENAME TO history_str_tmp; ALTER TABLE history_str_old RENAME TO history_str; INSERT INTO history_str SELECT * FROM history_str_tmp ON CONFLICT DO NOTHING;
ALTER TABLE history_text RENAME TO history_text_tmp; ALTER TABLE history_text_old RENAME TO history_text; INSERT INTO history_text SELECT * FROM history_text_tmp ON CONFLICT DO NOTHING;
ALTER TABLE history_log RENAME TO history_log_tmp; ALTER TABLE history_log_old RENAME TO history_log; INSERT INTO history_log SELECT * FROM history_log_tmp ON CONFLICT DO NOTHING;
Restore
pg_restore \
--dbname=DATABASE \
--host=HOST \
zabbix.dump
Backup individual historical table
PGHOST=127.0.0.1 \
PGPORT=5432 \
PGUSER=postgres \
PGPASSWORD=zabbix \
psql --dbname=zabbix \
-c "COPY (SELECT * FROM history_uint) TO stdout DELIMITER ',' CSV" | \
xz > /tmp/history_uint.csv.xz
Restore
psql zabbix -c "\COPY history_uint FROM PROGRAM 'xzcat /tmp/history_uint.csv.xz' DELIMITER ',' CSV"

Download this section: https://aigarskadikis.github.io/src/pg.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/pg.sh

Snmpsim
docker run -v /usr/share/snmpsim/data:/usr/local/snmpsim/data -p 1024:161/udp tandrup/snmpsim
Postgresql 17 with timescaledb
podman pull timescale/timescaledb:2.18.1-pg17
mkdir -p ${HOME}/postgresql/17
podman run --name pg17ts2181 -t \
-e PGDATA=/var/lib/postgresql/data/pgdata \
-v ${HOME}/postgresql/17:/var/lib/postgresql/data \
-e POSTGRES_PASSWORD="zabbix" \
-e POSTGRES_DB="dummy_db" \
-p 7417:5432 \
-d timescale/timescaledb:2.18.1-pg17
Pod podman unqualified-search. Did not resolve to an alias and no unqualified-search registries are defined in "/etc/containers/registries.conf"
echo 'unqualified-search-registries = ["docker.io"]' | sudo tee /etc/containers/registries.conf
Reinstall version of zabbix monitoring proxy k8s
sed -i 's|alpine-7.0.*$|alpine-7.0.9|g' values.yaml
helm uninstall my-release && helm install my-release -f values.yaml ./
kubectl get secret zabbix-service-account -n default -o jsonpath={.data.token} | base64 -d
Enter bash in k8s env
kubectl exec -it $(kubectl get pods | grep -Eo "zabbix-proxy\S+") -- /bin/bash
Restart pod (by deleting it) k8s
kubectl get pods | grep -Eo "zabbix-proxy\S+" | xargs kubectl delete pod
Tail until the block appears in k8s logs
kubectl logs -f zabbix-proxy-5d6d97564d-d4sf2 | awk '
/Prometheus raw data start/ { capture=1; data="" }
capture { data = data ? data ORS $0 : $0 }
/Prometheus raw data end/ { print data; capture=0; fflush() }'

Download this section: https://aigarskadikis.github.io/src/pod.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/pod.sh

Discard (rename) current data tables from the application layer. Set back an empty table
# Archive each live historical table and put an empty replacement in place so
# the application can keep writing while old data is migrated on the side.
DB=zabbix
# Suffix for the renamed (archived) tables
OLD=_old
# Suffix for the staging tables receiving rows collected during migration
MIGRATE=_migrate
echo "
history
history_uint
history_str
history_log
history_text
trends
trends_uint
" | \
grep -v "^$" | \
while IFS= read -r TABLE
do {
# For each table: rename the original to <table>_old, recreate an empty
# <table> and a <table>_migrate staging table with the same structure
# (INCLUDING ALL copies indexes, defaults, constraints), and hand ownership
# of the new tables to the zabbix application user.
psql $DB --command="
ALTER TABLE $TABLE RENAME TO $TABLE$OLD;
CREATE TABLE $TABLE (LIKE $TABLE$OLD INCLUDING ALL);
ALTER TABLE $TABLE OWNER TO zabbix;
CREATE TABLE $TABLE$MIGRATE (LIKE $TABLE$OLD INCLUDING ALL);
ALTER TABLE $TABLE$MIGRATE OWNER TO zabbix;
"
} done
Download historical data
DB=zabbix
date
echo "
history_old
history_uint_old
history_str_old
history_log_old
history_text_old
trends_old
trends_uint_old
" | \
grep -v "^$" | \
while IFS= read -r TABLE
do {
date
echo $TABLE
PGUSER=postgres \
psql --dbname=$DB \
--command="COPY (SELECT * FROM $TABLE) TO stdout DELIMITER E'\t' CSV" |\
lz4 > /tmp/$DB.$TABLE.tsv.lz4
} done
date
ls -lh /tmp/$DB.*
cd
Validate if some data is stored
DB=zabbix
echo "
history_old
history_uint_old
history_str_old
history_log_old
history_text_old
trends_old
trends_uint_old
" | \
grep -v "^$" | \
while IFS= read -r TABLE
do {
echo $TABLE
ls -lh /tmp/$DB.$TABLE.tsv.lz4
lz4cat /tmp/$DB.$TABLE.tsv.lz4 | head -1
} done
cd
Download current
DB=zabbix
date
echo "
history
history_uint
history_str
history_log
history_text
trends
trends_uint
" | \
grep -v "^$" | \
while IFS= read -r TABLE
do {
date
echo $TABLE
PGUSER=postgres \
psql --dbname=$DB \
--command="COPY (SELECT * FROM $TABLE) TO stdout DELIMITER E'\t' CSV" |\
lz4 > /tmp/$DB.$TABLE.tsv.lz4
} done
ls -lh /tmp/$DB.*
date
cd
Restore all graphs in background
DB=zabbix
date
OLD=_old
MIGRATE=_migrate
echo "
trends
trends_uint
history
history_uint
history_str
history_log
history_text
" | \
grep -v "^$" | \
while IFS= read -r TABLE
do {
date
echo "big upload per ${TABLE}${OLD}"
psql $DB --command="\COPY ${TABLE}${OLD} FROM PROGRAM 'lz4cat /tmp/${DB}.${TABLE}${OLD}.tsv.lz4' DELIMITER E'\t' CSV"
date
echo "upload data per ${TABLE}${MIGRATE} during migration"
psql $DB --command="\COPY ${TABLE}${MIGRATE} FROM PROGRAM 'lz4cat /tmp/${DB}.${TABLE}.tsv.lz4' DELIMITER E'\t' CSV"
date
echo "merging all data together inside ${TABLE}${OLD}"
psql $DB --command="INSERT INTO ${TABLE}${OLD} SELECT * FROM ${TABLE}${MIGRATE} ON CONFLICT DO NOTHING;"
} done
date
cd

Download this section: https://aigarskadikis.github.io/src/postgres.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/postgres.sh

Reinstall values which have been already customized
sed -i 's|^CacheUpdateFrequency=.*|CacheUpdateFrequency=120|' /etc/zabbix/zabbix_server.conf && grep ^CacheUpdateFrequency /etc/zabbix/zabbix_server.conf
sed -i 's|^ValueCacheSize=.*|ValueCacheSize=768M|' /etc/zabbix/zabbix_server.conf && grep ^ValueCacheSize /etc/zabbix/zabbix_server.conf
sed -i 's|^TrendCacheSize=.*|TrendCacheSize=256M|' /etc/zabbix/zabbix_server.conf && grep ^TrendCacheSize /etc/zabbix/zabbix_server.conf
sed -i 's|^StartDiscoverers=.*|StartDiscoverers=20|' /etc/zabbix/zabbix_server.conf && grep ^StartDiscoverers /etc/zabbix/zabbix_server.conf
Summarize configuration file
grep -v "^$\|#" /etc/zabbix/zabbix_server.conf | sort
Restart and follow log
systemctl restart zabbix-server && tail -f /var/log/zabbix/zabbix_server.log

Download this section: https://aigarskadikis.github.io/src/sed.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/sed.sh

List existing context
ls -lZ /etc/zabbix
Install context
sudo chcon system_u:object_r:etc_t:s0 /etc/zabbix/another.conf
Make it persistent across relabels (like restorecon or policy reloads):
sudo semanage fcontext -a -t etc_t "/etc/zabbix/another.conf"
sudo restorecon -v /etc/zabbix/another.conf
Replicate context from another file
sudo chcon --reference=/etc/zabbix/zabbix_java_gateway.conf /etc/zabbix/another.conf

Download this section: https://aigarskadikis.github.io/src/selinux.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/selinux.sh

Install dependencies on ubuntu 22
apt install snmp snmptrapd libsnmp-perl snmptt snmp-mibs-downloader -y
Mimic SNMP traps
echo "$(date +%Y%m%d.%H%M%S) ZBXTRAP 10.10.10.10
2nd line
3rd line
4rt line" | tee --append /tmp/zabbix_traps.tmp
Snmptrapd in foreground
strace -s 1024 -o /tmp/snmptrapd.strace.log snmptrapd -f -C -Le -Dusm -c /etc/snmp/snmptrapd.conf
Capture traps traffic
tcpdump -npi any -s 0 -w /tmp/udp162.pcap udp and host 10.133.112.87
Send test trap SNMPv3
snmptrap -v 3 -n "" -a SHA -A testtest -x AES -X testtest -l authPriv -u SNMPv3username -e 0x80000634b210008894719abe08 10.133.80.228 0 1.2.3
Turn down 'snmpd'. This was installed to solve dependencies
systemctl stop snmpd && systemctl disable snmpd
Turn down 'snmptrapd' to reconfigure service
systemctl stop snmptrapd
Make a backup of old config, install new
mv /etc/snmp/snmptrapd.conf /etc/snmp/original.snmptrapd.conf
Install example config to capture SNMPv2 traps and SNMPv3 traps
cd / && echo "/Td6WFoAAATm1rRGAgAhARwAAAAQz1jM4Cf/AhVdADKdCIYUyy9sMvNjsbbzalh2a+CieFE1VBk12M+iJ9k2na4QOO9Ofnui3QKfd3E/1VgKRwkM3Epl5SMnZfJ+Cco4JO/AyUapb6yMjEt3XsUk7BjRRaUGM0uAlufs91DZkXRkomCIZcsUZrGk8QVyxc1g+OrLpcr1Gg25NNexih87iDdzxxXIVJgzVqeurca2iV8PwHaIeBXGVdbKgnDrACTAfUmxOg07Y1YRNqNGilYXa4zP6mlr2G8L/KlkUmJf6ubOEorh1eMX1zWVeyRK1XBCx+6WKTsxTijP20A7rttOg7s/+Z7PVXRdghD91fIRWVby9K35SKcjkn1B7BUNwC4X586M6S04Pqgb+PamryiP9Ct2w7IgMiIUcCrzC4LZuRRpzbOKzLL5NbYeeM5QOxK6/PvsM1MlZwNf+5F3ubEr4eObU517xhW+bSvntYwetQTJm+5bcOTnu+HkJaVinw58QEIsCjOOtAJmqcKkrlvo2nYuobkRW64ME3nA2TdcqJ2BZQ2o5/rKwn7ZKmCKNgxffdMELcji4pRjALX1lC8g/hyEi+ysu34iA/ciq1HJVEO9ulGVAmrr9OdmqbZXWiPs7pg7Epcjr9CKQjsORS3R/3Gsf7umyWtWnCKpex48gClLRQPv2bI2WtGG6V6uXyPyBLUPiHKj6u74SAQJ/235xmovoLNuGl5bQ9DSrp0bzRi7qhc+AAAAALkbuP45VSNNAAGxBIBQAABGKqZmscRn+wIAAAAABFla" | base64 --decode | unxz | tar -xv
Install example config to capture SNMPv2 traps and SNMPv3 traps for Ubuntu 22
cd / && echo "/Td6WFoAAATm1rRGAgAhARwAAAAQz1jM4Cf/AYNdADKdCIYUyy9sMvNjsbbzalh2a+CieFE1VBk116qSpEChzLfeNm/VMSFs0glXd1kGmtWdaahiXsCj/lmFbJdNPBKh2MwwBi9c7ONuSaWL+cEZOEGH3966HOgmoL6XtkTd0ilQ4in/LDfhRuwL4659LqCUDqASYem/P0OmvYVnLyAIIrIgRl+QwqRYQyGtU8YrjfRJMuGU4djjwWSC2Elx8WEIr1Q12VTLcNoeXeMkOaonxhmtWgZ5mzTedKf+Bydc+lnUQjCe/aDIw2CL3Z89BKaYLh7LlqqdQEEZhzQQhLkfZBx/zsjiZ4hGZFqG+/HOuJ0uKiCSeySmAoTZT/ASHBpWW391kPZsefEjx3ScD6qwVoO7aDC+1i9FnuBNeD/EvLMYhYHlBP2nn4QNd/4RiYE2CYohgf3wATTuzMkQC0PeHq5rojWkOQam7pZIPy395Rahm0fhoyrTGasibCWPNIT0MyBPE9pFOXraL1F9PYoSM+T2a/ZEb0k74EjQwEHZKlYNQAAAtCgAjNz6fkQAAZ8DgFAAABCSIOmxxGf7AgAAAAAEWVo=" | base64 --decode | unxz | tar -xv
Backup /etc/snmp/snmptrapd.conf
tar --create --verbose --use-compress-program='xz -9' /etc/snmp/snmptrapd.conf | base64 -w0 | sed 's|^|cd / \&\& echo "|' | sed 's%$%" | base64 --decode | unxz | tar -xv%' && echo
Start SNMP trap listening service and enable at startup
systemctl start snmptrapd && systemctl enable snmptrapd
Check if it listens on UDP 162
ss --udp --listen --numeric --process | grep 162
Install official parser from git.zabbix.com
curl https://git.zabbix.com/projects/ZBX/repos/zabbix/raw/misc/snmptrap/zabbix_trap_receiver.pl?at=refs%2Fheads%2Fmaster -o /usr/local/bin/zabbix_trap_receiver.pl
Install official parser from github.com
curl https://raw.githubusercontent.com/zabbix/zabbix/master/misc/snmptrap/zabbix_trap_receiver.pl -o /usr/local/bin/zabbix_trap_receiver.pl
Install without internet access
cd / && echo "/Td6WFoAAATm1rRGAgAhARwAAAAQz1jM4Cf/B0ddADqcyosJgONiHFxK21FYsOmcVPjGP4X64Cl0bFy0Sxh6dQDRjb67tZMcLeYmsrxgNdQLlRoxQiYVxnAjD4lU+pp3kmLxTZt01Tsv2kFhasZXC9mxcKHlmRnZcdMzsp3EX+j8vHJkE+gWkCIZM6mReBYlCoOO/IPz2dBXyVT0P6BEx2v0OOlMozPiOu1oZyrzE1iHdSoe16Eq06ivI0n1/dlhiL/QJtivNEdsfb2k09ry7xo42Mo+qoWyRZJiTNK8Zp8FuOLu0yFDQwOMc96HE6qiIlxCumifY3jE9nWEq8P/01JZYZUYxFBBPx4Z4D6rEdoO1LiVRqJA4skGOKbw5tGDQBlsDvZYhHYeEE8VcTU8hyC0MSBB7VkDSAKqvmKra/lcYqeNJfaXEHZ5peMc7gIK4JGvGy9nMDmdh+Zdh8u5CHMkdKVVsQpxabbBxNj2KimYjyDmmlXhlwXGQAaiTk+/zsHqwMAE2d7DbhOFpQ2rAB6JSOk4tPMO1E9S41a2yrp6ASMdFIpS6MdzIRSz6Zu48TXoLDXrQE4L6JTq0YqbmuvLHfnAUSxiZiFHOiqDryjQ9rGMCpnwtqpsbM+SFSfOrIK2CEnw5aeTvgLXSl5Q0vLs8I8VfKF9eIR7pF1YFDyGNi4s+EoHKPZxzXP070AioYQViOIo1pQyn7zEo8OLiMizIHGfglDNBU160pX7i8RYh1SdCyWfSQhNdMPZMD8SvMBqcQUmu2We3jl24Qkf/F5NMyHEtj9pSnPq2oCZl1DY7OvZiVXgMUcjiP/YmITOSW+sNH0xff5poWG7Tmnomwlb1OXHOyM45pXZEweZmj0BbIgNvZTngteze0FdhsWj4Y3SKIDH+To6kyNrXi5zMBqzpIOX87W8WsJmNwX/j0ppxoRrtyft1kPUIOpt/Q+/M/umE8i8LNerYiQUl7kquzEA80bswKAmvloNirdM3Y92LkRHHYkJZJ7stBRojfjWLioulIkHoxtgkF+oYsxObVUcPl4nTcTCI50py9QdR1UJWgbyyC1inWrSv5N8xEKxqyQQL9o8HmSKAc/YVhve7duC3TcYT+022FyLtzo2RicS8kR1+cAmVpTdBC/JqcJhALHtRepg5K+SS1gYm8j3iZMNY/dc160ODugj5KUrb71b1CN/pT4Fh4HEWFJpfSeThYww9zEGrGnnX6TPEC6z15b/Fpb6uaVENW3HacXtGhtSGn+DFY2Zgjy33guKX70jQZykoisaFfx8Dgbz7DmxCI0HmA4XbKEu5bp/tS21vf5z5yQ1scY0v+yMeQQSwmJ6Coq6AWIXOvoW+vHyymjvM883KgIO2gT/006MBekVO++FMzlJTwZFLLB5YMgxBZa28nNtW2Gvv6rEVkpTmccY5G27qsuWyH9FKs8FYExXnX53G4EYENyYJug8ijtQccDeFmqmJltSNp8BPOlPdtQqz2TsSFQtvxMM75o8r2Xz71RK4zV3e/TcsrXkQsOlmZxtISc4rWOaFdHlUhOYQfAsjwnp7yKvIM16A9e7978cfBwuQsN3ayEEAsH9yIBsf1q1jzJq/w+eVohCbAipJAViWItZgLGfBuGh/ThWcUNloZrAYhn+hNJTOZgtu0ytniKIRTH+/TTFWN6MfTgVn+oUgMjp36mbaEwh5CSM+H9qahMSWlxyUONSndNoL1uIPcA/tVl/7a41jl6EdTmUhdF6ua8HYVCEfbOvRYKbDz1HySo4zJgZLm5YD87NHAQenVIzooyCFO8hfTqCr0+FJ7EoQxR2tB2JsL4m8fscdj7RpkGhrsyjBQeViilN3FW+zxmcr0wEZpSYXDQmvsqYKdNJ+ssQHrUEpcJ6QSIN+qd8ZGMoqsNiFS4XVh/anCO669TvmrH+G8fAK74IugKEJ9deGMKZtw1Mb7xhKjPn6QWu8Ha8ezvZo4gH6PzD7eQFGSH86a
9GxOpGzO+UOsj6HVJxgq6fBzGjc+sNb0mrdomnjwy7gcESkwgL40XJe0BIvEf5SUlhhMxbLkCDR0SZYwef1/GTqy/kZzLaYtYGGmgiK5TJoCNw2KkzuXovYAM8rh2lPIYH5QM3AFT2M9YBypstkAefXmlvS+gDehM6AbUgcgLaebBFkXz3zIsUZHSujrpgU7bPFcLhp+vICmNGqLYpaPtO3wNTkuPtnXEO+oVDrC8otR8YG28ElLOTPGqBrv9YdbFw+GMBwGDTDHOayzesQtLWfGzWpB2wxJQ1PEaQrk/T5ddATASFyDtBkw8Kn6HKECxcHsy1MZfvgwtXUP87M+6J22cnU1BseHdC9OHKZ7TuYyKTe1KX8Khef4rF+wS3LHBtsvwIJQ2Dg2Pt8MMyhZBz2R/D6EfUbNq0rhfje0y7rNWZIvl+LnvZba12iMc36J2Knm9wUh5Oe3i8NNyjowLq+mA9qE4Hz2xiqGs7e+HzxPjO+0kUKA+D3l9huJeb23bQSOF4uVfEL3Pn9YCyshHs54jmAAAALI/pe+MIB80AAeMOgFAAABubUG2xxGf7AgAAAAAEWVo=" | base64 --decode | unxz | tar -xv
Non-official with variable binding
cd / && echo "/Td6WFoAAATm1rRGAgAhARwAAAAQz1jM4Cf/B1JdADqcyosJgONiHFxK21FYsOmcVPjGP4X64Cl0bFy0Sxh6dQDcs6csTInMUOmn3nbvAhMp+muXu3Ac5qmCL5NGeZl0gPLgn/p72l2kl8iCRpebqBtVlxm8RA1JADn8ZWpDryzdUGTl87L6Imfy574ffVE+4hdxmnX66rGK4NkO8nQfYqbTbOiHKSZuN5GTf7luzN9HU9CmHRSDJwbMnb8mK/qUX0k8Sivr7JRLreWZ8uPzu9XnjDxLMB38JM8UnxPkP7MQwMAbyHGQ3ZC/NYekSL6lHC4x0FNDTpHgQmSTwmjR9TDX2wSYUrPWJUExPSSBZDP/GICmDSrpJC7MtBzAB5cqGZY3EOJG1stDd3boSAup0YegGEWGIitivWqtUy6xcSRL1VHW4EngaT30U8UnABiLus4yWAapbsJZ26yg+NlFflNJEv0r68579kgeaIWddGEzUQhAMzzh0pd5DQqQARPcSQ15v9xXfOSrNIkhZbIRGi/0HWo8zJAdSZbFvQuNmbTr6MZ4uTjLgxE19M9c+QVkH8pGtZcnIzjX7Ovxi4qiWRPnchVvIJNVaoBKtja16IdP4IMybSj1pdIVCHn71xD4qxCchBvzcUGgPyZc1/59XdHW7cuib3TNsBED6TqbLzorOdMpY74DoWMyOrw7TfQuDE1BrrfNoxTdh5JewNwGlZ4E4b30h8Durtl6xRYKS4QnxvpvzwrPVz7eR4qaNjWlCB0rvJ3deF0so6SQgfmHm55TAJ5FXZfg6gtDi/c49qOZQbwjLZf208DMcVj+gSpKub865ew0YeABqV2ymkN0JSdwHJg7/TysT1b2XtGFx6sAJn5iFNduBZwsFc3ZsC/te2lADK3BgIV+VefKYamIvD4Zle9CoJ5WG0iE5pt14E1jvojjMKI2mdMMlwajtwg1snQg22h67UY5OqHljQdsYgEAh2iUhHHTmlqqWeGcmG30GDDflkt9IQ0rAdi9EVNlmObL3yem801ac1bQPQooZRa5MLzhXCMA66JAF/fH+dK3Tj7YXnZccex8zH3iq+E0UoYJlusR/0BKBTxTEyD7UYHmf+nKJgYFKuL2sdNFfgsYSgxTtrdxCtLf4+//RNsIdYXRuufZiygynAlGHmn6QJWlfNax9Gx7jD6Ij+DIBrUBGg/tqx31Grf65H097D2q1vKMLArVgv69HLk++DA55wi1hHCKF33/2GyFOKug74jvghVAwy2H9vjfA5Ds1jMsxPq5EF066ShtPJyl6Fl4bYt6dR/u6Unn2OiUXudoBwBJY7cCWCnihesqMQsJQKkUdMG626FERfYnGVcYOQ/tw6OCefmzfwdAL5cX/ai5lmFWyxLbI2tB2m1+ck25okV0+Hu1WDSAVcUAPLEnzapyZDBGUkYWwRxiENWbz0ObyDQrPwFKWMUPuQyumx4jxPbNwmqkPOKdkoErBVbwJum/7rgkkUBUNgdk3IFZam2D+g5hH0c7xAyZg10SWU+RNGTEnZdpRsDeuSLmaxQAXyXVxcCtPsjyg85dfWkSmyeNV/gMxAbC4KUxuVnUyVqhXaasFp4If8e2J0+ISitX+F2dda/ggsW5VpbcWeQnd8B/4cFgmjWb1DoRuqyB5jsz4h7sHVizhb++tcayg+4irLR3dxBrEzrq2PVQkN4jaFdoivXpS/Kz9atEkrugl22Xw8voLvuey6oLPI4tLnGszB0bSSYaBx1rOlSVJA2Y5FlzFKbjrEJZTITFlR0IUzDSukQy77vb8yd+9ys3Pma9WIjf481PHxkMj2I4gfn5VMMCMvl/v1tb0w6w4U65tOvMBpilpwWkFS0jx6QY5zgciwBfrxovszfcTLXqt1dSLmW56qI83rfajnm6QU506f9cxaVEppEBg3hzh73NUEvP8r+4uaL5tJuYv/x/S79WecAMwl22bdadl3c61ScMXYe35Fc28m
qU4zkcf/2gvCqRpAo+kLFxiRvWUjWpUo6YCFM0k5xde6VQQDj4L2qVEUJlQBYuLeOsoBG3ATgugymBBGdpabXHGhyZ4yYVC7FaGbG9IyuV1Y2qD4uKvSitq/OXSBCZZl6oGaPueMhFJA7f1YYBe+S1hbPBHjW/lK4LRcXRTl7aoxduTu/DPFsY5P2LOmxmrtxHseGsHO4hgb5TKrPezdObPD1Xzn9EljhUyeJpEzuixn8eORLQnmvULKlxTzFnsRCwW/TJQcsythoG1vcAZvtZ660gSXVnqBxa63r13mjtmj1YMVSW6/oV43H8eqTaMu5E9ski56PfDiiGd/iPlcv6mGMofnSqUmNAuG/96N5Zwmn1R0lhXd4IZhhKML8zQXqTwUJv+qd7nt9xANCz22BXNTcX23dubHXEFGb2O796XaTJgWv+cbsT215f5ybAQs6MZb0SKZqw9mm7D6gKiEiKJk1e1uabo6GdmigGjz4hdBtiV0irDSMI3ssn/AGchkl/GCqzX8p7w5xRiAUoDWtJ6jv8+2y0updOyurkg2sAAAAAGn72SHyGo2sAAe4OgFAAAMWIztGxxGf7AgAAAAAEWVo=" | base64 --decode | unxz | tar -xv
Bash parser
cd / && echo "/Td6WFoAAATm1rRGAgAhARwAAAAQz1jM4Cf/AiddADqcyosJgONiHFxK21FYsOmcVPjGP4X64Cl0bFy0Sxh6dQDRjk6f8dQ1bOCTQt37h4nPN7dqU5XiljBY1ISkB57iky9OaCllx2Pvw7ADF5H5hz2M/woIhS5kB6SZzGLqiZ/L4ObszSmHD/zqU/EptJXvrKiRxNPL3vREPA/bxK/iefk//ici0J76T5Pgu6B5JySWc9UrDRuOwLJH8gF+ac4z1dKxDWCbIjxuS28RZQlunU1OAlIr/CyCfBfC1Pp5b+H1lMr4yeBne7oOVqx5ytStJs+J/X+S3ck3O115oqn6NvsvaamKz6hnaSG91RKb+A8fUXoXwpXvTI8fmeOCsi9v9d1XDStnvix0EOqLqRFE9JrRiuKUHlSfkrtd1YAjr+Ab8+SpyXoxkYvt5qIsdJkT7uLhAiEcxHr2xtaZrVus3qsvA9pQ+uMtk9YIIG8auFD6ImlPI/52vUID9uBDVIv5x/1D5fhmBPRE8c9holuN9pWksYmlqewZ2deiPvfKNxpJNGp7ReBDjSh9/koaB8TWMcOKdARr52F498EGjzMeB6P7XNtUoi/gmHjgjpxDLLApKtZyENPXehKqMCUlTCKH03YuyiCezmurGbkkFAPQHKfDKSFJuahsOR/qAHOO7UwvzMluzaQFlIzUq9xpCvZjTLwpfpUnhIVxgY/GZw+9c6gweJWVUnfehDrKGqhvXFmAyjHtgTzjlpry2jgLBP72Dgt3PRLCAACEWwpFaPLuqgABwwSAUAAAjIZMILHEZ/sCAAAAAARZWg==" | base64 --decode | unxz | tar -xv
Send incorrect SNMPv3 engineID
snmptrap -v 3 -n "" -a SHA -A testtest -x AES -X testtest -l authPriv -u SNMPv3username -e 0x80000634b210008894719abe07 127.0.0.1 0 1.2.3
ls /tmp
Send incorrect SNMPv2 community
snmptrap -v 1 -c moon 127.0.0.1 '.1.3.6.1.6.3.1.1.5.3' '0.0.0.0' 6 33 '55' .1.3.6.1.6.3.1.1.5.3 s "eth0"
ls /tmp
Correct SNMPv3 engineID
snmptrap -v 3 -n "" -a SHA -A testtest -x AES -X testtest -l authPriv -u SNMPv3username -e 0x80000634b210008894719abe08 127.0.0.1 0 1.2.3
ls /tmp
cat /tmp/zabbix_traps.tmp
rm -rf /tmp/zabbix_traps.tmp
Correct SNMPv2 community
snmptrap -v 1 -c earth 127.0.0.1 '.1.3.6.1.6.3.1.1.5.3' '0.0.0.0' 6 33 '55' .1.3.6.1.6.3.1.1.5.3 s "eth0"
ls /tmp
cat /tmp/zabbix_traps.tmp
rm -rf /tmp/zabbix_traps.tmp

Download this section: https://aigarskadikis.github.io/src/snmptrap.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/snmptrap.sh

Allow to reload cache from Zabbix frontend
cd /etc/sudoers.d
echo 'zabbix ALL=(ALL) NOPASSWD: /usr/sbin/zabbix_server -R config_cache_reload' | sudo tee zabbix_server_config_cache_reload
chmod 0440 zabbix_server_config_cache_reload

Download this section: https://aigarskadikis.github.io/src/sudoers.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/sudoers.sh

Listening TCP ports
ss --tcp --listen --numeric
Simple troubleshooting
tail -1000000 /var/log/messages > /tmp/messages.txt
free -h > /tmp/memory.txt
df -h > /tmp/disk.txt
dmesg > /tmp/dmesg.txt
ps -xafuww > /tmp/process.list.tree.txt
Process saturation and diag info
while true; do  echo $(date) >> /tmp/process.list.txt && ps -xafuww >> /tmp/process.list.txt && zabbix_server -R diaginfo >> /tmp/process.list.txt && echo "=======" >> /tmp/process.list.txt ; sleep 60; done
Listening TCP ports with process identification
ss --tcp --listen --numeric --process
Ensure service is listening on UDP 162
ss --udp --listen --numeric --process | grep 162
Check if tcp port listens. Make sure it really listens on destination server before checking
nc -v servername 3306
Check if port is open without external tools. If it reports 0, the port is reachable and in the listening state
{ echo >/dev/tcp/127.0.0.1/3306 ; } 2>/dev/null; echo $?
Capture all SNMP traps traffic
tcpdump -i any udp dst port 162 >> /var/log/zabbix/zabbix_traps.tcpdump
SNMP traffic and ICMP ping problems
tcpdump -i any 'port 161 or icmp' -w /tmp/out.pcap
Replace 127.0.0.1 with real IP
tcpdump -i any 'port 10051 and (dst 127.0.0.1 and src 127.0.0.1)' -w /tmp/out.pcap
UDP traffic
tcpdump -i any port 161 -w /tmp/out.pcap
Activity for each block device, pretty-print device names, report task creation and system switching activity.
sar -d -p -w 1
10 seconds of disk usage
sar -d -wp 1 10 >> /tmp/disk.activity.txt
Installed packages on EL7 or EL8 system
rpm -qa > /tmp/installed.packages.txt
Installed packages on Ubuntu/Debian
apt list --installed > /tmp/apt.installed.txt
OS information
cat /etc/*release* > /tmp/os.info.txt
Disk space
df -h > /tmp/disk.space.txt
OS, Memory, CPU, Disk characteristics
cat /etc/*release* > /tmp/$(hostname).characteristics.txt
cat /proc/meminfo >> /tmp/$(hostname).characteristics.txt
free -h >> /tmp/$(hostname).characteristics.txt
cat /proc/cpuinfo >> /tmp/$(hostname).characteristics.txt
df -h >> /tmp/$(hostname).characteristics.txt
Usage of swap:
for file in /proc/*/status ; do awk '/VmSwap|Name/{printf $2 " " $3}END{ print ""}' $file; done >> /tmp/$(hostname).swap.usage.txt
Process list. Top memory. top CPU
ps auxww > /tmp/process.list.txt
ps auxww --sort -%mem > /tmp/top.mem.processes.txt
ps auxww --sort -%cpu > /tmp/top.cpu.processes.txt
Shared memory segments and semaphore arrays
ipcs -a > /tmp/shared.memory.segments.and.semaphore.arrays.txt
Last 1 million lines from messages
tail -1000000 /var/log/messages > /tmp/messages.txt
Display all messages from the kernel ring buffer
dmesg > /tmp/dmesg.txt
Live kernel settings
sysctl -a > /tmp/live.kernel.settings.txt
Last 1 million lines from /var/log/zabbix/zabbix_proxy.log
tail -1000000 /var/log/zabbix/zabbix_proxy.log | gzip --best > /tmp/zabbix_proxy.$(date +%Y%m%d.%H%M).log.gz
Last 1 million lines from /var/log/zabbix/zabbix_server.log
tail -1000000 /var/log/zabbix/zabbix_server.log | gzip --best > /tmp/zabbix_server.$(date +%Y%m%d.%H%M).log.gz
All trapper processes
watch -n1 'ps auxww | grep -Eo "[:] trapper #.*"'
Free trapper processes
watch -n1 'ps auxww | grep -Eo "[:] trapper #.*waiting for connection"'
Service not booting up
journalctl -u sshd | tail -100 > /tmp/sshd.txt
List of open files:
lsof > /tmp/list.open.files.txt
Disk long term throughput, how fast we can create 64GB file
time dd if=/dev/urandom of=/var/lib/mysql/64GB.file bs=1M count=65536 oflag=direct

Download this section: https://aigarskadikis.github.io/src/supply.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/supply.sh

Print all UDP traffic for eth0
tcpdump -i eth0 -nn udp
Print all UDP traffic. Second column is EngineID
tshark -i any -Y '(udp.srcport == 161)' -T fields -e ip.src -e snmp.msgAuthoritativeEngineID -e snmp.msgAuthoritativeEngineTime -e snmp.msgAuthoritativeEngineBoots -e frame.time
Query one particular engineID
tshark -i any -Y '(udp.srcport == 161) && (snmp.msgAuthoritativeEngineID == 80:00:3a:8c:04)' -T fields -e ip.src -e snmp.msgAuthoritativeEngineID -e snmp.msgAuthoritativeEngineTime -e snmp.msgAuthoritativeEngineBoots -e frame.time

Download this section: https://aigarskadikis.github.io/src/udp.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/udp.sh

Zabbix 6.0 frontend module to hide "Templates" column under "Configuration" => "Hosts"
cd / && echo "/Td6WFoAAATm1rRGAgAhARwAAAAQz1jM4Cf/BIhdADqcyosJxdxJssUvYbZwczCa4NL6WKTOtRi0c554WcmBrkKJ0mJAeAaic7i991X+PJKgUTuAnCLyHef+BXOL1bHqkaZZZr4lBVY4be1LihI2ek87Yq0PoxsOmTJvtFLvNkQuXDQEwdAqs4FwSYnDJRsbo8LnCmjlRabqLk15axqJS9ccv9L4lAHtEWnvKA66Boc2njRgpyJXQUNhKfStLJlmV1gwGUBCH0Iq6bgCAp1B4hCtc7Xc6RdYDfBSEweI7oB7vSaq0PZhm3+Q7MJmw5HO4MGcUZpmeSHcvVQfnaX0AXb7OHtl6QkKs4YOdjqEDxrJ0AlQcs5ikDS8FfVYQPnVXYN0T92UMgNY+24lnNDq5fDw9yOHKvpoOM4hARy3RgSzD3rL+VHGtL5yXmORxrYWGBD0PooiX8hKTPnoQop47TmDrrShiQjSNcio30nj0Tti/MtDClNlSEaboMa1EGjcnNH7Ga4btSeGvbeY3hqTym4HHoU6JFldBtIEPFvCL1SHxSkVL8CBPapJCgmiev1aPK5VGl46hocWHfZ0GSRSf61ykDoITplZ2XU+j6/Vz4zkLh6iLeDV2ts1al+9SiL7fB2kT8LQAD7yOBAsIUzhQ2k5WH6Aa4q6XwDHfK+d63IcsYgKJoNP7qGySE9HrOcYeoWbtymprkltzXAbuRFEZqNxeqBXLPt1DM3Bd+F/QyQrDEA/nsxg+EJ6sLcnFnLDrlYxjSCxkh8KFZZHn1arIz/n1A+4yBOkE8f1f/BidwtLBOGAb7L0ngnm6VtlEw7EApeSPUvahjZXDPf9pfZSbyuLWfb9twZmEzCEa3tUvrLEa6FDibEQtzXmDNEWJIg4NIlw+MW56igfQ0c3pkxo/l78UpL+VborSVzdI46H3IKxY6bF7t9bk6IMiikie60tZbQGxeYVrxYmKdc7FXI8MehxOkOAP7+xL4XgFYCed1vfhxu3/FAKlgiA0ITGTUzXJa+bzEs4tyWPligj3KL1tUaH9UqxDgT8uH99O9jMduja1ZaR6WadsisLXgZ3XNgGwYfbtNZfXqk6ggfOjJCLznozMugf1NiDGUVVIIRCV6esaCrcDRVU06YlOY9SmJsFuC8fApi3ul8qljMMoZHonjshOc03QmaC75XmX26xloDmK8ygeiLGzO4MWHd1REQlhavsxADSLcNM5K/riurHH4+H7MvMMrwlk7e/H6dIiQJQxSzw2kSpprUM70jwoQKRgbthQdWIGyjRmf7BOzs9gdgJ4QVqSAkhvfWIFRTLZVjvpxURMqL2Rp5X4ankwEyAhbvx0mAlWLDv2jpB3MBdBwQbIwODrTZCGPKlBcax+azw5MmGGWFEK0UMeW2w0BvJpjTP6504r/NuKU9SRlZCHC9oykwCT23x1teLxQFo3EGkdVE+AGolBqi3OxNtdUJNMvpbR3La/JetE3JGzjUVp36bwnHcddVKJhmSdeKgIXcwVJWJ+8vCeg2A9EKocxq1QMJliVymitvrnLAxBX21VYmRtto/E00MeOzrQoqoI5RHs832OyUAAPnXuYqgJ3MuAAGkCYBQAADffS3NscRn+wIAAAAABFla" | base64 --decode | unxz | tar -xv
On EL8 system allow NGINX to serve Zabbix GUI under 127.0.0.1:80. This is useful for Zabbix API
cd / && echo "H4sIANj39GICA+2VQU/iQBTHOfMpeuCAMdspbEGzxANRVk0UXKgHIzqp7RQmlpnuzNQFF/3sO61QQbDVi8km70fCZDLv/3/T6XtTojzERpRNkcdZYPporFRk1up7pqV/NTPgwnx07+7o1HQjaiZBpc9iaZq2lY6at+P3ZrNRqtn1vUbNbjRsu2TVrbrdLBlW6QuIpXKFYZQE5yovrmj9P0US8UCE8bdsLAipVIQZGftWK1t7CcbMnZB0npVJq1zOgpKDSkYUS4Hk2BUEvRRQ6zWGMp9Ml6MZjaOVtZB7rqKcGQcGCtwHqkvO1H8rW3yNHGHGFQ54zHw950HwutenLY5oi4kSMxzQkMjFvBILmv4h48C27AJDV0qi5BZb1/OIlFhv8WW+trclZBpRkWXW52n5+fmeDTQ0x2pLPp+w2Vr+MCy0quqOHqJ50tPXt0PzZk6ZF8Y+mSdBIdn5bJqsAoiKxUoJFR/jA2E+F1+W79m4vkU3w6Twqmhe2faggSuVN6I40m84mceMTn8gETOkRd+CaLIoalNy7771rlxGIVXaRI0xZQE3bqvmbpp3p4rM3Z3K+8q8FtncpHAnen7UO7w873Qd3O/1nO0tWGQxOOyfXjj45+lZp9s+72xaVLJH8wSNVHoZFNtetJ0T7PTb3cFZ2+kcfdR2w3dRoev+sjj/r8tO/woPnP5p9zjt8t8x0Y0vlaBsVCzvd7TBwMHnHeekd6TlgmgDqfCEqDH3iw0Oe10neTHO1UUnza+bTl+zCqtZRD4u1y/l2DlZkYeEjdQ4pzSojhMe0SdKhOAiu2s4yym9EeOCYC+kSQr3jgv1/h22FOkdMeLpB6ITwmOVrTetnO7Qbb8hSO7B/RyRIO7nRXdxEOgvl6SPZG29Vt+/L1LJt8uGbdQbzVydnC3FqznzVYpMovRThP8IqkgmXFc9lZ/KJQAAAAAAAAAAAAAAAAAAAAAAAAAAAABY5x8NJf5GACgAAA==" | base64 --decode | gunzip | tar -xv
nginx -t
systemctl reload nginx

Download this section: https://aigarskadikis.github.io/src/web.sh
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/web.sh

Log out all users which are using the frontend. Zabbix 3.0, 3.2, 3.4, 4.0, 4.2, 4.4, 5.0, 5.2, 5.4, 6.0, 6.2
DELETE FROM session;
Delete all events coming from a specific trigger id. Zabbix 4.0, 5.0
DELETE
FROM events
WHERE events.source=0
AND events.object=0
AND events.objectid=987654321;
Delete old sessions. PostgreSQL. Zabbix 6.0
DELETE FROM sessions WHERE lastaccess < EXTRACT(EPOCH FROM NOW() - INTERVAL '10 days');
Delete discovery, autoregistration and internal events
DELETE FROM events WHERE source IN (1,2,3) LIMIT 1;
DELETE FROM events WHERE source IN (1,2,3) LIMIT 10;
DELETE FROM events WHERE source IN (1,2,3) LIMIT 100;
Delete child events which were closed by global correlation. Zabbix 5.0
DELETE e
FROM events e
LEFT JOIN event_recovery ON (event_recovery.eventid=e.eventid)
WHERE event_recovery.c_eventid IS NOT NULL
AND e.clock < 1234;
Delete duplicate values per itemid. Zabbix 5.0
DELETE t1
FROM history_text t1
INNER JOIN history_text t2
WHERE t1.itemid=382198
AND t1.clock < t2.clock
AND t1.value=t2.value
AND t1.itemid=t2.itemid;
Remove evidence about all failed actions. Zabbix 5.0
DELETE FROM alerts WHERE status=2;
Delete all duplicate metrics in history_text. Zabbix 5.0
DELETE t1
FROM history_text t1
INNER JOIN history_text t2
WHERE t1.clock < t2.clock
AND t1.value=t2.value
AND t1.itemid=t2.itemid;
Delete all duplicate metrics in history_str. Zabbix 5.0
DELETE t1
FROM history_str t1
INNER JOIN history_str t2
WHERE t1.clock < t2.clock
AND t1.value=t2.value
AND t1.itemid=t2.itemid;
Remove data for 'history_text' where 'Do not keep history'. Zabbix 5.0
DELETE
FROM history_text WHERE itemid IN (
SELECT items.itemid FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status IN (0,1)
AND items.value_type=4
AND items.flags IN (0,4)
AND items.history IN ('0')
);
Remove data for 'history_str' where 'Do not keep history'. Zabbix 5.0
DELETE FROM history_str WHERE itemid IN (
SELECT items.itemid FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status IN (0,1)
AND items.value_type=1
AND items.flags IN (0,4)
AND items.history IN ('0')
);
Scan the 'history_text' table and delete accidentally stored integers, decimal numbers, log entries and short strings. Zabbix 3.0, 3.2, 3.4, 4.0, 4.2, 4.4, 5.0, 5.2, 5.4, 6.0, 6.2
DELETE FROM history_text WHERE itemid NOT IN (SELECT itemid FROM items WHERE value_type=4);
DELETE FROM history_text WHERE itemid IN (SELECT itemid FROM items WHERE value_type<>4);
Scan the 'history_str' table and delete accidentally stored integers, decimal numbers, log entries and long text strings. Zabbix 3.0, 3.2, 3.4, 4.0, 4.2, 4.4, 5.0, 5.2, 5.4, 6.0, 6.2
DELETE FROM history_str WHERE itemid NOT IN (SELECT itemid FROM items WHERE value_type=1);
DELETE FROM history_str WHERE itemid IN (SELECT itemid FROM items WHERE value_type<>1);
Remove repeated values per one itemid in 'history_str'. Discard unchanged values. Zabbix 5.0, 6.0
-- Keep only value changes for a single item: a row is deleted when the
-- chronologically NEXT row carries the same value (the last row of a run,
-- and rows where the value changes, survive).
DELETE FROM history_str WHERE itemid=343812 AND clock IN (
SELECT clock FROM (
SELECT clock, value, r, v2 FROM (
-- LEAD() peeks at the next value for this item ordered by clock
SELECT clock, value, LEAD(value,1) OVER (order by clock) AS v2,
CASE
WHEN value <> LEAD(value,1) OVER (order by clock)
THEN value
-- 'zero' marks a row whose value is repeated by the following row
ELSE 'zero'
END AS r
FROM history_str WHERE itemid=343812
) x2
WHERE r='zero'
) x3
-- the newest row has LEAD() = NULL; never delete it
WHERE v2 IS NOT NULL
);
Remove repeated values per one itemid in 'history_text'. Discard unchanged values. Zabbix 5.0, 6.0
DELETE FROM history_text WHERE itemid=42702 AND clock IN (
SELECT clock from (
SELECT clock, value, r, v2 FROM (
SELECT clock, value, LEAD(value,1) OVER (order by clock) AS v2,
CASE
WHEN value <> LEAD(value,1) OVER (order by clock)
THEN value
ELSE 'zero'
END AS r
FROM history_text WHERE itemid=42702
) x2
WHERE r='zero'
) x3
WHERE v2 IS NOT NULL
);

Download this section: https://aigarskadikis.github.io/src/delete.sql
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/delete.sql

Copy data from one table to another
INSERT INTO history_uint SELECT 88656,clock,value,ns FROM history_uint WHERE itemid=88641;

Download this section: https://aigarskadikis.github.io/src/insert.sql
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/insert.sql

Ssl connection information for MySQL
SELECT sbt.variable_value AS tls_version, t2.variable_value AS cipher,
processlist_user AS user, processlist_host AS host
FROM performance_schema.status_by_thread AS sbt
JOIN performance_schema.threads AS t ON t.thread_id = sbt.thread_id
JOIN performance_schema.status_by_thread AS t2 ON t2.thread_id = t.thread_id
WHERE sbt.variable_name = 'Ssl_version' and t2.variable_name = 'Ssl_cipher' ORDER BY tls_version;
List show mysql users and databases, permissions
SELECT host, db, user FROM mysql.db;
SELECT host, user FROM mysql.user;
Disable redo log on global level
ALTER INSTANCE DISABLE INNODB REDO_LOG;
Check status of redo log
show global status like '%redo%';
Remove bin logs logbin binlog log_bin bin_log
PURGE BINARY LOGS BEFORE '2024-03-27 00:00:00';
Enable redo log
ALTER INSTANCE ENABLE INNODB REDO_LOG;
Persistent connections
SELECT * FROM INFORMATION_SCHEMA.processlist WHERE command = 'Sleep';
Create random password for users
UPDATE users SET passwd=substring(MD5(RAND()),1,20) WHERE userid NOT IN (1);
Busy connections
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST WHERE COMMAND != 'Sleep' AND TIME > 1 AND USER != 'EVENT_SCHEDULER' ORDER BY TIME DESC, ID;
Mimic SHOW SLAVE STATUS
SELECT t.PROCESSLIST_TIME, t.* FROM performance_schema.threads t WHERE NAME IN('thread/sql/slave_io', 'thread/sql/slave_sql');
SELECT t.PROCESSLIST_TIME, t.* FROM performance_schema.threads t;

Download this section: https://aigarskadikis.github.io/src/mysql.sql
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/mysql.sql

Swap tables. This will allow seeing the oldest data, but the most recent data will be missing
RENAME TABLE history TO history_tmp; RENAME TABLE history_old TO history;
RENAME TABLE history_uint TO history_uint_tmp; RENAME TABLE history_uint_old TO history_uint;
RENAME TABLE history_str TO history_str_tmp; RENAME TABLE history_str_old TO history_str;
RENAME TABLE history_log TO history_log_tmp; RENAME TABLE history_log_old TO history_log;
RENAME TABLE history_text TO history_text_tmp; RENAME TABLE history_text_old TO history_text;
RENAME TABLE trends TO trends_tmp; RENAME TABLE trends_old TO trends;
RENAME TABLE trends_uint TO trends_uint_tmp; RENAME TABLE trends_uint_old TO trends_uint;
Create dedicated user for MySQL 8
DROP USER IF EXISTS 'zbx_part'@'127.0.0.1';
CREATE USER 'zbx_part'@'127.0.0.1' IDENTIFIED WITH mysql_native_password BY '';
GRANT SELECT, ALTER, DROP ON zabbix.history TO 'zbx_part'@'127.0.0.1';
GRANT SELECT, ALTER, DROP ON zabbix.history_uint TO 'zbx_part'@'127.0.0.1';
GRANT SELECT, ALTER, DROP ON zabbix.history_str TO 'zbx_part'@'127.0.0.1';
GRANT SELECT, ALTER, DROP ON zabbix.history_text TO 'zbx_part'@'127.0.0.1';
GRANT SELECT, ALTER, DROP ON zabbix.history_log TO 'zbx_part'@'127.0.0.1';
GRANT SELECT, ALTER, DROP ON zabbix.history_bin TO 'zbx_part'@'127.0.0.1';
GRANT SELECT, ALTER, DROP ON zabbix.trends TO 'zbx_part'@'127.0.0.1';
GRANT SELECT, ALTER, DROP ON zabbix.trends_uint TO 'zbx_part'@'127.0.0.1';
GRANT SELECT, ALTER, DROP ON zabbix.auditlog TO 'zbx_part'@'127.0.0.1';
GRANT SELECT ON zabbix.dbversion TO 'zbx_part'@'127.0.0.1';
GRANT SELECT,DELETE ON zabbix.housekeeper TO 'zbx_part'@'127.0.0.1';
FLUSH PRIVILEGES;
Restore back most recent data
INSERT IGNORE INTO history SELECT * FROM history_tmp;
INSERT IGNORE INTO history_uint SELECT * FROM history_uint_tmp;
INSERT IGNORE INTO history_str SELECT * FROM history_str_tmp;
INSERT IGNORE INTO history_log SELECT * FROM history_log_tmp;
INSERT IGNORE INTO history_text SELECT * FROM history_text_tmp;
INSERT IGNORE INTO trends SELECT * FROM trends_tmp;
INSERT IGNORE INTO trends_uint SELECT * FROM trends_uint_tmp;
Check all graphs, it should be completed. If graphs are perfect:
DROP TABLE history_tmp;
DROP TABLE history_uint_tmp;
DROP TABLE history_str_tmp;
DROP TABLE history_log_tmp;
DROP TABLE history_text_tmp;
DROP TABLE trends_tmp;
DROP TABLE trends_uint_tmp;
Drop old
DROP TABLE history_uint_old;
DROP TABLE trends_uint_old;
DROP TABLE history_old;
DROP TABLE trends_old;
DROP TABLE history_text_old;
DROP TABLE history_str_old;
DROP TABLE history_log_old;
Put back all data/graphs together (accessible in frontend):
RENAME TABLE trends_uint TO trends_uint_tmp; RENAME TABLE trends_uint_old TO trends_uint; INSERT IGNORE INTO trends_uint SELECT * FROM trends_uint_tmp;
RENAME TABLE trends TO trends_tmp; RENAME TABLE trends_old TO trends; INSERT IGNORE INTO trends SELECT * FROM trends_tmp;
RENAME TABLE history_uint TO history_uint_tmp; RENAME TABLE history_uint_old TO history_uint; INSERT IGNORE INTO history_uint SELECT * FROM history_uint_tmp;
RENAME TABLE history TO history_tmp; RENAME TABLE history_old TO history; INSERT IGNORE INTO history SELECT * FROM history_tmp;
RENAME TABLE history_str TO history_str_tmp; RENAME TABLE history_str_old TO history_str; INSERT IGNORE INTO history_str SELECT * FROM history_str_tmp;
RENAME TABLE history_text TO history_text_tmp; RENAME TABLE history_text_old TO history_text; INSERT IGNORE INTO history_text SELECT * FROM history_text_tmp;
RENAME TABLE history_log TO history_log_tmp; RENAME TABLE history_log_old TO history_log; INSERT IGNORE INTO history_log SELECT * FROM history_log_tmp;
Validate if there are 10 new tables
SHOW CREATE TABLE history_new\G
SHOW CREATE TABLE history_uint_new\G
SHOW CREATE TABLE history_str_new\G
SHOW CREATE TABLE history_log_new\G
SHOW CREATE TABLE history_text_new\G
SHOW CREATE TABLE history_part\G
SHOW CREATE TABLE history_uint_part\G
SHOW CREATE TABLE history_str_part\G
SHOW CREATE TABLE history_log_part\G
SHOW CREATE TABLE history_text_part\G
Detach the current "history" tables from the application and set new/empty tables in place
RENAME TABLE history_uint TO history_uint_old; RENAME TABLE history_uint_new TO history_uint;
RENAME TABLE history TO history_old; RENAME TABLE history_new TO history;
RENAME TABLE history_str TO history_str_old; RENAME TABLE history_str_new TO history_str;
RENAME TABLE history_text TO history_text_old; RENAME TABLE history_text_new TO history_text;
RENAME TABLE history_log TO history_log_old; RENAME TABLE history_log_new TO history_log;
Open "tmux" and migrate the data from an unpartitioned table space to a partitioned table space
SET SESSION SQL_LOG_BIN=0;
INSERT IGNORE INTO history_str_part SELECT * FROM history_str_old;
INSERT IGNORE INTO history_text_part SELECT * FROM history_text_old;
INSERT IGNORE INTO history_log_part SELECT * FROM history_log_old;
INSERT IGNORE INTO history_uint_part SELECT * FROM history_uint_old;
INSERT IGNORE INTO history_part SELECT * FROM history_old;
Attach/swap the partitioned tables to the application later
RENAME TABLE history_uint TO history_uint_tmp; RENAME TABLE history_uint_part TO history_uint;
RENAME TABLE history TO history_tmp; RENAME TABLE history_part TO history;
RENAME TABLE history_str TO history_str_tmp; RENAME TABLE history_str_part TO history_str;
RENAME TABLE history_text TO history_text_tmp; RENAME TABLE history_text_part TO history_text;
RENAME TABLE history_log TO history_log_tmp; RENAME TABLE history_log_part TO history_log;
Transfer back data which was collected during the migration
INSERT IGNORE INTO history_uint SELECT * FROM history_uint_tmp;
INSERT IGNORE INTO history SELECT * FROM history_tmp;
INSERT IGNORE INTO history_str SELECT * FROM history_str_tmp;
INSERT IGNORE INTO history_text SELECT * FROM history_text_tmp;
INSERT IGNORE INTO history_log SELECT * FROM history_log_tmp;
Drop unnecessary tables
DROP TABLE history_uint_tmp;
DROP TABLE history_tmp;
DROP TABLE history_str_tmp;
DROP TABLE history_text_tmp;
DROP TABLE history_log_tmp;
DROP TABLE history_str_old;
DROP TABLE history_text_old;
DROP TABLE history_log_old;
DROP TABLE history_uint_old;
DROP TABLE history_old;

Download this section: https://aigarskadikis.github.io/src/part.sql
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/part.sql

How many values are in the backlog. Does not work on an Oracle proxy because of LIMIT. Zabbix 4.0, 5.0, 6.0
SELECT MAX(id)-(SELECT nextid FROM ids WHERE table_name='proxy_history' LIMIT 1) FROM proxy_history;
Skip all cached LLD rules
DELETE FROM proxy_history WHERE itemid IN (SELECT itemid FROM items WHERE flags=1);
Bombards zabbix trapper and zabbix_sender
SELECT proxy_history.clock, items.hostid, items.key_, proxy_history.value
FROM proxy_history, items
WHERE items.itemid = proxy_history.itemid
AND items.type = 2
LIMIT 10;
Delete data which has been sent already
-- Delete rows already sent to the server (id below the sender's bookmark in
-- the ids table). The literal is single-quoted: double quotes denote
-- identifiers under ANSI_QUOTES and on SQLite-backed proxies.
DELETE FROM proxy_history WHERE id < (SELECT nextid FROM ids WHERE table_name = 'proxy_history' LIMIT 1);
Show LLD JSON data
-- Show cached LLD (discovery) JSON payloads, oldest first.
-- Fixed: items.hostid was selected twice; the second column now shows itemid.
SELECT items.hostid, items.itemid, items.key_, proxy_history.value FROM proxy_history, items WHERE proxy_history.itemid=items.itemid AND items.flags=1 ORDER BY clock ASC;
Which hosts are giving the biggest values
SELECT h.host,i.key_,max(length(ph.value)) as max_length,count(*)
FROM proxy_history ph
JOIN items i ON ph.itemid = i.itemid
JOIN hosts h ON (h.hostid = i.hostid)
WHERE clock >= UNIX_TIMESTAMP(NOW() - INTERVAL 2 HOUR)
GROUP BY h.host,i.key_ ORDER BY max_length DESC LIMIT 100;
Fastest way to show stats
SELECT SUM(LENGTH(value)),itemid FROM (
SELECT * FROM proxy_history LIMIT 10000
) t1
GROUP BY 2 ORDER BY 1 DESC LIMIT 9;
Stats with URL
SELECT SUM(LENGTH(value)),
CONCAT('history.php?itemids%5B0%5D=',itemid,'&action=showlatest') AS 'URL' FROM (
SELECT * FROM proxy_history WHERE flags<>1 LIMIT 10000
) t1
GROUP BY 2 ORDER BY 1 DESC LIMIT 9;
Biggest values
SELECT itemid,flags,
COUNT(*),
AVG(LENGTH(value))
FROM proxy_history
GROUP BY 1,2 ORDER BY 4 DESC LIMIT 20;
Most occurences
SELECT itemid,flags,
COUNT(*),
AVG(LENGTH(value))
FROM proxy_history
GROUP BY 1,2 ORDER BY 3 DESC LIMIT 20;
Biggest values and occurences
SELECT itemid,flags,COUNT(*) * AVG(LENGTH(value)) FROM proxy_history GROUP BY 1,2 ORDER BY 3 DESC LIMIT 20;
Optimal query to identify data overload. Zabbix 4.0, 5.0, 6.0
SELECT itemid,
COUNT(*),
AVG(LENGTH(value))
FROM proxy_history
WHERE proxy_history.clock > UNIX_TIMESTAMP(NOW()-INTERVAL 1 HOUR)
GROUP BY 1 ORDER BY 2,3 DESC;
Proxy with MySQL. Print URLs for the latest data page for the incoming big data. Zabbix 4.0, 5.0, 6.0
SELECT
LENGTH(value),
CONCAT('history.php?itemids%5B0%5D=', proxy_history.itemid,'&action=showlatest') AS 'URL'
FROM proxy_history
JOIN items ON (items.itemid=proxy_history.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE LENGTH(value) > 60000;
Check big LLD rules and its frequency based on clock. Zabbix 4.0, 5.0, 6.0
SELECT
clock,
hosts.host,
items.key_,
LENGTH(value)
FROM proxy_history
JOIN items ON (items.itemid=proxy_history.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE items.flags=1
AND LENGTH(value) > 6000;
LLD rules. Zabbix 4.0, 5.0, 6.0
SELECT
items.key_,
COUNT(*),
AVG(LENGTH(value))
FROM proxy_history, items
WHERE proxy_history.itemid=items.itemid
AND items.flags=1
GROUP BY 1 ORDER BY 3,2;

Download this section: https://aigarskadikis.github.io/src/proxy.sql
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/proxy.sql

Long running queries. Postgres. PostgreSQL
SELECT pid, user, pg_stat_activity.query_start,
NOW() - pg_stat_activity.query_start AS query_time, query, state, wait_event_type, wait_event
FROM pg_stat_activity
WHERE (NOW() - pg_stat_activity.query_start) > interval '3 seconds';
PostgreSQL, queries more than 100 seconds, process list
SELECT pid, user, pg_stat_activity.query_start,
NOW() - pg_stat_activity.query_start AS query_time, query, state, wait_event_type, wait_event
FROM pg_stat_activity
WHERE (NOW() - pg_stat_activity.query_start) > interval '100 seconds';
PostgreSQL, current state of currently running vacuum
SELECT * FROM pg_stat_progress_vacuum ;
PostgreSQL, queries more than 300 seconds, process list
SELECT pid, user, pg_stat_activity.query_start,
NOW() - pg_stat_activity.query_start AS query_time, query, state, wait_event_type, wait_event
FROM pg_stat_activity
WHERE (NOW() - pg_stat_activity.query_start) > interval '300 seconds';
PostgreSQL, vacuum, autovacuum situation
SELECT schemaname, relname, n_live_tup, n_dead_tup, last_autovacuum
FROM pg_stat_all_tables WHERE n_dead_tup > 0
ORDER BY n_dead_tup DESC;
PostgreSQL, size of hypertables
SELECT table_name, pg_size_pretty(pg_total_relation_size(quote_ident(table_name))), pg_total_relation_size(quote_ident(table_name))
FROM information_schema.tables
WHERE table_schema = 'public'
ORDER BY 3 DESC;
Reset owner
ALTER TABLE history OWNER TO zabbix;
ALTER TABLE history_uint OWNER TO zabbix;
ALTER TABLE history_str OWNER TO zabbix;
ALTER TABLE history_log OWNER TO zabbix;
ALTER TABLE history_text OWNER TO zabbix;
ALTER TABLE trends OWNER TO zabbix;
ALTER TABLE trends_uint OWNER TO zabbix;
ALTER TABLE history_bin OWNER TO zabbix;
Update eventlog entries
-- Rewrite eventlog history so each row's collection time (clock) is replaced
-- by the event's own timestamp: delete the rows and re-insert them with
-- clock := timestamp, in one statement via DELETE ... RETURNING (PostgreSQL).
WITH deleted_rows AS (
DELETE FROM history_log
WHERE itemid IN (
SELECT history_log.itemid
FROM history_log
JOIN items ON history_log.itemid = items.itemid
JOIN hosts ON items.hostid = hosts.hostid
-- only eventlog items, enabled/non-discovered items on enabled hosts
WHERE items.key_ LIKE 'eventlog%'
AND items.status = 0 AND items.flags IN (0,4) AND hosts.status = 0 AND hosts.flags IN (0,4)
)
RETURNING itemid, timestamp, source, severity, value, logeventid, ns
)
-- clock column is fed from the returned timestamp (written twice on purpose)
INSERT INTO history_log (itemid, clock, timestamp, source, severity, value, logeventid, ns)
SELECT itemid, timestamp, timestamp, source, severity, value, logeventid, ns
FROM deleted_rows;
Monitor process. Postgres. PostgreSQL
SELECT * FROM pg_stat_activity WHERE state != 'idle';
What is minor version of Zabbix
SELECT * FROM dbversion;
Remove old sessions in PostgreSQL. Zabbix 7.0
DELETE FROM sessions WHERE lastaccess < EXTRACT(EPOCH FROM NOW() - INTERVAL '24 hours');
Cancel active PostgreSQL queries
SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE state = 'active' and pid <> pg_backend_pid();
Process list without any condition. PostgreSQL
SELECT datid,datname,pid,usesysid,usename,client_addr,client_port,backend_start,query_start,state_change,wait_event_type,wait_event,state,query_id FROM pg_stat_activity;
Size of hyper tables. Size of biggest tables
SELECT *, pg_size_pretty(total_bytes) AS total, pg_size_pretty(index_bytes) AS index, pg_size_pretty(toast_bytes) AS toast, pg_size_pretty(table_bytes) AS table
FROM (SELECT *, total_bytes-index_bytes-coalesce(toast_bytes, 0) AS table_bytes
FROM (SELECT c.oid, nspname AS table_schema, relname AS table_name, c.reltuples AS row_estimate, pg_total_relation_size(c.oid) AS total_bytes, pg_indexes_size(c.oid) AS index_bytes, pg_total_relation_size(reltoastrelid) AS toast_bytes
FROM pg_class c
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE relkind = 'r' ) a) a
ORDER BY 5 DESC LIMIT 500;
biggest tables
SELECT relname AS table_name, pg_size_pretty(pg_total_relation_size(relid)) AS total_size
FROM pg_catalog.pg_stat_user_tables
ORDER BY pg_total_relation_size(relid) DESC LIMIT 10;
Relation between hyper table and table
select * from _timescaledb_catalog.hypertable;
Version of extension
SELECT * FROM pg_extension;

Download this section: https://aigarskadikis.github.io/src/psql.sql
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/psql.sql

How many user groups has debug mode 1. Zabbix 5.0, 5.2
SELECT COUNT(*) FROM usrgrp WHERE debug_mode=1;
Active problems including internal. Zabbix 4.0, 5.0, 6.0, 6.2
SELECT COUNT(*), source, object, severity FROM problem GROUP BY 2,3,4 ORDER BY severity;
Unreachable hosts. PostgreSQL, Zabbix 6.0
SELECT
proxy.host AS proxy,
hosts.host,
interface.error,
STRING_AGG(
DISTINCT CASE interface.useip
WHEN 0 THEN interface.dns
WHEN 1 THEN interface.ip
END, ', '
) AS passiveConnect
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE interface.available=2
GROUP BY 1,2,3
ORDER BY 1;
Failed triggers. Zabbix 7.0
-- Top 20 triggers producing internal events (source=3, object=0), with the
-- trigger description, host and proxy name.
-- NOTE(review): the comma join to proxy acts as an inner join, so triggers on
-- hosts monitored without a proxy (proxyid NULL) are dropped — confirm intended.
SELECT t2.objectid, t2.count, triggers.description, hosts.host, proxy.name FROM (
SELECT objectid, COUNT(*) as count FROM (
-- sample up to 99999 internal trigger events to keep the scan bounded
SELECT * FROM events WHERE source=3 AND object=0 LIMIT 99999
) t1 GROUP BY 1 ORDER BY 2 DESC LIMIT 20
) t2, functions, triggers, items, hosts, proxy
WHERE functions.triggerid = t2.objectid
AND triggers.triggerid = functions.triggerid
AND functions.itemid = items.itemid
AND hosts.hostid = items.hostid
AND proxy.proxyid = hosts.proxyid;
Linked templates to host objects. Zabbix 7.0
SELECT
hosts.host,
ARRAY_TO_STRING(ARRAY_AGG(template.host),', ') AS templates
FROM hosts
JOIN hosts_templates ON (hosts_templates.hostid=hosts.hostid)
LEFT JOIN hosts template ON (hosts_templates.templateid=template.hostid)
WHERE hosts.status IN (0,1)
AND hosts.flags=0
GROUP BY 1 ORDER BY 1;
Unreachable hosts. MySQL, Zabbix 6.0
SELECT
proxy.host AS proxy,
hosts.host,
interface.error,
GROUP_CONCAT(
DISTINCT CASE interface.useip
WHEN 0 THEN interface.dns
WHEN 1 THEN interface.ip
END SEPARATOR ', '
) AS passiveConnect
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE interface.available=2
GROUP BY 1,2,3
ORDER BY 1;
Items which use trapper port. Zabbix 6.0
SELECT proxy.host, hosts.host, CASE items.type
WHEN 2 THEN 'Zabbix trapper'
WHEN 7 THEN 'Zabbix agent (active) check'
END AS type, COUNT(*) FROM items
JOIN hosts ON items.hostid = hosts.hostid
LEFT JOIN hosts proxy ON proxy.hostid = hosts.proxy_hostid
WHERE items.type IN (2,7) AND hosts.status=0 AND items.status=0
GROUP BY 1,2,3
ORDER BY 1,2,3;
Sessions in last day. Zabbix 6.0
-- Sessions per user in the last 24 hours (MySQL).
-- Fixed: the original compared lastaccess with itself minus a day, which is
-- always true and therefore counted every session ever recorded.
SELECT users.username,COUNT(*) FROM sessions, users WHERE
sessions.userid = users.userid
AND sessions.lastaccess > UNIX_TIMESTAMP(NOW())-(3600*24)
GROUP BY 1;
How many autoregistration hits per device in 1h/1d? How many records are in total per day? PostgreSQL Zabbix 7.0
SELECT COUNT(*), autoreg_host.host FROM autoreg_host, events WHERE events.objectid=autoreg_host.autoreg_hostid AND events.clock > EXTRACT(EPOCH FROM NOW() - INTERVAL '1 hours') GROUP BY 2 ORDER BY 1 DESC LIMIT 20;
SELECT COUNT(*), autoreg_host.host FROM autoreg_host, events WHERE events.objectid=autoreg_host.autoreg_hostid AND events.clock > EXTRACT(EPOCH FROM NOW() - INTERVAL '24 hours') GROUP BY 2 ORDER BY 1 DESC LIMIT 20;
SELECT COUNT(*) FROM autoreg_host, events WHERE events.objectid=autoreg_host.autoreg_hostid AND events.clock > EXTRACT(EPOCH FROM NOW() - INTERVAL '24 hours');
How many autoregistration hits per device in 1h/1d? How many records are in total per day? MySQL Zabbix 7.0
SELECT COUNT(*), autoreg_host.host FROM autoreg_host, events WHERE events.objectid=autoreg_host.autoreg_hostid AND events.clock > UNIX_TIMESTAMP(NOW()-INTERVAL 1 HOUR) GROUP BY 2 ORDER BY 1 DESC LIMIT 20;
SELECT COUNT(*), autoreg_host.host FROM autoreg_host, events WHERE events.objectid=autoreg_host.autoreg_hostid AND events.clock > UNIX_TIMESTAMP(NOW()-INTERVAL 24 HOUR) GROUP BY 2 ORDER BY 1 DESC LIMIT 20;
SELECT COUNT(*) FROM autoreg_host, events WHERE events.objectid=autoreg_host.autoreg_hostid AND events.clock > UNIX_TIMESTAMP(NOW()-INTERVAL 24 HOUR);
Most professional way to erase records from MySQL/MariaDB
-- Collect orphaned problem rows (no matching events row) into a temporary
-- table first, then delete by that list. The two-step form sidesteps
-- MySQL/MariaDB's restriction on deleting from a table that the same
-- statement's subquery also reads (ERROR 1093).
CREATE TEMPORARY TABLE tmp_eventids
SELECT eventid
FROM problem p
WHERE NOT EXISTS (SELECT NULL FROM events e WHERE e.eventid = p.eventid);
DELETE FROM problem
WHERE eventid IN (SELECT eventid FROM tmp_eventids);
DROP TEMPORARY TABLE tmp_eventids;
Tell which data table stores each item's values
SELECT items.name,
CASE items.value_type
WHEN 0 THEN 'history'
WHEN 1 THEN 'history_str'
WHEN 2 THEN 'history_log'
WHEN 3 THEN 'history_uint'
WHEN 4 THEN 'history_text'
WHEN 5 THEN 'history_bin'
END AS tableName, items.itemid
FROM hosts, items
WHERE hosts.hostid=items.hostid
AND hosts.host='Zabbix server';
Find the different units in use and in which table the data is stored
SELECT DISTINCT units, CASE value_type
WHEN 0 THEN 'history'
WHEN 1 THEN 'history_str'
WHEN 2 THEN 'history_log'
WHEN 3 THEN 'history_uint'
WHEN 4 THEN 'history_text'
WHEN 5 THEN 'history_bin'
END AS tableName,
COUNT(*)
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status=0 AND hosts.flags IN (0,4) AND items.status=0 AND items.flags IN (0,4)
GROUP BY 1,2
ORDER BY 3;
Extract all items which are measured in bytes
SELECT hosts.host AS host, items.name AS name, trends_uint.clock, trends_uint.value_min, trends_uint.value_avg, trends_uint.value_max
FROM trends_uint
JOIN items ON (items.itemid=trends_uint.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
AND hosts.hostid IN (
SELECT hostid FROM hosts_groups WHERE groupid IN (
SELECT groupid FROM hstgrp WHERE name='Linux servers'
)
)
WHERE items.units='B'
LIMIT 100;
Data which are measured in '%'
-- Float trend values of '%'-unit items for hosts in the
-- 'Linux servers' host group (sample of 100 rows).
SELECT hosts.host AS host, items.name AS name, trends.clock, trends.value_min, trends.value_avg, trends.value_max
FROM trends
JOIN items ON items.itemid=trends.itemid
JOIN hosts ON hosts.hostid=items.hostid
WHERE items.units='%'
  AND hosts.hostid IN (
      SELECT hostid FROM hosts_groups WHERE groupid IN (
          SELECT groupid FROM hstgrp WHERE name='Linux servers'
      )
  )
LIMIT 100;
Maximum itemid in database
SELECT nextid FROM ids WHERE table_name='items';
Eventlog item categories
-- Eventlog entries in history_log broken down by item, eventlog source,
-- severity and event id, for enabled eventlog[] items on enabled hosts.
SELECT COUNT(*), items.itemid, source, severity, logeventid, key_ FROM history_log, items
WHERE items.itemid=history_log.itemid
AND history_log.itemid IN (
SELECT itemid FROM items WHERE key_ like 'eventlog%' AND status=0 AND flags IN (0,4) AND hostid IN (
SELECT hostid FROM hosts WHERE status=0 AND flags IN (0,4)
)
)
-- fixed: column 6 (key_) was selected but not grouped; engines without
-- functional-dependency detection reject or pick it nondeterministically
GROUP BY 2,3,4,5,6
ORDER BY 1 DESC
LIMIT 20;
Eventlog items per host
-- Per-host breakdown of eventlog entries stored in history_log, grouped
-- by item, eventlog source, severity and event id (top 20 by volume).
SELECT COUNT(*), items.itemid, history_log.source, history_log.severity, history_log.logeventid, items.key_, hosts.host
FROM history_log, items, hosts
WHERE items.itemid=history_log.itemid AND hosts.hostid=items.hostid
AND items.key_ like 'eventlog%'
AND items.status=0 AND items.flags IN (0,4)
AND hosts.status=0 AND hosts.flags IN (0,4)
GROUP BY 2,3,4,5,6,7
ORDER BY 1 DESC
LIMIT 20;
How items look in database
-- Map each enabled item on an enabled host to the table(s) its values
-- are written to, based on items.value_type.
SELECT hosts.host, CASE items.value_type
WHEN 0 THEN 'history,trends'
WHEN 1 THEN 'history_str'
WHEN 2 THEN 'history_log'
WHEN 3 THEN 'history_uint,trends_uint'
WHEN 4 THEN 'history_text'
WHEN 5 THEN 'history_bin'
END AS tableName, items.itemid, items.key_ FROM items, hosts
WHERE hosts.hostid=items.hostid
-- fixed: 'hosts.flags IN (0,4)' appeared twice; items.flags was never filtered
AND hosts.status=0 AND items.status=0 AND hosts.flags IN (0,4) AND items.flags IN (0,4)
ORDER BY 1,2;
Which userid is disabling triggers. Zabbix 7.0
-- Audit trail of trigger modifications, joined back through
-- functions/items to the host each trigger belongs to.
-- NOTE(review): resourcetype=13 / action=1 are assumed to mean
-- "trigger" / "update" in the Zabbix 7.0 auditlog -- verify against
-- your version's constants.
SELECT DISTINCT auditlog.recordsetid, hosts.host, auditlog.clock, auditlog.userid, triggers.triggerid, auditlog.details
FROM auditlog, triggers, functions, hosts, items
WHERE auditlog.resourcetype = 13
AND auditlog.action = 1
AND triggers.triggerid = auditlog.resourceid
AND functions.triggerid = triggers.triggerid
AND items.itemid = functions.itemid
AND hosts.hostid = items.hostid
ORDER BY auditlog.clock DESC LIMIT 2;
Zabbix agent autoregistration hits on central server. Zabbix 7.0
-- All autoregistration events (source=2, object=3) with the registering
-- host and the proxy it came through, newest first.
SELECT events.clock, events.objectid, autoreg_host.autoreg_hostid, autoreg_host.proxyid, autoreg_host.host
FROM events
JOIN autoreg_host ON autoreg_host.autoreg_hostid=events.objectid
WHERE events.source=2 AND events.object=3
ORDER BY events.clock DESC;
Zabbix PDF report statistics. Zabbix 7.0
SELECT r.reportid,r.userid,r.name,r.dashboardid,r.period,r.cycle,r.weekdays,r.start_time, r.active_since,r.active_till,u.timezone,r.state,r.info,r.lastsent,r.status
FROM report r,users u
WHERE r.userid=u.userid;
Still open problems from year 2024. Zabbix 5.0
-- Trigger problems from calendar year 2024 that are still open
-- (no recovery event) and not suppressed.
SELECT p.eventid,p.objectid,p.clock,p.ns,p.name,p.severity
-- fixed: compare source/object as integers, not strings ('0')
FROM problem p WHERE p.source=0 AND p.object=0 AND NOT EXISTS (
SELECT NULL FROM event_suppress es WHERE es.eventid=p.eventid
) AND p.r_eventid IS NULL
-- fixed: half-open range; BETWEEN also matched 2025-01-01 00:00:00 exactly
AND clock >= UNIX_TIMESTAMP('2024-01-01 00:00:00') AND clock < UNIX_TIMESTAMP('2025-01-01 00:00:00')
ORDER BY p.eventid DESC;
Detect when a discovery rule's 'Keep lost resources period' is set to a dangerous value (0, i.e. immediate deletion). Zabbix 5.2
SELECT hosts.host,items.name FROM items, hosts
WHERE hosts.hostid=items.hostid
AND items.status=0
AND hosts.status=0
AND items.flags=1
AND items.lifetime IN ('0','0d','0h','0m','0s');
A host with most of the dependent items
-- Top 20 hosts by number of enabled dependent items (items.type=18).
SELECT hosts.host, COUNT(*) AS amountOfDependentItems
FROM items
JOIN hosts ON hosts.hostid=items.hostid
WHERE hosts.status=0
  AND items.status=0
  AND items.flags IN (0,4)
  AND items.type=18
GROUP BY hosts.host
ORDER BY amountOfDependentItems DESC
LIMIT 20;
Migrate template/host level macros from one host to another. Zabbix 6.4
UPDATE hostmacro SET hostid=34094 WHERE hostid=34098;
Lld rules
SELECT COUNT(*), origin.host AS Template, another.name AS LLD,
CONCAT('host_discovery.php?form=update&itemid=',items.templateid,'&context=template') AS URL
FROM items
JOIN hosts ON hosts.hostid=items.hostid
JOIN items another ON another.itemid=items.templateid
JOIN hosts origin ON origin.hostid=another.hostid
WHERE hosts.status=0 AND items.status=0
AND hosts.flags IN (0,4)
AND items.flags=1
GROUP BY 2,3,4
ORDER BY 1 ASC;
Aggressive hosts
SELECT COUNT(*), items.key_, items.delay, GROUP_CONCAT(hosts.host) FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status=0 AND hosts.flags IN (0,4)
AND items.status=0 AND items.flags=1
AND items.delay not like '%d'
AND items.delay not like '%h'
GROUP BY 2,3
ORDER BY 1;
Triggerid generates problems in time order
-- Problem/recovery timeline for one trigger (objectid) inside a clock
-- window, newest first with nanosecond tie-break.
SELECT clock, ns, eventid, name,
       CASE value
           WHEN 0 THEN 'resolved'
           WHEN 1 THEN 'starts'
       END AS "problem"
FROM events
WHERE objectid=27375
  AND clock >= 0 AND clock <= 1734517730
ORDER BY clock DESC, ns DESC;
Triggerid generates problems in time order, sort by eventid
SELECT clock, ns, eventid, name,
CASE value
WHEN 0 THEN 'resolved'
WHEN 1 THEN 'starts'
END AS "problem"
FROM events
WHERE objectid=27375 AND clock BETWEEN 0 AND 1734517730
ORDER BY eventid DESC;
Active and inactive sessions. Zabbix 6.0
-- Count of sessions per user split by session status.
SELECT users.username,
       CASE sessions.status
           WHEN 0 THEN 'active'
           WHEN 1 THEN 'not active'
       END AS "status",
       COUNT(*)
FROM sessions
JOIN users ON sessions.userid=users.userid
GROUP BY 1,2
ORDER BY 3 DESC;
Notification stats. Zabbix 6.0
SELECT actions.name AS actionName, users.username AS sendTo, media_type.name AS mediaName,
CASE alerts.status
WHEN 0 THEN 'NOT_SENT'
WHEN 1 THEN 'SENT'
WHEN 2 THEN 'FAILED'
WHEN 3 THEN 'NEW'
END AS "alertStatus",
COUNT(*) AS count
FROM alerts
JOIN actions ON (actions.actionid=alerts.actionid)
JOIN media_type ON (media_type.mediatypeid=alerts.mediatypeid)
LEFT JOIN users ON (users.userid=alerts.userid)
WHERE clock > UNIX_TIMESTAMP(NOW()-INTERVAL 7 DAY)
GROUP BY 1,2,3,4
ORDER BY 5 ASC;
Which item consumes the space the most
SELECT SUM(LENGTH(value)),hosts.host,items.key_ FROM (
SELECT * FROM history_text LIMIT 1000
) t1
JOIN items ON t1.itemid=items.itemid
JOIN hosts ON hosts.hostid=items.hostid
GROUP BY 2,3 ORDER BY 1 DESC LIMIT 9;
Print URL to visit which host consumes a lot of data. MySQL. Zabbix 5.0, 6.0, 7.0
SELECT SUM(LENGTH(value)) AS Size,
CONCAT('history.php?itemids%5B0%5D=',t1.itemid,'&action=showlatest') AS 'URL' FROM (
SELECT * FROM history_text LIMIT 1000
) t1
GROUP BY 2 ORDER BY 1 DESC LIMIT 9;
Open problems by origin
SELECT COUNT(*), source, object FROM problem GROUP BY 2,3 ORDER BY 1 DESC;
Open problems by objectid
SELECT COUNT(*), source, object, objectid FROM problem GROUP BY 2,3,4 ORDER BY 1 DESC LIMIT 20;
Online users. Zabbix 6.0
SELECT users.name, lastaccess, sessionid
FROM sessions,users
WHERE users.userid=sessions.userid
ORDER BY lastaccess DESC
LIMIT 20;
PostgreSQL copy data from one table to another. Zabbix 7.0
INSERT INTO trends SELECT * FROM trends_old ON CONFLICT (clock, itemid) DO NOTHING;
INSERT INTO trends_uint SELECT * FROM trends_uint_old ON CONFLICT (clock, itemid) DO NOTHING;
Show refresh rate. Zabbix 5.0
SELECT users.alias, profiles.value_int AS refreshIntensity, dashboard.name AS dashboardName, widget.name AS widgetName
FROM profiles,users,widget,dashboard
WHERE users.userid=profiles.userid
AND profiles.idx2=widget.widgetid
AND dashboard.dashboardid=widget.dashboardid
AND idx='web.dashbrd.widget.rf_rate';
Reset refresh rate
UPDATE profiles SET value_int=900 WHERE idx='web.dashbrd.widget.rf_rate';
Duplicate IP addresses. Hosts with the same IP address. Zabbix 7.0
-- Interface IPs shared by more than one enabled, non-discovered host.
SELECT interface.ip, GROUP_CONCAT(hosts.host), COUNT(*)
FROM interface
JOIN hosts ON hosts.hostid=interface.hostid
WHERE hosts.status=0
  AND hosts.flags=0
GROUP BY interface.ip
HAVING COUNT(*) > 1;
Most of items at host level
SELECT hosts.host, COUNT(*) FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE hosts.status IN (0,1) AND hosts.flags IN (0,4)
GROUP BY 1
ORDER BY 2 ASC;
Error messages by category. Zabbix 7.0
SELECT DISTINCT item_rtdata.error, GROUP_CONCAT(hosts.host) AS hosts, COUNT(*) AS count FROM item_rtdata,items,hosts
WHERE item_rtdata.itemid=items.itemid
AND hosts.hostid=items.hostid
AND hosts.status=0
AND item_rtdata.error NOT IN ('')
GROUP BY 1
ORDER BY 3 ASC;
Query host and interface details. Zabbix 5.0
SELECT proxy.host AS proxy, hosts.host,
hosts.hostid,
interface.interfaceid,
interface.main,
interface.type,
interface.useip,
interface.ip,
interface.dns,
interface.port,
interface_snmp.version,
interface_snmp.bulk,
interface_snmp.community,
interface_snmp.securityname,
interface_snmp.securitylevel,
interface_snmp.authpassphrase,
interface_snmp.privpassphrase,
interface_snmp.authprotocol,
interface_snmp.privprotocol,
interface_snmp.contextname
FROM hosts
LEFT JOIN interface ON (interface.hostid=hosts.hostid)
LEFT JOIN interface_snmp ON (interface.interfaceid=interface_snmp.interfaceid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.status IN (0,1) AND hosts.flags=0;
Show events which got suppressed. The query does not show the timestamp at which suppression started
SELECT FROM_UNIXTIME(events.clock) AS problemTime, FROM_UNIXTIME(event_suppress.suppress_until) AS suppressUntil, events.name
FROM events, event_suppress, triggers
WHERE events.eventid=event_suppress.eventid
AND events.source=0 AND events.object=0
AND triggers.triggerid=events.objectid
AND triggers.priority IN (0,1,2,3,4,5)
AND events.clock >= UNIX_TIMESTAMP("2024-01-01 00:00:00") AND events.clock < UNIX_TIMESTAMP("2025-02-01 00:00:00");
Unreachable ZBX host. Zabbix 4.0, 4.2, 4.4, 5.0, 5.2
SELECT
proxy.host AS proxy,
hosts.host,
hosts.error AS hostError,
CONCAT('hosts.php?form=update&hostid=', hosts.hostid) AS goTo
FROM hosts
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.status=0
AND LENGTH(hosts.error) > 0;
Unreachable ZBX hosts. Zabbix 6.4, 7.0
SELECT proxy.name,
hosts.host,
interface.error,
CONCAT('zabbix.php?action=host.edit&hostid=',hosts.hostid) AS goTo
FROM hosts
LEFT JOIN hosts proxy ON (hosts.proxyid=proxy.proxyid)
JOIN interface ON (interface.hostid=hosts.hostid)
WHERE LENGTH(interface.error) > 0 AND interface.type=1;
Blindly select metrics from a partition without knowing the total amount. Aggregate statistics
SELECT itemid, COUNT(*) FROM (
SELECT itemid FROM history_uint PARTITION (p202310290000) LIMIT 9999
) b GROUP BY 1 ORDER BY 2 DESC;
Which template has JSONPath preprocessing. Zabbix 6.0
SELECT hosts.host, items.name, item_preproc
FROM hosts, items, item_preproc
WHERE hosts.hostid=items.hostid AND items.itemid=item_preproc.itemid
AND item_preproc.type=12
AND item_preproc.params LIKE '%memory.sum%';
Default update frequency for dashboard and dasboard page. If display_period=0 for page, then dashboard refresh is used. Zabbix 6.4
SELECT DISTINCT dashboard.name AS dashBoardName,
dashboard_page.dashboardid,
dashboard.display_period AS dasboardRefresh,
dashboard_page.name AS pageName,
dashboard_page.dashboard_pageid,
dashboard_page.display_period AS pageRefresh
FROM dashboard_page, dashboard
WHERE dashboard_page.dashboardid=dashboard.dashboardid
AND dashboard.templateid IS NULL;
Set all global dashboards to use 5 minute update frequency. Zabbix 6.4
UPDATE dashboard SET display_period=300 WHERE display_period<>300 AND templateid IS NULL;
Set all global dashboard pages to respect the default dashboard update frequency. Zabbix 6.4
UPDATE dashboard_page SET display_period=0 WHERE display_period<>0 AND dashboardid IN (SELECT dashboardid FROM dashboard WHERE templateid IS NULL);
Set maximum refresh rate (15 minutes) for widget where user installed a custom refresh rate. Zabbix 6.4
UPDATE profiles SET value_int=900 WHERE idx='web.dashboard.widget.rf_rate';
Remove any override user made to individual widget. Zabbix 6.4
DELETE FROM profiles WHERE idx='web.dashboard.widget.rf_rate';
Delete orphaned events. Use LIMIT in PostgreSQL
-- Delete trigger events (source=0, object=0) whose trigger no longer
-- exists, 10 at a time. The LIMIT-in-IN-subquery form works on
-- PostgreSQL (see heading); MySQL rejects it.
DELETE FROM events WHERE eventid IN (
SELECT eventid FROM events WHERE object=0 AND source=0 AND objectid NOT IN (
SELECT triggerid FROM triggers
) LIMIT 10
);
Delete orphaned events in batches. Works on PostgreSQL, does not work on MySQL
-- Batch variant using NOT EXISTS, which (unlike the NOT IN form above
-- in this file) behaves correctly even if triggerid could be NULL.
DELETE FROM events WHERE eventid IN (
SELECT eventid FROM events WHERE object = 0 AND source = 0 AND NOT EXISTS (SELECT 1 FROM triggers t WHERE t.triggerid = events.objectid) LIMIT 1000
);
Biggest data per itemid per partition
SELECT SUM(LENGTH(value)) AS total,itemid FROM history_log PARTITION (p202407130000) GROUP BY 2 ORDER BY 1 DESC LIMIT 10;
Faster way to pick up some data for aggregation
SELECT SUM(LENGTH(value)),itemid FROM (
SELECT * FROM history_text PARTITION (p202407130000) LIMIT 10000
) t1
GROUP BY 2 ORDER BY 1 DESC LIMIT 9;
Permissions checks. Zabbix 5.0
SELECT DISTINCT users.alias, FROM_UNIXTIME(sessions.lastaccess)
FROM users, users_groups, sessions, usrgrp, rights, hstgrp
WHERE sessions.status=0
AND users.userid=users_groups.userid
AND users.userid=sessions.userid
AND users_groups.usrgrpid=usrgrp.usrgrpid
AND users_groups.userid=users.userid
AND usrgrp.usrgrpid=rights.groupid
AND rights.id=hstgrp.groupid
AND rights.groupid IN (28)
AND users.type < 3
ORDER BY 2 DESC LIMIT 20;
Consume most space in history_text table. PostgreSQL. Zabbix 7.0
SELECT SUM(LENGTH(history_text.value)) AS length, history_text.itemid
FROM history_text
GROUP BY 2
ORDER BY 1
DESC LIMIT 20;
Consume most space in history_str table. PostgreSQL. Zabbix 7.0
SELECT SUM(LENGTH(history_str.value)) AS length, history_str.itemid
FROM history_str
GROUP BY 2
ORDER BY 1
DESC LIMIT 20;
Mixed SNMPv3 credentials. Zabbix 4.4
SELECT snmpv3_securityname AS user,
CASE snmpv3_securitylevel
WHEN 0 THEN 'noAuthNoPriv'
WHEN 1 THEN 'authNoPriv'
WHEN 2 THEN 'authPriv'
END AS secLev,
CASE snmpv3_authprotocol
WHEN 0 THEN 'MD5'
WHEN 1 THEN 'SHA'
END AS authProto,
snmpv3_authpassphrase AS authPhrase,
CASE snmpv3_privprotocol
WHEN 0 THEN 'DES'
WHEN 1 THEN 'AES'
END AS privProto,
snmpv3_privpassphrase AS privPhrase,
CASE flags
WHEN 0 THEN 'normal'
WHEN 1 THEN 'rule'
WHEN 2 THEN 'prototype'
WHEN 4 THEN 'discovered'
END AS flags,
COUNT(*)
FROM items
WHERE type=6
GROUP BY 1,2,3,4,5,6,7;
Zabbix agent interface with errors. Zabbix 7.0
SELECT proxy.name, hosts.host, interface.dns, interface.ip, interface.useip, interface.error FROM hosts
LEFT JOIN proxy ON (hosts.proxyid=proxy.proxyid)
JOIN interface ON (interface.hostid=hosts.hostid)
WHERE LENGTH(interface.error) > 0 AND interface.type=1;
SNMP interface with errors. Zabbix 7.0
SELECT proxy.name, hosts.host, interface.dns, interface.ip, interface.useip, interface.error FROM hosts
LEFT JOIN proxy ON (hosts.proxyid=proxy.proxyid)
JOIN interface ON (interface.hostid=hosts.hostid)
WHERE LENGTH(interface.error) > 0 AND interface.type=2;
Amount of unsupported items on host. Zabbix 7.0
SELECT
hosts.host,
COUNT(*)
FROM items, item_rtdata, hosts
WHERE item_rtdata.state=1
AND hosts.status=0
AND items.status=0
AND item_rtdata.itemid=items.itemid
AND hosts.hostid=items.hostid
GROUP BY 1
ORDER BY 2 ASC;
Hosts having this particula error message at item level. Zabbix 7.0
SELECT DISTINCT hosts.host, proxy.name
FROM items, item_rtdata, hosts
LEFT JOIN proxy ON hosts.proxyid=proxy.proxyid
WHERE item_rtdata.state=1
AND hosts.status=0
AND items.status=0
AND item_rtdata.itemid=items.itemid
AND hosts.hostid=items.hostid
AND item_rtdata.error like '%Unknown user name%';
Store textual garbage. Zabbix 7.0
SELECT SUM(LENGTH(value)) AS total,
CONCAT('history.php?itemids%5B0%5D=',itemid,'&action=showlatest')
FROM history_text PARTITION (p2024_08_19) GROUP BY 2 ORDER BY 1 DESC LIMIT 10;
Store textual garbage without. Scan full table. Zabbix 7.0
SELECT SUM(LENGTH(value)) AS total,
CONCAT('history.php?itemids%5B0%5D=',itemid,'&action=showlatest')
FROM history_text GROUP BY 2 ORDER BY 1 DESC LIMIT 10;
Active checks are not reporting back. Zabbix 7.0
SELECT proxy.name,hosts.host FROM hosts
JOIN host_rtdata ON hosts.hostid=host_rtdata.hostid
LEFT JOIN proxy ON hosts.proxyid=proxy.proxyid
WHERE host_rtdata.active_available=2;
Failed actions. Zabbix 5.0, 6.0, 7.0
-- Alerts that failed to send (alerts.status=2) with their error message,
-- the action that fired them and a link to the originating event.
SELECT FROM_UNIXTIME(alerts.clock) AS clock,
CONCAT('tr_events.php?triggerid=',events.objectid,'&eventid=',alerts.eventid) AS URL,
alerts.error,
actions.name
FROM alerts
JOIN events ON alerts.eventid=events.eventid
JOIN actions ON actions.actionid=alerts.actionid
WHERE alerts.status=2
ORDER BY 1 ASC; -- fixed: statement was missing its terminating semicolon
Host and item name for a given itemid. Useful to detect items storing data with a wrong timestamp. Zabbix 6.0
SELECT proxy.host AS proxy,
hosts.host,
CASE items.type
WHEN 0 THEN 'Zabbix agent'
WHEN 1 THEN 'SNMPv1 agent'
WHEN 2 THEN 'Zabbix trapper'
WHEN 3 THEN 'Simple check'
WHEN 4 THEN 'SNMPv2 agent'
WHEN 5 THEN 'Zabbix internal'
WHEN 6 THEN 'SNMPv3 agent'
WHEN 7 THEN 'Zabbix agent (active) check'
WHEN 8 THEN 'Aggregate'
WHEN 9 THEN 'HTTP test (web monitoring scenario step)'
WHEN 10 THEN 'External check'
WHEN 11 THEN 'Database monitor'
WHEN 12 THEN 'IPMI agent'
WHEN 13 THEN 'SSH agent'
WHEN 14 THEN 'TELNET agent'
WHEN 15 THEN 'Calculated'
WHEN 16 THEN 'JMX agent'
WHEN 17 THEN 'SNMP trap'
WHEN 18 THEN 'Dependent item'
WHEN 19 THEN 'HTTP agent'
WHEN 20 THEN 'SNMP agent'
WHEN 21 THEN 'Script item'
END AS type,
items.name
FROM hosts
JOIN items ON items.hostid=hosts.hostid
LEFT JOIN hosts proxy ON hosts.proxy_hostid=proxy.hostid
WHERE hosts.status=0
AND items.status=0
AND items.itemid IN (123,456);
Orphaned events, PostgreSQL, Zabbix 6.0
-- Purge event_recovery/event_suppress rows referencing events that no
-- longer exist, 100 rows per statement. LIMIT inside the IN subquery is
-- PostgreSQL-only (see heading). Covers all three event id columns of
-- event_recovery, then event_suppress.
DELETE FROM event_recovery WHERE eventid IN (SELECT eventid FROM event_recovery WHERE eventid NOT IN (SELECT eventid FROM events) LIMIT 100);
DELETE FROM event_recovery WHERE r_eventid IN (SELECT r_eventid FROM event_recovery WHERE r_eventid NOT IN (SELECT eventid FROM events) LIMIT 100);
DELETE FROM event_recovery WHERE c_eventid IN (SELECT c_eventid FROM event_recovery WHERE c_eventid NOT IN (SELECT eventid FROM events) LIMIT 100);
DELETE FROM event_suppress WHERE eventid IN (SELECT eventid FROM event_suppress WHERE eventid NOT IN (SELECT eventid FROM events) LIMIT 100);
Base memory GB
-- Fixed ("base") MySQL memory footprint in GiB: global buffers only,
-- independent of connection count.
SELECT ( @@key_buffer_size
+ @@innodb_buffer_pool_size
+ @@innodb_log_buffer_size )
/ (1024 * 1024 * 1024) AS BASE_MEMORY_GB;
Max memory GB
-- Theoretical worst-case MySQL memory in GiB: global buffers plus the
-- per-connection buffers multiplied by max_connections (every client
-- would have to allocate every buffer simultaneously to reach this).
SELECT ( @@key_buffer_size
+ @@innodb_buffer_pool_size
+ @@innodb_log_buffer_size
+ @@max_connections * (
@@read_buffer_size
+ @@read_rnd_buffer_size
+ @@sort_buffer_size
+ @@join_buffer_size
+ @@binlog_cache_size
+ @@thread_stack
+ @@max_heap_table_size
)
) / (1024 * 1024 * 1024) AS MAX_MEMORY_GB;
Unreachable SNMP hosts. Zabbix 4.0, 4.2, 4.4, 5.0, 5.2
SELECT
proxy.host AS proxy,
hosts.host,
hosts.snmp_error AS hostError,
CONCAT('hosts.php?form=update&hostid=', hosts.hostid) AS goTo
FROM hosts
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.status=0
AND LENGTH(hosts.snmp_error) > 0;
Duplicate devices phase1. Find duplicate interfaces based on the device serial number stored in the "Serial A" field
SELECT host_inventory.serialno_a AS serial, GROUP_CONCAT(interface.interfaceid) AS iID
FROM interface, host_inventory, hosts
WHERE host_inventory.hostid=interface.hostid
AND hosts.hostid=host_inventory.hostid
AND hosts.status=0
AND interface.main=0
AND host_inventory.serialno_a='123456'
GROUP BY host_inventory.serialno_a;
Same host name
SELECT host_inventory.name AS name, GROUP_CONCAT(interface.interfaceid) AS IP, GROUP_CONCAT(hosts.hostid) AS hID
FROM interface, host_inventory, hosts
WHERE host_inventory.hostid=interface.hostid
AND hosts.hostid=host_inventory.hostid
AND hosts.status=0
AND interface.main=0
AND host_inventory.name='idrac'
GROUP BY 1;
Duplicate devices phase2. Add the interface to the other device
UPDATE interface SET hostid=10561 WHERE interfaceid IN (28,32,437);
Duplicate devices phase3. Summary
SELECT host_inventory.serialno_a AS serial, GROUP_CONCAT(interface.ip) AS IP, GROUP_CONCAT(hosts.hostid) AS hID
FROM interface, host_inventory, hosts
WHERE host_inventory.hostid=interface.hostid
AND hosts.hostid=host_inventory.hostid
AND hosts.status=0
AND LENGTH(host_inventory.serialno_a) > 0
GROUP BY host_inventory.serialno_a
HAVING COUNT(*) > 1;
Order how hosts got discovered. Must have global setting on to keep older records. Zabbix 6.0
SELECT FROM_UNIXTIME(clock), name,
CASE value
WHEN 0 THEN 'UP'
WHEN 1 THEN 'DOWN'
WHEN 2 THEN 'discovered'
WHEN 3 THEN 'lost'
END AS "status"
FROM events
WHERE source=1
AND value=2
ORDER BY 1 ASC;
Go directly per unsupported items per host object. Zabbix 6.0
SELECT COUNT(*),proxy.host, hosts.host, CONCAT('items.php?context=host&filter_hostids%5B%5D=',hosts.hostid,'&filter_name=&filter_key=&filter_type=-1&filter_value_type=-1&filter_snmp_oid=&filter_history=&filter_trends=&filter_delay=&filter_evaltype=0&filter_tags%5B0%5D%5Btag%5D=&filter_tags%5B0%5D%5Boperator%5D=0&filter_tags%5B0%5D%5Bvalue%5D=&filter_state=1&filter_with_triggers=-1&filter_inherited=-1&filter_discovered=-1&filter_set=1') AS hostid
FROM items
JOIN item_rtdata ON (item_rtdata.itemid=items.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.status=0
AND item_rtdata.state=1
GROUP BY 2,3,4
ORDER BY 1 DESC
LIMIT 10;
All items which belongs to application 'DR'. Zabbix 5.0
SELECT hosts.host, items.key_
FROM items, hosts, items_applications, applications
WHERE items_applications.itemid=items.itemid
AND applications.applicationid=items_applications.applicationid
AND hosts.hostid=items.hostid
AND hosts.status=0
AND items.status=0
AND items.flags IN (0,4)
AND applications.name='DR';
Clean up trends for items which now does not want to store trends or item is disabled. Zabbix 5.0
-- Purge stored values for items that either no longer keep
-- history/trends ('0') or are disabled (status=1).
-- SQL_LOG_BIN=0 keeps these bulk deletes out of the binary log for this
-- session only -- replicas will NOT receive these deletes.
SET SESSION SQL_LOG_BIN=0;
-- value_type 0 -> trends
DELETE FROM trends WHERE itemid IN (SELECT itemid FROM items WHERE value_type=0 AND trends='0' AND flags IN (0,4));
DELETE FROM trends WHERE itemid IN (SELECT itemid FROM items WHERE value_type=0 AND status=1 AND flags IN (0,4));
-- value_type 3 -> trends_uint
DELETE FROM trends_uint WHERE itemid IN (SELECT itemid FROM items WHERE value_type=3 AND trends='0' AND flags IN (0,4));
DELETE FROM trends_uint WHERE itemid IN (SELECT itemid FROM items WHERE value_type=3 AND status=1 AND flags IN (0,4));
-- value_type 4 -> history_text
DELETE FROM history_text WHERE itemid IN (SELECT itemid FROM items WHERE value_type=4 AND history='0' AND flags IN (0,4));
DELETE FROM history_text WHERE itemid IN (SELECT itemid FROM items WHERE value_type=4 AND status=1 AND flags IN (0,4));
-- value_type 1 -> history_str
DELETE FROM history_str WHERE itemid IN (SELECT itemid FROM items WHERE value_type=1 AND history='0' AND flags IN (0,4));
DELETE FROM history_str WHERE itemid IN (SELECT itemid FROM items WHERE value_type=1 AND status=1 AND flags IN (0,4));
-- value_type 2 -> history_log
DELETE FROM history_log WHERE itemid IN (SELECT itemid FROM items WHERE value_type=2 AND history='0' AND flags IN (0,4));
DELETE FROM history_log WHERE itemid IN (SELECT itemid FROM items WHERE value_type=2 AND status=1 AND flags IN (0,4));
List LLD rules which have an "input" from a "Zabbix trapper" item. This lists what is about to be disabled.
SELECT hosts.host AS host, master_itemid.key_ AS master, items.key_ AS LLD
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN items master_itemid ON (master_itemid.itemid=items.master_itemid)
WHERE items.flags=1
AND hosts.status=0
AND master_itemid.type=2;
Note down exact itemIDs for LLD items which is attached to "Zabbix trapper" item:
SET SESSION group_concat_max_len = 1000000; SELECT GROUP_CONCAT(items.itemid)
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN items master_itemid ON (master_itemid.itemid=items.master_itemid)
WHERE items.flags=1
AND hosts.status=0
AND master_itemid.type=2;
Dependency tree for active problems. Zabbix 6.0
SELECT triggerid_down, COUNT(*) FROM trigger_depends WHERE triggerid_up IN (SELECT objectid FROM problem WHERE source=0) GROUP BY 1 HAVING COUNT(*)>1 ORDER BY 2 DESC LIMIT 10;
SELECT triggerid_down, COUNT(*) FROM trigger_depends WHERE triggerid_down IN (SELECT objectid FROM problem WHERE source=0) GROUP BY 1 HAVING COUNT(*)>1 ORDER BY 2 DESC LIMIT 10;
SELECT triggerid_up, COUNT(*) FROM trigger_depends WHERE triggerid_down IN (SELECT objectid FROM problem WHERE source=0) GROUP BY 1 HAVING COUNT(*)>1 ORDER BY 2 DESC LIMIT 10;
SELECT triggerid_up, COUNT(*) FROM trigger_depends WHERE triggerid_up IN (SELECT objectid FROM problem WHERE source=0) GROUP BY 1 HAVING COUNT(*)>1 ORDER BY 2 DESC LIMIT 10;
Usernames, roles and user type. Zabbix 6.0
SELECT
users.username,
role.name AS Role,
CASE role.type
WHEN 1 THEN 'user'
WHEN 2 THEN 'admin'
WHEN 3 THEN 'super admin'
END AS UserType
FROM users
JOIN role ON (users.roleid=role.roleid);
Variety of items key-wise
-- How many enabled items exist per item key across enabled hosts,
-- rarest keys first.
SELECT DISTINCT items.key_, COUNT(*)
FROM items
JOIN hosts ON hosts.hostid=items.hostid
WHERE items.status=0
  AND hosts.status=0
  AND items.flags IN (0,1,2)
  AND hosts.flags IN (0,4)
GROUP BY items.key_
ORDER BY 2 ASC;
Active and disabled hosts and items. Zabbix 4.0, 4.2, 4.4, 5.0, 5.2, 5.4, 6.0
SELECT
proxy.host AS proxy,
CASE hosts.status
WHEN 0 THEN 'Active'
WHEN 1 THEN 'Disabled'
END AS host,
CASE items.status
WHEN 0 THEN 'Active'
WHEN 1 THEN 'Disabled'
END AS item,
CASE items.type
WHEN 0 THEN 'Zabbix agent'
WHEN 1 THEN 'SNMPv1 agent'
WHEN 2 THEN 'Zabbix trapper'
WHEN 3 THEN 'Simple check'
WHEN 4 THEN 'SNMPv2 agent'
WHEN 5 THEN 'Zabbix internal'
WHEN 6 THEN 'SNMPv3 agent'
WHEN 7 THEN 'Zabbix agent (active) check'
WHEN 8 THEN 'Aggregate'
WHEN 9 THEN 'HTTP test (web monitoring scenario step)'
WHEN 10 THEN 'External check'
WHEN 11 THEN 'Database monitor'
WHEN 12 THEN 'IPMI agent'
WHEN 13 THEN 'SSH agent'
WHEN 14 THEN 'TELNET agent'
WHEN 15 THEN 'Calculated'
WHEN 16 THEN 'JMX agent'
WHEN 17 THEN 'SNMP trap'
WHEN 18 THEN 'Dependent item'
WHEN 19 THEN 'HTTP agent'
WHEN 20 THEN 'SNMP agent'
WHEN 21 THEN 'Script item'
END AS type,
COUNT(*)
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.status IN (0,1) AND items.status IN (0,1)
GROUP BY 1,2,3,4
ORDER BY 1,2,3,4,5 DESC;
Plain items at the template level which use positional macro. Zabbix 4.0
SELECT items.name, hosts.host AS template, CONCAT('items.php?form=update&itemid=',items.itemid) AS URL
FROM hosts, items
WHERE hosts.hostid=items.hostid
AND hosts.status=3
AND items.flags IN (0)
AND items.name LIKE '%$%' AND items.name NOT LIKE '%{$%';
Monitoring => Service problem. Zabbix 6.0
SELECT services.name,
service_alarms.clock,
service_alarms.value,
service_alarms.serviceid,
service_alarms.servicealarmid
FROM service_alarms, services
WHERE service_alarms.serviceid=services.serviceid
ORDER BY 1,2,3,4,5;
Statistics per maintenance. Zabbix 6.0
SELECT maintenance_status, maintenance_type, COUNT(*) FROM hosts WHERE flags IN (0,4) AND status=0 GROUP BY 1,2;
Statistics per maintenance including starting time. Zabbix 6.0
SELECT maintenance_status, maintenance_type, maintenance_from, COUNT(*) FROM hosts WHERE flags IN (0,4) AND status=0 GROUP BY 1,2,3;
Start time explained for postgreSQL. Zabbix 6.0
-- Maintenance start time rendered for PostgreSQL.
-- fixed: to_char pattern 'HH:mm' printed hour:MONTH (to_char patterns are
-- case-insensitive, so 'mm' = 'MM' = month); DATE() also zeroed the time
-- of day. Format the full timestamp with HH24:MI instead.
SELECT maintenance_status, maintenance_type, TO_CHAR(TO_TIMESTAMP(maintenance_from),'YYYY-MM-DD HH24:MI'), COUNT(*) FROM hosts WHERE flags IN (0,4) AND status=0 GROUP BY 1,2,3 ORDER BY 3;
Hosts in the maintenance window and responsible profile. Zabbix 6.0
-- Hosts currently in maintenance grouped by maintenance window name and
-- its start time (PostgreSQL).
-- fixed: 'HH:mm' printed hour:MONTH in to_char and DATE() zeroed the
-- time of day; use HH24:MI on the full timestamp.
SELECT maintenances.name, TO_CHAR(TO_TIMESTAMP(maintenance_from),'YYYY-MM-DD HH24:MI'), COUNT(*)
FROM hosts
JOIN maintenances_hosts ON (maintenances_hosts.hostid=hosts.hostid)
JOIN maintenances ON (maintenances.maintenanceid=maintenances_hosts.maintenanceid)
WHERE hosts.flags IN (0,4) AND hosts.status=0 AND hosts.maintenance_status=1 GROUP BY 1,2 ORDER BY 2;
Alerts
SELECT status, actionid, mediatypeid, COUNT(*) FROM alerts GROUP BY 1,2,3 ORDER BY 4 DESC;
Alerts in last 7 days
SELECT status, actionid, mediatypeid, COUNT(*) FROM alerts WHERE clock > UNIX_TIMESTAMP(NOW()-INTERVAL 7 DAY) GROUP BY 1,2,3 ORDER BY 4 DESC;
Show SNMPv3 devices. Zabbix 5.0
SELECT proxy.host, interface.ip
FROM interface
JOIN hosts ON hosts.hostid=interface.hostid
JOIN interface_snmp ON interface_snmp.interfaceid=interface.interfaceid
JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE interface.main=1
AND interface.type=2
AND interface_snmp.version=3
AND hosts.status=0
ORDER BY 1,2;
How many 'nodata' functions are applied
SELECT COUNT(*)
FROM triggers
JOIN functions ON functions.triggerid=triggers.triggerid
JOIN items ON items.itemid=functions.itemid
JOIN hosts ON hosts.hostid=items.hostid
WHERE hosts.status=0 AND items.status=0 AND triggers.status=0
AND functions.name='nodata';
Nodata functions per host
SELECT COUNT(*), hosts.name
FROM triggers
JOIN functions ON functions.triggerid=triggers.triggerid
JOIN items ON items.itemid=functions.itemid
JOIN hosts ON hosts.hostid=items.hostid
WHERE hosts.status=0 AND items.status=0 AND triggers.status=0
AND functions.name='nodata'
GROUP BY 2 ORDER BY 1 DESC LIMIT 30;
If the "items.type" is 2 (Zabbix trapper) or 7 (Zabbix agent active), there's a bigger chance of having this "Duplicate entry" problem because of misconfiguration
SELECT proxy.host, hosts.host, items.type, items.itemid, items.delay, items.key_, items.flags
FROM items
JOIN hosts ON hosts.hostid=items.hostid
LEFT JOIN hosts proxy ON hosts.proxy_hostid=proxy.hostid
WHERE items.itemid IN (12,34,56);
Reconstruct auditlog
-- Rebuild auditlog keeping only the last 5 days of records:
-- copy recent rows aside, truncate, copy back, drop the scratch table.
CREATE TABLE auditlog_temp LIKE auditlog;
-- fixed: MySQL interval unit is DAY, not DAYS (original was a syntax error)
INSERT INTO auditlog_temp SELECT * FROM auditlog WHERE clock > UNIX_TIMESTAMP(NOW() - INTERVAL 5 DAY);
TRUNCATE TABLE auditlog;
INSERT INTO auditlog SELECT * FROM auditlog_temp;
DROP TABLE auditlog_temp;
List not discovered triggers
SELECT DISTINCT triggers.triggerid, triggers.description, hosts.host FROM triggers
JOIN functions ON functions.triggerid=triggers.triggerid
JOIN items ON items.itemid=functions.itemid
JOIN hosts ON items.hostid=hosts.hostid
WHERE items.itemid IN (
SELECT
items.itemid
FROM items
JOIN item_discovery ON item_discovery.itemid=items.itemid
JOIN hosts ON hosts.hostid=items.hostid
WHERE item_discovery.ts_delete > 0
);
Delete not discovered triggers
-- Delete (in batches of 100) triggers whose items are pending deletion
-- by LLD (item_discovery.ts_delete > 0).
-- fixed for MySQL: LIMIT is not allowed directly in an IN subquery, and
-- the DELETE target table cannot be read in its own subquery; wrapping
-- the SELECT in a derived table works around both restrictions.
DELETE FROM triggers WHERE triggerid IN (
SELECT triggerid FROM (
SELECT DISTINCT triggers.triggerid FROM triggers
JOIN functions ON functions.triggerid=triggers.triggerid
JOIN items ON items.itemid=functions.itemid
JOIN hosts ON items.hostid=hosts.hostid
WHERE items.itemid IN (
SELECT
items.itemid
FROM items
JOIN item_discovery ON item_discovery.itemid=items.itemid
JOIN hosts ON hosts.hostid=items.hostid
WHERE item_discovery.ts_delete > 0
) LIMIT 100
) AS doomed
);
Unreachable ZBX hosts with IP and error message. Zabbix 6.0, 6.4
SELECT proxy.host AS proxy, interface.ip, interface.port, hosts.host, interface.error
FROM hosts
LEFT JOIN hosts proxy ON hosts.proxy_hostid=proxy.hostid
JOIN interface ON interface.hostid=hosts.hostid
WHERE LENGTH(interface.error) > 0
AND interface.type=1
AND interface.main=1
ORDER BY 1,2;
Unreachable SNMP hosts with IP and error message. Zabbix 6.0, 6.4
SELECT proxy.host AS proxy, interface.ip, interface.port, hosts.host, interface.error
FROM hosts
LEFT JOIN hosts proxy ON hosts.proxy_hostid=proxy.hostid
JOIN interface ON interface.hostid=hosts.hostid
WHERE LENGTH(interface.error) > 0
AND interface.type=2
AND interface.main=1
ORDER BY 1,2;
Zabbix queue. Zabbix 6.4
-- Enabled hosts whose main interface is unavailable (available=2) and
-- carries an error message, with a link to edit the host.
SELECT proxy.host AS proxy, hosts.host,
CASE interface.type
WHEN 0 THEN 'interface'
WHEN 1 THEN 'ZBX'
WHEN 2 THEN 'SNMP'
WHEN 3 THEN 'IPMI'
WHEN 4 THEN 'JMX'
END AS type,
-- fixed: missing comma after interface.error made the CONCAT column a syntax error
interface.ip, interface.dns, interface.port, interface.error,
CONCAT('zabbix.php?action=host.edit&hostid=',hosts.hostid) AS goTo
FROM interface
JOIN hosts ON hosts.hostid=interface.hostid
LEFT JOIN hosts proxy ON hosts.proxy_hostid=proxy.hostid
WHERE interface.main=1 AND interface.available=2 AND LENGTH(interface.error) > 0 AND hosts.status=0;
Set all HA nodes as gracefuly shut down. This must be done after kill -9
UPDATE ha_node SET status=1;
The occurrence of time-based functions
-- Top 20 hosts by number of time-based trigger functions on enabled
-- items of enabled hosts.
SELECT hosts.host, COUNT(*)
FROM functions
JOIN items ON functions.itemid=items.itemid
JOIN hosts ON items.hostid=hosts.hostid
WHERE hosts.status=0 AND items.status=0 AND items.flags IN (0,4) AND hosts.flags IN (0,4)
  AND functions.name IN ('nodata','time','fuzzytime','now','date','dayofmonth','dayofweek')
GROUP BY hosts.host
ORDER BY 2 DESC
LIMIT 20;
Statistics of all trigger functions
SELECT COUNT(*), hosts.host FROM hosts, items
WHERE hosts.hostid=items.hostid AND hosts.status=0 AND items.status=0 AND items.flags IN (0,4) AND hosts.flags IN (0,4)
GROUP BY 2 ORDER BY 1 ASC;
How many active sessions in last 10 minutes. Zabbix 5.0
SELECT COUNT(*)
FROM sessions
WHERE sessions.status=0
AND sessions.lastaccess > UNIX_TIMESTAMP(NOW() - INTERVAL 10 MINUTE);
If there is any trigger which contains a lot of problems
SELECT COUNT(*),source,object,objectid FROM problem GROUP BY 2,3,4 ORDER BY 1 DESC LIMIT 10;
Print URLs for triggers dominating in escalation
SELECT COUNT(*),CONCAT('triggers.php?form=update&triggerid=', triggerid) AS 'URL' FROM escalations GROUP BY 2 ORDER BY 1 DESC LIMIT 10;
Biggest text metrics
SELECT LENGTH(history_text.value) AS length,
hosts.host,
items.key_,
CONCAT('history.php?itemids%5B0%5D=', history_text.itemid,'&action=showlatest') AS 'URL'
FROM history_text
JOIN items ON items.itemid=history_text.itemid
JOIN hosts ON hosts.hostid=items.hostid
WHERE LENGTH(history_text.value) > 50000 LIMIT 20\G
Increase the height of widget. Zabbix 6.4
UPDATE widget SET height=128 WHERE dashboard_pageid IN (
SELECT dashboard_pageid FROM dashboard_page WHERE dashboardid=327
);
Processes MySQL
SELECT LEFT(info, 140), LENGTH(info), time, state FROM INFORMATION_SCHEMA.PROCESSLIST where time>0 and command<>"Sleep" ORDER BY time;
Heaviest LLD rules. Most of items. amount of items per LLD. Zabbix 6.0
SELECT COUNT(discovery.key_),
hosts.host,
discovery.key_,
discovery.delay
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN item_discovery ON (item_discovery.itemid=items.itemid)
JOIN items discovery ON (discovery.itemid=item_discovery.parent_itemid)
WHERE items.status=0
AND items.flags=4
GROUP BY discovery.key_,
discovery.delay,
hosts.host
ORDER BY COUNT(discovery.key_) DESC
LIMIT 10;
Backtrack origin of host prototypes. Zabbix 6.0, 6.2, 6.4
SELECT hosts.name,
hosts2.host
FROM hosts
JOIN host_discovery ON (hosts.hostid=host_discovery.hostid)
LEFT JOIN hosts parent ON (parent.hostid=host_discovery.parent_hostid)
JOIN host_discovery host_discovery2 ON (parent.hostid=host_discovery2.hostid)
JOIN items items2 ON (items2.itemid=host_discovery2.parent_itemid)
JOIN hosts hosts2 ON (hosts2.hostid=items2.hostid)
WHERE hosts.flags=4;
Prototype items at template level which use positional macro. Zabbix 4.0
SELECT items.name, hosts.host AS template, CONCAT('disc_prototypes.php?form=update&parent_discoveryid=',item_discovery.parent_itemid,'&itemid=',items.itemid) AS URL FROM hosts, items, item_discovery
WHERE hosts.hostid=items.hostid AND items.itemid=item_discovery.itemid
AND hosts.status=3
AND items.flags IN (2)
AND items.name LIKE '%$%' AND items.name NOT LIKE '%{$%';
Most recent data collector items. Zabbix 4.2, 4.4, 5.0
SELECT proxy.host AS proxy, hosts.host, items.itemid, items.key_
FROM items, hosts
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.hostid=items.hostid
ORDER BY items.itemid DESC
LIMIT 10;
Most recent triggers
SELECT DISTINCT triggers.triggerid, triggers.description, hosts.host, proxy.host AS proxy
FROM triggers
JOIN functions ON (functions.triggerid=triggers.triggerid)
JOIN items ON (items.itemid=functions.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE triggers.flags IN (0,4)
ORDER BY triggers.triggerid DESC
LIMIT 10;
ZBX hosts unreachable. Zabbix 6.0
SELECT
proxy.host AS proxy,
hosts.host,
interface.error,
CONCAT('zabbix.php?action=host.edit&hostid=', hosts.hostid) AS goTo
FROM hosts
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
JOIN interface ON (interface.hostid=hosts.hostid)
WHERE LENGTH(interface.error) > 0
AND interface.type=1;
SNMP hosts unreachable. Zabbix 6.0
SELECT
proxy.host AS proxy,
hosts.host,
interface.error,
CONCAT('zabbix.php?action=host.edit&hostid=', hosts.hostid) AS goTo
FROM hosts
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
JOIN interface ON (interface.hostid=hosts.hostid)
WHERE LENGTH(interface.error) > 0
AND interface.type=2;
Non-working external scripts. Zabbix 5.4, 6.0, 6.2
SELECT
hosts.host,
items.key_,
item_rtdata.error
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN item_rtdata ON (items.itemid=item_rtdata.itemid)
WHERE hosts.status=0
AND items.status=0
AND items.type=10
AND LENGTH(item_rtdata.error) > 0;
Check native HA heartbeat. Zabbix 6.0, 6.2
SELECT * FROM ha_node;
Count of events. All version of Zabbix
SELECT COUNT(*),source FROM events GROUP BY source;
Show items by proxy. Zabbix 3.0, 3.2, 3.4, 4.0, 4.2, 4.4, 5.0, 5.2, 5.4, 6.0, 6.2
SELECT
COUNT(*) AS count,
proxy.host AS proxy,
items.type
FROM items
JOIN hosts ON (items.hostid=hosts.hostid)
JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.status=0
AND items.status=0
AND proxy.status IN (5,6)
GROUP BY 2,3
ORDER BY 2,3;
Which internal event is spamming database. Zabbix 4.0, 5.0
SELECT
CASE object
WHEN 0 THEN 'Trigger expression'
WHEN 4 THEN 'Data collection'
WHEN 5 THEN 'LLD rule'
END AS object,
objectid,
value,
name,
COUNT(*)
FROM events
WHERE source=3
AND LENGTH(name) > 0
AND clock > UNIX_TIMESTAMP(NOW()-INTERVAL 10 DAY)
GROUP BY 1,2,3,4 ORDER BY 5 DESC LIMIT 20;
Nested objects and macro overrides. Zabbix 5.0, 6.0
SELECT
hm1.macro AS Macro,
child.host AS owner,
hm2.value AS defaultValue,
parent.host AS OverrideInstalled,
hm1.value AS OverrideValue
FROM hosts parent, hosts child, hosts_templates rel, hostmacro hm1, hostmacro hm2
WHERE parent.hostid=rel.hostid
AND child.hostid=rel.templateid
AND hm1.hostid=parent.hostid
AND hm2.hostid=child.hostid
AND hm1.macro=hm2.macro
AND parent.flags=0
AND child.flags=0
AND hm1.value <> hm2.value;
Difference between template macro and host macro. Zabbix 5.0, 6.0
SELECT
b.host,
hm2.macro,
hm2.value AS templateValue,
h.host,
hm1.macro,
hm1.value AS hostValue
FROM hosts_templates, hosts h, hosts b, hostmacro hm1, hostmacro hm2, interface
WHERE hosts_templates.hostid=h.hostid
AND hosts_templates.templateid=b.hostid
AND interface.hostid=h.hostid
AND hm1.hostid=h.hostid
AND hm2.hostid=hosts_templates.templateid
AND hm1.macro=hm2.macro
AND hm1.value <> hm2.value;
Devices and their status. Zabbix 3.0, 3.2, 3.4, 4.0, 4.2, 4.4, 5.0, 5.2
SELECT
proxy.host AS proxy,
hosts.host,
interface.ip,
interface.dns,
interface.useip,
CASE hosts.available
WHEN 0 THEN 'unknown'
WHEN 1 THEN 'available'
WHEN 2 THEN 'down'
END AS "status",
CASE interface.type
WHEN 1 THEN 'ZBX'
WHEN 2 THEN 'SNMP'
WHEN 3 THEN 'IPMI'
WHEN 4 THEN 'JMX'
END AS "type", hosts.error
FROM hosts
JOIN interface ON interface.hostid=hosts.hostid
LEFT JOIN hosts proxy ON hosts.proxy_hostid=proxy.hostid
WHERE hosts.status=0
AND interface.main=1;
Items in use. Zabbix 3.0, 3.2, 3.4, 4.0, 4.2, 4.4, 5.0, 5.2, 5.4, 6.0, 6.2
SELECT
CASE items.type
WHEN 0 THEN 'Zabbix agent'
WHEN 1 THEN 'SNMPv1 agent'
WHEN 2 THEN 'Zabbix trapper'
WHEN 3 THEN 'Simple check'
WHEN 4 THEN 'SNMPv2 agent'
WHEN 5 THEN 'Zabbix internal'
WHEN 6 THEN 'SNMPv3 agent'
WHEN 7 THEN 'Zabbix agent (active) check'
WHEN 8 THEN 'Aggregate'
WHEN 9 THEN 'HTTP test (web monitoring scenario step)'
WHEN 10 THEN 'External check'
WHEN 11 THEN 'Database monitor'
WHEN 12 THEN 'IPMI agent'
WHEN 13 THEN 'SSH agent'
WHEN 14 THEN 'TELNET agent'
WHEN 15 THEN 'Calculated'
WHEN 16 THEN 'JMX agent'
WHEN 17 THEN 'SNMP trap'
WHEN 18 THEN 'Dependent item'
WHEN 19 THEN 'HTTP agent'
WHEN 20 THEN 'SNMP agent'
WHEN 21 THEN 'Script item'
END AS type,
COUNT(*)
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE hosts.status=0
AND items.status=0
GROUP BY items.type
ORDER BY COUNT(*) DESC;
Enabled items behind proxy on enabled hosts
SELECT
proxy.host AS proxy,
CASE items.type
WHEN 0 THEN 'Zabbix agent'
WHEN 1 THEN 'SNMPv1 agent'
WHEN 2 THEN 'Zabbix trapper'
WHEN 3 THEN 'Simple check'
WHEN 4 THEN 'SNMPv2 agent'
WHEN 5 THEN 'Zabbix internal'
WHEN 6 THEN 'SNMPv3 agent'
WHEN 7 THEN 'Zabbix agent (active) check'
WHEN 8 THEN 'Aggregate'
WHEN 9 THEN 'HTTP test (web monitoring scenario step)'
WHEN 10 THEN 'External check'
WHEN 11 THEN 'Database monitor'
WHEN 12 THEN 'IPMI agent'
WHEN 13 THEN 'SSH agent'
WHEN 14 THEN 'TELNET agent'
WHEN 15 THEN 'Calculated'
WHEN 16 THEN 'JMX agent'
WHEN 17 THEN 'SNMP trap'
WHEN 18 THEN 'Dependent item'
WHEN 19 THEN 'HTTP agent'
WHEN 20 THEN 'SNMP agent'
WHEN 21 THEN 'Script item'
END AS type,
COUNT(*)
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.status=0
AND items.status=0
GROUP BY proxy.host, items.type
ORDER BY 1,2,3 DESC;
Exceptions in update interval. Zabbix 5.0
SELECT
h1.host AS exceptionInstalled,
i1.name,
i1.key_,
i1.delay,
h2.host AS differesFromTemplate,
i2.name,
i2.key_,
i2.delay AS delay
FROM items i1
JOIN items i2 ON (i2.itemid=i1.templateid)
JOIN hosts h1 ON (h1.hostid=i1.hostid)
JOIN hosts h2 ON (h2.hostid=i2.hostid)
WHERE i1.delay <> i2.delay;
All events closed by global correlation rule. Zabbix 4.0, 5.0, 6.0
SELECT
repercussion.clock,
repercussion.name,
rootCause.clock,
rootCause.name AS name
FROM events repercussion
JOIN event_recovery ON (event_recovery.eventid=repercussion.eventid)
JOIN events rootCause ON (rootCause.eventid=event_recovery.c_eventid)
WHERE event_recovery.c_eventid IS NOT NULL
ORDER BY repercussion.clock ASC;
Delete events closed by global correlation:
-- Delete events that were closed by a global correlation rule and are older
-- than the given clock cutoff.
-- fixed: MySQL forbids deleting from 'events' while the IN-subquery also
-- reads 'events' (error 1093); wrapping the subquery in a derived table (x2)
-- materializes the id list first, the same pattern used elsewhere in this
-- file. The ORDER BY inside the IN subquery had no effect and was dropped.
DELETE FROM events WHERE eventid IN (
SELECT x2.eventid FROM (
SELECT repercussion.eventid
FROM events repercussion
JOIN event_recovery ON (event_recovery.eventid=repercussion.eventid)
JOIN events rootCause ON (rootCause.eventid=event_recovery.c_eventid)
WHERE event_recovery.c_eventid IS NOT NULL
) x2
) AND clock < 1682475866;
Hosts with a single template
SELECT proxy.host AS proxy,
hosts.host,
GROUP_CONCAT(template.host SEPARATOR ', ') AS templates,
COUNT(*)
FROM hosts
JOIN hosts_templates ON (hosts_templates.hostid=hosts.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
LEFT JOIN hosts template ON (hosts_templates.templateid=template.hostid)
WHERE hosts.status IN (0,1) AND hosts.flags=0 GROUP BY 1,2 HAVING COUNT(*)=1 ORDER BY 1,3,2;
Print host objects which own a lonely template object
SELECT proxy.host AS proxy,
hosts.host,
template.host AS template,
COUNT(*)
FROM hosts
JOIN hosts_templates ON (hosts_templates.hostid=hosts.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
LEFT JOIN hosts template ON (hosts_templates.templateid=template.hostid)
WHERE hosts.status IN (0,1) AND hosts.flags=0
AND template.host='write name of template here'
GROUP BY 1,2,3
HAVING COUNT(*)=1
ORDER BY 1,2;
Audit log spamming the database
SELECT action, resourcetype, COUNT(*) FROM auditlog WHERE clock >= UNIX_TIMESTAMP(NOW() - INTERVAL 2 HOUR) GROUP BY 1,2 ORDER BY COUNT(*) ASC;
SELECT action, resourcetype, COUNT(*) FROM auditlog WHERE clock >= UNIX_TIMESTAMP(NOW() - INTERVAL 2 HOUR) GROUP BY 1,2 ORDER BY COUNT(*) ASC;
List all host and template level tags
SELECT hosts.status, host_tag.tag, host_tag.value, COUNT(*)
FROM hosts, host_tag
WHERE hosts.hostid=host_tag.hostid
AND hosts.status IN (0,3)
AND hosts.flags=0
GROUP BY 1,2,3
ORDER BY 4 DESC LIMIT 30;
Statistics about media types and delivery. Zabbix 4.0
SELECT actions.name AS actionName, users.alias AS sendTo, media_type.description AS mediaName,
CASE alerts.status
WHEN 0 THEN 'NOT_SEN'
WHEN 1 THEN 'SENT'
WHEN 2 THEN 'FAILED'
WHEN 3 THEN 'NEW'
END AS "alertStatus",
COUNT(*) AS count
FROM alerts
JOIN actions ON (actions.actionid=alerts.actionid)
JOIN media_type ON (media_type.mediatypeid=alerts.mediatypeid)
LEFT JOIN users ON (users.userid=alerts.userid)
WHERE alerts.alerttype=0
GROUP BY 1,2,3,4
ORDER BY 5 ASC;
All active data collector items on enabled hosts. Zabbix 3.0, 4.0, 5.0, 6.0
SELECT hosts.host, items.name, items.type, items.key_, items.delay
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE hosts.status=0
AND items.status=0
ORDER BY 1,2,3,4,5;
All recent metrics which are using units 'B'
SELECT hosts.host AS host, items.key_ AS itemKey, items.name AS name, (history_uint.value/1024/1024/1024) AS GB
FROM history_uint
JOIN items ON (items.itemid=history_uint.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN (SELECT DISTINCT itemid AS id, MAX(history_uint.clock) AS clock
FROM history_uint
WHERE clock > UNIX_TIMESTAMP(NOW()-INTERVAL 65 MINUTE)
GROUP BY 1) t2 ON t2.id=history_uint.itemid
WHERE history_uint.clock=t2.clock
AND items.units='B'
ORDER BY 1,2;
All recent metrics which are using units '%'
SELECT hosts.host AS host, items.key_ AS itemKey, items.name AS name, history.value AS percentage
FROM history
JOIN items ON (items.itemid=history.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN (SELECT DISTINCT itemid AS id, MAX(history.clock) AS clock
FROM history
WHERE clock > UNIX_TIMESTAMP(NOW()-INTERVAL 65 MINUTE)
GROUP BY 1) t2 ON t2.id=history.itemid
WHERE history.clock=t2.clock
AND items.units='%'
ORDER BY 1,2;
PostgreSQL. All recent metrics which are using units 'B'
SELECT hosts.host AS host, items.key_ AS itemKey, items.name AS name, (history_uint.value/1024/1024/1024)::NUMERIC(10,2) AS GB
FROM history_uint
JOIN items ON (items.itemid=history_uint.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN (SELECT DISTINCT itemid AS id, MAX(history_uint.clock) AS clock
FROM history_uint
WHERE clock > EXTRACT(epoch FROM NOW()-INTERVAL '65 MINUTE')
GROUP BY 1) t2 ON t2.id=history_uint.itemid
WHERE history_uint.clock=t2.clock
AND items.units='B'
ORDER BY 1,2;
PostgreSQL. Delete sessions. Zabbix 6.0
DELETE FROM sessions WHERE sessionid IN (SELECT sessionid FROM sessions WHERE lastaccess < EXTRACT(EPOCH FROM (NOW() - INTERVAL '1 DAY')));
MySQL. Delete sessions older than 1d. Zabbix 5.0, 6.0
DELETE FROM sessions WHERE lastaccess < UNIX_TIMESTAMP(NOW()-INTERVAL 24 HOUR);
Which host/item is filling the history_log table the most
-- Which host/item pairs wrote the most rows into history_log in the last day.
SELECT hosts.host,
       items.key_,
       COUNT(*)
FROM history_log
INNER JOIN items ON items.itemid=history_log.itemid
INNER JOIN hosts ON hosts.hostid=items.hostid
WHERE history_log.clock > UNIX_TIMESTAMP(NOW() - INTERVAL 24 HOUR)
GROUP BY hosts.host, items.key_
ORDER BY COUNT(*) ASC;
Check if different interfaces (ZBX, SNMP) used in host level
SELECT DISTINCT hosts.host FROM interface first
JOIN interface second ON (first.hostid=second.hostid)
JOIN hosts ON (hosts.hostid=first.hostid)
WHERE first.type <> second.type;
List unused AND secondary interfaces. Zabbix 6.0
SELECT hosts.host,
CASE interface.type
WHEN 0 THEN 'interface'
WHEN 1 THEN 'ZBX'
WHEN 2 THEN 'SNMP'
WHEN 3 THEN 'IPMI'
WHEN 4 THEN 'JMX'
END AS type,
interface.ip,interface.dns,interface.port
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
WHERE main=0 AND interfaceid NOT IN (SELECT DISTINCT interfaceid FROM items WHERE interfaceid IS NOT NULL);
Delete unused secondary interfaces. Zabbix 6.0
DELETE FROM interface WHERE main=0 AND interfaceid NOT IN (SELECT DISTINCT interfaceid FROM items WHERE interfaceid IS NOT NULL);
Check if all ZBX hosts are NOT using more than one interface
-- Agent (ZBX, type=1) hosts that define more than one agent interface.
-- DISTINCT is unnecessary here: GROUP BY on host already yields one row per
-- host, so it is omitted in this rewrite (result set is identical).
SELECT hosts.host,
       COUNT(*) AS interfaces
FROM interface
INNER JOIN hosts ON hosts.hostid=interface.hostid
WHERE interface.type=1
AND hosts.status=0
AND hosts.flags IN (0,4)
GROUP BY hosts.host
HAVING COUNT(*) > 1;
Check if all SNMP hosts are NOT using more than one interface
SELECT DISTINCT hosts.host, COUNT(*) AS interfaces FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
WHERE interface.type=2 AND hosts.status=0 AND hosts.flags IN (0,4)
GROUP BY 1
HAVING COUNT(*)>1;
Ratio between working and non-working JMX items. Zabbix 5.0
SELECT
proxy.host AS proxy,
items.delay,
CASE item_rtdata.state
WHEN 0 THEN 'normal'
WHEN 1 THEN 'unsupported'
END AS state,
COUNT(*)
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN item_rtdata ON (item_rtdata.itemid=items.itemid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE items.type=16 AND items.status=0 AND hosts.status=0
GROUP BY 1,2,3
ORDER BY 1,4,3,2 ASC;
PostgreSQL. All recent metrics which are using units '%'
SELECT hosts.host AS host, items.key_ AS itemKey, items.name AS name, history.value::NUMERIC(10,2) AS percentage
FROM history
JOIN items ON (items.itemid=history.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN (SELECT DISTINCT itemid AS id, MAX(history.clock) AS clock
FROM history
WHERE clock > EXTRACT(epoch FROM NOW()-INTERVAL '65 MINUTE')
GROUP BY 1) t2 ON t2.id=history.itemid
WHERE history.clock=t2.clock
AND items.units='%'
ORDER BY 1,2;
Determine which items report unsupported state:
SELECT COUNT(items.key_), hosts.host, items.key_, item_rtdata.error
FROM events
JOIN items ON (items.itemid=events.objectid)
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN item_rtdata ON (item_rtdata.itemid=items.itemid)
WHERE source=3
AND object=4
AND items.status=0
AND items.flags IN (0,1,4)
AND LENGTH(item_rtdata.error) > 0
GROUP BY hosts.host, items.key_, item_rtdata.error
ORDER BY COUNT(items.key_);
List all function names together with arguments
SELECT functions.name, functions.parameter, COUNT(*)
FROM functions
JOIN items ON (items.itemid=functions.itemid)
JOIN hosts ON (items.hostid=hosts.hostid)
JOIN triggers ON (triggers.triggerid=functions.triggerid)
WHERE hosts.status=0
AND items.status=0
AND triggers.status=0
GROUP BY 1,2
ORDER BY 1;
Update interval of owner in case LLD rule is dependent item. Zabbix 4.0, 4.2, 4.4, 5.0, 5.2, 5.4, 6.0, 6.2
SELECT master_itemid.key_, master_itemid.delay, COUNT(*)
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
JOIN items master_itemid ON (master_itemid.itemid=items.master_itemid)
WHERE items.flags=1
AND hosts.status=0
AND items.status=0
AND master_itemid.status=0
AND items.type=18
GROUP BY 1,2 ORDER BY 3 DESC;
Frequency of LLD rule for enabled hosts and enabled items discoveries for only monitored hosts. Zabbix 4.0 => 6.2
SELECT type,delay,COUNT(*) FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE items.flags=1 AND hosts.status=0 AND items.status=0
GROUP BY 1,2 ORDER BY 1,2;
Host inventory
SELECT host_inventory.macaddress_a,GROUP_CONCAT(hosts.host) FROM host_inventory
JOIN hosts ON (hosts.hostid=host_inventory.hostid)
WHERE hosts.status=0 AND host_inventory.macaddress_a LIKE '%enterprises%'
GROUP BY host_inventory.macaddress_a;
Remove metrics where there are no item definition anymore
-- Orphan-row cleanup: delete history/trend rows whose owning item definition
-- no longer exists. Can be very heavy on large tables; run off-peak.
-- Disable binary logging for this session so the deletes are not replicated
-- to the other servers in the pool.
SET SESSION SQL_LOG_BIN=0;
-- NOTE(review): NOT IN is safe only while items.itemid can never be NULL
-- (it is the items primary key in the Zabbix schema) -- confirm before
-- reusing this pattern against a nullable column.
DELETE FROM trends WHERE itemid NOT IN (SELECT itemid FROM items);
DELETE FROM trends_uint WHERE itemid NOT IN (SELECT itemid FROM items);
DELETE FROM history_text WHERE itemid NOT IN (SELECT itemid FROM items);
DELETE FROM history_str WHERE itemid NOT IN (SELECT itemid FROM items);
DELETE FROM history_log WHERE itemid NOT IN (SELECT itemid FROM items);
DELETE FROM history WHERE itemid NOT IN (SELECT itemid FROM items);
DELETE FROM history_uint WHERE itemid NOT IN (SELECT itemid FROM items);
Drop table partition in MySQL
ALTER TABLE history_uint DROP PARTITION p2018_06_06;
Don't replicate transactions to the other servers in pool. Don't write to binlog
SET SESSION SQL_LOG_BIN=0;
Few MySQL key settings
SELECT
@@hostname,
@@version,
@@datadir,
@@innodb_file_per_table,
@@innodb_buffer_pool_size,
@@innodb_buffer_pool_instances,
@@innodb_flush_method,
@@innodb_log_file_size,
@@max_connections,
@@open_files_limit,
@@innodb_flush_log_at_trx_commit,
@@log_bin\G
Host behind proxy 'z62prx' where interface is not healthy. Host is down. Zabbix 6.2
SELECT
hosts.host,
interface.error
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE interface.available=2
AND hosts.status=0
AND proxy.host='z62prx';
Host/interface errors per all hosts behind proxy. Zabbix 6.2
SELECT
proxy.host,
hosts.host,
interface.error
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE interface.available=2
AND proxy.host IS NOT NULL
ORDER BY 1,2,3;
Enabled and disabled LLD items, its key. Zabbix 5.0, 5.2, 5.4, 6.0
SELECT
items.type,
items.key_,
items.delay,
items.status,
COUNT(*) AS count
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND items.flags=1
AND hosts.status=0
GROUP BY 1,2,3,4 ORDER BY 1,2,3,4;
Items which are not discovered anymore and will be deleted
SELECT
hosts.host,
items.key_
FROM items
JOIN item_discovery ON (item_discovery.itemid=items.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE item_discovery.ts_delete > 0;
Delete items which are not discovered anymore
-- Delete items that LLD no longer discovers (ts_delete > 0 marks them as
-- scheduled for removal).
DELETE FROM items WHERE itemid IN (
-- The derived table (x2) is deliberate: it materializes the id list first,
-- because MySQL refuses to delete from 'items' while the IN-subquery also
-- reads 'items' directly (error 1093).
SELECT x2.field FROM (
SELECT items.itemid AS field
FROM items, item_discovery, hosts
WHERE item_discovery.itemid=items.itemid
AND hosts.hostid=items.hostid
AND item_discovery.ts_delete > 0
) x2
);
Count of item is not discovered anymore and will be deleted
SELECT
hosts.host,
COUNT(*) AS count
FROM items
JOIN item_discovery ON (item_discovery.itemid=items.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE item_discovery.ts_delete > 0
GROUP BY 1 ORDER BY 2 DESC LIMIT 30;
Which dashboard has been using host group. Zabbix 5.0,5.2
SELECT
DISTINCT dashboard.name,
hstgrp.name
FROM widget_field
JOIN widget ON (widget.widgetid=widget_field.widgetid)
JOIN dashboard ON (dashboard.dashboardid=widget.dashboardid)
JOIN hstgrp ON (hstgrp.groupid=widget_field.value_groupid)
WHERE widget_field.value_groupid IN (2);
Which dashboard has been using host group id:2 for the input. Zabbix 5.4, 6.0
SELECT
DISTINCT dashboard.name,
hstgrp.name
FROM widget_field
JOIN widget ON (widget.widgetid=widget_field.widgetid)
JOIN dashboard_page ON (dashboard_page.dashboard_pageid=widget.dashboard_pageid)
JOIN dashboard ON (dashboard.dashboardid=dashboard_page.dashboardid)
JOIN hstgrp ON (hstgrp.groupid=widget_field.value_groupid)
WHERE widget_field.value_groupid IN (2);
Zabbix agent auto-registration probes. Zabbix 5.0, 5.2, 5.4, 6.0
SELECT
hosts.host AS proxy,
CASE autoreg_host.flags
WHEN 0 THEN 'IP address, do not update host interface'
WHEN 1 THEN 'IP address, update default host interface'
WHEN 2 THEN 'DNS name, update default host interface'
END AS "connect using",
CASE autoreg_host.tls_accepted
WHEN 1 THEN 'Unencrypted'
WHEN 2 THEN 'TLS with PSK'
END AS "Encryption",
COUNT(*) AS "amount of hosts"
FROM autoreg_host
JOIN hosts ON (hosts.hostid=autoreg_host.proxy_hostid)
GROUP BY 1,2,3 ORDER BY 1,2,3;
Items without a template. Zabbix 5.0, 6.0
SELECT
hosts.host,
items.key_
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE hosts.status=0
AND hosts.flags=0
AND items.status=0
AND items.templateid IS NULL
AND items.flags=0;
Hosts with multiple interfaces. Zabbix 5.0, 6.0
SELECT
hosts.host
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
WHERE hosts.flags=0
GROUP BY hosts.host
HAVING COUNT(interface.interfaceid) > 1;
Amount of items not discovered anymore
SELECT
hosts.host,
COUNT(*) AS count
FROM items
JOIN item_discovery ON (item_discovery.itemid=items.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE item_discovery.ts_delete > 0
GROUP BY 1 ORDER BY 2 ASC;
Linked template objects PostgreSQL. Zabbix 5.0, 5.2, 5.4, 6.0
SELECT
proxy.host AS proxy,
hosts.host,
ARRAY_TO_STRING(ARRAY_AGG(template.host),', ') AS templates
FROM hosts
JOIN hosts_templates ON (hosts_templates.hostid=hosts.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
LEFT JOIN hosts template ON (hosts_templates.templateid=template.hostid)
WHERE hosts.status IN (0,1)
AND hosts.flags=0
GROUP BY 1,2 ORDER BY 1,3,2;
Linked templates objects MySQL. Zabbix 5.0, 5.2, 5.4, 6.0
SELECT
proxy.host AS proxy,
hosts.host,
GROUP_CONCAT(template.host SEPARATOR ', ') AS templates
FROM hosts
JOIN hosts_templates ON (hosts_templates.hostid=hosts.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
LEFT JOIN hosts template ON (hosts_templates.templateid=template.hostid)
WHERE hosts.status IN (0,1)
AND hosts.flags=0
GROUP BY 1,2 ORDER BY 1,3,2;
Which action is responsible. Zabbix 5.0
SELECT
FROM_UNIXTIME(events.clock) AS 'time',
CASE events.severity
WHEN 0 THEN 'NOT_CLASSIFIED'
WHEN 1 THEN 'INFORMATION'
WHEN 2 THEN 'WARNING'
WHEN 3 THEN 'AVERAGE'
WHEN 4 THEN 'HIGH'
WHEN 5 THEN 'DISASTER'
END AS severity,
CASE events.acknowledged
WHEN 0 THEN 'NO'
WHEN 1 THEN 'YES'
END AS acknowledged,
CASE events.value
WHEN 0 THEN 'OK'
WHEN 1 THEN 'PROBLEM '
END AS trigger_status,
events.name AS 'event',
actions.name AS 'action'
FROM events
JOIN alerts ON (alerts.eventid=events.eventid)
JOIN actions ON (actions.actionid=alerts.actionid)
WHERE events.source=0
AND events.object=0
ORDER BY events.clock DESC
LIMIT 10;
Non-working LLD rules. Zabbix 5.0, 6.0
SELECT
hosts.name AS hostName,
items.key_ AS itemKey,
problem.name AS LLDerror,
CONCAT('host_discovery.php?form=update&itemid=', problem.objectid) AS goTo
FROM problem
JOIN items ON (items.itemid=problem.objectid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE problem.source > 0
AND problem.object=5;
Non-working data collector items. Zabbix 5.0, 6.0
SELECT
hosts.name AS hostName,
items.key_ AS itemKey,
problem.name AS DataCollectorError,
CONCAT('items.php?form=update&itemid=', problem.objectid) AS goTo
FROM problem
JOIN items ON (items.itemid=problem.objectid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE problem.source > 0
AND problem.object=4;
Trigger evaluation problems. Zabbix 5.0, 6.0
SELECT
DISTINCT CONCAT('triggers.php?form=update&triggerid=', problem.objectid) AS goTo,
hosts.name AS hostName,
triggers.description AS triggerTitle,
problem.name AS TriggerEvaluationError
FROM problem
JOIN triggers ON (triggers.triggerid=problem.objectid)
JOIN functions ON (functions.triggerid=triggers.triggerid)
JOIN items ON (items.itemid=functions.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE problem.source > 0
AND problem.object=0;
Lonely item (no template behind it)
SELECT
hosts.host,
items.key_,
CONCAT('items.php?form=update&itemid=', items.itemid) AS goTo
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE hosts.status=0
AND hosts.flags=0
AND items.status=0
AND items.templateid IS NULL
AND items.flags=0;
User sessions. Zabbix 6.0
SELECT
COUNT(*),
users.username
FROM sessions
JOIN users ON (users.userid=sessions.userid)
GROUP BY 2 ORDER BY 1 ASC;
User sessions. Zabbix 5.0
SELECT
COUNT(*),
users.alias
FROM sessions
JOIN users ON (users.userid=sessions.userid)
GROUP BY 2 ORDER BY 1 ASC;
Open problems. Zabbix 5.0, 6.0
SELECT
COUNT(*) AS count,
CASE
WHEN source=0 THEN 'surface'
WHEN source > 0 THEN 'internal'
END AS level,
CASE
WHEN source=0 AND object=0 THEN 'trigger in a problem state'
WHEN source=3 AND object=0 THEN 'cannot evaluate trigger expression'
WHEN source=3 AND object=4 THEN 'data collection not working'
WHEN source=3 AND object=5 THEN 'low level discovery not perfect'
END AS problemCategory
FROM problem GROUP BY 2,3
ORDER BY 2 DESC;
Item update frequency. Zabbix 5.0, 6.0
SELECT
h2.host AS Source,
i2.name AS itemName,
i2.key_ AS itemKey,
i2.delay AS OriginalUpdateFrequency,
h1.host AS exceptionInstalledOn,
i1.delay AS FrequencyChild,
CASE
WHEN i1.flags=1 THEN 'LLD rule'
WHEN i1.flags IN (0,4) THEN 'data collection'
END AS itemCategory,
CASE
WHEN i1.flags=1 THEN CONCAT('host_discovery.php?form=update&context=host&itemid=', i1.itemid)
WHEN i1.flags IN (0,4) THEN CONCAT('items.php?form=update&context=host&hostid=', h1.hostid, '&itemid=',i1.itemid)
END AS goTo
FROM items i1
JOIN items i2 ON (i2.itemid=i1.templateid)
JOIN hosts h1 ON (h1.hostid=i1.hostid)
JOIN hosts h2 ON (h2.hostid=i2.hostid)
WHERE i1.delay <> i2.delay;
Unsupported items and LLD rules. Zabbix 5.0
SELECT
DISTINCT i.key_,COALESCE(ir.error,'') AS error
FROM hosts h, items i
LEFT JOIN item_rtdata ir ON i.itemid=ir.itemid
WHERE i.type<>9
AND i.flags IN (0,1,4)
AND h.hostid=i.hostid
AND h.status<>3
AND i.status=0
AND ir.state=1
LIMIT 5001;
Which dashboard widgets are using wildcards. Zabbix 6.0
-- Dashboard widgets whose string field contains a '*' wildcard, with the
-- widget and dashboard names for locating them in the UI.
SELECT
value_str AS pattern,
widget.name AS widgetName,
dashboard.name AS dashboardName
FROM widget_field, widget, dashboard_page, dashboard
WHERE widget_field.value_str like '%*%'
AND widget.widgetid=widget_field.widgetid
AND dashboard_page.dashboard_pageid=widget.dashboard_pageid
-- fixed: dashboards link to pages via dashboard_page.dashboardid; the
-- original compared dashboardid to dashboard_pageid, joining unrelated rows
AND dashboard.dashboardid=dashboard_page.dashboardid
ORDER BY 3,2,1;
Remove user refresh overrides in user level. Zabbix 6.0
DELETE FROM profiles WHERE idx='web.dashboard.widget.rf_rate';
Print all online users with rights group ID: 13. Zabbix 6.0
SELECT
users.username,
CASE
WHEN permission=0 THEN 'DENY'
WHEN permission=2 THEN 'READ_ONLY'
WHEN permission=3 THEN 'READ_WRITE'
END AS permission,
hstgrp.name AS name
FROM users, users_groups, sessions, usrgrp, rights, hstgrp
WHERE sessions.status=0
AND rights.groupid=13
AND users.userid=users_groups.userid
AND users.userid=sessions.userid
AND users_groups.usrgrpid=usrgrp.usrgrpid
AND users_groups.userid=users.userid
AND usrgrp.usrgrpid=rights.groupid
AND rights.id=hstgrp.groupid;
Clear error message for disabled items. Zabbix 4.4, 5.0, 5.2, 5.4, 6.0, 6.2
UPDATE item_rtdata
SET error=''
WHERE state=1
AND itemid IN (
SELECT items.itemid
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status=0
AND items.status=1
);
Set state as supported for disabled items. Zabbix 4.4, 5.0, 5.2, 5.4, 6.0, 6.2
UPDATE item_rtdata
SET state=0
WHERE state=1
AND itemid IN (
SELECT items.itemid
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status=0
AND items.status=1
);
For hosts which are disable, set all items as supported
UPDATE item_rtdata
SET state=0
WHERE state=1
AND itemid IN (
SELECT items.itemid
FROM items, hosts
WHERE hosts.hostid=items.hostid
AND hosts.status=1
AND items.status=0
);
Most frequent integer numbers
SELECT
COUNT(*),
CONCAT('history.php?action=showlatest&itemids%5B0%5D=', itemid) AS latestData,
CONCAT('items.php?form=update&itemid=', itemid) AS conf
FROM (
SELECT itemid FROM history_uint LIMIT 99999
) b GROUP BY 2,3 ORDER BY 1 DESC LIMIT 10;
Most frequent decimal numbers
SELECT
COUNT(*),
CONCAT('history.php?action=showlatest&itemids%5B0%5D=', itemid) AS latestData,
CONCAT('items.php?form=update&itemid=', itemid) AS conf
FROM (
SELECT itemid FROM history LIMIT 99999
) b GROUP BY 2,3 ORDER BY 1 DESC LIMIT 10;
How monitoring has been used
SELECT proxy.host AS proxy, CASE items.type WHEN 0 THEN 'Zabbix agent' WHEN 1 THEN 'SNMPv1 agent' WHEN 2 THEN 'Zabbix trapper' WHEN 3 THEN 'Simple check' WHEN 4 THEN 'SNMPv2 agent' WHEN 5 THEN 'Zabbix internal' WHEN 6 THEN 'SNMPv3 agent' WHEN 7 THEN 'Zabbix agent (active) check' WHEN 8 THEN 'Aggregate' WHEN 9 THEN 'HTTP test (web monitoring scenario step)' WHEN 10 THEN 'External check' WHEN 11 THEN 'Database monitor' WHEN 12 THEN 'IPMI agent' WHEN 13 THEN 'SSH agent' WHEN 14 THEN 'TELNET agent' WHEN 15 THEN 'Calculated' WHEN 16 THEN 'JMX agent' WHEN 17 THEN 'SNMP trap' WHEN 18 THEN 'Dependent item' WHEN 19 THEN 'HTTP agent' WHEN 20 THEN 'SNMP agent' WHEN 21 THEN 'Script item' END AS type, COUNT(*) FROM items JOIN hosts ON (hosts.hostid=items.hostid) LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid) WHERE hosts.status IN (0) AND items.status IN (0) GROUP BY 1,2 ORDER BY 1,2,3 DESC;
Show internal events for items which is working right now. Zabbix 5.0
SELECT events.name FROM events
WHERE source=3 AND object=4
AND objectid NOT IN (
SELECT items.itemid
FROM hosts, items, item_rtdata
WHERE items.hostid=hosts.hostid
AND items.itemid=item_rtdata.itemid
AND hosts.status=0
AND items.status=0
AND hosts.flags IN (0,4)
AND LENGTH(item_rtdata.error)=0
);
Mimic information of Zabbix. Unsupported, disabled, active items. Zabbix 6.0
SELECT COUNT(*), CASE hosts.status
WHEN 0 THEN 'Active'
WHEN 1 THEN 'Disabled'
END AS hostStatus,
CASE item_rtdata.state
WHEN 0 THEN 'normal'
WHEN 1 THEN 'unsupported'
END AS itemState,
CASE items.status
WHEN 0 THEN 'Active'
WHEN 1 THEN 'Disabled'
END AS itemStatus
FROM items
JOIN item_rtdata ON (item_rtdata.itemid=items.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE hosts.status IN (0,1) AND items.status IN (0,1) AND items.flags IN (0,4)
GROUP BY 2,3,4
ORDER BY 2,3;
Reset scan of network discovery. Zabbix 6.0
SELECT * FROM drules;
UPDATE drules SET nextcheck=0;
SELECT * FROM drules;
Mimic information of Zabbix. Extended. including LLD rules Zabbix 6.0
SELECT COUNT(*), CASE hosts.status
WHEN 0 THEN 'Active'
WHEN 1 THEN 'Disabled'
END AS hostStatus,
CASE item_rtdata.state
WHEN 0 THEN 'normal'
WHEN 1 THEN 'unsupported'
END AS itemState,
CASE items.status
WHEN 0 THEN 'Active'
WHEN 1 THEN 'Disabled'
END AS itemStatus,
CASE items.flags
WHEN 0 THEN 'normal item'
WHEN 1 THEN 'LLD rule'
WHEN 2 THEN 'Prototype'
WHEN 4 THEN 'auto created item'
END AS itemFlags
FROM items
JOIN item_rtdata ON (item_rtdata.itemid=items.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE hosts.status IN (0,1) AND items.status IN (0,1) AND items.flags IN (0,1,4)
GROUP BY 2,3,4,5
ORDER BY 2,3;
Delete internal events for items which are working right now. Zabbix 5.0
DELETE FROM events
WHERE source=3 AND object=4
AND objectid NOT IN (
SELECT items.itemid
FROM hosts, items, item_rtdata
WHERE items.hostid=hosts.hostid
AND items.itemid=item_rtdata.itemid
AND hosts.status=0
AND items.status=0
AND hosts.flags IN (0,4)
AND LENGTH(item_rtdata.error)=0
);
Print errors of active data collector items. Zabbix 4.4, 5.0, 5.2, 5.4, 6.0, 6.2
-- List the error message of every unsupported item (state=1) that sits on an
-- enabled host (hosts.status=0) and is itself enabled (items.status=0).
-- Implicit comma joins replaced with explicit ANSI joins; result set unchanged.
SELECT
hosts.host,
items.name,
item_rtdata.error AS error
FROM items
JOIN item_rtdata ON (item_rtdata.itemid=items.itemid)
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE item_rtdata.state=1
AND hosts.status=0
AND items.status=0;
Healthy/active/enabled trigger objects, together with healthy items and healthy/enabled hosts. Zabbix 5.0
SELECT DISTINCT triggers.triggerid, hosts.host
FROM triggers, functions, items, hosts, item_rtdata
WHERE triggers.triggerid=functions.triggerid
AND functions.itemid=items.itemid
AND hosts.hostid=items.hostid
AND item_rtdata.itemid=items.itemid
AND hosts.status=0
AND items.status=0
AND triggers.status=0
AND LENGTH(triggers.error)=0
AND LENGTH(item_rtdata.error)=0;
Select internal trigger events for triggers which were not working some time ago, but are healthy now. Zabbix 5.0
-- source=3: internal events; object=0: trigger objects (per the surrounding
-- queries: object=4 is items, object=5 is LLD rules).
-- The NOT IN subquery enumerates triggers that are currently healthy:
-- enabled host/item/trigger, empty trigger and item error fields.
-- NOTE(review): itemid/triggerid columns are primary keys, so the NOT IN
-- NULL trap does not apply here.
SELECT name FROM events
WHERE source=3 AND object=0
AND objectid NOT IN (
SELECT DISTINCT triggers.triggerid
FROM triggers, functions, items, hosts, item_rtdata
WHERE triggers.triggerid=functions.triggerid
AND functions.itemid=items.itemid
AND hosts.hostid=items.hostid
AND item_rtdata.itemid=items.itemid
AND hosts.status=0
AND hosts.flags IN (0,4)
AND items.status=0
AND triggers.status=0
AND LENGTH(triggers.error)=0
AND LENGTH(item_rtdata.error)=0
);
Delete INTERNAL trigger events for triggers which are healthy at this very second. Since a trigger is healthy now, we can remove the old evidence of why it was not working. This allows concentrating more precisely on the things that are not working right now. Zabbix 5.0
DELETE FROM events
WHERE source=3 AND object=0
AND objectid NOT IN (
SELECT DISTINCT triggers.triggerid
FROM triggers, functions, items, hosts, item_rtdata
WHERE triggers.triggerid=functions.triggerid
AND functions.itemid=items.itemid
AND hosts.hostid=items.hostid
AND item_rtdata.itemid=items.itemid
AND hosts.status=0
AND hosts.flags IN (0,4)
AND items.status=0
AND triggers.status=0
AND LENGTH(triggers.error)=0
AND LENGTH(item_rtdata.error)=0
);
Show host object and proxy the item belongs to. Zabbix 5.0, 5.2, 5.4, 6.0
SELECT
proxy.host AS proxy,
hosts.host,
items.name,
items.key_,
items.delay
FROM items
JOIN hosts ON (items.hostid=hosts.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.status=0
AND items.status=0;
Unique items keys behind proxy. Zabbix 5.0, 5.2, 5.4, 6.0, 6.2
SELECT
proxy.host AS proxy,
items.key_,
COUNT(*) AS count
FROM hosts
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
JOIN items ON (items.hostid=hosts.hostid)
WHERE hosts.status=0
AND items.status=0
AND items.flags<>2
GROUP BY 1,2
ORDER BY 3 ASC;
Size of MySQL DB. Size of biggest tables
SELECT
ENGINE,
TABLE_TYPE,
TABLE_SCHEMA AS `DATABASE`,
TABLE_NAME AS `TABLE`,
ROUND(1.0*(DATA_LENGTH + INDEX_LENGTH) / 1024 / 1024,2) AS `TOTAL SIZE (MB)`,
ROUND(1.0*DATA_LENGTH/1024/1024, 2) AS `DATA SIZE (MB)`,
ROUND(1.0*INDEX_LENGTH/1024/1024, 2) AS `INDEX SIZE (MB)`,
ROUND(1.0*DATA_FREE/1024/1024, 2) AS `FREE SIZE (MB)`,
CURDATE() AS `TODAY`,
ROUND(DATA_FREE/(DATA_LENGTH+INDEX_LENGTH)*100,2) 'FRAGMENTED %',
TABLE_ROWS,
ROW_FORMAT,
TABLE_COLLATION,
VERSION
FROM information_schema.TABLES
WHERE table_schema NOT IN('information_schema', 'mysql','sys', 'performance_schema')
ORDER BY (DATA_LENGTH + INDEX_LENGTH)
DESC;
Show failed actions. Zabbix 5.0
-- Last 10 failed alert deliveries (alerts.status=2) with clickable frontend
-- URLs for the originating event and the action configuration.
-- Implicit comma joins replaced with explicit ANSI joins; result set unchanged.
SELECT CONCAT('tr_events.php?triggerid=',events.objectid,'&eventid=',events.eventid) AS OpenEventDetails,
FROM_UNIXTIME(alerts.clock) AS clock,
alerts.error AS WhyItFailed,
actions.name AS ActionName,
CONCAT('actionconf.php?form=update&actionid=',actions.actionid) AS OpenAction
FROM alerts
JOIN events ON (alerts.eventid=events.eventid)
JOIN actions ON (actions.actionid=alerts.actionid)
WHERE alerts.status=2
ORDER BY alerts.clock DESC LIMIT 10;
Detect parallelity
SELECT
hosts.host,
items.delay,
MOD(items.itemid,table1.inSeconds) AS offset,
GROUP_CONCAT(items.itemid),
COUNT(*)
FROM items
JOIN (
SELECT DISTINCT delay, CASE delay
WHEN '10s' THEN '10'
WHEN '12s' THEN '12'
WHEN '15s' THEN '15'
WHEN '1m' THEN '60'
WHEN '2m' THEN '120'
WHEN '3m' THEN '180'
WHEN '5m' THEN '300'
WHEN '15m' THEN '900'
WHEN '1h' THEN '3600'
WHEN '1d' THEN '86400'
ELSE '0'
END AS inSeconds
FROM items
) table1 ON table1.delay=items.delay
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE hosts.status=0 AND items.status=0
GROUP BY 1,2,3
HAVING COUNT(*) > 1;
Summary of data collection and storage used
-- Count of enabled items on enabled hosts, grouped by their collection
-- interval (delay) and retention settings (history, trends).
-- Original used `JOIN hosts` with no ON clause and the join condition in
-- WHERE (a MySQL-only cross-join-then-filter); made the join explicit.
SELECT items.delay,
items.history,
items.trends,
COUNT(*)
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE items.status=0
AND hosts.status=0
GROUP BY 1,2,3
ORDER BY 4 ASC;
Statistics per calculated item. Zabbix 6.0
SELECT items.params,COUNT(*) FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
WHERE items.type=15
AND items.status=0
AND hosts.status=0
GROUP BY 1 ORDER BY 2 ASC;
ZBX hosts with errors. Zabbix 6.4
SELECT proxy.host, hosts.host, interface.ip,interface.port,interface.error
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
LEFT JOIN hosts proxy ON (proxy.hostid=hosts.proxy_hostid)
WHERE LENGTH(interface.error)>0
AND interface.type=1
ORDER BY 3 ASC;
SNMP hosts with errors. Zabbix 6.4
SELECT proxy.host, hosts.host, interface.ip,interface.port,interface.error
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
LEFT JOIN hosts proxy ON (proxy.hostid=hosts.proxy_hostid)
WHERE LENGTH(interface.error)>0
AND interface.type=2
ORDER BY 3 ASC;
JMX hosts with errors. Zabbix 6.4
-- JMX interfaces (type=4) with a non-empty error, matching the ZBX (type=1)
-- and SNMP (type=2) variants above. The proxy table was already joined but
-- never selected; proxy.host added for consistency with those siblings.
SELECT proxy.host, hosts.host, interface.ip,interface.port,interface.error
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
LEFT JOIN hosts proxy ON (proxy.hostid=hosts.proxy_hostid)
WHERE LENGTH(interface.error)>0
AND interface.type=4
ORDER BY 3 ASC;
Interface errors. Zabbix 6.4
SELECT hosts.host, interface.type, interface.error
FROM interface
JOIN hosts ON (hosts.hostid=interface.hostid)
WHERE LENGTH(interface.error)>0
ORDER BY 3 ASC;
Detach current table from application layer and set back another table
RENAME TABLE history TO history_old; CREATE TABLE history LIKE history_old;
RENAME TABLE history_uint TO history_uint_old; CREATE TABLE history_uint LIKE history_uint_old;
RENAME TABLE history_str TO history_str_old; CREATE TABLE history_str LIKE history_str_old;
RENAME TABLE history_log TO history_log_old; CREATE TABLE history_log LIKE history_log_old;
RENAME TABLE history_text TO history_text_old; CREATE TABLE history_text LIKE history_text_old;
RENAME TABLE trends TO trends_old; CREATE TABLE trends LIKE trends_old;
RENAME TABLE trends_uint TO trends_uint_old; CREATE TABLE trends_uint LIKE trends_uint_old;
Create old tables like current
CREATE TABLE history_old LIKE history;
CREATE TABLE history_uint_old LIKE history_uint;
CREATE TABLE history_str_old LIKE history_str;
CREATE TABLE history_log_old LIKE history_log;
CREATE TABLE history_text_old LIKE history_text;
CREATE TABLE trends_old LIKE trends;
CREATE TABLE trends_uint_old LIKE trends_uint;
Create current tables like old
CREATE TABLE history LIKE history_old;
CREATE TABLE history_uint LIKE history_uint_old;
CREATE TABLE history_str LIKE history_str_old;
CREATE TABLE history_log LIKE history_log_old;
CREATE TABLE history_text LIKE history_text_old;
CREATE TABLE trends LIKE trends_old;
CREATE TABLE trends_uint LIKE trends_uint_old;
Truncate old tables
TRUNCATE TABLE history_uint_old;
TRUNCATE TABLE trends_uint_old;
TRUNCATE TABLE history_old;
TRUNCATE TABLE trends_old;
TRUNCATE TABLE history_text_old;
TRUNCATE TABLE history_str_old;
TRUNCATE TABLE history_log_old;
Truncate current tables
TRUNCATE TABLE history_uint;
TRUNCATE TABLE trends_uint;
TRUNCATE TABLE history;
TRUNCATE TABLE trends;
TRUNCATE TABLE history_text;
TRUNCATE TABLE history_str;
TRUNCATE TABLE history_log;
Items which fails with data collection frequently. Zabbix 4.0, 5.0
SELECT
CONCAT('items.php?form=update&itemid=', events.objectid) AS 'URL',
events.name,
COUNT(*)
FROM events
WHERE events.source=3 AND events.object=4 AND LENGTH(events.name)>0
AND clock > UNIX_TIMESTAMP(NOW()-INTERVAL 1 HOUR)
GROUP BY 1,2
ORDER BY 3 DESC LIMIT 10;
Lld rules which fail. Zabbix 4.0, 5.0
SELECT
CONCAT('host_discovery.php?form=update&itemid=', events.objectid) AS 'URL',
events.name,
COUNT(*)
FROM events
WHERE events.source=3 AND events.object=5 AND LENGTH(events.name)>0
AND clock > UNIX_TIMESTAMP(NOW()-INTERVAL 1 HOUR)
GROUP BY 1,2
ORDER BY 3 DESC LIMIT 10;
Orphaned integer trend rows (itemid no longer exists)
SELECT DISTINCT itemid FROM trends_uint WHERE itemid NOT IN (SELECT itemid FROM items);
DELETE FROM trends_uint WHERE itemid NOT IN (SELECT itemid FROM items);
Orphaned decimal trend rows (itemid no longer exists)
SELECT DISTINCT itemid FROM trends WHERE itemid NOT IN (SELECT itemid FROM items);
DELETE FROM trends WHERE itemid NOT IN (SELECT itemid FROM items);
Print a part of sid which can be used to backtrack usage of calls in web servers access.log
SELECT RIGHT(sessions.sessionid,16), users.alias FROM sessions, users WHERE sessions.userid=users.userid AND users.alias='api';
MySQL biggest tables for database 'zabbix'
SELECT table_name, table_rows, data_length, index_length,
ROUND(((data_length + index_length) / 1024 / 1024 / 1024),2) "Size in GB"
FROM information_schema.tables
WHERE table_schema = "zabbix"
ORDER BY ROUND(((data_length + index_length) / 1024 / 1024 / 1024),2) DESC
LIMIT 20;
Items and delay
SELECT items.delay, CASE items.type WHEN 0 THEN 'Zabbix agent' WHEN 1 THEN 'SNMPv1 agent' WHEN 2 THEN 'Zabbix trapper' WHEN 3 THEN 'Simple check' WHEN 4 THEN 'SNMPv2 agent' WHEN 5 THEN 'Zabbix internal' WHEN 6 THEN 'SNMPv3 agent' WHEN 7 THEN 'Zabbix agent (active) check' WHEN 8 THEN 'Aggregate' WHEN 9 THEN 'HTTP test (web monitoring scenario step)' WHEN 10 THEN 'External check' WHEN 11 THEN 'Database monitor' WHEN 12 THEN 'IPMI agent' WHEN 13 THEN 'SSH agent' WHEN 14 THEN 'TELNET agent' WHEN 15 THEN 'Calculated' WHEN 16 THEN 'JMX agent' WHEN 17 THEN 'SNMP trap' WHEN 18 THEN 'Dependent item' WHEN 19 THEN 'HTTP agent' WHEN 20 THEN 'SNMP agent' WHEN 21 THEN 'Script item' END AS type,  COUNT(*)
FROM hosts, items
WHERE hosts.hostid=items.hostid
AND hosts.status=0 AND items.status=0 AND hosts.flags IN (0,4) AND items.flags IN (0,4)
GROUP BY 1,2
ORDER BY 1 ASC;
ODBC items, database monitor items
SELECT
proxy.host AS proxy,
hosts.host,
COUNT(*)
FROM items
JOIN hosts ON (hosts.hostid=items.hostid)
LEFT JOIN hosts proxy ON (hosts.proxy_hostid=proxy.hostid)
WHERE hosts.status=0 AND items.status=0
AND items.type=11
GROUP BY 1,2
ORDER BY 1,2 DESC;
Profiling
SET profiling = 1;
SELECT * FROM sessions;
SHOW PROFILES;
SHOW PROFILE FOR QUERY 1;
EXPLAIN SELECT * FROM sessions\G
SET profiling = 0;
Special item update frequency
SELECT items.delay, items.params,  COUNT(*)
FROM hosts, items
WHERE hosts.hostid=items.hostid
AND hosts.status=0 AND items.status=0 AND hosts.flags IN (0,4) AND items.flags IN (0,4)
AND items.type=15
GROUP BY 1,2
ORDER BY 1 ASC;
Events daily
SELECT COUNT(*) FROM events WHERE clock >= UNIX_TIMESTAMP("2023-07-20 00:00:00") AND clock < UNIX_TIMESTAMP("2023-07-21 00:00:00");
All SNMPv3 credentials in use. Zabbix 7.0
SET SESSION group_concat_max_len = 1000000; SELECT interface_snmp.version,
interface_snmp.bulk,
interface_snmp.community,
interface_snmp.securityname,
interface_snmp.securitylevel,
interface_snmp.authpassphrase,
interface_snmp.privpassphrase,
interface_snmp.authprotocol,
interface_snmp.privprotocol,
interface_snmp.contextname,
GROUP_CONCAT(interface.ip) AS hosts
FROM hosts
LEFT JOIN interface ON (interface.hostid=hosts.hostid)
LEFT JOIN interface_snmp ON (interface.interfaceid=interface_snmp.interfaceid)
WHERE hosts.status IN (0,1) AND hosts.flags=0 AND interface.type=2
GROUP BY 1,2,3,4,5,6,7,8,9,10\G
Trigger calculation fail. Zabbix 4.0, 5.0
SELECT
CONCAT('triggers.php?form=update&triggerid=', events.objectid) AS 'URL',
events.name,
COUNT(*)
FROM events
WHERE events.source=3 AND events.object=0 AND LENGTH(events.name)>0
AND clock > UNIX_TIMESTAMP(NOW()-INTERVAL 1 HOUR)
GROUP BY 1,2
ORDER BY 3 DESC LIMIT 10;
Which decimal item takes the most rows in the trends table. Zabbix 4.0, 5.0, 6.0
SELECT hosts.host, items.key_ FROM (
SELECT COUNT(*), itemid FROM trends WHERE itemid IN (
SELECT DISTINCT itemid FROM items WHERE value_type=0
)
GROUP BY 2
ORDER BY 1 DESC
LIMIT 50) table1, items, hosts
WHERE table1.itemid=items.itemid
AND hosts.hostid=items.hostid;

Download this section: https://aigarskadikis.github.io/src/server.sql
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/server.sql

List all chunks. Pg 14.5, ts 2.8.1
SELECT show_chunks('history');
List version
SELECT extversion FROM pg_extension WHERE extname='timescaledb'\gx
Size of chunks and dates
SELECT hypertable_name,
to_timestamp(range_start_integer) AS range_start,
to_timestamp(range_end_integer) AS range_end,
chunk_name,
(range_end_integer-range_start_integer) AS size
FROM timescaledb_information.chunks
WHERE hypertable_name IN ('history','history_uint','history_str','history_log','history_text','trends_uint','trends')
AND range_start_integer > EXTRACT(EPOCH FROM NOW() - INTERVAL '24 hours')
ORDER BY 1,2;
Compress manually one chunk. Pg 14.5, ts 2.8.1
SELECT compress_chunk('_timescaledb_internal._hyper_1_489_chunk');
Drop tables
DROP TABLE trends_tmp;
DROP TABLE trends_old;
DROP TABLE trends_uint_tmp;
DROP TABLE trends_uint_old;
DROP TABLE history_tmp;
DROP TABLE history_old;
DROP TABLE history_uint_tmp;
DROP TABLE history_uint_old;
DROP TABLE history_str_tmp;
DROP TABLE history_str_old;
DROP TABLE history_log_tmp;
DROP TABLE history_log_old;
DROP TABLE history_text_tmp;
DROP TABLE history_text_old;
Allow the application layer to drop old chunks by using housekeeper settings in GUI
UPDATE config SET db_extension='timescaledb',hk_history_global=1,hk_trends_global=1;
Explorer how many is compressed and uncompressed. Pg 14.5, ts 2.8.1
SELECT chunk_schema,chunk_name,compression_status FROM chunk_compression_stats('history');
To see if hypertable is compressed. Pg 14.5, ts 2.8.1
SELECT chunk_schema,chunk_name,compression_status FROM chunk_compression_stats('history');
Decompress chunk. Pg 14.5, ts 2.8.1
-- Decompress a single chunk by name (copy-paste bug: the original repeated
-- the compression-stats query from the two entries above instead of calling
-- decompress_chunk; this mirrors the compress_chunk example earlier).
SELECT decompress_chunk('_timescaledb_internal._hyper_1_489_chunk');
Another way to print chunks per table
select * from _timescaledb_catalog.chunk where hypertable_id = (select id from _timescaledb_catalog.hypertable where table_name = 'history');
Compress all chunks from a time frame. Pg 14.5, ts 2.8.1
-- Compress every chunk of 'history' older than the given epoch.
-- Original `from SELECT show_chunks(...)` was a syntax error; show_chunks
-- is a set-returning function and goes directly in FROM (same pattern as
-- the decompress_chunk examples below).
SELECT compress_chunk(i) FROM show_chunks('history',1690372501,0) i;
Decompress all chunks. Pg 14.5, ts 2.8.1
SELECT decompress_chunk(c, true) FROM show_chunks('trends_uint') c;
SELECT decompress_chunk(c, true) FROM show_chunks('trends') c;
SELECT decompress_chunk(c, true) FROM show_chunks('history_uint') c;
SELECT decompress_chunk(c, true) FROM show_chunks('history_str') c;
SELECT decompress_chunk(c, true) FROM show_chunks('history_log') c;
SELECT decompress_chunk(c, true) FROM show_chunks('history_text') c;
SELECT decompress_chunk(c, true) FROM show_chunks('history') c;
List chunk sizes of hypertables
SELECT h.table_name, c.interval_length
FROM _timescaledb_catalog.dimension c
JOIN _timescaledb_catalog.hypertable h
ON h.id = c.hypertable_id;

Download this section: https://aigarskadikis.github.io/src/ts.sql
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/ts.sql

Create MySQL users using wizard: https://aigarskadikis.github.io/u

Create an empty database with name 'zbx_db'. Zabbix 6.0 LTS
CREATE DATABASE zbx_db CHARACTER SET utf8mb4 COLLATE utf8mb4_bin;
DB1. User for replication
CREATE USER 'repl'@'192.168.88.101' IDENTIFIED WITH mysql_native_password BY 'PasswordForDBReplication';
GRANT REPLICATION SLAVE ON *.* TO 'repl'@'192.168.88.101';
DB2. User for replication
CREATE USER 'repl'@'192.168.88.102' IDENTIFIED WITH mysql_native_password BY 'PasswordForDBReplication';
GRANT REPLICATION SLAVE ON *.* TO 'repl'@'192.168.88.102';
APP1. User for 'zabbix-server'
CREATE USER 'zbx_srv'@'192.168.88.111' IDENTIFIED WITH mysql_native_password BY 'PasswordForApplicationServer';
APP2. User for 'zabbix-server'
CREATE USER 'zbx_srv'@'192.168.88.112' IDENTIFIED WITH mysql_native_password BY 'PasswordForApplicationServer';
Bare minimum permissions for 'zabbix-server'
CREATE ROLE 'zbx_srv_role';
GRANT SELECT, UPDATE, DELETE, INSERT, CREATE, DROP, ALTER, INDEX, REFERENCES ON zbx_db.* TO 'zbx_srv_role';
Allow to use Zabbix 6.2. Required for incremental configuration synchronisation
SET GLOBAL log_bin_trust_function_creators = 1;
APP1. Assign role
GRANT 'zbx_srv_role' TO 'zbx_srv'@'192.168.88.111';
SET DEFAULT ROLE 'zbx_srv_role' TO 'zbx_srv'@'192.168.88.111';
APP2. Assign role
GRANT 'zbx_srv_role' TO 'zbx_srv'@'192.168.88.112';
SET DEFAULT ROLE 'zbx_srv_role' TO 'zbx_srv'@'192.168.88.112';
GUI1. User for 'nginx'
CREATE USER 'zbx_web'@'192.168.88.121' IDENTIFIED WITH mysql_native_password BY 'PasswordForFrontendNode';
GUI2. User for 'nginx'
CREATE USER 'zbx_web'@'192.168.88.122' IDENTIFIED WITH mysql_native_password BY 'PasswordForFrontendNode';
Install bare minimum permissions for frontend
CREATE ROLE 'zbx_web_role';
GRANT SELECT, UPDATE, DELETE, INSERT ON zbx_db.* TO 'zbx_web_role';
GUI1. Assign role
GRANT 'zbx_web_role' TO 'zbx_web'@'192.168.88.121';
SET DEFAULT ROLE 'zbx_web_role' TO 'zbx_web'@'192.168.88.121';
GUI2. Assign role
GRANT 'zbx_web_role' TO 'zbx_web'@'192.168.88.122';
SET DEFAULT ROLE 'zbx_web_role' TO 'zbx_web'@'192.168.88.122';
User for DB partitioning script
CREATE USER 'zbx_part'@'127.0.0.1' IDENTIFIED WITH mysql_native_password BY 'PasswordForDBPartitioning';
GRANT ALL PRIVILEGES ON *.* to 'zbx_part'@'127.0.0.1';
User to monitor the health of MySQL server via local Zabbix agent 2. Not suitable for RDS.
CREATE USER 'zbx_monitor'@'127.0.0.1' IDENTIFIED WITH mysql_native_password BY 'PasswordForAgent2MySQLMonitoring';
GRANT REPLICATION CLIENT,PROCESS,SHOW DATABASES,SHOW VIEW ON *.* TO 'zbx_monitor'@'127.0.0.1';
APP1. User to monitor RDS database
CREATE USER 'zbx_monitor'@'192.168.88.111' IDENTIFIED WITH mysql_native_password BY 'PasswordForAgent2MySQLMonitoring';
GRANT REPLICATION CLIENT,PROCESS,SHOW DATABASES,SHOW VIEW ON *.* TO 'zbx_monitor'@'192.168.88.111';
APP2. User to monitor RDS database
CREATE USER 'zbx_monitor'@'192.168.88.112' IDENTIFIED WITH mysql_native_password BY 'PasswordForAgent2MySQLMonitoring';
GRANT REPLICATION CLIENT,PROCESS,SHOW DATABASES,SHOW VIEW ON *.* TO 'zbx_monitor'@'192.168.88.112';
Backup user for 'mysqldump'
CREATE USER 'zbx_backup'@'127.0.0.1' IDENTIFIED WITH mysql_native_password BY 'PassfordForMySQLDUmp';
GRANT SELECT, LOCK TABLES, SHOW VIEW, RELOAD ON *.* TO 'zbx_backup'@'127.0.0.1';
Read only user for reporting
-- Read-only reporting user. The original granted SELECT to 'zbx_backup'
-- (copy-paste from the backup-user section), leaving 'zbx_read_only' with
-- no privileges; the grant now targets the user created here.
CREATE USER 'zbx_read_only'@'127.0.0.1' IDENTIFIED WITH mysql_native_password BY 'PasswordForReadOnlyUser';
GRANT SELECT ON zbx_db.* TO 'zbx_read_only'@'127.0.0.1';
User for grafana which runs in docker
CREATE USER 'grafana'@'%' IDENTIFIED WITH mysql_native_password BY 'PasswordForReadOnlyGrafanaUser';
GRANT SELECT ON zbx_db.* TO 'grafana'@'%';

Download this section: https://aigarskadikis.github.io/src/users.sql
Fancy syntax highlighter? Read same page on GitHub: https://github.com/aigarskadikis/aigarskadikis.github.io/blob/main/src/users.sql

SEARCH
×