diff --git a/README.md b/README.md index 1e3f2ae..30d1df1 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ # Monitor ZFS on Linux on Zabbix + +**DISCLAIMER:** *This is a fork, created because my pull request wasn't approved for the following reasons: https://github.com/Cosium/zabbix_zfs-on-linux/pull/30* + + This template is a modified version of the original work done by pbergdolt and posted on the zabbix forum a while ago here: https://www.zabbix.com/forum/zabbix-cookbook/35336-zabbix-zfs-discovery-monitoring?t=43347 . Also the original home of this variant was on https://share.zabbix.com/zfs-on-linux . I have maintained and modified this template over the years and the different versions of ZoL on a large number of servers so I'm pretty confident that it works ;) @@ -107,9 +111,9 @@ You can see how the macros are used by looking at the discovery rules, then "Tri # Important note about Zabbix active items -This template uses Zabbix items of type `Zabbix agent (active)` (= active items). By default, most template uses `Zabbix agent` items (= passive items). +'zol_template.xml' uses Zabbix items of type `Zabbix agent (active)` (= active items). By default, most templates use `Zabbix agent` items (= passive items). -If you want, you can convert all the items to `Zabbix agent` and everything will work, but you should really uses active items because those are way more scalable. The official documentation doesn't really make this point clear (https://www.zabbix.com/documentation/4.0/manual/appendix/items/activepassive) but active items are optimized: the agent asks the server for the list of items that the server wants, then send them by batch periodically. +If you want, you can convert all the items to `Zabbix agent` or import 'zol_template_passive.xml', but you should really use active items because those are way more scalable. 
The official documentation doesn't really make this point clear (https://www.zabbix.com/documentation/4.0/manual/appendix/items/activepassive) but active items are optimized: the agent asks the server for the list of items that the server wants, then sends them in batches periodically. On the other hand, for passive items, the zabbix server must establish a connection for each item and ask for it, then wait for the answer: this results in more CPU, memory and network consumption used by both the server and the agent. diff --git a/template/zol_template_passive.xml b/template/zol_template_passive.xml new file mode 100644 index 0000000..2d21263 --- /dev/null +++ b/template/zol_template_passive.xml @@ -0,0 +1,1444 @@ + + + 4.4 + 2021-03-09T20:56:51Z + + + Templates + + + + + + + + {ZFS on Linux:zfs.arcstats[dnode_size].last()}>({ZFS on Linux:zfs.arcstats[arc_dnode_limit].last()}*0.9) + ZFS ARC dnode size > 90% dnode max size on {HOST.NAME} + HIGH + + + {ZFS on Linux:zfs.arcstats[arc_meta_used].last()}>({ZFS on Linux:zfs.arcstats[arc_meta_limit].last()}*0.01*{$ZFS_ARC_META_ALERT}) + ZFS ARC meta size > {$ZFS_ARC_META_ALERT}% meta max size on {HOST.NAME} + HIGH + + + + + ZFS ARC arc_meta_used breakdown + STACKED + FIXED + + + 3333FF + + ZFS on Linux + zfs.arcstats[metadata_size] + + + + 1 + 00EE00 + + ZFS on Linux + zfs.arcstats[dnode_size] + + + + 2 + EE0000 + + ZFS on Linux + zfs.arcstats[hdr_size] + + + + 3 + EEEE00 + + ZFS on Linux + zfs.arcstats[dbuf_size] + + + + 4 + EE00EE + + ZFS on Linux + zfs.arcstats[bonus_size] + + + + + + ZFS ARC breakdown + STACKED + FIXED + + + 3333FF + + ZFS on Linux + zfs.arcstats[data_size] + + + + 1 + 00AA00 + + ZFS on Linux + zfs.arcstats[metadata_size] + + + + 2 + EE0000 + + ZFS on Linux + zfs.arcstats[dnode_size] + + + + 3 + CCCC00 + + ZFS on Linux + zfs.arcstats[hdr_size] + + + + 4 + A54F10 + + ZFS on Linux + zfs.arcstats[dbuf_size] + + + + 5 + 888888 + + ZFS on Linux + zfs.arcstats[bonus_size] + + + + + + ZFS ARC Cache Hit Ratio + 
FIXED + FIXED + + + 00CC00 + + ZFS on Linux + zfs.arcstats_hit_ratio + + + + + + ZFS ARC memory usage + FIXED + ITEM + + ZFS on Linux + zfs.arcstats[c_max] + + + + GRADIENT_LINE + 0000EE + + ZFS on Linux + zfs.arcstats[size] + + + + 1 + BOLD_LINE + DD0000 + + ZFS on Linux + zfs.arcstats[c_max] + + + + 2 + 00BB00 + + ZFS on Linux + zfs.arcstats[c_min] + + + + + + + + ZFS zpool scrub status + + + 0 + Scrub in progress + + + 1 + No scrub in progress + + + + +