"""Container Monitor - TUI-based cgroup monitoring combining syscall, file I/O, and network tracking."""

from pythonbpf import bpf, map, section, bpfglobal, struct, BPF
from pythonbpf.maps import HashMap
from pythonbpf.helper import get_current_cgroup_id
from ctypes import c_int32, c_uint64, c_void_p
from vmlinux import struct_pt_regs, struct_sk_buff

from data_collection import ContainerDataCollector
from tui import ContainerMonitorTUI


# ==================== BPF Structs ====================


# Per-cgroup VFS read counters: total bytes and number of read calls.
@bpf
@struct
class read_stats:
    bytes: c_uint64
    ops: c_uint64


# Per-cgroup VFS write counters: total bytes and number of write calls.
@bpf
@struct
class write_stats:
    bytes: c_uint64
    ops: c_uint64


# Per-cgroup network counters, split by direction (rx/tx).
@bpf
@struct
class net_stats:
    rx_packets: c_uint64
    tx_packets: c_uint64
    rx_bytes: c_uint64
    tx_bytes: c_uint64


# ==================== BPF Maps ====================


# All maps are keyed by cgroup ID (as returned by get_current_cgroup_id),
# so each entry aggregates activity for one container/cgroup.
@bpf
@map
def read_map() -> HashMap:
    return HashMap(key=c_uint64, value=read_stats, max_entries=1024)


@bpf
@map
def write_map() -> HashMap:
    return HashMap(key=c_uint64, value=write_stats, max_entries=1024)


@bpf
@map
def net_stats_map() -> HashMap:
    return HashMap(key=c_uint64, value=net_stats, max_entries=1024)


@bpf
@map
def syscall_count() -> HashMap:
    return HashMap(key=c_uint64, value=c_uint64, max_entries=1024)


# ==================== File I/O Tracing ====================


# Attached to vfs_read(). On x86-64 the third kernel argument (the requested
# byte count) arrives in pt_regs->dx; this register mapping is
# architecture-specific. Two caveats: the count is the *requested* size (the
# actual bytes read would need the kretprobe return value), and the
# lookup/update pair is not atomic, so concurrent reads in the same cgroup
# can race and undercount slightly.
@bpf
@section("kprobe/vfs_read")
def trace_read(ctx: struct_pt_regs) -> c_int32:
    cg = get_current_cgroup_id()
    count = c_uint64(ctx.dx)
    ptr = read_map.lookup(cg)
    if ptr:
        s = read_stats()
        s.bytes = ptr.bytes + count
        s.ops = ptr.ops + 1
        read_map.update(cg, s)
    else:
        s = read_stats()
        s.bytes = count
        s.ops = c_uint64(1)
        read_map.update(cg, s)

    return c_int32(0)


# Same accounting as trace_read, for vfs_write().
@bpf
@section("kprobe/vfs_write")
def trace_write(ctx1: struct_pt_regs) -> c_int32:
    cg = get_current_cgroup_id()
    count = c_uint64(ctx1.dx)
    ptr = write_map.lookup(cg)

    if ptr:
        s = write_stats()
        s.bytes = ptr.bytes + count
        s.ops = ptr.ops + 1
        write_map.update(cg, s)
    else:
        s = write_stats()
        s.bytes = count
        s.ops = c_uint64(1)
        write_map.update(cg, s)

    return c_int32(0)
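

# Debugging tip (not part of the monitor's logic): while the program runs,
# the maps can be inspected from another shell with bpftool. This assumes
# the maps keep the Python function names above when loaded:
#
#   sudo bpftool map show
#   sudo bpftool map dump name read_map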


# ==================== Network I/O Tracing ====================


# Attached to __netif_receive_skb(). The first argument (pt_regs->di on
# x86-64) is the struct sk_buff pointer; skb.len gives the packet length.
@bpf
@section("kprobe/__netif_receive_skb")
def trace_netif_rx(ctx2: struct_pt_regs) -> c_int32:
    cgroup_id = get_current_cgroup_id()
    skb = struct_sk_buff(ctx2.di)
    pkt_len = c_uint64(skb.len)

    stats_ptr = net_stats_map.lookup(cgroup_id)

    if stats_ptr:
        stats = net_stats()
        stats.rx_packets = stats_ptr.rx_packets + 1
        stats.tx_packets = stats_ptr.tx_packets
        stats.rx_bytes = stats_ptr.rx_bytes + pkt_len
        stats.tx_bytes = stats_ptr.tx_bytes
        net_stats_map.update(cgroup_id, stats)
    else:
        stats = net_stats()
        stats.rx_packets = c_uint64(1)
        stats.tx_packets = c_uint64(0)
        stats.rx_bytes = pkt_len
        stats.tx_bytes = c_uint64(0)
        net_stats_map.update(cgroup_id, stats)

    return c_int32(0)


# Mirror of trace_netif_rx for the transmit path (__dev_queue_xmit).
@bpf
@section("kprobe/__dev_queue_xmit")
def trace_dev_xmit(ctx3: struct_pt_regs) -> c_int32:
    cgroup_id = get_current_cgroup_id()
    skb = struct_sk_buff(ctx3.di)
    pkt_len = c_uint64(skb.len)

    stats_ptr = net_stats_map.lookup(cgroup_id)

    if stats_ptr:
        stats = net_stats()
        stats.rx_packets = stats_ptr.rx_packets
        stats.tx_packets = stats_ptr.tx_packets + 1
        stats.rx_bytes = stats_ptr.rx_bytes
        stats.tx_bytes = stats_ptr.tx_bytes + pkt_len
        net_stats_map.update(cgroup_id, stats)
    else:
        stats = net_stats()
        stats.rx_packets = c_uint64(0)
        stats.tx_packets = c_uint64(1)
        stats.rx_bytes = c_uint64(0)
        stats.tx_bytes = pkt_len
        net_stats_map.update(cgroup_id, stats)

    return c_int32(0)
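

# Caveat: both network probes charge traffic to the cgroup of the *current*
# task. On the receive path, __netif_receive_skb usually runs in softirq
# context, so the "current" task is whatever happened to be interrupted,
# not necessarily the container that owns the socket. For precise
# per-container accounting, cgroup_skb ingress/egress programs attached to
# the container's cgroup would be the more accurate design, if the bindings
# support that attach type.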


# ==================== Syscall Tracing ====================


# Counts every syscall entry per cgroup via the raw_syscalls tracepoint.
# The tracepoint context is unused, so it is typed as an opaque pointer.
@bpf
@section("tracepoint/raw_syscalls/sys_enter")
def count_syscalls(ctx: c_void_p) -> c_int32:
    cgroup_id = get_current_cgroup_id()
    count_ptr = syscall_count.lookup(cgroup_id)

    if count_ptr:
        new_count = count_ptr + c_uint64(1)
        syscall_count.update(cgroup_id, new_count)
    else:
        syscall_count.update(cgroup_id, c_uint64(1))

    return c_int32(0)


# eBPF programs must declare a license; "GPL" unlocks GPL-only helpers.
@bpf
@bpfglobal
def LICENSE() -> str:
    return "GPL"
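

# Illustrative helper, not used by the TUI below: on cgroup v2, the ID
# returned by bpf_get_current_cgroup_id() is the inode number of the cgroup
# directory, so a map key can be resolved to a human-readable path by
# scanning the cgroup2 mount. A best-effort sketch; assumes cgroup v2 is
# mounted at the standard location.
import os


def cgroup_id_to_path(cgroup_id: int, root: str = "/sys/fs/cgroup") -> str:
    """Best-effort reverse lookup of a cgroup v2 ID to its directory path."""
    for dirpath, _dirnames, _files in os.walk(root):
        if os.stat(dirpath).st_ino == cgroup_id:
            return dirpath
    return f"<unknown cgroup {cgroup_id}>"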


# ==================== Main ====================

if __name__ == "__main__":
    print("🔥 Loading BPF programs...")

    # Compile, load, and attach every @bpf-decorated program above
    b = BPF()
    b.load()
    b.attach_all()

    # Get map references and enable struct deserialization so values come
    # back as the structs declared above rather than raw bytes
    read_map_ref = b["read_map"]
    write_map_ref = b["write_map"]
    net_stats_map_ref = b["net_stats_map"]
    syscall_count_ref = b["syscall_count"]

    read_map_ref.set_value_struct("read_stats")
    write_map_ref.set_value_struct("write_stats")
    net_stats_map_ref.set_value_struct("net_stats")

    print("✅ BPF programs loaded and attached")

    # Set up the data collector that polls the four maps
    collector = ContainerDataCollector(
        read_map_ref, write_map_ref, net_stats_map_ref, syscall_count_ref
    )

    # Create and run the TUI
    tui = ContainerMonitorTUI(collector)
    tui.run()
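
    # Headless alternative (hypothetical sketch): instead of the TUI, the
    # collector could be polled on an interval. The collect() method name is
    # an assumption about the data_collection API; adapt to what it exposes.
    #
    #   import time
    #   while True:
    #       print(collector.collect())
    #       time.sleep(1)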