
# To list every process on the system:
ps aux
# To list a process tree:
ps axjf

# To list every process owned by foouser (full format):
ps -fu foouser

# To list every process with a user-defined format:
ps -eo pid,user,command
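
# ps can also select by command name with -C; "sshd" below is just a
# placeholder, substitute the process you are interested in:
ps -C sshd -o pid,user,%cpu,%mem,cmd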

Check processes with high CPU usage

# Display usage across each processor
root@vmanalyst:~# mpstat -P ALL
Linux 5.15.0-124-generic (vmanalyst)    10/17/2024      _x86_64_        (8 CPU)

12:36:13 PM  CPU    %usr   %nice    %sys %iowait    %irq   %soft  %steal  %guest  %gnice   %idle
12:36:13 PM  all    0.20    0.02    0.23    0.02    0.00    0.01    0.00    0.00    0.00   99.52
12:36:13 PM    0    0.26    0.00    0.23    0.03    0.00    0.02    0.00    0.00    0.00   99.46
12:36:13 PM    1    0.20    0.00    0.20    0.02    0.00    0.01    0.00    0.00    0.00   99.57
12:36:13 PM    2    0.10    0.00    0.18    0.03    0.00    0.01    0.00    0.00    0.00   99.69
12:36:13 PM    3    0.26    0.07    0.22    0.03    0.00    0.01    0.00    0.00    0.00   99.41
12:36:13 PM    4    0.13    0.02    0.13    0.02    0.00    0.01    0.00    0.00    0.00   99.70
12:36:13 PM    5    0.12    0.02    0.20    0.02    0.00    0.00    0.00    0.00    0.00   99.63
12:36:13 PM    6    0.28    0.00    0.32    0.02    0.00    0.01    0.00    0.00    0.00   99.38
12:36:13 PM    7    0.29    0.02    0.33    0.02    0.00    0.02    0.00    0.00    0.00   99.32

%usr - % CPU usage at the user level.
%nice - % CPU usage by user processes with a nice priority.
%sys - % CPU usage at the system (Linux kernel) level.
%iowait - % CPU time spent idle while waiting on a disk read/write.
%irq - % CPU usage handling hardware interrupts.
%soft - % CPU usage handling software interrupts.
%steal - % CPU time spent in involuntary wait while the hypervisor services other virtual processors.
%guest - % CPU usage spent running a virtual processor.
%gnice - % CPU usage spent running a niced guest.
%idle - % CPU time spent idle with no outstanding disk I/O request.
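
The snapshot above shows averages since boot; mpstat can also sample at an interval, which makes a momentary CPU spike easier to catch. For example, five 2-second samples:

# Report per-CPU usage every 2 seconds, 5 times
mpstat -P ALL 2 5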

[root@satellite ~]# ps -eo pid,ppid,%mem,%cpu,cmd --sort=-%cpu | head
    PID    PPID %MEM %CPU CMD
    963       1  0.2  0.2 /usr/bin/vmtoolsd
   1894       2  0.0  0.2 [kworker/1:0-events]
   2162       2  0.0  0.2 [kworker/1:3-events_freezable_power_]
   1976       2  0.0  0.1 [kworker/1:1-events_power_efficient]
      1       0  0.4  0.0 /usr/lib/systemd/systemd rhgb --switched-root --system --deserialize 31
      2       0  0.0  0.0 [kthreadd]
      3       2  0.0  0.0 [rcu_gp]
      4       2  0.0  0.0 [rcu_par_gp]
      5       2  0.0  0.0 [slub_flushwq]

[root@satellite ~]# ps -eo pid,ppid,%mem,%cpu,comm --sort=-%cpu | head
    PID    PPID %MEM %CPU COMMAND
    963       1  0.2  0.2 vmtoolsd
   1894       2  0.0  0.2 kworker/1:0-events
   2162       2  0.0  0.2 kworker/1:3-pm
   1976       2  0.0  0.1 kworker/1:1-events_power_efficient
      1       0  0.4  0.0 systemd
      2       0  0.0  0.0 kthreadd
      3       2  0.0  0.0 rcu_gp
      4       2  0.0  0.0 rcu_par_gp
      5       2  0.0  0.0 slub_flushwq
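
The same --sort trick works for memory; for example, to list the top memory consumers instead:

# Sort by resident memory usage rather than CPU
ps -eo pid,ppid,%mem,%cpu,comm --sort=-%mem | head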

[root@satellite ~]# top -c -b | head -20
top - 12:54:27 up  1:23,  2 users,  load average: 0.95, 0.46, 0.18
Tasks: 236 total,   1 running, 235 sleeping,   0 stopped,   0 zombie
%Cpu(s):  5.3 us,  5.3 sy,  0.0 ni, 86.8 id,  0.0 wa,  2.6 hi,  0.0 si,  0.0 st
MiB Mem :   3634.5 total,   2898.0 free,    556.6 used,    419.2 buff/cache
MiB Swap:   1536.0 total,   1536.0 free,      0.0 used.   3078.0 avail Mem 

    PID USER      PR  NI    VIRT    RES    SHR S  %CPU  %MEM     TIME+ COMMAND
   2541 root      20   0   10652   3940   3304 R   5.9   0.1   0:00.04 top -c -b
      1 root      20   0  173720  19372  10556 S   0.0   0.5   0:03.35 /usr/lib/systemd/systemd rhgb --switched-root --system --deserialize 31
      2 root      20   0       0      0      0 S   0.0   0.0   0:00.05 [kthreadd]
      3 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 [rcu_gp]
      4 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 [rcu_par_gp]
      5 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 [slub_flushwq]
      6 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 [netns]
      8 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 [kworker/0:0H-events_highpri]
     10 root       0 -20       0      0      0 I   0.0   0.0   0:00.05 [kworker/0:1H-events_highpri]
     11 root       0 -20       0      0      0 I   0.0   0.0   0:00.00 [mm_percpu_wq]
     13 root      20   0       0      0      0 I   0.0   0.0   0:00.00 [rcu_tasks_kthre]
     14 root      20   0       0      0      0 I   0.0   0.0   0:00.00 [rcu_tasks_rude_]
     15 root      20   0       0      0      0 I   0.0   0.0   0:00.00 [rcu_tasks_trace]
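
For scripting, top can be limited to a single batch iteration and given an explicit sort field; a minimal example (the %CPU field name assumes the default field layout):

# One batch iteration, sorted by CPU usage
top -b -n 1 -o %CPU | head -15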

Monitor Disk I/O performance

# Check the I/O statistics of the disks

[root@satellite ~]# iostat -d
Linux 5.14.0-284.11.1.el9_2.x86_64 (satellite) 	26/10/23 	_x86_64_	(2 CPU)

Device             tps    kB_read/s    kB_wrtn/s    kB_dscd/s    kB_read    kB_wrtn    kB_dscd
dm-0              1.04        46.35         3.92         0.00     329034      27834          0
dm-1              0.01         0.33         0.00         0.00       2348          0          0
nvme0n1           1.15        55.33         4.21         0.00     392827      29920          0
nvme0n2           0.01         0.24         0.00         0.00       1712          0          0
nvme0n3           0.01         0.13         0.00         0.00        944          0          0
nvme0n4           0.01         0.13         0.00         0.00        944          0          0
nvme0n5           0.01         0.13         0.00         0.00        944          0          0
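
As with mpstat, iostat accepts an interval and a count, which is usually more telling than the since-boot averages above:

# Report disk statistics every 2 seconds, 5 times (the first report is still the since-boot average)
iostat -d 2 5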

# Check the I/O statistics of the disks together with CPU utilization (-m = MB; -k = KB)
[root@satellite ~]# iostat -m
Linux 5.14.0-284.11.1.el9_2.x86_64 (satellite) 	26/10/23 	_x86_64_	(2 CPU)

avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           0.19    0.02    1.33    0.02    0.00   98.44

Device             tps    MB_read/s    MB_wrtn/s    MB_dscd/s    MB_read    MB_wrtn    MB_dscd
dm-0              1.00         0.04         0.00         0.00        321         27          0
dm-1              0.01         0.00         0.00         0.00          2          0          0
nvme0n1           1.11         0.05         0.00         0.00        383         29          0
nvme0n2           0.01         0.00         0.00         0.00          1          0          0
nvme0n3           0.01         0.00         0.00         0.00          0          0          0
nvme0n4           0.01         0.00         0.00         0.00          0          0          0
nvme0n5           0.01         0.00         0.00         0.00          0          0          0
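
To break the numbers down per partition rather than per whole device, iostat can include partitions with -p, for example:

# Include statistics for all partitions, reported in MB
iostat -p ALL -m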


# Check the disk wait times

[root@satellite ~]# iostat -x
Linux 5.14.0-284.11.1.el9_2.x86_64 (satellite) 	26/10/23 	_x86_64_	(2 CPU)

avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           0.18    0.02    1.33    0.02    0.00   98.44

Device            r/s     rkB/s   rrqm/s  %rrqm r_await rareq-sz     w/s     wkB/s   wrqm/s  %wrqm w_await wareq-sz     d/s     dkB/s   drqm/s  %drqm d_await dareq-sz     f/s f_await  aqu-sz  %util
dm-0             0.79     44.05     0.00   0.00    0.67    55.68    0.20      3.74     0.00   0.00    4.31    18.39    0.00      0.00     0.00   0.00    0.00     0.00    0.00    0.00    0.00   0.09
dm-1             0.01      0.31     0.00   0.00    0.29    23.72    0.00      0.00     0.00   0.00    0.00     0.00    0.00      0.00     0.00   0.00    0.00     0.00    0.00    0.00    0.00   0.00
nvme0n1          0.91     52.59     0.16  15.22    0.73    57.92    0.19      4.02     0.02   7.79    3.63    21.14    0.00      0.00     0.00   0.00    0.00     0.00    0.00    0.00    0.00   0.10
nvme0n2          0.01      0.23     0.00   0.00    0.29    25.18    0.00      0.00     0.00   0.00    0.00     0.00    0.00      0.00     0.00   0.00    0.00     0.00    0.00    0.00    0.00   0.00
nvme0n3          0.01      0.13     0.00   0.00    0.34    15.23    0.00      0.00     0.00   0.00    0.00     0.00    0.00      0.00     0.00   0.00    0.00     0.00    0.00    0.00    0.00   0.00
nvme0n4          0.01      0.13     0.00   0.00    0.27    15.23    0.00      0.00     0.00   0.00    0.00     0.00    0.00      0.00     0.00   0.00    0.00     0.00    0.00    0.00    0.00   0.00
nvme0n5          0.01      0.13     0.00   0.00    0.29    15.23    0.00      0.00     0.00   0.00    0.00     0.00    0.00      0.00     0.00   0.00    0.00     0.00    0.00    0.00    0.00   0.00
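
On a host with many mostly idle devices, the extended view can be sampled live and trimmed; one possible variant:

# Extended stats in MB every 2 seconds for 3 reports, omitting devices with no activity
iostat -x -m -z 2 3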

Check processes with high memory usage

root@vmanalyst:~# top -b | head -10
top - 12:44:05 up  2:17,  2 users,  load average: 0.00, 0.00, 0.00
Tasks: 341 total,   1 running, 340 sleeping,   0 stopped,   0 zombie
%Cpu(s):  1.2 us,  1.2 sy,  0.0 ni, 97.5 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
MiB Mem :   3875.9 total,   2128.3 free,    746.4 used,   1001.2 buff/cache
MiB Swap:   3891.0 total,   3891.0 free,      0.0 used.   2857.6 avail Mem

    PID USER      PR  NI    VIRT    RES    SHR S  %CPU  %MEM     TIME+ COMMAND
   3511 root      20   0   10612   4256   3388 R  11.1   0.1   0:00.06 top
   1446 gdm       20   0 3929112 186220 112244 S   5.6   4.7   0:30.39 gnome-shell
      1 root      20   0  168400  13856   8416 S   0.0   0.3   0:22.28 systemd
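
To sort the batch output by memory instead of CPU (again assuming the default %MEM field name):

# One batch iteration of top, sorted by resident memory
top -b -o %MEM -n 1 | head -15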

root@vmanalyst:~# free -m
               total        used        free      shared  buff/cache   available
Mem:            3875         746        2128          16        1001        2857
Swap:           3890           0        3890
root@vmanalyst:~#
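
free can also report human-readable units and repeat at an interval, for example:

# Human-readable output, refreshed every 5 seconds, 3 times
free -h -s 5 -c 3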

root@vmanalyst:~# cat /proc/meminfo
MemTotal:        3968920 kB
MemFree:         2179644 kB
MemAvailable:    2926440 kB
Buffers:           48184 kB
Cached:           879668 kB
SwapCached:            0 kB
Active:           196820 kB
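
/proc/meminfo is long; when only a few fields matter, grep keeps the output focused (the field list below is just an example selection):

# Show only the headline memory and swap figures
grep -E 'MemTotal|MemFree|MemAvailable|SwapTotal|SwapFree' /proc/meminfo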

root@vmanalyst:~# vmstat -s -S M
         3875 M total memory
          734 M used memory
          191 M active memory
         1073 M inactive memory
         2139 M free memory
           46 M buffer memory
          954 M swap cache
         3890 M total swap
            0 M used swap
         3890 M free swap
        12568 non-nice user cpu ticks
         1119 nice user cpu ticks
        13959 system cpu ticks
      6031143 idle cpu ticks
         1277 IO-wait cpu ticks
            0 IRQ cpu ticks
          733 softirq cpu ticks
            0 stolen cpu ticks
       800480 pages paged in
       347824 pages paged out
            0 pages swapped in
            0 pages swapped out
      1029663 interrupts
      1243021 CPU context switches
   1729160804 boot time
         3565 forks
root@vmanalyst:~#
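
Beyond these cumulative counters, vmstat can report live samples, which makes swapping or I/O pressure easier to spot:

# Memory, swap, and CPU activity in MB, every 2 seconds, 5 reports
vmstat -S M 2 5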

Using the top command

The -i option hides all idle processes, making the output easier to read.

root@vmanalyst:~# top -i
top - 12:26:08 up  1:59,  2 users,  load average: 0.01, 0.02, 0.00
Tasks: 336 total,   1 running, 335 sleeping,   0 stopped,   0 zombie
%Cpu(s):  0.2 us,  0.1 sy,  0.0 ni, 99.8 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
MiB Mem :   3875.9 total,   2556.9 free,    752.9 used,    566.1 buff/cache
MiB Swap:   3891.0 total,   3891.0 free,      0.0 used.   2868.2 avail Mem

    PID USER      PR  NI    VIRT    RES    SHR S  %CPU  %MEM     TIME+ COMMAND
   1981 root      20   0   10740   4364   3408 R   1.0   0.1   0:51.63 top
    937 root      20   0  314804   9360   7700 S   0.3   0.2   0:29.07 vmtoolsd
   1446 gdm       20   0 3929112 186220 112244 S   0.3   4.7   0:27.38 gnome-shell
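
top can also be narrowed to a single user's processes; foouser below is a placeholder:

# Show only non-idle processes owned by foouser
top -i -u foouser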


By C A Thomas

Chinchu A. Thomas is an Infrastructure Analyst specializing in Microsoft Azure, the Microsoft 365 suite, AWS, and Windows infrastructure management products.
