perf: replace gopsutil sampling with cached metrics and optimize system info retrieval (#11079)
* refactor: change psutil method
Parent: 8086d5cecb
Commit: 0a42d4942c
7 changed files with 486 additions and 54 deletions
@@ -9,6 +9,7 @@ import (
 	"github.com/1Panel-dev/1Panel/agent/global"
 	alertUtil "github.com/1Panel-dev/1Panel/agent/utils/alert"
 	"github.com/1Panel-dev/1Panel/agent/utils/common"
+	"github.com/1Panel-dev/1Panel/agent/utils/psutil"
 	versionUtil "github.com/1Panel-dev/1Panel/agent/utils/version"
 	"github.com/1Panel-dev/1Panel/agent/utils/xpack"
 	"github.com/shirou/gopsutil/v4/cpu"
@@ -352,7 +353,7 @@ func loadLoadInfo(alert dto.AlertDTO) {
 		return
 	}
 	var loadValue float64
-	CPUTotal, _ := cpu.Counts(true)
+	CPUTotal, _ := psutil.CPUInfo.GetLogicalCores(false)
 	switch alert.Cycle {
 	case 1:
 		loadValue = avgStat.Load1 / (float64(CPUTotal*2) * 0.75) * 100
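Note: the formula above treats 2x the logical core count, derated by 0.75, as 100% load. A minimal standalone sketch of the arithmetic (the 4-core figure is illustrative):

package main

import "fmt"

func main() {
	// With 4 logical cores, full scale is 4*2*0.75 = 6.0, so a
	// 1-minute load average of 3.0 reports as 50%.
	cpuTotal := 4
	load1 := 3.0
	loadValue := load1 / (float64(cpuTotal*2) * 0.75) * 100
	fmt.Printf("%.1f%%\n", loadValue) // 50.0%
}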
@@ -839,7 +840,7 @@ func processSingleDisk(alert dto.AlertDTO) error {
 }
 
 func checkAndCreateDiskAlert(alert dto.AlertDTO, path string) (bool, error) {
-	usageStat, err := disk.Usage(path)
+	usageStat, err := psutil.DISK.GetUsage(path, false)
 	if err != nil {
 		global.LOG.Errorf("error getting disk usage for %s, err: %v", path, err)
 		return false, err
@@ -1,6 +1,7 @@
 package service
 
 import (
+	"cmp"
 	"context"
 	"encoding/json"
 	"fmt"
@@ -23,10 +24,9 @@ import (
 	"github.com/1Panel-dev/1Panel/agent/utils/common"
 	"github.com/1Panel-dev/1Panel/agent/utils/controller"
 	"github.com/1Panel-dev/1Panel/agent/utils/copier"
+	"github.com/1Panel-dev/1Panel/agent/utils/psutil"
 	"github.com/gin-gonic/gin"
-	"github.com/shirou/gopsutil/v4/cpu"
 	"github.com/shirou/gopsutil/v4/disk"
-	"github.com/shirou/gopsutil/v4/host"
 	"github.com/shirou/gopsutil/v4/load"
 	"github.com/shirou/gopsutil/v4/mem"
 	"github.com/shirou/gopsutil/v4/net"
@@ -77,7 +77,7 @@ func (u *DashboardService) Restart(operation string) error {
 
 func (u *DashboardService) LoadOsInfo() (*dto.OsInfo, error) {
 	var baseInfo dto.OsInfo
-	hostInfo, err := host.Info()
+	hostInfo, err := psutil.HOST.GetHostInfo(false)
 	if err != nil {
 		return nil, err
 	}
@@ -87,7 +87,7 @@ func (u *DashboardService) LoadOsInfo() (*dto.OsInfo, error) {
 	baseInfo.KernelArch = hostInfo.KernelArch
 	baseInfo.KernelVersion = hostInfo.KernelVersion
 
-	diskInfo, err := disk.Usage(global.Dir.BaseDir)
+	diskInfo, err := psutil.DISK.GetUsage(global.Dir.BaseDir, false)
 	if err == nil {
 		baseInfo.DiskSize = int64(diskInfo.Free)
 	}
@@ -104,12 +104,16 @@ func (u *DashboardService) LoadOsInfo() (*dto.OsInfo, error) {
 func (u *DashboardService) LoadCurrentInfoForNode() *dto.NodeCurrent {
 	var currentInfo dto.NodeCurrent
 
-	currentInfo.CPUTotal, _ = cpu.Counts(true)
-	totalPercent, _ := cpu.Percent(100*time.Millisecond, false)
-	if len(totalPercent) == 1 {
-		currentInfo.CPUUsedPercent = totalPercent[0]
-		currentInfo.CPUUsed = currentInfo.CPUUsedPercent * 0.01 * float64(currentInfo.CPUTotal)
+	currentInfo.CPUTotal, _ = psutil.CPUInfo.GetLogicalCores(false)
+	cpuUsedPercent, perCore := psutil.CPU.GetCPUUsage()
+	if len(perCore) == 0 {
+		currentInfo.CPUTotal = psutil.CPU.NumCPU()
+	} else {
+		currentInfo.CPUTotal = len(perCore)
 	}
+	currentInfo.CPUUsedPercent = cpuUsedPercent
+	currentInfo.CPUUsed = cpuUsedPercent * 0.01 * float64(currentInfo.CPUTotal)
 
 	loadInfo, _ := load.Avg()
 	currentInfo.Load1 = loadInfo.Load1
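Note: the old cpu.Percent(100*time.Millisecond, ...) call blocked each request for the sampling window; GetCPUUsage returns cached figures and only sleeps when its cache is cold. The per-core slice doubles as the logical-core count, with NumCPU() covering the window before the first per-core sample exists. A sketch of the call pattern (the helper name is hypothetical; psutil is the package added by this commit):

package main

import (
	"fmt"

	"github.com/1Panel-dev/1Panel/agent/utils/psutil"
)

// cpuSnapshot is a hypothetical helper mirroring the pattern above.
func cpuSnapshot() (total int, usedPercent float64) {
	usedPercent, perCore := psutil.CPU.GetCPUUsage()
	if len(perCore) == 0 {
		total = psutil.CPU.NumCPU() // cache still cold: last known count
	} else {
		total = len(perCore)
	}
	return total, usedPercent
}

func main() {
	total, used := cpuSnapshot()
	fmt.Printf("%d cores, %.1f%% used\n", total, used)
}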
@@ -134,38 +138,37 @@ func (u *DashboardService) LoadCurrentInfoForNode() *dto.NodeCurrent {
 
 func (u *DashboardService) LoadBaseInfo(ioOption string, netOption string) (*dto.DashboardBase, error) {
 	var baseInfo dto.DashboardBase
-	hostInfo, err := host.Info()
+	hostInfo, err := psutil.HOST.GetHostInfo(false)
 	if err != nil {
 		return nil, err
 	}
-	baseInfo.Hostname = hostInfo.Hostname
-	baseInfo.OS = hostInfo.OS
-	baseInfo.Platform = hostInfo.Platform
-	baseInfo.PlatformFamily = hostInfo.PlatformFamily
-	baseInfo.PlatformVersion = hostInfo.PlatformVersion
-	baseInfo.KernelArch = hostInfo.KernelArch
-	baseInfo.KernelVersion = hostInfo.KernelVersion
 	ss, _ := json.Marshal(hostInfo)
-	baseInfo.VirtualizationSystem = string(ss)
-	baseInfo.IpV4Addr = loadOutboundIP()
-	httpProxy := os.Getenv("http_proxy")
-	if httpProxy == "" {
-		httpProxy = os.Getenv("HTTP_PROXY")
+	baseInfo = dto.DashboardBase{
+		Hostname:             hostInfo.Hostname,
+		OS:                   hostInfo.OS,
+		Platform:             hostInfo.Platform,
+		PlatformFamily:       hostInfo.PlatformFamily,
+		PlatformVersion:      hostInfo.PlatformVersion,
+		KernelArch:           hostInfo.KernelArch,
+		KernelVersion:        hostInfo.KernelVersion,
+		VirtualizationSystem: string(ss),
+		IpV4Addr:             loadOutboundIP(),
+		SystemProxy:          "noProxy",
 	}
-	if httpProxy != "" {
-		baseInfo.SystemProxy = httpProxy
+	if proxy := cmp.Or(os.Getenv("http_proxy"), os.Getenv("HTTP_PROXY")); proxy != "" {
+		baseInfo.SystemProxy = proxy
 	}
-	baseInfo.SystemProxy = "noProxy"
 
 	loadQuickJump(&baseInfo)
 
-	cpuInfo, err := cpu.Info()
-	if err == nil {
+	cpuInfo, err := psutil.CPUInfo.GetCPUInfo(false)
+	if err == nil && len(cpuInfo) > 0 {
 		baseInfo.CPUModelName = cpuInfo[0].ModelName
 	}
 
-	baseInfo.CPUCores, _ = cpu.Counts(false)
-	baseInfo.CPULogicalCores, _ = cpu.Counts(true)
+	baseInfo.CPUCores, _ = psutil.CPUInfo.GetPhysicalCores(false)
+	baseInfo.CPULogicalCores, _ = psutil.CPUInfo.GetLogicalCores(false)
 
 	baseInfo.CurrentInfo = *u.LoadCurrentInfo(ioOption, netOption)
 	return &baseInfo, nil
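Note: cmp.Or (standard library since Go 1.22, hence the new "cmp" import in the first hunk of this file) returns the first non-zero value among its arguments, collapsing the old four-line env-var fallback into one expression. A minimal standalone sketch:

package main

import (
	"cmp"
	"fmt"
	"os"
)

func main() {
	// The first non-empty string wins; "" falls through to the next argument.
	proxy := cmp.Or(os.Getenv("http_proxy"), os.Getenv("HTTP_PROXY"))
	fmt.Println("proxy:", proxy)
}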
@@ -173,18 +176,21 @@ func (u *DashboardService) LoadBaseInfo(ioOption string, netOption string) (*dto.DashboardBase, error) {
 
 func (u *DashboardService) LoadCurrentInfo(ioOption string, netOption string) *dto.DashboardCurrent {
 	var currentInfo dto.DashboardCurrent
-	hostInfo, _ := host.Info()
+	hostInfo, _ := psutil.HOST.GetHostInfo(false)
 	currentInfo.Uptime = hostInfo.Uptime
 	currentInfo.TimeSinceUptime = time.Now().Add(-time.Duration(hostInfo.Uptime) * time.Second).Format(constant.DateTimeLayout)
 	currentInfo.Procs = hostInfo.Procs
+	currentInfo.CPUTotal, _ = psutil.CPUInfo.GetLogicalCores(false)
 
-	currentInfo.CPUTotal, _ = cpu.Counts(true)
-	totalPercent, _ := cpu.Percent(100*time.Millisecond, false)
-	if len(totalPercent) == 1 {
-		currentInfo.CPUUsedPercent = totalPercent[0]
-		currentInfo.CPUUsed = currentInfo.CPUUsedPercent * 0.01 * float64(currentInfo.CPUTotal)
+	cpuUsedPercent, perCore := psutil.CPU.GetCPUUsage()
+	if len(perCore) == 0 {
+		currentInfo.CPUTotal = psutil.CPU.NumCPU()
+	} else {
+		currentInfo.CPUTotal = len(perCore)
 	}
-	currentInfo.CPUPercent, _ = cpu.Percent(100*time.Millisecond, true)
+	currentInfo.CPUPercent = perCore
+	currentInfo.CPUUsedPercent = cpuUsedPercent
+	currentInfo.CPUUsed = cpuUsedPercent * 0.01 * float64(currentInfo.CPUTotal)
 
 	loadInfo, _ := load.Avg()
 	currentInfo.Load1 = loadInfo.Load1
@@ -246,6 +252,7 @@ func (u *DashboardService) LoadCurrentInfo(ioOption string, netOption string) *dto.DashboardCurrent {
 		if state.Name == netOption {
 			currentInfo.NetBytesSent = state.BytesSent
 			currentInfo.NetBytesRecv = state.BytesRecv
+			break
 		}
 	}
 }
@@ -455,41 +462,52 @@ func loadDiskInfo() []dto.DiskInfo {
 	)
 	wg.Add(len(mounts))
 	for i := 0; i < len(mounts); i++ {
-		go func(timeoutCh <-chan time.Time, mount diskInfo) {
+		go func(mount diskInfo) {
 			defer wg.Done()
 
 			var itemData dto.DiskInfo
 			itemData.Path = mount.Mount
 			itemData.Type = mount.Type
 			itemData.Device = mount.Device
 
+			type diskResult struct {
+				state *disk.UsageStat
+				err   error
+			}
+			resultCh := make(chan diskResult, 1)
+
+			go func() {
+				state, err := psutil.DISK.GetUsage(mount.Mount, false)
+				resultCh <- diskResult{state: state, err: err}
+			}()
+
 			select {
-			case <-timeoutCh:
+			case <-time.After(5 * time.Second):
 				mu.Lock()
 				datas = append(datas, itemData)
 				mu.Unlock()
 				global.LOG.Errorf("load disk info from %s failed, err: timeout", mount.Mount)
-			default:
-				state, err := disk.Usage(mount.Mount)
-				if err != nil {
+			case result := <-resultCh:
+				if result.err != nil {
 					mu.Lock()
 					datas = append(datas, itemData)
 					mu.Unlock()
-					global.LOG.Errorf("load disk info from %s failed, err: %v", mount.Mount, err)
+					global.LOG.Errorf("load disk info from %s failed, err: %v", mount.Mount, result.err)
 					return
 				}
-				itemData.Total = state.Total
-				itemData.Free = state.Free
-				itemData.Used = state.Used
-				itemData.UsedPercent = state.UsedPercent
-				itemData.InodesTotal = state.InodesTotal
-				itemData.InodesUsed = state.InodesUsed
-				itemData.InodesFree = state.InodesFree
-				itemData.InodesUsedPercent = state.InodesUsedPercent
+				itemData.Total = result.state.Total
+				itemData.Free = result.state.Free
+				itemData.Used = result.state.Used
+				itemData.UsedPercent = result.state.UsedPercent
+				itemData.InodesTotal = result.state.InodesTotal
+				itemData.InodesUsed = result.state.InodesUsed
+				itemData.InodesFree = result.state.InodesFree
+				itemData.InodesUsedPercent = result.state.InodesUsedPercent
 				mu.Lock()
 				datas = append(datas, itemData)
 				mu.Unlock()
 			}
-		}(time.After(5*time.Second), mounts[i])
+		}(mounts[i])
 	}
 	wg.Wait()
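Note: the old select used a default: branch, which runs immediately whenever the timeout channel is not yet ready, so the 5-second timeout effectively never fired and a hung mount (for example a stale NFS export) still blocked the worker inside disk.Usage. The rewrite moves the blocking call into its own goroutine and races it against time.After. A self-contained sketch of the pattern, assuming nothing beyond gopsutil:

package main

import (
	"fmt"
	"time"

	"github.com/shirou/gopsutil/v4/disk"
)

// usageWithTimeout runs the blocking disk.Usage call in a goroutine and
// gives up after d. The channel is buffered so the goroutine can still
// deliver its result and exit after a timeout instead of leaking on send.
func usageWithTimeout(path string, d time.Duration) (*disk.UsageStat, error) {
	type result struct {
		state *disk.UsageStat
		err   error
	}
	ch := make(chan result, 1)
	go func() {
		state, err := disk.Usage(path)
		ch <- result{state, err}
	}()
	select {
	case r := <-ch:
		return r.state, r.err
	case <-time.After(d):
		return nil, fmt.Errorf("disk usage for %s timed out after %s", path, d)
	}
}

func main() {
	if stat, err := usageWithTimeout("/", 5*time.Second); err == nil {
		fmt.Println(stat.UsedPercent)
	}
}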
@@ -19,6 +19,7 @@ import (
 	"github.com/1Panel-dev/1Panel/agent/utils/ai_tools/gpu"
 	"github.com/1Panel-dev/1Panel/agent/utils/ai_tools/xpu"
 	"github.com/1Panel-dev/1Panel/agent/utils/common"
+	"github.com/1Panel-dev/1Panel/agent/utils/psutil"
 	"github.com/robfig/cron/v3"
 	"github.com/shirou/gopsutil/v4/cpu"
 	"github.com/shirou/gopsutil/v4/disk"
@@ -258,7 +259,7 @@ func (m *MonitorService) Run() {
 			itemModel.TopCPU = string(topItemCPU)
 		}
 	}
-	cpuCount, _ := cpu.Counts(false)
+	cpuCount, _ := psutil.CPUInfo.GetPhysicalCores(false)
 	loadInfo, _ := load.Avg()
 	itemModel.CpuLoad1 = loadInfo.Load1
 	itemModel.CpuLoad5 = loadInfo.Load5
agent/utils/psutil/cpu.go (new file, 276 lines)
@@ -0,0 +1,276 @@
+package psutil
+
+import (
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/shirou/gopsutil/v4/cpu"
+)
+
+const (
+	resetInterval = 1 * time.Minute
+	fastInterval  = 3 * time.Second
+)
+
+type CPUStat struct {
+	Idle  uint64
+	Total uint64
+}
+
+type CPUUsageState struct {
+	mu             sync.Mutex
+	lastTotalStat  *CPUStat
+	lastPerCPUStat []CPUStat
+	lastSampleTime time.Time
+
+	cachedTotalUsage float64
+	cachedPerCore    []float64
+}
+
+func readCPUStat() (CPUStat, error) {
+	data, err := os.ReadFile("/proc/stat")
+	if err != nil {
+		return CPUStat{}, err
+	}
+
+	fields := strings.Fields(strings.Split(string(data), "\n")[0])[1:]
+	nums := make([]uint64, len(fields))
+
+	for i, f := range fields {
+		v, _ := strconv.ParseUint(f, 10, 64)
+		nums[i] = v
+	}
+
+	idle := nums[3] + nums[4]
+	var total uint64
+	for _, v := range nums {
+		total += v
+	}
+
+	return CPUStat{Idle: idle, Total: total}, nil
+}
+
+func (c *CPUUsageState) readPerCPUStat() ([]CPUStat, error) {
+	data, err := os.ReadFile("/proc/stat")
+	if err != nil {
+		return nil, err
+	}
+
+	lines := strings.Split(string(data), "\n")
+	stats := c.lastPerCPUStat[:0]
+
+	for _, l := range lines[1:] {
+		if !strings.HasPrefix(l, "cpu") {
+			continue
+		}
+		if len(l) < 4 || l[3] < '0' || l[3] > '9' {
+			continue
+		}
+
+		fields := strings.Fields(l)[1:]
+		nums := make([]uint64, len(fields))
+		for i, f := range fields {
+			v, _ := strconv.ParseUint(f, 10, 64)
+			nums[i] = v
+		}
+
+		idle := nums[3] + nums[4]
+		var total uint64
+		for _, v := range nums {
+			total += v
+		}
+
+		stats = append(stats, CPUStat{Idle: idle, Total: total})
+	}
+
+	return stats, nil
+}
+
+func readPerCPUStatCopy() []CPUStat {
+	data, err := os.ReadFile("/proc/stat")
+	if err != nil {
+		return nil
+	}
+
+	lines := strings.Split(string(data), "\n")
+	var stats []CPUStat
+
+	for _, l := range lines[1:] {
+		if !strings.HasPrefix(l, "cpu") {
+			continue
+		}
+		if len(l) < 4 || l[3] < '0' || l[3] > '9' {
+			continue
+		}
+
+		fields := strings.Fields(l)[1:]
+		nums := make([]uint64, len(fields))
+		for i, f := range fields {
+			v, _ := strconv.ParseUint(f, 10, 64)
+			nums[i] = v
+		}
+
+		idle := nums[3] + nums[4]
+		var total uint64
+		for _, v := range nums {
+			total += v
+		}
+
+		stats = append(stats, CPUStat{Idle: idle, Total: total})
+	}
+
+	return stats
+}
+
+func calcCPUPercent(prev, cur CPUStat) float64 {
+	deltaIdle := float64(cur.Idle - prev.Idle)
+	deltaTotal := float64(cur.Total - prev.Total)
+	if deltaTotal <= 0 {
+		return 0
+	}
+	return (1 - deltaIdle/deltaTotal) * 100
+}
+
+func (c *CPUUsageState) GetCPUUsage() (float64, []float64) {
+	c.mu.Lock()
+
+	now := time.Now()
+
+	if !c.lastSampleTime.IsZero() && now.Sub(c.lastSampleTime) < fastInterval {
+		result := c.cachedTotalUsage
+		perCore := c.cachedPerCore
+		c.mu.Unlock()
+		return result, perCore
+	}
+
+	needReset := c.lastSampleTime.IsZero() || now.Sub(c.lastSampleTime) >= resetInterval
+	c.mu.Unlock()
+
+	if needReset {
+		firstTotal, _ := readCPUStat()
+		firstPer := readPerCPUStatCopy()
+		time.Sleep(100 * time.Millisecond)
+		secondTotal, _ := readCPUStat()
+		secondPer := readPerCPUStatCopy()
+
+		totalUsage := calcCPUPercent(firstTotal, secondTotal)
+
+		perCore := make([]float64, len(secondPer))
+		for i := range secondPer {
+			perCore[i] = calcCPUPercent(firstPer[i], secondPer[i])
+		}
+
+		c.mu.Lock()
+		c.cachedTotalUsage = totalUsage
+		c.cachedPerCore = perCore
+		c.lastTotalStat = &secondTotal
+		c.lastPerCPUStat = secondPer
+		c.lastSampleTime = time.Now()
+		c.mu.Unlock()
+
+		return totalUsage, perCore
+	}
+
+	curTotal, _ := readCPUStat()
+	curPer := readPerCPUStatCopy()
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	totalUsage := calcCPUPercent(*c.lastTotalStat, curTotal)
+
+	if len(c.cachedPerCore) != len(curPer) {
+		c.cachedPerCore = make([]float64, len(curPer))
+	}
+	for i := range curPer {
+		c.cachedPerCore[i] = calcCPUPercent(c.lastPerCPUStat[i], curPer[i])
+	}
+
+	c.cachedTotalUsage = totalUsage
+	c.lastTotalStat = &curTotal
+	c.lastPerCPUStat = curPer
+	c.lastSampleTime = time.Now()
+
+	return totalUsage, c.cachedPerCore
+}
+
+func (c *CPUUsageState) NumCPU() int {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	return len(c.cachedPerCore)
+}
+
+type CPUInfoState struct {
+	mu               sync.RWMutex
+	initialized      bool
+	cachedInfo       []cpu.InfoStat
+	cachedPhysCores  int
+	cachedLogicCores int
+}
+
+func (c *CPUInfoState) GetCPUInfo(forceRefresh bool) ([]cpu.InfoStat, error) {
+	c.mu.RLock()
+	if c.initialized && c.cachedInfo != nil && !forceRefresh {
+		defer c.mu.RUnlock()
+		return c.cachedInfo, nil
+	}
+	c.mu.RUnlock()
+
+	info, err := cpu.Info()
+	if err != nil {
+		return nil, err
+	}
+
+	c.mu.Lock()
+	c.cachedInfo = info
+	c.initialized = true
+	c.mu.Unlock()
+
+	return info, nil
+}
+
+func (c *CPUInfoState) GetPhysicalCores(forceRefresh bool) (int, error) {
+	c.mu.RLock()
+	if c.initialized && c.cachedPhysCores > 0 && !forceRefresh {
+		defer c.mu.RUnlock()
+		return c.cachedPhysCores, nil
+	}
+	c.mu.RUnlock()
+
+	cores, err := cpu.Counts(false)
+	if err != nil {
+		return 0, err
+	}
+
+	c.mu.Lock()
+	c.cachedPhysCores = cores
+	c.initialized = true
+	c.mu.Unlock()
+
+	return cores, nil
+}
+
+func (c *CPUInfoState) GetLogicalCores(forceRefresh bool) (int, error) {
+	c.mu.RLock()
+	if c.initialized && c.cachedLogicCores > 0 && !forceRefresh {
+		defer c.mu.RUnlock()
+		return c.cachedLogicCores, nil
+	}
+	c.mu.RUnlock()
+
+	cores, err := cpu.Counts(true)
+	if err != nil {
+		return 0, err
+	}
+
+	c.mu.Lock()
+	c.cachedLogicCores = cores
+	c.initialized = true
+	c.mu.Unlock()
+
+	return cores, nil
+}
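Note: GetCPUUsage has three paths: calls within fastInterval (3s) return cached values; a cold or stale cache (older than resetInterval, 1 minute) takes a fresh two-point sample 100ms apart; anything in between computes a delta against the counters saved on the previous call. All three reduce to calcCPUPercent, usage = (1 - Δidle/Δtotal) * 100. A worked example with illustrative jiffy counts:

package main

import "fmt"

func main() {
	// Two /proc/stat snapshots (illustrative numbers):
	// idle goes 9000 -> 9600, total goes 10000 -> 11000.
	prevIdle, prevTotal := uint64(9000), uint64(10000)
	curIdle, curTotal := uint64(9600), uint64(11000)
	// 600 of the 1000 elapsed jiffies were idle, so the CPU was 40% busy.
	usage := (1 - float64(curIdle-prevIdle)/float64(curTotal-prevTotal)) * 100
	fmt.Printf("%.0f%%\n", usage) // 40%
}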
agent/utils/psutil/disk.go (new file, 92 lines)
@@ -0,0 +1,92 @@
+package psutil
+
+import (
+	"sync"
+	"time"
+
+	"github.com/shirou/gopsutil/v4/disk"
+)
+
+const (
+	diskUsageCacheInterval     = 30 * time.Second
+	diskPartitionCacheInterval = 10 * time.Minute
+)
+
+type DiskUsageEntry struct {
+	lastSampleTime time.Time
+	cachedUsage    *disk.UsageStat
+}
+
+type DiskState struct {
+	usageMu    sync.RWMutex
+	usageCache map[string]*DiskUsageEntry
+
+	partitionMu       sync.RWMutex
+	lastPartitionTime time.Time
+	cachedPartitions  []disk.PartitionStat
+}
+
+func (d *DiskState) GetUsage(path string, forceRefresh bool) (*disk.UsageStat, error) {
+	d.usageMu.RLock()
+	if entry, ok := d.usageCache[path]; ok {
+		if time.Since(entry.lastSampleTime) < diskUsageCacheInterval && !forceRefresh {
+			defer d.usageMu.RUnlock()
+			return entry.cachedUsage, nil
+		}
+	}
+	d.usageMu.RUnlock()
+
+	usage, err := disk.Usage(path)
+	if err != nil {
+		return nil, err
+	}
+
+	d.usageMu.Lock()
+	if d.usageCache == nil {
+		d.usageCache = make(map[string]*DiskUsageEntry)
+	}
+	d.usageCache[path] = &DiskUsageEntry{
+		lastSampleTime: time.Now(),
+		cachedUsage:    usage,
+	}
+	d.usageMu.Unlock()
+
+	return usage, nil
+}
+
+func (d *DiskState) GetPartitions(all bool, forceRefresh bool) ([]disk.PartitionStat, error) {
+	d.partitionMu.RLock()
+	if d.cachedPartitions != nil && time.Since(d.lastPartitionTime) < diskPartitionCacheInterval && !forceRefresh {
+		defer d.partitionMu.RUnlock()
+		return d.cachedPartitions, nil
+	}
+	d.partitionMu.RUnlock()
+
+	partitions, err := disk.Partitions(all)
+	if err != nil {
+		return nil, err
+	}
+
+	d.partitionMu.Lock()
+	d.cachedPartitions = partitions
+	d.lastPartitionTime = time.Now()
+	d.partitionMu.Unlock()
+
+	return partitions, nil
+}
+
+func (d *DiskState) ClearUsageCache(path string) {
+	d.usageMu.Lock()
+	delete(d.usageCache, path)
+	d.usageMu.Unlock()
+}
+
+func (d *DiskState) ClearAllCache() {
+	d.usageMu.Lock()
+	d.usageCache = make(map[string]*DiskUsageEntry)
+	d.usageMu.Unlock()
+
+	d.partitionMu.Lock()
+	d.cachedPartitions = nil
+	d.partitionMu.Unlock()
+}
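Note: DiskState keeps a per-path usage cache (30s) and a shared partition cache (10 minutes); forceRefresh bypasses either one, and ClearUsageCache/ClearAllCache let write paths invalidate eagerly. A hedged usage sketch (the "/" path is illustrative):

package main

import (
	"fmt"

	"github.com/1Panel-dev/1Panel/agent/utils/psutil"
)

func main() {
	// First call samples the OS; repeats within 30s are served from cache.
	usage, err := psutil.DISK.GetUsage("/", false)
	if err == nil {
		fmt.Println(usage.UsedPercent)
	}

	// After deleting or writing large files, either force a re-sample...
	fresh, _ := psutil.DISK.GetUsage("/", true)
	_ = fresh
	// ...or drop the cached entry so the next reader re-samples.
	psutil.DISK.ClearUsageCache("/")
}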
agent/utils/psutil/global.go (new file, 6 lines)
@@ -0,0 +1,6 @@
+package psutil
+
+var CPU = &CPUUsageState{}
+var CPUInfo = &CPUInfoState{}
+var HOST = &HostInfoState{}
+var DISK = &DiskState{}
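Note: the package exposes process-wide singletons so every caller shares one cache. Their zero values are ready to use: the mutexes need no initialization and DiskState allocates its map lazily on first write. A minimal sketch:

package main

import (
	"fmt"

	"github.com/1Panel-dev/1Panel/agent/utils/psutil"
)

func main() {
	cores, _ := psutil.CPUInfo.GetLogicalCores(false) // cached after first call
	info, _ := psutil.HOST.GetHostInfo(false)         // shared host-info cache
	fmt.Println(cores, info.Platform)
}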
agent/utils/psutil/host.go (new file, 38 lines)
@@ -0,0 +1,38 @@
+package psutil
+
+import (
+	"sync"
+	"time"
+
+	"github.com/shirou/gopsutil/v4/host"
+)
+
+const hostRefreshInterval = 4 * time.Hour
+
+type HostInfoState struct {
+	mu             sync.RWMutex
+	lastSampleTime time.Time
+
+	cachedInfo *host.InfoStat
+}
+
+func (h *HostInfoState) GetHostInfo(forceRefresh bool) (*host.InfoStat, error) {
+	h.mu.RLock()
+	if h.cachedInfo != nil && time.Since(h.lastSampleTime) < hostRefreshInterval && !forceRefresh {
+		defer h.mu.RUnlock()
+		return h.cachedInfo, nil
+	}
+	h.mu.RUnlock()
+
+	hostInfo, err := host.Info()
+	if err != nil {
+		return nil, err
+	}
+
+	h.mu.Lock()
+	h.cachedInfo = hostInfo
+	h.lastSampleTime = time.Now()
+	h.mu.Unlock()
+
+	return hostInfo, nil
+}
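Note: host metadata (platform, kernel, architecture) is effectively static, so a 4-hour cache is safe for those fields; note that InfoStat also carries Uptime, which can lag by up to hostRefreshInterval between samples. Pass forceRefresh to bypass the cache. A minimal sketch:

package main

import (
	"fmt"

	"github.com/1Panel-dev/1Panel/agent/utils/psutil"
)

func main() {
	info, err := psutil.HOST.GetHostInfo(false) // served from cache within 4h
	if err == nil {
		fmt.Println(info.KernelVersion)
	}
	fresh, _ := psutil.HOST.GetHostInfo(true) // forces a fresh host.Info call
	_ = fresh
}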