From 091e6e04ef8ffe3343014a023ce0d44deafb1f29 Mon Sep 17 00:00:00 2001 From: Tomilla Date: Tue, 9 Jul 2024 14:27:11 +0800 Subject: [PATCH 1/9] fix(context): call the cancel() of root context after press `ctrl+c`, even if user not set deadline --- cmd/root.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index 2fe1dfb..7114b4a 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -70,8 +70,7 @@ func getRootContext(dlEat time.Duration) (context.Context, context.CancelFunc) { deadline := time.Now().Add(dlEat) rootCtx, cancel = context.WithDeadline(context.Background(), deadline) } else { - rootCtx = context.Background() - cancel = func() {} + rootCtx, cancel = context.WithCancel(context.Background()) } return rootCtx, cancel } @@ -103,7 +102,7 @@ func eatFunction(cmd *cobra.Command, _ []string) { eatMemory(rootCtx, &wg, mEat, mAteRenew) eatCPU(rootCtx, &wg, cEat) // in case that all sub goroutines are dead due to runtime error like memory not enough. - // so the main gooutine automaticlly quit as well, don't wait user ctrl+c or context deadline. + // so the main goroutine automatically quit as well, don't wait user ctrl+c or context deadline. go func(wgp *sync.WaitGroup) { wgp.Wait() cancel() From 52400f054d1d67a89877af770e5aa0b991e3a642 Mon Sep 17 00:00:00 2001 From: Tomilla Date: Tue, 9 Jul 2024 14:56:04 +0800 Subject: [PATCH 2/9] feat(cpu_affinity): implement and text cpu affinity for linux, other system still WIP. all systems share same interface. --- cmd/cpu_affinity/cpu_affinity_linux.go | 160 ++++++++++++++++++++ cmd/cpu_affinity/cpu_affinity_linux_test.go | 88 +++++++++++ cmd/cpu_affinity/cpu_affinity_other.go | 31 ++++ cmd/cpu_affinity/os_shared.go | 9 ++ 4 files changed, 288 insertions(+) create mode 100644 cmd/cpu_affinity/cpu_affinity_linux.go create mode 100644 cmd/cpu_affinity/cpu_affinity_linux_test.go create mode 100644 cmd/cpu_affinity/cpu_affinity_other.go create mode 100644 cmd/cpu_affinity/os_shared.go diff --git a/cmd/cpu_affinity/cpu_affinity_linux.go b/cmd/cpu_affinity/cpu_affinity_linux.go new file mode 100644 index 0000000..9280f2d --- /dev/null +++ b/cmd/cpu_affinity/cpu_affinity_linux.go @@ -0,0 +1,160 @@ +//go:build linux +// +build linux + +package cpu_affinity + +import ( + "math/bits" + "runtime" + "syscall" + "unsafe" +) + +const ( + cpuSetSize = 0x400 + nCpuBits = 0x40 + cpuSetLen = cpuSetSize / nCpuBits +) + +type cpuMaskT uint64 + +// cpuSet use array to represents a CPU affinity mask. +type cpuSet [cpuSetLen]cpuMaskT + +const ( + enoAGAIN = syscall.Errno(0xb) + enoINVAL = syscall.Errno(0x16) + enoNOENT = syscall.Errno(0x2) +) + +// Do the interface allocations only once for common +// Errno values. +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case enoAGAIN: + return errEAGAIN + case enoINVAL: + return errEINVAL + case enoNOENT: + return errENOENT + } + return e +} + +func schedAffinity(trap uintptr, pid uint, set *cpuSet) error { + _, _, e := syscall.RawSyscall(trap, uintptr(pid), unsafe.Sizeof(*set), uintptr(unsafe.Pointer(set))) + if e != 0 { + return errnoErr(e) + } + return nil +} + +// schedGetAffinity gets the CPU affinity mask of the thread specified by pid. +// If pid is 0 the calling thread is used. 
+func schedGetAffinity(pid uint, set *cpuSet) error { + return schedAffinity(syscall.SYS_SCHED_GETAFFINITY, pid, set) +} + +// schedSetAffinity sets the CPU affinity mask of the thread specified by pid. +// If pid is 0 the calling thread is used. +func schedSetAffinity(pid uint, set *cpuSet) error { + return schedAffinity(syscall.SYS_SCHED_SETAFFINITY, pid, set) +} + +// Zero clears the set s, so that it contains no CPUs. +func (s *cpuSet) Zero() { + for i := range s { + s[i] = 0 + } +} + +func cpuBitsIndex(cpu uint) uint { + return cpu / nCpuBits +} + +func cpuBitsMask(cpu uint) cpuMaskT { + return cpuMaskT(1 << (uint(cpu) % nCpuBits)) +} + +// Set adds cpu to the set s. +func (s *cpuSet) Set(cpu uint) { + i := cpuBitsIndex(cpu) + if int(i) < len(s) { + s[i] |= cpuBitsMask(cpu) + } +} + +// Clear removes cpu from the set s. +func (s *cpuSet) Clear(cpu uint) { + i := cpuBitsIndex(cpu) + if int(i) < len(s) { + s[i] &^= cpuBitsMask(cpu) + } +} + +// IsSet reports whether cpu is in the set s. +func (s *cpuSet) IsSet(cpu uint) bool { + i := cpuBitsIndex(cpu) + if int(i) < len(s) { + return s[i]&cpuBitsMask(cpu) != 0 + } + return false +} + +// Count returns the number of CPUs in the set s. +func (s *cpuSet) Count() uint { + var c uint = 0 + for _, b := range s { + c += uint(bits.OnesCount64(uint64(b))) + } + return c +} + +type CpuAffinityDeputy struct{} + +func (CpuAffinityDeputy) GetProcessId() uint { + return uint(syscall.Getpid()) +} + +func (CpuAffinityDeputy) GetThreadId() uint { + return uint(syscall.Gettid()) +} + +func (CpuAffinityDeputy) SetCpuAffinities(pid uint, cpus ...uint) error { + if len(cpus) == 0 { + return nil + } + mask := new(cpuSet) + mask.Zero() + for _, c := range cpus { + mask.Set(c) + } + return schedSetAffinity(pid, mask) +} + +func (CpuAffinityDeputy) GetCpuAffinities(pid uint) (map[uint]bool, error) { + mask := new(cpuSet) + mask.Zero() + err := schedGetAffinity(pid, mask) + if err != nil { + return nil, err + } + var res = make(map[uint]bool) + for i := 0; i < runtime.NumCPU(); i++ { + res[uint(i)] = mask.IsSet(uint(i)) + } + return res, nil +} + +func (CpuAffinityDeputy) IsImplemented() bool { + return true +} diff --git a/cmd/cpu_affinity/cpu_affinity_linux_test.go b/cmd/cpu_affinity/cpu_affinity_linux_test.go new file mode 100644 index 0000000..1eb7450 --- /dev/null +++ b/cmd/cpu_affinity/cpu_affinity_linux_test.go @@ -0,0 +1,88 @@ +//go:build linux +// +build linux + +package cpu_affinity + +import ( + "math" + "math/rand" + "os" + "runtime" + "slices" + "testing" +) + +func TestSchedGetAffinity(t *testing.T) { + pid := os.Getpid() + cpuAffDeputy := CpuAffinityDeputy{} + res, err := cpuAffDeputy.GetCpuAffinities(uint(pid)) + if err != nil { + t.Errorf("schedGetAffinity failed: %v", err) + return + } + var firstMask bool + for i := 0; i < runtime.NumCPU(); i++ { + val, ok := res[uint(i)] + if !ok { + t.Errorf("core index %d not found in GetCpuAffinities result", i) + return + } + if i == 0 { + firstMask = val + continue + } + + // it should be all true or all false + if firstMask != val { + t.Errorf("cpu %d mask should be %v(not set), but it is %v", i, firstMask, !firstMask) + } + } +} + +func genRandomCpuCore(num int) []uint { + numCpu := runtime.NumCPU() + if num > (numCpu - 1) { + num = numCpu - 1 + } + var uniqueCores []uint + for len(uniqueCores) != num { + core := uint(math.Floor(rand.Float64() * float64(numCpu))) + if slices.Contains(uniqueCores, core) { + continue + } + uniqueCores = append(uniqueCores, core) + } + return uniqueCores +} + +func 
TestSchedSetAffinity(t *testing.T) { + pid := os.Getpid() + mask := new(cpuSet) + mask.Zero() + modCpuCores := genRandomCpuCore(2) + cpuAffDeputy := CpuAffinityDeputy{} + err := cpuAffDeputy.SetCpuAffinities(uint(pid), modCpuCores...) + if err != nil { + t.Errorf("SetCpuAffinities failed: %v", err) + return + } + + resMap, errGet := cpuAffDeputy.GetCpuAffinities(uint(pid)) + if errGet != nil { + t.Errorf("schedGetAffinity failed: %v", errGet) + return + } + + for i := 0; i < runtime.NumCPU(); i++ { + expect := slices.Contains(modCpuCores, uint(i)) + val, ok := resMap[uint(i)] + if !ok { + t.Errorf("core index %d not found in GetCpuAffinities result", i) + return + } + if expect != val { + t.Errorf("cpu %d affinities not equal expect: %v", i, expect) + } + + } +} diff --git a/cmd/cpu_affinity/cpu_affinity_other.go b/cmd/cpu_affinity/cpu_affinity_other.go new file mode 100644 index 0000000..4a361f2 --- /dev/null +++ b/cmd/cpu_affinity/cpu_affinity_other.go @@ -0,0 +1,31 @@ +//go:build !linux +// +build !linux + +package cpu_affinity + +import ( + "fmt" + "runtime" +) + +type CpuAffinityDeputy struct{} + +func (CpuAffinityDeputy) GetProcessId() uint { + return 0 +} + +func (CpuAffinityDeputy) GetThreadId() uint { + return 0 +} + +func (CpuAffinityDeputy) SetCpuAffinities(pid uint, cpus ...uint) error { + return fmt.Errorf("SetCpuAffinities currently not support in this os: %s", runtime.GOOS) +} + +func (CpuAffinityDeputy) GetCpuAffinities(pid uint) (map[uint]bool, error) { + return nil, fmt.Errorf("GetCpuAffinities currently not support in this os: %s", runtime.GOOS) +} + +func (CpuAffinityDeputy) IsImplemented() bool { + return false +} diff --git a/cmd/cpu_affinity/os_shared.go b/cmd/cpu_affinity/os_shared.go new file mode 100644 index 0000000..6901b80 --- /dev/null +++ b/cmd/cpu_affinity/os_shared.go @@ -0,0 +1,9 @@ +package cpu_affinity + +type CpuAffinitySysCall interface { + GetProcessId() uint + GetThreadId() uint + IsImplemented() bool + SetCpuAffinities(pid uint, cpus ...uint) error + GetCpuAffinities(pid uint) (map[uint]bool, error) +} From 902f62644a578abf4513522b7a57c3da9cec16db Mon Sep 17 00:00:00 2001 From: Tomilla Date: Tue, 9 Jul 2024 15:15:48 +0800 Subject: [PATCH 3/9] feat(cpu_affinity): add definition and parse for cmd IntSlice args `cpu-affinities` --- cmd/parse.go | 32 ++++++++++++++++++++++++++++++++ cmd/root.go | 22 +++++++++++++++++----- main.go | 11 ++++++----- 3 files changed, 55 insertions(+), 10 deletions(-) diff --git a/cmd/parse.go b/cmd/parse.go index fe8aaaf..3eb8b4b 100644 --- a/cmd/parse.go +++ b/cmd/parse.go @@ -2,10 +2,12 @@ package cmd import ( "fmt" + "math" "runtime" "strconv" "time" + "eat/cmd/cpu_affinity" "github.com/pbnjay/memory" ) @@ -80,3 +82,33 @@ func parseTimeDuration(eta string) time.Duration { } return duration } + +// parseCpuAffinity validate cpu cores and check it cover request cores +func parseCpuAffinity(affCores []int, needCores float64) ([]uint, error) { + if len(affCores) == 0 { // user don't set cpu affinity, skip + return nil, nil + } + var cpuAffDeputy cpu_affinity.CpuAffinitySysCall = cpu_affinity.CpuAffinityDeputy{} + if !cpuAffDeputy.IsImplemented() { + return nil, fmt.Errorf("SetCpuAffinities currently not support in this os: %s", runtime.GOOS) + } + numCpu := runtime.NumCPU() + var validCpuAffList []uint + for _, cpu := range affCores { + if cpu < 0 { + continue + } + if cpu >= numCpu { + continue + } + validCpuAffList = append(validCpuAffList, uint(cpu)) + } + fullCores := int(math.Ceil(needCores)) + if len(validCpuAffList) < 
fullCores { + return nil, fmt.Errorf( + "each request cpu cores need specify its affinity, aff %d < req %d", + len(validCpuAffList), fullCores, + ) + } + return validCpuAffList, nil +} diff --git a/cmd/root.go b/cmd/root.go index 7114b4a..5fcc394 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "math" "os" "os/signal" "runtime" @@ -80,10 +81,11 @@ func eatFunction(cmd *cobra.Command, _ []string) { fmt.Printf("Have %dC%dG.\n", cpuCount, memoryBytes/1024/1024/1024) // Get the flags - c, _ := cmd.Flags().GetString("cpu_usage") - m, _ := cmd.Flags().GetString("memory_usage") - dl, _ := cmd.Flags().GetString("time_deadline") - r, _ := cmd.Flags().GetString("memory_refresh_interval") + c, _ := cmd.Flags().GetString("cpu-usage") + cAff, _ := cmd.Flags().GetIntSlice("cpu-affinities") + m, _ := cmd.Flags().GetString("memory-usage") + dl, _ := cmd.Flags().GetString("time-deadline") + r, _ := cmd.Flags().GetString("memory-refresh-interval") if c == "0" && m == "0m" { fmt.Println("Error: no cpu or memory usage specified") @@ -91,16 +93,26 @@ func eatFunction(cmd *cobra.Command, _ []string) { } cEat := parseEatCPUCount(c) + phyCores := runtime.NumCPU() + if int(math.Ceil(cEat)) > phyCores { + fmt.Printf("Error: user specified cpu cores exceed system physical cores(%d)\n", phyCores) + return + } mEat := parseEatMemoryBytes(m) dlEat := parseTimeDuration(dl) mAteRenew := parseTimeDuration(r) + cpuAffinitiesEat, err := parseCpuAffinity(cAff, cEat) + if err != nil { + fmt.Printf("Error: failed to parse cpu affinities, reason: %s\n", err.Error()) + return + } var wg sync.WaitGroup rootCtx, cancel := getRootContext(dlEat) defer cancel() fmt.Printf("Want to eat %2.3fCPU, %s Memory\n", cEat, m) eatMemory(rootCtx, &wg, mEat, mAteRenew) - eatCPU(rootCtx, &wg, cEat) + eatCPU(rootCtx, &wg, cEat, cpuAffinitiesEat) // in case that all sub goroutines are dead due to runtime error like memory not enough. // so the main goroutine automatically quit as well, don't wait user ctrl+c or context deadline. go func(wgp *sync.WaitGroup) { diff --git a/main.go b/main.go index 3c59653..aa887b3 100644 --- a/main.go +++ b/main.go @@ -11,12 +11,13 @@ func main() { rootCmd := cmd.RootCmd // Add global flags - rootCmd.PersistentFlags().StringP("cpu_usage", "c", "0", "How many cpu would you want eat") - rootCmd.PersistentFlags().StringP("memory_usage", "m", "0m", "How many memory would you want eat(GB)") + rootCmd.PersistentFlags().StringP("cpu-usage", "c", "0", "How many cpu would you want eat") + rootCmd.PersistentFlags().IntSlice("cpu-affinities", []int{}, "Which cpu core(s) would you want to eat? multiple cores separate by ',' (start from 0)") + rootCmd.PersistentFlags().StringP("memory-usage", "m", "0m", "How many memory would you want eat(GB)") // such as "300ms", "1.5h", "2h45m". 
(unit: "ns", "us" (or "µs"), "ms", "s", "m", "h") - rootCmd.PersistentFlags().StringP("time_deadline", "t", "0", "Deadline to quit eat process") - // same unit as time_deadline - rootCmd.PersistentFlags().StringP("memory_refresh_interval", "r", "5m", "How often to trigger a refresh to prevent the ate memory from being swapped out") + rootCmd.PersistentFlags().StringP("time-deadline", "t", "0", "Deadline to quit eat process") + // same unit as time-deadline + rootCmd.PersistentFlags().StringP("memory-refresh-interval", "r", "5m", "How often to trigger a refresh to prevent the ate memory from being swapped out") if err := rootCmd.Execute(); err != nil { fmt.Println(err) From 5c949a7700996c0f280b0c894139b2f9c175cad7 Mon Sep 17 00:00:00 2001 From: Tomilla Date: Tue, 9 Jul 2024 15:18:54 +0800 Subject: [PATCH 4/9] feat(cpu_affinity): lock os thread on start of cpu worker goroutines, then set cpu affinities for current threads --- cmd/cpu.go | 67 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 61 insertions(+), 6 deletions(-) diff --git a/cmd/cpu.go b/cmd/cpu.go index 70e826b..39e63c5 100644 --- a/cmd/cpu.go +++ b/cmd/cpu.go @@ -8,6 +8,8 @@ import ( "runtime" "sync" "time" + + "eat/cmd/cpu_affinity" ) func busyWork(ctx context.Context) { @@ -63,9 +65,41 @@ func partialBusyWork(ctx context.Context, ratio float64) { } } -func eatCPU(ctx context.Context, wg *sync.WaitGroup, c float64) { - fmt.Printf("Eating %-12s", "CPU...") +func setCpuAffWrapper(index int, cpuAffinitiesEat []uint) (func(), error) { + if len(cpuAffinitiesEat) == 0 { // user not set cpu affinities, skip... + return nil, nil + } + if len(cpuAffinitiesEat) <= index { // index error + return nil, fmt.Errorf("cpuAffinities: index out of range") + } + // LockOSThread wires the calling goroutine to its current operating system thread. + // The calling goroutine will **always execute** in that thread, and no other goroutine will execute in it, + // until the calling goroutine has made as many calls to [UnlockOSThread] as to LockOSThread. + // If the calling goroutine exits without unlocking the thread, the thread will be terminated. + // + // All init functions are run on the startup thread. Calling LockOSThread + // from an init function will cause the main function to be invoked on + // that thread. + // + // A goroutine should **call LockOSThread before** calling OS services or non-Go library functions + // that depend on per-thread state. + runtime.LockOSThread() // IMPORTANT!! 
Only limit the system thread affinity, not the whole go program process + var cpuAffDeputy cpu_affinity.CpuAffinitySysCall = cpu_affinity.CpuAffinityDeputy{} + if !cpuAffDeputy.IsImplemented() { + return nil, fmt.Errorf("SetCpuAffinities currently not support in this os: %s", runtime.GOOS) + } + tid := cpuAffDeputy.GetThreadId() + err := cpuAffDeputy.SetCpuAffinities(uint(tid), cpuAffinitiesEat[index]) + if err != nil { + return nil, err + } + return func() { + runtime.UnlockOSThread() + }, nil +} +func eatCPU(ctx context.Context, wg *sync.WaitGroup, c float64, cpuAffinitiesEat []uint) { + fmt.Printf("Eating %-12s", "CPU...") runtime.GOMAXPROCS(runtime.NumCPU()) fullCores := int(c) @@ -74,19 +108,40 @@ func eatCPU(ctx context.Context, wg *sync.WaitGroup, c float64) { // eat full cores for i := 0; i < fullCores; i++ { wg.Add(1) - go func() { + go func(idx int) { defer wg.Done() + workerName := fmt.Sprintf("%d@fullCore", idx) + cleanup, err := setCpuAffWrapper(idx, cpuAffinitiesEat) + if err != nil { + fmt.Printf("Error: %s failed to set cpu affinities, reason: %s\n", workerName, err.Error()) + return + } + if cleanup != nil { + fmt.Printf("CpuWorker %s: CPU affinities set to %d\n", workerName, cpuAffinitiesEat[idx]) + defer cleanup() + } busyWork(ctx) - }() + }(i) } // eat partial core if partialCoreRatio > 0 { + partialCoreIdx := fullCores // the last core affinity wg.Add(1) - go func() { + go func(idx int) { defer wg.Done() + workerName := fmt.Sprintf("%d@partCore", idx) + cleanup, err := setCpuAffWrapper(partialCoreIdx, cpuAffinitiesEat) + if err != nil { + fmt.Printf("Error: %s failed to set cpu affinities, reason: %s\n", workerName, err.Error()) + return + } + if cleanup != nil { + fmt.Printf("CpuWorker %s: CPU affinities set to %d\n", workerName, cpuAffinitiesEat[idx]) + defer cleanup() + } partialBusyWork(ctx, partialCoreRatio) - }() + }(partialCoreIdx) } fmt.Printf("Ate %2.3f CPU cores\n", c) From f8b3e907d85773e18a06ef6d95289064937c5870 Mon Sep 17 00:00:00 2001 From: Tomilla Date: Tue, 9 Jul 2024 15:52:58 +0800 Subject: [PATCH 5/9] doc(cpu_affinity): add illustrations and examples about how to set cpu core affinities for eat --- README.md | 123 +++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 95 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 060156f..23db8d7 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,10 @@ Developer will encounter the need to quickly occupy CPU and memory, I am also de - [x] Support `eat -c 35%` and `eat -m 35%` - [x] support gracefully exit: capture process signal SIGINT(2), SIGTERM(15) - [x] support deadline: `-t` specify the duration of eat progress. such as "300ms", "1.5h", "2h45m". 
(unit: "ns", "us" (or "µs"), "ms", "s", "m", "h") -- [ ] CPU Affinity +- [x] CPU Affinity + - [x] Linux + - [ ] macOs + - [ ] Windows - [x] Memory read/write periodically , prevent memory from being swapped out - [ ] Dynamic adjustment of CPU and memory usage - [ ] Eat GPU @@ -16,17 +19,42 @@ Developer will encounter the need to quickly occupy CPU and memory, I am also de # Usage ```shell -eat -c 4 # eating 4 CPU core -eat -c 35% # eating 35% CPU core (CPU count * 35%) -eat -c 100% # eating all CPU core -eat -m 4g # eating 4GB memory -eat -m 20m # eating 20MB memory -eat -m 35% # eating 35% memory (total memory * 35%) -eat -m 100% # eating all memory -eat -c 2.5 -m 1.5g # eating 2.5 CPU core and 1.5GB memory -eat -c 3 -m 200m # eating 3 CPU core and 200MB memory -eat -c 100% -m 100% # eating all CPU core and memory -eat -c 100% -t 1h # eating all CPU core and quit after 1hour +$ ./eat.out --help +A monster that eats cpu and memory 🦕 + +Usage: + eat [flags] + +Flags: + --cpu-affinities ints Which cpu core(s) would you want to eat? multiple cores separate by ',' + -c, --cpu-usage string How many cpu would you want eat (default "0") + -h, --help help for eat + -r, --memory-refresh-interval string How often to trigger a refresh to prevent the ate memory from being swapped out (default "5m") + -m, --memory-usage string How many memory would you want eat(GB) (default "0m") + -t, --time-deadline string Deadline to quit eat process (default "0") +``` + +```shell +eat -c 4 # eating 4 CPU core +eat -c 35% # eating 35% CPU core (CPU count * 35%) +eat -c 100% # eating all CPU core +eat -m 4g # eating 4GB memory +eat -m 20m # eating 20MB memory +eat -m 35% # eating 35% memory (total memory * 35%) +eat -m 100% # eating all memory +eat -c 2.5 -m 1.5g # eating 2.5 CPU core and 1.5GB memory +eat -c 3 -m 200m # eating 3 CPU core and 200MB memory +eat -c 100% -m 100% # eating all CPU core and memory +eat -c 100% -t 1h # eating all CPU core and quit after 1hour + +eat --cpu-affinities 0 -c 1 # only run eat in core #0 (first core) +eat --cpu-affinities 0,1 -c 2 # run eat in core #0,1 (first and second core) +eat --cpu-affinities 0,1,2,3 -c 100% # error case: in-enough cpu affinities +# Have 8C15G. +# Error: failed to parse cpu affinities, reason: each request cpu cores need specify its affinity, aff 4 < req 8 +eat --cpu-affinities 0,1,2,3 -c 50% # run eat in core #0,1,2,3 (first to fourth core) +eat --cpu-affinities 0,1,2,3,4,5,6,7 -c 92% # run eat in all core(full of 7 cores, part of last core) + ``` > Tips: @@ -35,11 +63,16 @@ eat -c 100% -t 1h # eating all CPU core and quit after 1hour # Build ```shell -go build -o eat +# Linux +GOOS=linux GOARCH=amd64 go build -trimpath -ldflags "-s -w" -v -o eat +# macOs +GOOS=darwin GOARCH=amd64 go build -trimpath -ldflags "-s -w" -v -o eat_mac +# Windows +GOOS=windwos GOARCH=amd64 go build -trimpath -ldflags "-s -w" -v -o eat_win ``` # 介绍 -我是一个吃CPU和内存的怪兽🦕 +我是一只吃CPU和内存的怪兽🦕 开发者们经常会遇到需要快速占用 CPU 和内存的需求,我也是。所以我开发了一个名为 `eat` 的小工具来快速占用指定数量的 CPU 和内存。 @@ -48,25 +81,54 @@ go build -o eat - [x] 支持`eat -c 35%`和`eat -m 35%` - [x] 支持优雅退出: 捕捉进程 SIGINT, SIGTERM 信号实现有序退出 - [x] 支持时限: `-t` 限制吃资源的时间,示例 "300ms", "1.5h", "2h45m". 
(单位: "ns", "us" (or "µs"), "ms", "s", "m", "h") -- [ ] CPU亲和性 +- [x] CPU亲和性 + - [X] Linux + - [ ] macOS + - [ ] Windows - [x] 定期内存读写,防止内存被交换出去 - [ ] 动态调整CPU和内存使用 - [ ] 吃GPU # 使用 + ```shell -eat -c 4 # 占用4个CPU核 -eat -c 35% # 占用35%CPU核(CPU核数 * 35%) -eat -c 100% # 占用所有CPU核 -eat -m 4g # 占用4GB内存 -eat -m 20m # 占用20MB内存 -eat -m 35% # 占用35%内存(总内存 * 35%) -eat -m 100% # 占用所有内存 -eat -c 2.5 -m 1.5g # 占用2.5个CPU核和1.5GB内存 -eat -c 3 -m 200m # 占用3个CPU核和200MB内存 -eat -c 100% -m 100% # 占用所有CPU核和内存 -eat -c 100% -t 1h # 占用所有CPU核并在一小时后退出 +$ ./eat.out --help +我是一只吃CPU和内存的怪兽🦕 + +使用方法 + eat [flags] + +Flags: + --cpu-affinities 整数 指定在几个核心上运行 Eat,多个核心索引之间用 ',' 分隔,索引从 0 开始。 + -c, --cpu-usage 字符串 你想吃掉多少个 CPU(默认为 '0')? + -h,--help 输出 eat 的帮助 + -r, --memory-refresh-interval 字符串 每隔多长时间触发一次刷新,以防止被吃掉的内存被交换出去(默认值为 '5m') + -m, --memory-usage 字符串 你希望吃掉多少内存(GB)(默认值 '0m') + -t,--time-deadline 字符串 退出 eat 进程的截止日期(默认为 "0')。 +``` + +```shell +eat -c 4 # 占用4个CPU核 +eat -c 35% # 占用35%CPU核(CPU核数 * 35%) +eat -c 100% # 占用所有CPU核 +eat -m 4g # 占用4GB内存 +eat -m 20m # 占用20MB内存 +eat -m 35% # 占用35%内存(总内存 * 35%) +eat -m 100% # 占用所有内存 +eat -c 2.5 -m 1.5g # 占用2.5个CPU核和1.5GB内存 +eat -c 3 -m 200m # 占用3个CPU核和200MB内存 +eat -c 100% -m 100% # 占用所有CPU核和内存 +eat -c 100% -t 1h # 占用所有CPU核并在一小时后退出 + +eat --cpu-affinities 0 -c 1 # 只占用 #0 第一个核心 +eat --cpu-affinities 0,1 -c 2 # 占用 #0,1 前两个个核心 +eat --cpu-affinities 0,1,2,3 -c 100% # 错误参数: 每个请求核都要指定对应的亲和性核心 +# Have 8C15G. +# Error: failed to parse cpu affinities, reason: each request cpu cores need specify its affinity, aff 4 < phy 8 +# 出错: 无法解析 CPU 亲和性, 原因: 每个请求核都要指定对应的亲和性核心, 亲和核 4 < 请求核 8 +eat --cpu-affinities 0,1,2,3 -c 50% # 占用前4个核心 +eat --cpu-affinities 0,1,2,3,4,5,6,7 -c 92% # 占用前8个核心 (全部7个核心,部分的最后一个核心) ``` > 提示: @@ -75,5 +137,10 @@ eat -c 100% -t 1h # 占用所有CPU核并在一小时后退出 # 构建 ```shell -go build -o eat -``` \ No newline at end of file +# Linux +GOOS=linux GOARCH=amd64 go build -trimpath -ldflags "-s -w" -v -o eat +# macOs +GOOS=darwin GOARCH=amd64 go build -trimpath -ldflags "-s -w" -v -o eat_mac +# Windows +GOOS=windwos GOARCH=amd64 go build -trimpath -ldflags "-s -w" -v -o eat_win +``` From 3decc1e8c7c108ee515cdf446c454d9ef5c93935 Mon Sep 17 00:00:00 2001 From: Tomilla Date: Tue, 9 Jul 2024 16:20:45 +0800 Subject: [PATCH 6/9] ci(go_mod): golang 1.20 don't support generic and not include slices package, so upgrade it to 1.22 --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 1193ac5..4b946c9 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module eat -go 1.20 +go 1.22 require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 From ae20dd7038fc132a242b0384603688ee295f9236 Mon Sep 17 00:00:00 2001 From: Tomilla Date: Tue, 9 Jul 2024 16:22:51 +0800 Subject: [PATCH 7/9] ci(gh_action): golang 1.20 don't support generic and not include slices package, so upgrade it to 1.22 --- .github/workflows/go.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index d395869..5db6719 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -19,7 +19,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: '1.20' + go-version: '1.22' - name: Build run: go build -v ./... 
From 4ce992ac48b44b1c1b4e669f4d9c22e261e6033a Mon Sep 17 00:00:00 2001
From: Tomilla
Date: Tue, 9 Jul 2024 21:44:56 +0800
Subject: [PATCH 8/9] refactor(cpu_worker): extract same pattern of starting cpu workers (full/part) to reduce similar code

---
 cmd/constant.go | 16 ++++++++++++++
 cmd/cpu.go      | 56 +++++++++++++++++++++++--------------------------
 2 files changed, 42 insertions(+), 30 deletions(-)

diff --git a/cmd/constant.go b/cmd/constant.go
index 4368add..92211e3 100644
--- a/cmd/constant.go
+++ b/cmd/constant.go
@@ -1,12 +1,28 @@
 package cmd
 
 import (
+	"fmt"
 	"time"
 )
 
+// contextKey is a value for use with context.WithValue.
+// It's used as a pointer, so it fits in an interface{} without allocation.
+type contextKey struct {
+	name      string
+	valueType string
+}
+
+func (k *contextKey) String() string {
+	return fmt.Sprintf("worker context value: name %s, type %s", k.name, k.valueType)
+}
+
 const (
 	intervalCpuWorkerCheckContextDone = 10000
 	durationMemoryWorkerDoRefresh     = 5 * time.Minute
 	durationEachSignCheck             = 100 * time.Millisecond
 	chunkSizeMemoryWorkerEachAllocate = 128 * 1024 * 1024 // 128MB
 )
+
+var (
+	cpuWorkerPartialCoreRatioContextKey = &contextKey{"partialCoreRatio", "float64"}
+)
diff --git a/cmd/cpu.go b/cmd/cpu.go
index 39e63c5..f1b9a0d 100644
--- a/cmd/cpu.go
+++ b/cmd/cpu.go
@@ -28,11 +28,16 @@ func busyWork(ctx context.Context) {
 	}
 }
 
-func partialBusyWork(ctx context.Context, ratio float64) {
+func partialBusyWork(ctx context.Context) {
 	const (
 		oneCycle  = 10 * time.Microsecond
 		precision = 1000
 	)
+	ratio, ok := ctx.Value(cpuWorkerPartialCoreRatioContextKey).(float64)
+	if !ok {
+		log.Printf("partialBusyWork: partial core ratio context value not set or has wrong type")
+		return
+	}
 	// round busy and idle percent
 	// case 1: ratio 0.8
 	//  busy 0.8 idle 0.19999999999999996
@@ -65,6 +70,20 @@ func partialBusyWork(ctx context.Context, ratio float64) {
 	}
 }
 
+func startEatCpuWorker(ctx context.Context, wg *sync.WaitGroup, workerName string, idx int, workerFunc func(ctx context.Context), cpuAffinitiesEat []uint) {
+	defer wg.Done()
+	cleanup, err := setCpuAffWrapper(idx, cpuAffinitiesEat)
+	if err != nil {
+		fmt.Printf("Error: %s failed to set cpu affinities, reason: %s\n", workerName, err.Error())
+		return
+	}
+	if cleanup != nil {
+		fmt.Printf("Worker %s: CPU affinities set to %d\n", workerName, cpuAffinitiesEat[idx])
+		defer cleanup()
+	}
+	workerFunc(ctx)
+}
+
 func setCpuAffWrapper(index int, cpuAffinitiesEat []uint) (func(), error) {
 	if len(cpuAffinitiesEat) == 0 { // user not set cpu affinities, skip...
return nil, nil @@ -108,40 +127,17 @@ func eatCPU(ctx context.Context, wg *sync.WaitGroup, c float64, cpuAffinitiesEat // eat full cores for i := 0; i < fullCores; i++ { wg.Add(1) - go func(idx int) { - defer wg.Done() - workerName := fmt.Sprintf("%d@fullCore", idx) - cleanup, err := setCpuAffWrapper(idx, cpuAffinitiesEat) - if err != nil { - fmt.Printf("Error: %s failed to set cpu affinities, reason: %s\n", workerName, err.Error()) - return - } - if cleanup != nil { - fmt.Printf("CpuWorker %s: CPU affinities set to %d\n", workerName, cpuAffinitiesEat[idx]) - defer cleanup() - } - busyWork(ctx) - }(i) + workerName := fmt.Sprintf("%d@fullCore", i) + go startEatCpuWorker(ctx, wg, workerName, i, busyWork, cpuAffinitiesEat) } // eat partial core if partialCoreRatio > 0 { - partialCoreIdx := fullCores // the last core affinity + i := fullCores // the last core affinity wg.Add(1) - go func(idx int) { - defer wg.Done() - workerName := fmt.Sprintf("%d@partCore", idx) - cleanup, err := setCpuAffWrapper(partialCoreIdx, cpuAffinitiesEat) - if err != nil { - fmt.Printf("Error: %s failed to set cpu affinities, reason: %s\n", workerName, err.Error()) - return - } - if cleanup != nil { - fmt.Printf("CpuWorker %s: CPU affinities set to %d\n", workerName, cpuAffinitiesEat[idx]) - defer cleanup() - } - partialBusyWork(ctx, partialCoreRatio) - }(partialCoreIdx) + workerName := fmt.Sprintf("%d@partCore", i) + childCtx := context.WithValue(ctx, cpuWorkerPartialCoreRatioContextKey, partialCoreRatio) + go startEatCpuWorker(childCtx, wg, workerName, i, partialBusyWork, cpuAffinitiesEat) } fmt.Printf("Ate %2.3f CPU cores\n", c) From f4fc7bf819be8607870df71cb5c5f6f4eaf40e1f Mon Sep 17 00:00:00 2001 From: Tomilla Date: Wed, 10 Jul 2024 17:24:36 +0800 Subject: [PATCH 9/9] refactor(cpu_affinity): import unix system call source code from golang.org x project for better *nix compatibility --- cmd/cpu.go | 2 +- cmd/cpu_affinity/cpu_affinity_linux.go | 123 ++------------------ cmd/cpu_affinity/cpu_affinity_linux_test.go | 12 +- cmd/cpu_affinity/os_shared.go | 4 + cmd/parse.go | 2 +- go.mod | 1 + go.sum | 2 + 7 files changed, 25 insertions(+), 121 deletions(-) diff --git a/cmd/cpu.go b/cmd/cpu.go index f1b9a0d..2d0251c 100644 --- a/cmd/cpu.go +++ b/cmd/cpu.go @@ -103,7 +103,7 @@ func setCpuAffWrapper(index int, cpuAffinitiesEat []uint) (func(), error) { // A goroutine should **call LockOSThread before** calling OS services or non-Go library functions // that depend on per-thread state. runtime.LockOSThread() // IMPORTANT!! Only limit the system thread affinity, not the whole go program process - var cpuAffDeputy cpu_affinity.CpuAffinitySysCall = cpu_affinity.CpuAffinityDeputy{} + var cpuAffDeputy = cpu_affinity.NewCpuAffinityDeputy() if !cpuAffDeputy.IsImplemented() { return nil, fmt.Errorf("SetCpuAffinities currently not support in this os: %s", runtime.GOOS) } diff --git a/cmd/cpu_affinity/cpu_affinity_linux.go b/cmd/cpu_affinity/cpu_affinity_linux.go index 9280f2d..70efbd6 100644 --- a/cmd/cpu_affinity/cpu_affinity_linux.go +++ b/cmd/cpu_affinity/cpu_affinity_linux.go @@ -4,121 +4,12 @@ package cpu_affinity import ( - "math/bits" "runtime" "syscall" - "unsafe" -) - -const ( - cpuSetSize = 0x400 - nCpuBits = 0x40 - cpuSetLen = cpuSetSize / nCpuBits -) - -type cpuMaskT uint64 - -// cpuSet use array to represents a CPU affinity mask. 
-type cpuSet [cpuSetLen]cpuMaskT -const ( - enoAGAIN = syscall.Errno(0xb) - enoINVAL = syscall.Errno(0x16) - enoNOENT = syscall.Errno(0x2) + "golang.org/x/sys/unix" ) -// Do the interface allocations only once for common -// Errno values. -var ( - errEAGAIN error = syscall.EAGAIN - errEINVAL error = syscall.EINVAL - errENOENT error = syscall.ENOENT -) - -// errnoErr returns common boxed Errno values, to prevent allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case enoAGAIN: - return errEAGAIN - case enoINVAL: - return errEINVAL - case enoNOENT: - return errENOENT - } - return e -} - -func schedAffinity(trap uintptr, pid uint, set *cpuSet) error { - _, _, e := syscall.RawSyscall(trap, uintptr(pid), unsafe.Sizeof(*set), uintptr(unsafe.Pointer(set))) - if e != 0 { - return errnoErr(e) - } - return nil -} - -// schedGetAffinity gets the CPU affinity mask of the thread specified by pid. -// If pid is 0 the calling thread is used. -func schedGetAffinity(pid uint, set *cpuSet) error { - return schedAffinity(syscall.SYS_SCHED_GETAFFINITY, pid, set) -} - -// schedSetAffinity sets the CPU affinity mask of the thread specified by pid. -// If pid is 0 the calling thread is used. -func schedSetAffinity(pid uint, set *cpuSet) error { - return schedAffinity(syscall.SYS_SCHED_SETAFFINITY, pid, set) -} - -// Zero clears the set s, so that it contains no CPUs. -func (s *cpuSet) Zero() { - for i := range s { - s[i] = 0 - } -} - -func cpuBitsIndex(cpu uint) uint { - return cpu / nCpuBits -} - -func cpuBitsMask(cpu uint) cpuMaskT { - return cpuMaskT(1 << (uint(cpu) % nCpuBits)) -} - -// Set adds cpu to the set s. -func (s *cpuSet) Set(cpu uint) { - i := cpuBitsIndex(cpu) - if int(i) < len(s) { - s[i] |= cpuBitsMask(cpu) - } -} - -// Clear removes cpu from the set s. -func (s *cpuSet) Clear(cpu uint) { - i := cpuBitsIndex(cpu) - if int(i) < len(s) { - s[i] &^= cpuBitsMask(cpu) - } -} - -// IsSet reports whether cpu is in the set s. -func (s *cpuSet) IsSet(cpu uint) bool { - i := cpuBitsIndex(cpu) - if int(i) < len(s) { - return s[i]&cpuBitsMask(cpu) != 0 - } - return false -} - -// Count returns the number of CPUs in the set s. 
-func (s *cpuSet) Count() uint { - var c uint = 0 - for _, b := range s { - c += uint(bits.OnesCount64(uint64(b))) - } - return c -} - type CpuAffinityDeputy struct{} func (CpuAffinityDeputy) GetProcessId() uint { @@ -133,24 +24,24 @@ func (CpuAffinityDeputy) SetCpuAffinities(pid uint, cpus ...uint) error { if len(cpus) == 0 { return nil } - mask := new(cpuSet) + mask := new(unix.CPUSet) mask.Zero() for _, c := range cpus { - mask.Set(c) + mask.Set(int(c)) } - return schedSetAffinity(pid, mask) + return unix.SchedSetaffinity(int(pid), mask) } func (CpuAffinityDeputy) GetCpuAffinities(pid uint) (map[uint]bool, error) { - mask := new(cpuSet) + mask := new(unix.CPUSet) mask.Zero() - err := schedGetAffinity(pid, mask) + err := unix.SchedGetaffinity(int(pid), mask) if err != nil { return nil, err } var res = make(map[uint]bool) for i := 0; i < runtime.NumCPU(); i++ { - res[uint(i)] = mask.IsSet(uint(i)) + res[uint(i)] = mask.IsSet(i) } return res, nil } diff --git a/cmd/cpu_affinity/cpu_affinity_linux_test.go b/cmd/cpu_affinity/cpu_affinity_linux_test.go index 1eb7450..c390848 100644 --- a/cmd/cpu_affinity/cpu_affinity_linux_test.go +++ b/cmd/cpu_affinity/cpu_affinity_linux_test.go @@ -10,6 +10,8 @@ import ( "runtime" "slices" "testing" + + "golang.org/x/sys/unix" ) func TestSchedGetAffinity(t *testing.T) { @@ -57,9 +59,14 @@ func genRandomCpuCore(num int) []uint { func TestSchedSetAffinity(t *testing.T) { pid := os.Getpid() - mask := new(cpuSet) + mask := new(unix.CPUSet) mask.Zero() - modCpuCores := genRandomCpuCore(2) + var modCpuCores []uint + if runtime.NumCPU() > 2 { + modCpuCores = genRandomCpuCore(2) + } else { + modCpuCores = []uint{0} + } cpuAffDeputy := CpuAffinityDeputy{} err := cpuAffDeputy.SetCpuAffinities(uint(pid), modCpuCores...) if err != nil { @@ -83,6 +90,5 @@ func TestSchedSetAffinity(t *testing.T) { if expect != val { t.Errorf("cpu %d affinities not equal expect: %v", i, expect) } - } } diff --git a/cmd/cpu_affinity/os_shared.go b/cmd/cpu_affinity/os_shared.go index 6901b80..7865d04 100644 --- a/cmd/cpu_affinity/os_shared.go +++ b/cmd/cpu_affinity/os_shared.go @@ -7,3 +7,7 @@ type CpuAffinitySysCall interface { SetCpuAffinities(pid uint, cpus ...uint) error GetCpuAffinities(pid uint) (map[uint]bool, error) } + +func NewCpuAffinityDeputy() CpuAffinitySysCall { + return CpuAffinityDeputy{} +} diff --git a/cmd/parse.go b/cmd/parse.go index 3eb8b4b..e8d24d2 100644 --- a/cmd/parse.go +++ b/cmd/parse.go @@ -88,7 +88,7 @@ func parseCpuAffinity(affCores []int, needCores float64) ([]uint, error) { if len(affCores) == 0 { // user don't set cpu affinity, skip return nil, nil } - var cpuAffDeputy cpu_affinity.CpuAffinitySysCall = cpu_affinity.CpuAffinityDeputy{} + var cpuAffDeputy = cpu_affinity.NewCpuAffinityDeputy() if !cpuAffDeputy.IsImplemented() { return nil, fmt.Errorf("SetCpuAffinities currently not support in this os: %s", runtime.GOOS) } diff --git a/go.mod b/go.mod index 4b946c9..f7d44c0 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.22 require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/spf13/cobra v1.8.1 + golang.org/x/sys v0.22.0 ) require ( diff --git a/go.sum b/go.sum index d93ee87..31bccff 100644 --- a/go.sum +++ b/go.sum @@ -8,5 +8,7 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
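Taken together, PATCH 2 and PATCH 9 leave `cmd/cpu_affinity` exposing only the small `CpuAffinitySysCall` interface plus the `NewCpuAffinityDeputy` constructor. A minimal sketch of driving that interface directly, outside of `eat` itself; the choice of core 0 is only an example, and on non-Linux builds the `IsImplemented` guard short-circuits just as `setCpuAffWrapper` does in `cmd/cpu.go`:

```go
package main

import (
	"fmt"
	"runtime"

	"eat/cmd/cpu_affinity"
)

func main() {
	// Pin this goroutine to its OS thread first, as cmd/cpu.go does,
	// so the affinity set below applies to the thread we keep running on.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	deputy := cpu_affinity.NewCpuAffinityDeputy()
	if !deputy.IsImplemented() {
		fmt.Printf("cpu affinity is not implemented on %s\n", runtime.GOOS)
		return
	}

	// Restrict the current thread to core 0 (example value).
	tid := deputy.GetThreadId()
	if err := deputy.SetCpuAffinities(tid, 0); err != nil {
		fmt.Println("set affinity:", err)
		return
	}

	// Read the mask back: one entry per logical CPU, true when the CPU is allowed.
	mask, err := deputy.GetCpuAffinities(tid)
	if err != nil {
		fmt.Println("get affinity:", err)
		return
	}
	fmt.Println(mask)
}
```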