diff --git a/charts/falco/templates/_helpers.tpl b/charts/falco/templates/_helpers.tpl index fdc349271..dbfba751b 100644 --- a/charts/falco/templates/_helpers.tpl +++ b/charts/falco/templates/_helpers.tpl @@ -185,7 +185,7 @@ we just disable the sycall source. */}} {{- define "falco.configSyscallSource" -}} {{- $userspaceDisabled := true -}} -{{- $gvisorDisabled := (not .Values.gvisor.enabled) -}} +{{- $gvisorDisabled := (ne .Values.driver.kind "gvisor") -}} {{- $driverDisabled := (not .Values.driver.enabled) -}} {{- if or (has "-u" .Values.extra.args) (has "--userspace" .Values.extra.args) -}} {{- $userspaceDisabled = false -}} @@ -214,8 +214,8 @@ be temporary and will stay here until we move this logic to the falcoctl tool. set -o nounset set -o pipefail - root={{ .Values.gvisor.runsc.root }} - config={{ .Values.gvisor.runsc.config }} + root={{ .Values.driver.gvisor.runsc.root }} + config={{ .Values.driver.gvisor.runsc.config }} echo "* Configuring Falco+gVisor integration...". # Check if gVisor is configured on the node. @@ -240,12 +240,12 @@ be temporary and will stay here until we move this logic to the falcoctl tool. echo "* Falco+gVisor correctly configured." exit 0 volumeMounts: - - mountPath: /host{{ .Values.gvisor.runsc.path }} + - mountPath: /host{{ .Values.driver.gvisor.runsc.path }} name: runsc-path readOnly: true - - mountPath: /host{{ .Values.gvisor.runsc.root }} + - mountPath: /host{{ .Values.driver.gvisor.runsc.root }} name: runsc-root - - mountPath: /host{{ .Values.gvisor.runsc.config }} + - mountPath: /host{{ .Values.driver.gvisor.runsc.config }} name: runsc-config - mountPath: /gvisor-config name: falco-gvisor-config @@ -370,4 +370,26 @@ be temporary and will stay here until we move this logic to the falcoctl tool. 
{{- $_ := set .Values.falcoctl.config.artifact.install "refs" ((append .Values.falcoctl.config.artifact.install.refs .Values.collectors.kubernetes.pluginRef) | uniq)}} {{- $_ = set .Values.falcoctl.config.artifact "allowedTypes" ((append .Values.falcoctl.config.artifact.allowedTypes "plugin") | uniq)}} {{- end -}} +{{- end -}} + +{{/* +Based on the user input it populates the driver configuration in the falco config map. +*/}} +{{- define "falco.engineConfiguration" -}} +{{- if .Values.driver.enabled}} +{{- if eq .Values.driver.kind "kmod" -}} +{{- $kmodConfig := dict "kind" "kmod" "kmod" (dict "buf_size_preset" .Values.driver.kmod.bufSizePreset "drop_failed_exit" .Values.driver.kmod.dropFailedExit) -}} +{{- $_ := set .Values.falco "engine" $kmodConfig -}} +{{- else if eq .Values.driver.kind "ebpf" -}} +{{- $ebpfConfig := dict "kind" "ebpf" "ebpf" (dict "buf_size_preset" .Values.driver.ebpf.bufSizePreset "drop_failed_exit" .Values.driver.ebpf.dropFailedExit "probe" .Values.driver.ebpf.path) -}} +{{- $_ := set .Values.falco "engine" $ebpfConfig -}} +{{- else if eq .Values.driver.kind "modern_ebpf" -}} +{{- $ebpfConfig := dict "kind" "modern_ebpf" "modern_ebpf" (dict "buf_size_preset" .Values.driver.modernEbpf.bufSizePreset "drop_failed_exit" .Values.driver.modernEbpf.dropFailedExit "cpus_for_each_buffer" .Values.driver.modernEbpf.cpusForEachBuffer) -}} +{{- $_ := set .Values.falco "engine" $ebpfConfig -}} +{{- else if eq .Values.driver.kind "gvisor" -}} +{{- $root := printf "/host%s/k8s.io" .Values.driver.gvisor.runsc.root -}} +{{- $gvisorConfig := dict "kind" "gvisor" "gvisor" (dict "config" "/gvisor/config/pod-init.json" "root" $root) -}} +{{- $_ := set .Values.falco "engine" $gvisorConfig -}} +{{- end -}} +{{- end -}} {{- end -}} \ No newline at end of file diff --git a/charts/falco/templates/configmap.yaml b/charts/falco/templates/configmap.yaml index f244e5435..118c7f86b 100644 --- a/charts/falco/templates/configmap.yaml +++ 
b/charts/falco/templates/configmap.yaml @@ -9,4 +9,5 @@ data: falco.yaml: |- {{- include "falco.falcosidekickConfig" . }} {{- include "k8smeta.configuration" . -}} + {{- include "falco.engineConfiguration" . -}} {{- toYaml .Values.falco | nindent 4 }} diff --git a/charts/falco/templates/pod-template.tpl b/charts/falco/templates/pod-template.tpl index f493cae51..def9f1051 100644 --- a/charts/falco/templates/pod-template.tpl +++ b/charts/falco/templates/pod-template.tpl @@ -45,7 +45,7 @@ spec: imagePullSecrets: {{- toYaml . | nindent 4 }} {{- end }} - {{- if .Values.gvisor.enabled }} + {{- if eq .Values.driver.kind "gvisor" }} hostNetwork: true hostPID: true {{- end }} @@ -59,15 +59,6 @@ spec: {{- include "falco.securityContext" . | nindent 8 }} args: - /usr/bin/falco - {{- if and .Values.driver.enabled (eq .Values.driver.kind "modern-bpf") }} - - --modern-bpf - {{- end }} - {{- if .Values.gvisor.enabled }} - - --gvisor-config - - /gvisor-config/pod-init.json - - --gvisor-root - - /host{{ .Values.gvisor.runsc.root }}/k8s.io - {{- end }} {{- include "falco.configSyscallSource" . | indent 8 }} {{- with .Values.collectors }} {{- if .enabled }} @@ -151,7 +142,7 @@ spec: name: etc-fs readOnly: true {{- end }} - {{- if and .Values.driver.enabled (eq .Values.driver.kind "module") }} + {{- if and .Values.driver.enabled (eq .Values.driver.kind "kmod") }} - mountPath: /host/dev name: dev-fs readOnly: true @@ -199,13 +190,13 @@ spec: {{- with .Values.mounts.volumeMounts }} {{- toYaml . 
| nindent 8 }} {{- end }} - {{- if .Values.gvisor.enabled }} + {{- if eq .Values.driver.kind "gvisor" }} - mountPath: /usr/local/bin/runsc name: runsc-path readOnly: true - - mountPath: /host{{ .Values.gvisor.runsc.root }} + - mountPath: /host{{ .Values.driver.gvisor.runsc.root }} name: runsc-root - - mountPath: /host{{ .Values.gvisor.runsc.config }} + - mountPath: /host{{ .Values.driver.gvisor.runsc.config }} name: runsc-config - mountPath: /gvisor-config name: falco-gvisor-config @@ -217,10 +208,10 @@ spec: {{- with .Values.extra.initContainers }} {{- toYaml . | nindent 4 }} {{- end }} - {{- if and .Values.gvisor.enabled }} + {{- if eq .Values.driver.kind "gvisor" }} {{- include "falco.gvisor.initContainer" . | nindent 4 }} {{- end }} - {{- if and .Values.driver.enabled (ne .Values.driver.kind "modern-bpf") }} + {{- if and .Values.driver.enabled (and (ne .Values.driver.kind "modern_ebpf") (ne .Values.driver.kind "gvisor")) }} {{- if.Values.driver.loader.enabled }} {{- include "falco.driverLoader.initContainer" . | nindent 4 }} {{- end }} @@ -251,7 +242,7 @@ spec: hostPath: path: /etc {{- end }} - {{- if and .Values.driver.enabled (eq .Values.driver.kind "module") }} + {{- if and .Values.driver.enabled (eq .Values.driver.kind "kmod") }} - name: dev-fs hostPath: path: /dev @@ -288,17 +279,17 @@ spec: hostPath: path: /proc {{- end }} - {{- if .Values.gvisor.enabled }} + {{- if eq .Values.driver.kind "gvisor" }} - name: runsc-path hostPath: - path: {{ .Values.gvisor.runsc.path }}/runsc + path: {{ .Values.driver.gvisor.runsc.path }}/runsc type: File - name: runsc-root hostPath: - path: {{ .Values.gvisor.runsc.root }} + path: {{ .Values.driver.gvisor.runsc.root }} - name: runsc-config hostPath: - path: {{ .Values.gvisor.runsc.config }} + path: {{ .Values.driver.gvisor.runsc.config }} type: File - name: falco-gvisor-config emptyDir: {} @@ -348,10 +339,13 @@ spec: - name: {{ .Chart.Name }}-driver-loader image: {{ include "falco.driverLoader.image" . 
}} imagePullPolicy: {{ .Values.driver.loader.initContainer.image.pullPolicy }} - {{- with .Values.driver.loader.initContainer.args }} args: + {{- with .Values.driver.loader.initContainer.args }} {{- toYaml . | nindent 4 }} {{- end }} + {{- if eq .Values.driver.kind "ebpf" }} + - ebpf + {{- end }} {{- with .Values.driver.loader.initContainer.resources }} resources: {{- toYaml . | nindent 4 }} @@ -359,7 +353,7 @@ spec: securityContext: {{- if .Values.driver.loader.initContainer.securityContext }} {{- toYaml .Values.driver.loader.initContainer.securityContext | nindent 4 }} - {{- else if eq .Values.driver.kind "module" }} + {{- else if eq .Values.driver.kind "kmod" }} privileged: true {{- end }} volumeMounts: @@ -392,7 +386,7 @@ spec: {{- define "falco.securityContext" -}} {{- $securityContext := dict -}} {{- if .Values.driver.enabled -}} - {{- if eq .Values.driver.kind "module" -}} + {{- if eq .Values.driver.kind "kmod" -}} {{- $securityContext := set $securityContext "privileged" true -}} {{- end -}} {{- if eq .Values.driver.kind "ebpf" -}} @@ -402,8 +396,8 @@ spec: {{- $securityContext := set $securityContext "privileged" true -}} {{- end -}} {{- end -}} - {{- if eq .Values.driver.kind "modern-bpf" -}} - {{- if .Values.driver.modern_bpf.leastPrivileged -}} + {{- if eq .Values.driver.kind "modern_ebpf" -}} + {{- if .Values.driver.modernEbpf.leastPrivileged -}} {{- $securityContext := set $securityContext "capabilities" (dict "add" (list "BPF" "SYS_RESOURCE" "PERFMON" "SYS_PTRACE")) -}} {{- else -}} {{- $securityContext := set $securityContext "privileged" true -}} diff --git a/charts/falco/values.yaml b/charts/falco/values.yaml index 6d275c2ad..2204fd196 100644 --- a/charts/falco/values.yaml +++ b/charts/falco/values.yaml @@ -165,13 +165,20 @@ driver: # -- Set it to false if you want to deploy Falco without the drivers. # Always set it to false when using Falco with plugins. enabled: true - # -- Tell Falco which driver to use. 
Available options: module (kernel driver), ebpf (eBPF probe), modern-bpf (modern eBPF probe). - kind: module + # -- kind tells Falco which driver to use. Available options: kmod (kernel driver), ebpf (eBPF probe), modern_ebpf (modern eBPF probe). + kind: kmod + # -- kmod holds the configuration for the kernel module. + kmod: + # -- bufSizePreset determines the size of the shared space between Falco and its drivers. + # This shared space serves as a temporary storage for syscall events. + bufSizePreset: 4 + # -- dropFailedExit if set true drops failed system call exit events before pushing them to userspace. + dropFailedExit: false # -- Configuration section for ebpf driver. ebpf: - # -- Path where the eBPF probe is located. It comes handy when the probe have been installed in the nodes using tools other than the init + # -- path where the eBPF probe is located. It comes handy when the probe have been installed in the nodes using tools other than the init # container deployed with the chart. - path: + path: "${HOME}/.falco/falco-bpf.o" # -- Needed to enable eBPF JIT at runtime for performance reasons. # Can be skipped if eBPF JIT is enabled from outside the container hostNetwork: false @@ -182,13 +189,37 @@ driver: # Usually 'kernel.perf_event_paranoid>2' means that you cannot use 'CAP_PERFMON' and you should fallback to 'CAP_SYS_ADMIN', but the behavior changes across different distros. # Read more on that here: https://falco.org/docs/event-sources/kernel/#least-privileged-mode-1 leastPrivileged: false - # -- Configuration section for modern bpf driver. - modern_bpf: + # -- bufSizePreset determines the size of the shared space between Falco and its drivers. + # This shared space serves as a temporary storage for syscall events. + bufSizePreset: 4 + # -- dropFailedExit if set true drops failed system call exit events before pushing them to userspace. 
+ dropFailedExit: false + modernEbpf: # -- Constrain Falco with capabilities instead of running a privileged container. # Ensure the modern bpf driver is enabled (i.e., setting the `driver.kind` option to `modern-bpf`). # Capabilities used: {CAP_SYS_RESOURCE, CAP_BPF, CAP_PERFMON, CAP_SYS_PTRACE}. # Read more on that here: https://falco.org/docs/event-sources/kernel/#least-privileged-mode-2 leastPrivileged: false + # -- bufSizePreset determines the size of the shared space between Falco and its drivers. + # This shared space serves as a temporary storage for syscall events. + bufSizePreset: 4 + # -- dropFailedExit if set true drops failed system call exit events before pushing them to userspace. + dropFailedExit: false + # -- cpusForEachBuffer is the index that controls how many CPUs to assign to a single syscall buffer. + cpusForEachBuffer: 2 + + # -- Gvisor configuration. Based on your system you need to set the appropriate values. + # Please, remember to add pod tolerations and affinities in order to schedule the Falco pods in the gVisor enabled nodes. + gvisor: + # -- Runsc container runtime configuration. Falco needs to interact with it in order to intercept the activity of the sandboxed pods. + runsc: + # -- Absolute path of the `runsc` binary in the k8s nodes. + path: /home/containerd/usr/local/sbin + # -- Absolute path of the root directory of the `runsc` container runtime. It is of vital importance for Falco since `runsc` stores there the information of the workloads handled by it. + root: /run/containerd/runsc + # -- Absolute path of the `runsc` configuration file, used by Falco to set its configuration and make `gVisor` aware of its presence. + config: /run/containerd/runsc/config.toml + # -- Configuration for the Falco init container. loader: # -- Enable/disable the init container.
If driver.kind == "module" you must at least set `privileged: true`. securityContext: {} -# -- Gvisor configuration. Based on your system you need to set the appropriate values. -# Please, rembember to add pod tolerations and affinities in order to schedule the Falco pods in the gVisor enabled nodes. -gvisor: - # -- Set it to true if you want to deploy Falco with gVisor support. - enabled: false - # -- Runsc container runtime configuration. Falco needs to interact with it in order to intercept the activity of the sandboxed pods. - runsc: - # -- Absolute path of the `runsc` binary in the k8s nodes. - path: /home/containerd/usr/local/sbin - # -- Absolute path of the root directory of the `runsc` container runtime. It is of vital importance for Falco since `runsc` stores there the information of the workloads handled by it; - root: /run/containerd/runsc - # -- Absolute path of the `runsc` configuration file, used by Falco to set its configuration and make aware `gVisor` of its presence. - config: /run/containerd/runsc/config.toml - # Collectors for data enrichment (scenario requirement) collectors: # -- Enable/disable all the metadata collectors. @@ -713,6 +730,10 @@ falco: client_key: "/etc/falco/certs/client/client.key" # -- Whether to echo server answers to stdout echo: false + # -- compress_uploads whether to compress data sent to http endpoint. + compress_uploads: false + # -- keep_alive whether to keep alive the connection. + keep_alive: false # [Stable] `program_output` # @@ -1061,13 +1082,22 @@ falco: # number of CPUs to determine overall usage. Memory metrics are provided in raw # units (`kb` for `RSS`, `PSS` and `VSZ` or `bytes` for `container_memory_used`) # and can be uniformly converted to megabytes (MB) using the - # `convert_memory_to_mb` functionality. In environments such as Kubernetes, it - # is crucial to track Falco's container memory usage. 
To customize the path of - # the memory metric file, you can create an environment variable named - # `FALCO_CGROUP_MEM_PATH` and set it to the desired file path. By default, Falco - # uses the file `/sys/fs/cgroup/memory/memory.usage_in_bytes` to monitor - # container memory usage, which aligns with Kubernetes' - # `container_memory_working_set_bytes` metric. + # `convert_memory_to_mb` functionality. In environments such as Kubernetes when + # deployed as daemonset, it is crucial to track Falco's container memory usage. + # To customize the path of the memory metric file, you can create an environment + # variable named `FALCO_CGROUP_MEM_PATH` and set it to the desired file path. By + # default, Falco uses the file `/sys/fs/cgroup/memory/memory.usage_in_bytes` to + # monitor container memory usage, which aligns with Kubernetes' + # `container_memory_working_set_bytes` metric. Finally, we emit the overall host + # CPU and memory usages, along with the total number of processes and open file + # descriptors (fds) on the host, obtained from the proc file system unrelated to + # Falco's monitoring. These metrics help assess Falco's usage in relation to the + # server's workload intensity. + # + # `state_counters_enabled`: Emit counters related to Falco's state engine, including + # added, removed threads or file descriptors (fds), and failed lookup, store, or + # retrieve actions in relation to Falco's underlying process cache table (threadtable). + # We also log the number of currently cached containers if applicable. # # `kernel_event_counters_enabled`: Emit kernel side event and drop counters, as # an alternative to `syscall_event_drops`, but with some differences. 
These @@ -1097,6 +1127,7 @@ falco: output_rule: true # output_file: /tmp/falco_stats.jsonl resource_utilization_enabled: true + state_counters_enabled: true kernel_event_counters_enabled: true libbpf_stats_enabled: true convert_memory_to_mb: true @@ -1107,68 +1138,6 @@ falco: # Falco performance tuning (advanced) # ####################################### - # [Stable] `syscall_buf_size_preset` - # - # --- [Description] - # - # -- The syscall buffer index determines the size of the shared space between Falco - # and its drivers. This shared space serves as a temporary storage for syscall - # events, allowing them to be transferred from the kernel to the userspace - # efficiently. The buffer size for each online CPU is determined by the buffer - # index, and each CPU has its own dedicated buffer. Adjusting this index allows - # you to control the overall size of the syscall buffers. - # - # --- [Usage] - # - # The index 0 is reserved, and each subsequent index corresponds to an - # increasing size in bytes. For example, index 1 corresponds to a size of 1 MB, - # index 2 corresponds to 2 MB, and so on: - # - # [(*), 1 MB, 2 MB, 4 MB, 8 MB, 16 MB, 32 MB, 64 MB, 128 MB, 256 MB, 512 MB] - # ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ - # | | | | | | | | | | | - # 0 1 2 3 4 5 6 7 8 9 10 - # - # - # The buffer dimensions in bytes are determined by the following requirements: - # (1) a power of 2. - # (2) a multiple of your system_page_dimension. - # (3) greater than `2 * (system_page_dimension). - # - # The buffer size constraints may limit the usability of certain indexes. Let's - # consider an example to illustrate this: - # - # If your system has a page size of 1 MB, the first available buffer size would - # be 4 MB because 2 MB is exactly equal to 2 * (system_page_size), which is not - # sufficient as we require more than 2 * (system_page_size). In this example, it - # is evident that if the page size is 1 MB, the first index that can be used is 3. 
- # - # However, in most cases, these constraints do not pose a limitation, and all - # indexes from 1 to 10 can be used. You can check your system's page size using - # the Falco `--page-size` command-line option. - # - # --- [Suggestions] - # - # The buffer size was previously fixed at 8 MB (index 4). You now have the - # option to adjust the size based on your needs. Increasing the size, such as to - # 16 MB (index 5), can reduce syscall drops in heavy production systems, but may - # impact performance. Decreasing the size can speed up the system but may - # increase syscall drops. It's important to note that the buffer size is mapped - # twice in the process' virtual memory, so a buffer of 8 MB will result in a 16 - # MB area in virtual memory. Use this parameter with caution and only modify it - # if the default size is not suitable for your use case. - syscall_buf_size_preset: 4 - - # [Experimental] `syscall_drop_failed_exit` - # - # -- Enabling this option in Falco allows it to drop failed system call exit events - # in the kernel driver before pushing them onto the ring buffer. This - # optimization can result in lower CPU usage and more efficient utilization of - # the ring buffer, potentially reducing the number of event losses. However, it - # is important to note that enabling this option also means sacrificing some - # visibility into the system. - syscall_drop_failed_exit: false - # [Experimental] `base_syscalls`, use with caution, read carefully # # --- [Description] @@ -1284,86 +1253,6 @@ falco: custom_set: [] repair: false - # [Experimental] `modern_bpf.cpus_for_each_syscall_buffer`, modern_bpf only - # - # --- [Description] - # - # -- The modern_bpf driver in Falco utilizes the new BPF ring buffer, which has a - # different memory footprint compared to the current BPF driver that uses the - # perf buffer. 
The Falco core maintainers have discussed the differences and - # their implications, particularly in Kubernetes environments where limits need - # to be carefully set to avoid interference with the Falco daemonset deployment - # from the OOM killer. Based on guidance received from the kernel mailing list, - # it is recommended to assign multiple CPUs to one buffer instead of allocating - # a buffer for each CPU individually. This helps optimize resource allocation - # and prevent potential issues related to memory usage. - # - # This is an index that controls how many CPUs you want to assign to a single - # syscall buffer (ring buffer). By default, for modern_bpf every syscall buffer - # is associated to 2 CPUs, so the mapping is 1:2. The modern BPF probe allows - # you to choose different mappings, for example, changing the value to `1` - # results in a 1:1 mapping and would mean one syscall buffer for each CPU (this - # is the default for the `bpf` driver). - # - # --- [Usage] - # - # You can choose an index from 0 to MAX_NUMBER_ONLINE_CPUs to set the dimension - # of the syscall buffers. The value 0 represents a single buffer shared among - # all online CPUs. It serves as a flexible option when the exact number of - # online CPUs is unknown. Here's an example to illustrate this: - # - # Consider a system with 7 online CPUs: - # - # CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) - # - # - `1` means a syscall buffer for each CPU so 7 buffers - # - # CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) - # | | | | | | | - # BUFFERs 0 1 2 3 4 5 6 - # - # - `2` (Default value) means a syscall buffer for each CPU pair, so 4 buffers - # - # CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) - # | | | | | | | - # BUFFERs 0 0 1 1 2 2 3 - # - # Please note that in this example, there are 4 buffers in total. Three of the - # buffers are associated with pairs of CPUs, while the last buffer is mapped to - # a single CPU. 
This arrangement is necessary because we have an odd number of - # CPUs. - # - # - `0` or `MAX_NUMBER_ONLINE_CPUs` mean a syscall buffer shared between all - # CPUs, so 1 buffer - # - # CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) - # | | | | | | | - # BUFFERs 0 0 0 0 0 0 0 - # - # Moreover, you have the option to combine this parameter with - # `syscall_buf_size_preset` index. For instance, you can create a large shared - # syscall buffer of 512 MB (using syscall_buf_size_preset=10) that is - # allocated among all the online CPUs. - # - # --- [Suggestions] - # - # The default choice of index 2 (one syscall buffer for each CPU pair) was made - # because the modern bpf probe utilizes a different memory allocation strategy - # compared to the other two drivers (bpf and kernel module). However, you have - # the flexibility to experiment and find the optimal configuration for your - # system. - # - # When considering a fixed syscall_buf_size_preset and a fixed buffer dimension: - # - Increasing this configs value results in lower number of buffers and you can - # speed up your system and reduce memory usage - # - However, using too few buffers may increase contention in the kernel, - # leading to a slowdown. - # - # If you have low event throughputs and minimal drops, reducing the number of - # buffers (higher `cpus_for_each_syscall_buffer`) can lower the memory footprint. - modern_bpf: - cpus_for_each_syscall_buffer: 2 - ################################################# # Falco cloud orchestration systems integration # #################################################